Commit 7d5969da authored by mlippautz, committed by Commit bot

Reland "[heap] Add page evacuation mode for new->new"

Adds an evacuation mode that allows moving pages within new space without
copying objects.

Basic idea:
a) Move the page within new space.
b) Sweep the page to make it iterable and process its ArrayBuffers.
c) Finish sweeping at the latest before the next scavenge.

The threshold is currently 70% live bytes, i.e., the same threshold we use
to identify fragmented pages.

This reverts commit 2263ee9b.

BUG=chromium:581412
LOG=N
CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_tsan_rel,v8_mac64_asan_rel

Review-Url: https://codereview.chromium.org/2078863002
Cr-Commit-Position: refs/heads/master@{#37104}
parent 11eb9d22
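
The 70% threshold is controlled by flags; the tests below compute it as
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100. For
illustration, the promotion decision boils down to a live-bytes check along
these lines (a minimal sketch; ShouldMovePage is a hypothetical name, and the
real predicate lives in the collapsed mark-compact.cc diff):

  // Sketch: a new-space page qualifies for page-level evacuation
  // (new->old or new->new) when at least FLAG_page_promotion_threshold
  // percent (70 by default) of its allocatable area holds live bytes.
  bool ShouldMovePage(Page* p) {
    const intptr_t threshold_bytes =
        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
    return FLAG_page_promotion && p->LiveBytes() >= threshold_bytes;
  }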
src/heap/heap.cc:
@@ -1611,6 +1611,8 @@ void Heap::Scavenge() {
   // Pause the inline allocation steps.
   PauseAllocationObserversScope pause_observers(this);
+  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
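  // [Note] Step (c) of the commit message: lazy sweeping of pages moved
  // within new space must be finished before the next scavenge starts, so
  // that every new-space page is iterable again.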
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
 #endif
......
src/heap/mark-compact.cc (diff collapsed)
src/heap/mark-compact.h:
@@ -408,6 +408,7 @@ class MarkCompactCollector {
   enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
   enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+  enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
   enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
   enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -416,6 +417,7 @@ class MarkCompactCollector {
   template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
             SkipListRebuildingMode skip_list_mode,
+            FreeListRebuildingMode free_list_mode,
             FreeSpaceTreatmentMode free_space_mode>
   static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
@@ -434,11 +436,12 @@ class MarkCompactCollector {
   int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                          int max_pages = 0);
-  int ParallelSweepPage(Page* page, PagedSpace* space);
+  int ParallelSweepPage(Page* page, AllocationSpace identity);
   void StartSweeping();
   void StartSweepingHelper(AllocationSpace space_to_start);
   void EnsureCompleted();
+  void EnsureNewSpaceCompleted();
   bool IsSweepingCompleted();
   void SweepOrWaitUntilSweepingCompleted(Page* page);
@@ -791,7 +794,6 @@ class MarkCompactCollector {
   void SweepSpaces();
   void EvacuateNewSpacePrologue();
-  void EvacuateNewSpaceEpilogue();
   void EvacuatePagesInParallel();
......
src/heap/spaces.cc:
@@ -1386,7 +1386,6 @@ void NewSpace::TearDown() {
   from_space_.TearDown();
 }
 
 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
@@ -1432,6 +1431,48 @@ void NewSpace::Shrink() {
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
+bool NewSpace::Rebalance() {
+  CHECK(heap()->promotion_queue()->is_empty());
+  // Order here is important to make use of the page pool.
+  return to_space_.EnsureCurrentCapacity() &&
+         from_space_.EnsureCurrentCapacity();
+}
+
+bool SemiSpace::EnsureCurrentCapacity() {
+  if (is_committed()) {
+    const int expected_pages = current_capacity_ / Page::kPageSize;
+    int actual_pages = 0;
+    Page* current_page = anchor()->next_page();
+    while (current_page != anchor()) {
+      actual_pages++;
+      current_page = current_page->next_page();
+      if (actual_pages > expected_pages) {
+        Page* to_remove = current_page->prev_page();
+        // Make sure we don't overtake the actual top pointer.
+        CHECK_NE(to_remove, current_page_);
+        to_remove->Unlink();
+        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+            to_remove);
+      }
+    }
+    while (actual_pages < expected_pages) {
+      actual_pages++;
+      current_page =
+          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+              Page::kAllocatableMemory, this, executable());
+      if (current_page == nullptr) return false;
+      DCHECK_NOT_NULL(current_page);
+      current_page->InsertAfter(anchor());
+      Bitmap::Clear(current_page);
+      current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+                             Page::kCopyAllFlags);
+      heap()->CreateFillerObjectAt(current_page->area_start(),
+                                   current_page->area_size(),
+                                   ClearRecordedSlots::kNo);
+    }
+  }
+  return true;
+}
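// [Note] On the interplay of the two functions above: NewSpace::Rebalance()
// deliberately shrinks to_space_ first, so the pages it releases via
// Free<MemoryAllocator::kPooledAndQueue> land in the allocator's page pool,
// from which EnsureCurrentCapacity() can cheaply re-acquire them via
// AllocatePage<MemoryAllocator::kPooled> when growing from_space_ back.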
void LocalAllocationBuffer::Close() {
if (IsValid()) {
@@ -1488,7 +1529,6 @@ void NewSpace::ResetAllocationInfo() {
   Address old_top = allocation_info_.top();
   to_space_.Reset();
   UpdateAllocationInfo();
-  pages_used_ = 0;
   // Clear all mark-bits in the to-space.
   NewSpacePageIterator it(&to_space_);
   while (it.has_next()) {
@@ -1534,7 +1574,6 @@ bool NewSpace::AddFreshPage() {
   int remaining_in_page = static_cast<int>(limit - top);
   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
-  pages_used_++;
   UpdateAllocationInfo();
   return true;
@@ -1872,23 +1911,21 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
 void SemiSpace::Reset() {
   DCHECK_NE(anchor_.next_page(), &anchor_);
   current_page_ = anchor_.next_page();
+  pages_used_ = 0;
 }
 
-bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
-  // TODO(mlippautz): We do not have to get a new page here when the semispace
-  // is uncommitted later on.
-  Page* new_page = heap()->memory_allocator()->AllocatePage(
-      Page::kAllocatableMemory, this, executable());
-  if (new_page == nullptr) return false;
-  Bitmap::Clear(new_page);
-  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
-  new_page->set_next_page(old_page->next_page());
-  new_page->set_prev_page(old_page->prev_page());
-  old_page->next_page()->set_prev_page(new_page);
-  old_page->prev_page()->set_next_page(new_page);
-  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
-                               ClearRecordedSlots::kNo);
-  return true;
-}
+void SemiSpace::RemovePage(Page* page) {
+  if (current_page_ == page) {
+    current_page_ = page->prev_page();
+  }
+  page->Unlink();
+}
+
+void SemiSpace::PrependPage(Page* page) {
+  page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
+  page->set_owner(this);
+  page->InsertAfter(anchor());
+  pages_used_++;
+}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
......
src/heap/spaces.h:
@@ -425,6 +425,10 @@ class MemoryChunk {
     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been moved
     // from new to old space during evacuation.
     PAGE_NEW_OLD_PROMOTION,
 
+    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+    // within the new space during evacuation.
+    PAGE_NEW_NEW_PROMOTION,
+
     // A black page has all mark bits set to 1 (black). A black page currently
     // cannot be iterated because it is not swept. Moreover live bytes are also
     // not updated.
@@ -2408,7 +2412,8 @@ class SemiSpace : public Space {
         committed_(false),
         id_(semispace),
         anchor_(this),
-        current_page_(nullptr) {}
+        current_page_(nullptr),
+        pages_used_(0) {}
 
   inline bool Contains(HeapObject* o);
   inline bool Contains(Object* o);
@@ -2431,6 +2436,8 @@ class SemiSpace : public Space {
   // than the current capacity.
   bool ShrinkTo(int new_capacity);
 
+  bool EnsureCurrentCapacity();
+
   // Returns the start address of the first page of the space.
   Address space_start() {
     DCHECK_NE(anchor_.next_page(), anchor());
@@ -2439,6 +2446,7 @@ class SemiSpace : public Space {
   Page* first_page() { return anchor_.next_page(); }
   Page* current_page() { return current_page_; }
+  int pages_used() { return pages_used_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2451,15 +2459,19 @@ class SemiSpace : public Space {
   bool AdvancePage() {
     Page* next_page = current_page_->next_page();
-    if (next_page == anchor()) return false;
+    if (next_page == anchor() || pages_used_ == max_pages()) {
+      return false;
+    }
     current_page_ = next_page;
+    pages_used_++;
     return true;
   }
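  // [Note] Counting pages_used_ lets AdvancePage() stop at the committed
  // capacity even when extra pages were prepended by new->new promotion;
  // the surplus pages are trimmed later by NewSpace::Rebalance().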
   // Resets the space to using the first page.
   void Reset();
 
-  bool ReplaceWithEmptyPage(Page* page);
+  void RemovePage(Page* page);
+  void PrependPage(Page* page);
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
@@ -2513,6 +2525,7 @@ class SemiSpace : public Space {
   void RewindPages(Page* start, int num_pages);
 
   inline Page* anchor() { return &anchor_; }
+  inline int max_pages() { return current_capacity_ / Page::kPageSize; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2520,7 +2533,8 @@ class SemiSpace : public Space {
   // The currently committed space capacity.
   int current_capacity_;
 
-  // The maximum capacity that can be used by this space.
+  // The maximum capacity that can be used by this space. A space cannot grow
+  // beyond that size.
   int maximum_capacity_;
 
   // The minimum capacity for the space. A space cannot shrink below this size.
@@ -2534,9 +2548,11 @@ class SemiSpace : public Space {
   Page anchor_;
   Page* current_page_;
+  int pages_used_;
 
-  friend class SemiSpaceIterator;
   friend class NewSpace;
   friend class NewSpacePageIterator;
+  friend class SemiSpaceIterator;
 };
@@ -2606,7 +2622,6 @@ class NewSpace : public Space {
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        pages_used_(0),
         top_on_previous_step_(0),
         allocated_histogram_(nullptr),
         promoted_histogram_(nullptr) {}
@@ -2638,7 +2653,7 @@ class NewSpace : public Space {
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * Page::kAllocatableMemory +
+    return to_space_.pages_used() * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
@@ -2715,12 +2730,14 @@ class NewSpace : public Space {
     return static_cast<size_t>(allocated);
   }
 
-  bool ReplaceWithEmptyPage(Page* page) {
+  // This method is called after flipping the semispace.
+  void MovePageFromSpaceToSpace(Page* page) {
     DCHECK(page->InFromSpace());
-    return from_space_.ReplaceWithEmptyPage(page);
+    from_space_.RemovePage(page);
+    to_space_.PrependPage(page);
   }
 
+  bool Rebalance();
+
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
@@ -2873,7 +2890,6 @@ class NewSpace : public Space {
   SemiSpace to_space_;
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
-  int pages_used_;
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
......
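
Taken together, the plumbing above supports the following evacuation sequence
(a minimal sketch; the actual driver sits in the collapsed mark-compact.cc
diff, and promoted_pages is an illustrative stand-in for however the
evacuator collects the tagged pages):

  // Sketch: after evacuation has tagged near-full new-space pages, the
  // collector swaps the semispaces and moves the tagged pages back into
  // (the new) to-space without copying their objects.
  new_space->Flip();
  for (Page* p : promoted_pages) {
    DCHECK(p->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
    new_space->MovePageFromSpaceToSpace(p);  // RemovePage + PrependPage
  }
  // Restore both semispaces to their committed capacity, reusing the pool.
  CHECK(new_space->Rebalance());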
test/cctest/BUILD.gn:
@@ -70,6 +70,7 @@ v8_executable("cctest") {
     "heap/test-incremental-marking.cc",
     "heap/test-lab.cc",
     "heap/test-mark-compact.cc",
+    "heap/test-page-promotion.cc",
     "heap/test-spaces.cc",
     "interpreter/bytecode-expectations-printer.cc",
     "interpreter/bytecode-expectations-printer.h",
......
test/cctest/cctest.gyp:
@@ -114,6 +114,7 @@
     'heap/test-incremental-marking.cc',
     'heap/test-lab.cc',
     'heap/test-mark-compact.cc',
+    'heap/test-page-promotion.cc',
     'heap/test-spaces.cc',
     'libsampler/test-sampler.cc',
     'print-extension.cc',
......
test/cctest/heap/test-heap.cc:
@@ -6561,56 +6561,6 @@ HEAP_TEST(Regress589413) {
   heap->CollectGarbage(OLD_SPACE);
 }
 
-UNINITIALIZED_TEST(PagePromotion) {
-  FLAG_page_promotion = true;
-  FLAG_page_promotion_threshold = 0;  // %
-  i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
-  // We cannot optimize for size as we require a new space with more than one
-  // page.
-  i::FLAG_optimize_for_size = false;
-  // Set max_semi_space_size because it could've been initialized by an
-  // implication of optimize_for_size.
-  i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
-  v8::Isolate::CreateParams create_params;
-  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
-  v8::Isolate* isolate = v8::Isolate::New(create_params);
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  {
-    v8::Isolate::Scope isolate_scope(isolate);
-    v8::HandleScope handle_scope(isolate);
-    v8::Context::New(isolate)->Enter();
-    Heap* heap = i_isolate->heap();
-
-    // Clean up any left over objects from cctest initialization.
-    heap->CollectAllGarbage();
-    heap->CollectAllGarbage();
-
-    std::vector<Handle<FixedArray>> handles;
-    heap::SimulateFullSpace(heap->new_space(), &handles);
-    heap->CollectGarbage(NEW_SPACE);
-    CHECK_GT(handles.size(), 0u);
-    // First object in handle should be on the first page.
-    Handle<FixedArray> first_object = handles.front();
-    Page* first_page = Page::FromAddress(first_object->address());
-    // The age mark should not be on the first page.
-    CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
-    // To perform a sanity check on live bytes we need to mark the heap.
-    heap::SimulateIncrementalMarking(heap, true);
-    // Sanity check that the page meets the requirements for promotion.
-    const int threshold_bytes =
-        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
-    CHECK_GE(first_page->LiveBytes(), threshold_bytes);
-    // Actual checks: The page is in new space first, but is moved to old space
-    // during a full GC.
-    CHECK(heap->new_space()->ContainsSlow(first_page->address()));
-    CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
-    heap->CollectGarbage(OLD_SPACE);
-    CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
-    CHECK(heap->old_space()->ContainsSlow(first_page->address()));
-  }
-}
TEST(Regress598319) {
// This test ensures that no white objects can cross the progress bar of large
// objects during incremental marking. It checks this by using Shift() during
......
test/cctest/heap/test-page-promotion.cc (new file):
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/array-buffer-tracker.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"

namespace {

v8::Isolate* NewIsolateForPagePromotion() {
  i::FLAG_page_promotion = true;
  i::FLAG_page_promotion_threshold = 0;  // %
  i::FLAG_min_semi_space_size = 8 * (i::Page::kPageSize / i::MB);
  // We cannot optimize for size as we require a new space with more than one
  // page.
  i::FLAG_optimize_for_size = false;
  // Set max_semi_space_size because it could've been initialized by an
  // implication of optimize_for_size.
  i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  return isolate;
}

}  // namespace

namespace v8 {
namespace internal {

UNINITIALIZED_TEST(PagePromotion_NewToOld) {
  v8::Isolate* isolate = NewIsolateForPagePromotion();
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    std::vector<Handle<FixedArray>> handles;
    heap::SimulateFullSpace(heap->new_space(), &handles);
    heap->CollectGarbage(NEW_SPACE);
    CHECK_GT(handles.size(), 0u);
    // First object in handles should be on the first page.
    Handle<FixedArray> first_object = handles.front();
    Page* first_page = Page::FromAddress(first_object->address());
    // To perform a sanity check on live bytes we need to mark the heap.
    heap::SimulateIncrementalMarking(heap, true);
    // Sanity check that the page meets the requirements for promotion.
    const int threshold_bytes =
        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
    CHECK_GE(first_page->LiveBytes(), threshold_bytes);
    // Actual checks: The page is in new space first, but is moved to old
    // space during a full GC.
    CHECK(heap->new_space()->ContainsSlow(first_page->address()));
    CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
    heap::GcAndSweep(heap, OLD_SPACE);
    CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
    CHECK(heap->old_space()->ContainsSlow(first_page->address()));
  }
}

UNINITIALIZED_TEST(PagePromotion_NewToNew) {
  v8::Isolate* isolate = NewIsolateForPagePromotion();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    std::vector<Handle<FixedArray>> handles;
    heap::SimulateFullSpace(heap->new_space(), &handles);
    CHECK_GT(handles.size(), 0u);
    // Last object in handles should definitely be on the last page, which
    // does not contain the age mark.
    Handle<FixedArray> last_object = handles.back();
    Page* to_be_promoted_page = Page::FromAddress(last_object->address());
    CHECK(to_be_promoted_page->Contains(last_object->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
    heap::GcAndSweep(heap, OLD_SPACE);
    CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
    CHECK(to_be_promoted_page->Contains(last_object->address()));
  }
}
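// [Note] The checks above capture the essence of new->new promotion: the page
// is moved wholesale, so after the full GC the object is still live at the
// same address, on the same page, and that page is still part of to-space.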
UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
  // Test makes sure JSArrayBuffer backing stores are still tracked after
  // new-to-new promotion.
  v8::Isolate* isolate = NewIsolateForPagePromotion();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    // Fill the current page which potentially contains the age mark.
    heap::FillCurrentPage(heap->new_space());
    // Allocate a buffer we would like to check against.
    Handle<JSArrayBuffer> buffer =
        i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
    JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100);
    std::vector<Handle<FixedArray>> handles;
    // Simulate a full space, filling the interesting page with live objects.
    heap::SimulateFullSpace(heap->new_space(), &handles);
    CHECK_GT(handles.size(), 0u);
    // The first object in handles should be on the same page as the allocated
    // buffer, which does not contain the age mark.
    Handle<FixedArray> first_object = handles.front();
    Page* to_be_promoted_page = Page::FromAddress(first_object->address());
    CHECK(to_be_promoted_page->Contains(first_object->address()));
    CHECK(to_be_promoted_page->Contains(buffer->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
    heap::GcAndSweep(heap, OLD_SPACE);
    CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
    CHECK(to_be_promoted_page->Contains(first_object->address()));
    CHECK(to_be_promoted_page->Contains(buffer->address()));
    CHECK(ArrayBufferTracker::IsTracked(*buffer));
  }
}
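// [Note] This exercises step (b) of the commit message: sweeping a moved page
// must re-process its JSArrayBuffers so their backing stores stay tracked;
// ArrayBufferTracker::IsTracked(*buffer) verifies that after the GC.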
} // namespace internal
} // namespace v8