Commit 2263ee9b authored by mlippautz, committed by Commit bot

Revert of [heap] Add page evacuation mode for new->new (patchset #18 id:440001 of https://codereview.chromium.org/1957323003/ )

Reason for revert:
Fragmentation of LABs (local allocation buffers) could result in memory usage (pages) increasing instead of shrinking.
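
For context, a back-of-the-envelope illustration of the accounting problem (a hedged sketch; the page size and page count are assumptions for the sketch, only the 70% threshold comes from the CL description quoted below): copying evacuation compacts survivors onto fresh pages, while moving a fragmented page wholesale pins the whole page.

#include <cstdio>

// Hypothetical illustration of why moving fragmented pages can grow the
// page count. Numbers are assumptions for the sketch, not V8 constants.
int main() {
  const long kPageSize = 512 * 1024;  // assumed page size
  const long kLiveFraction = 70;      // promotion threshold from the CL text
  const long kPages = 10;             // pages considered for new->new moves

  // Copying evacuation compacts surviving objects onto fresh pages.
  const long live_bytes = kPages * kPageSize * kLiveFraction / 100;
  const long pages_if_copied = (live_bytes + kPageSize - 1) / kPageSize;

  // Moving pages wholesale keeps every partially filled page alive.
  const long pages_if_moved = kPages;

  std::printf("copied: %ld pages, moved: %ld pages\n", pages_if_copied,
              pages_if_moved);  // copied: 7 pages, moved: 10 pages
  return 0;
}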

BUG=chromium:620320
LOG=N

Original issue's description:
> [heap] Add page evacuation mode for new->new
>
> Adds an evacuation mode that allows moving pages within new space without
> copying objects.
>
> Basic idea:
> a) Move page within new space
> b) Sweep page to make iterable and process ArrayBuffers
> c) Finish sweep till next scavenge
>
> Threshold is currently 70% live bytes, i.e., the same threshold we use
> to determine fragmented pages.
>
> BUG=chromium:581412
> LOG=N
> CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_tsan_rel,v8_mac64_asan_rel
>
> Committed: https://crrev.com/49b23201671b25092a3c22eb85783f39b95a5f87
> Cr-Commit-Position: refs/heads/master@{#36990}
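
A minimal standalone sketch of the 70% threshold check described above, mirroring the live-bytes formula the tests in this diff use (FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100); the function name and simplified integer interface are hypothetical, not the CL's actual implementation:

#include <cassert>

// Hedged sketch: would a page qualify for new->new page promotion?
// Mirrors threshold_bytes = threshold_percent * allocatable_bytes / 100.
bool ShouldMovePageWithinNewSpace(long live_bytes, long allocatable_bytes,
                                  long threshold_percent = 70) {
  const long threshold_bytes = threshold_percent * allocatable_bytes / 100;
  return live_bytes >= threshold_bytes;
}

int main() {
  // A page that is 80% live qualifies for moving; a 50% live page is copied.
  assert(ShouldMovePageWithinNewSpace(80, 100));
  assert(!ShouldMovePageWithinNewSpace(50, 100));
  return 0;
}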

TBR=ulan@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=chromium:581412

Review-Url: https://codereview.chromium.org/2063013005
Cr-Commit-Position: refs/heads/master@{#37042}
parent b60da28c
src/heap/heap.cc
@@ -1611,8 +1611,6 @@ void Heap::Scavenge() {
  // Pause the inline allocation steps.
  PauseAllocationObserversScope pause_observers(this);

  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
......
[diff of one file collapsed and not shown]
src/heap/mark-compact.h
@@ -408,7 +408,6 @@ class MarkCompactCollector {
  enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
  enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
  enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
  enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -417,7 +416,6 @@ class MarkCompactCollector {
  template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
            SkipListRebuildingMode skip_list_mode,
            FreeListRebuildingMode free_list_mode,
            FreeSpaceTreatmentMode free_space_mode>
  static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
@@ -436,12 +434,11 @@ class MarkCompactCollector {
  int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                         int max_pages = 0);
  int ParallelSweepPage(Page* page, AllocationSpace identity);
  int ParallelSweepPage(Page* page, PagedSpace* space);

  void StartSweeping();
  void StartSweepingHelper(AllocationSpace space_to_start);
  void EnsureCompleted();
  void EnsureNewSpaceCompleted();
  bool IsSweepingCompleted();
  void SweepOrWaitUntilSweepingCompleted(Page* page);
@@ -794,6 +791,7 @@ class MarkCompactCollector {
  void SweepSpaces();

  void EvacuateNewSpacePrologue();
  void EvacuateNewSpaceEpilogue();
  void EvacuatePagesInParallel();
......
src/heap/spaces.cc
@@ -1386,6 +1386,7 @@ void NewSpace::TearDown() {
  from_space_.TearDown();
}

void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
@@ -1431,48 +1432,6 @@ void NewSpace::Shrink() {
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

bool NewSpace::Rebalance() {
  CHECK(heap()->promotion_queue()->is_empty());
  // Order here is important to make use of the page pool.
  return to_space_.EnsureCurrentCapacity() &&
         from_space_.EnsureCurrentCapacity();
}

bool SemiSpace::EnsureCurrentCapacity() {
  if (is_committed()) {
    const int expected_pages = current_capacity_ / Page::kPageSize;
    int actual_pages = 0;
    Page* current_page = anchor()->next_page();
    while (current_page != anchor()) {
      actual_pages++;
      current_page = current_page->next_page();
      if (actual_pages > expected_pages) {
        Page* to_remove = current_page->prev_page();
        // Make sure we don't overtake the actual top pointer.
        DCHECK_NE(to_remove, current_page_);
        to_remove->Unlink();
        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
            to_remove);
      }
    }
    while (actual_pages < expected_pages) {
      actual_pages++;
      current_page =
          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
              Page::kAllocatableMemory, this, executable());
      if (current_page == nullptr) return false;
      DCHECK_NOT_NULL(current_page);
      current_page->InsertAfter(anchor());
      Bitmap::Clear(current_page);
      current_page->SetFlags(anchor()->prev_page()->GetFlags(),
                             Page::kCopyAllFlags);
      heap()->CreateFillerObjectAt(current_page->area_start(),
                                   current_page->area_size(),
                                   ClearRecordedSlots::kNo);
    }
  }
  return true;
}
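
A note on the ordering in Rebalance above (inferred from the code, not stated in the commit message): EnsureCurrentCapacity frees surplus pages through MemoryAllocator::kPooledAndQueue and allocates missing ones through MemoryAllocator::kPooled, so running it on to_space_ before from_space_ lets pages released by one semispace be reused from the pool by the other instead of mapping fresh memory.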

void LocalAllocationBuffer::Close() {
  if (IsValid()) {
@@ -1915,17 +1874,21 @@ void SemiSpace::Reset() {
  current_page_ = anchor_.next_page();
}

void SemiSpace::RemovePage(Page* page) {
  if (current_page_ == page) {
    current_page_ = page->prev_page();
  }
  page->Unlink();
}

void SemiSpace::PrependPage(Page* page) {
  page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
  page->set_owner(this);
  page->InsertAfter(anchor());
}

bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
  // TODO(mlippautz): We do not have to get a new page here when the semispace
  // is uncommitted later on.
  Page* new_page = heap()->memory_allocator()->AllocatePage(
      Page::kAllocatableMemory, this, executable());
  if (new_page == nullptr) return false;
  Bitmap::Clear(new_page);
  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
  new_page->set_next_page(old_page->next_page());
  new_page->set_prev_page(old_page->prev_page());
  old_page->next_page()->set_prev_page(new_page);
  old_page->prev_page()->set_next_page(new_page);
  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
                               ClearRecordedSlots::kNo);
  return true;
}
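
Side note on ReplaceWithEmptyPage above (an inference from the code): the fresh page is filled with a single filler object via CreateFillerObjectAt, which keeps the page iterable by the heap without requiring a sweep.
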
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
......
src/heap/spaces.h
@@ -425,10 +425,6 @@ class MemoryChunk {
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION,

    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
    // within the new space during evacuation.
    PAGE_NEW_NEW_PROMOTION,

    // A black page has all mark bits set to 1 (black). A black page currently
    // cannot be iterated because it is not swept. Moreover, live bytes are
    // also not updated.
@@ -2435,8 +2431,6 @@ class SemiSpace : public Space {
  // than the current capacity.
  bool ShrinkTo(int new_capacity);

  bool EnsureCurrentCapacity();

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK_NE(anchor_.next_page(), anchor());
@@ -2465,8 +2459,7 @@ class SemiSpace : public Space {
  // Resets the space to using the first page.
  void Reset();

  void RemovePage(Page* page);
  void PrependPage(Page* page);
  bool ReplaceWithEmptyPage(Page* page);

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
@@ -2542,9 +2535,8 @@ class SemiSpace : public Space {
  Page anchor_;
  Page* current_page_;

  friend class NewSpace;
  friend class NewSpacePageIterator;
  friend class SemiSpaceIterator;
  friend class NewSpacePageIterator;
};
@@ -2723,15 +2715,12 @@ class NewSpace : public Space {
    return static_cast<size_t>(allocated);
  }

  void MovePageFromSpaceToSpace(Page* page) {
  bool ReplaceWithEmptyPage(Page* page) {
    // This method is called after flipping the semispace.
    DCHECK(page->InFromSpace());
    from_space_.RemovePage(page);
    to_space_.PrependPage(page);
    pages_used_++;
    return from_space_.ReplaceWithEmptyPage(page);
  }

  bool Rebalance();

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
......
test/cctest/cctest.gyp
@@ -112,7 +112,6 @@
        'heap/test-incremental-marking.cc',
        'heap/test-lab.cc',
        'heap/test-mark-compact.cc',
        'heap/test-page-promotion.cc',
        'heap/test-spaces.cc',
        'libsampler/test-sampler.cc',
        'print-extension.cc',
......
test/cctest/heap/test-heap.cc
@@ -6561,6 +6561,56 @@ HEAP_TEST(Regress589413) {
  heap->CollectGarbage(OLD_SPACE);
}

UNINITIALIZED_TEST(PagePromotion) {
  FLAG_page_promotion = true;
  FLAG_page_promotion_threshold = 0;  // %
  i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
  // We cannot optimize for size as we require a new space with more than one
  // page.
  i::FLAG_optimize_for_size = false;
  // Set max_semi_space_size because it could've been initialized by an
  // implication of optimize_for_size.
  i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    // Clean up any left-over objects from cctest initialization.
    heap->CollectAllGarbage();
    heap->CollectAllGarbage();

    std::vector<Handle<FixedArray>> handles;
    heap::SimulateFullSpace(heap->new_space(), &handles);
    heap->CollectGarbage(NEW_SPACE);
    CHECK_GT(handles.size(), 0u);
    // The first object in handles should be on the first page.
    Handle<FixedArray> first_object = handles.front();
    Page* first_page = Page::FromAddress(first_object->address());
    // The age mark should not be on the first page.
    CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
    // To perform a sanity check on live bytes we need to mark the heap.
    heap::SimulateIncrementalMarking(heap, true);
    // Sanity check that the page meets the requirements for promotion.
    const int threshold_bytes =
        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
    CHECK_GE(first_page->LiveBytes(), threshold_bytes);

    // Actual checks: The page is in new space first, but is moved to old
    // space during a full GC.
    CHECK(heap->new_space()->ContainsSlow(first_page->address()));
    CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
    heap->CollectGarbage(OLD_SPACE);
    CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
    CHECK(heap->old_space()->ContainsSlow(first_page->address()));
  }
}
TEST(Regress598319) {
  // This test ensures that no white objects can cross the progress bar of large
  // objects during incremental marking. It checks this by using Shift() during
......
test/cctest/heap/test-page-promotion.cc (deleted by this revert)
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/array-buffer-tracker.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
namespace {

v8::Isolate* NewIsolateForPagePromotion() {
  i::FLAG_page_promotion = true;
  i::FLAG_page_promotion_threshold = 0;  // %
  i::FLAG_min_semi_space_size = 8 * (i::Page::kPageSize / i::MB);
  // We cannot optimize for size as we require a new space with more than one
  // page.
  i::FLAG_optimize_for_size = false;
  // Set max_semi_space_size because it could've been initialized by an
  // implication of optimize_for_size.
  i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  return isolate;
}

}  // namespace

namespace v8 {
namespace internal {
UNINITIALIZED_TEST(PagePromotion_NewToOld) {
  v8::Isolate* isolate = NewIsolateForPagePromotion();
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    std::vector<Handle<FixedArray>> handles;
    heap::SimulateFullSpace(heap->new_space(), &handles);
    heap->CollectGarbage(NEW_SPACE);
    CHECK_GT(handles.size(), 0u);
    // The first object in handles should be on the first page.
    Handle<FixedArray> first_object = handles.front();
    Page* first_page = Page::FromAddress(first_object->address());
    // To perform a sanity check on live bytes we need to mark the heap.
    heap::SimulateIncrementalMarking(heap, true);
    // Sanity check that the page meets the requirements for promotion.
    const int threshold_bytes =
        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
    CHECK_GE(first_page->LiveBytes(), threshold_bytes);

    // Actual checks: The page is in new space first, but is moved to old
    // space during a full GC.
    CHECK(heap->new_space()->ContainsSlow(first_page->address()));
    CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
    heap::GcAndSweep(heap, OLD_SPACE);
    CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
    CHECK(heap->old_space()->ContainsSlow(first_page->address()));
  }
}
UNINITIALIZED_TEST(PagePromotion_NewToNew) {
  v8::Isolate* isolate = NewIsolateForPagePromotion();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    std::vector<Handle<FixedArray>> handles;
    heap::SimulateFullSpace(heap->new_space(), &handles);
    CHECK_GT(handles.size(), 0u);
    // The last object in handles should definitely be on the last page, which
    // does not contain the age mark.
    Handle<FixedArray> last_object = handles.back();
    Page* to_be_promoted_page = Page::FromAddress(last_object->address());
    CHECK(to_be_promoted_page->Contains(last_object->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
    heap::GcAndSweep(heap, OLD_SPACE);
    CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
    CHECK(to_be_promoted_page->Contains(last_object->address()));
  }
}
UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
  // Test makes sure JSArrayBuffer backing stores are still tracked after
  // new-to-new promotion.
  v8::Isolate* isolate = NewIsolateForPagePromotion();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();
    Heap* heap = i_isolate->heap();

    // Fill the current page which potentially contains the age mark.
    heap::FillCurrentPage(heap->new_space());
    // Allocate a buffer we would like to check against.
    Handle<JSArrayBuffer> buffer =
        i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
    JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100);
    std::vector<Handle<FixedArray>> handles;
    // Simulate a full space, filling the interesting page with live objects.
    heap::SimulateFullSpace(heap->new_space(), &handles);
    CHECK_GT(handles.size(), 0u);
    // The first object in handles should be on the same page as the allocated
    // buffer, which does not contain the age mark.
    Handle<FixedArray> first_object = handles.front();
    Page* to_be_promoted_page = Page::FromAddress(first_object->address());
    CHECK(to_be_promoted_page->Contains(first_object->address()));
    CHECK(to_be_promoted_page->Contains(buffer->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
    heap::GcAndSweep(heap, OLD_SPACE);
    CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
    CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
    CHECK(to_be_promoted_page->Contains(first_object->address()));
    CHECK(to_be_promoted_page->Contains(buffer->address()));
    CHECK(ArrayBufferTracker::IsTracked(*buffer));
  }
}
}  // namespace internal
}  // namespace v8