Commit bf74d43d authored by mlippautz, committed by Commit bot

[heap] MinorMC: Evacuation for young generation

In the spirit of the full MC, we evacuate and update pointers in parallel for
the young generation.

The collectors are connected during incremental marking when mark bits are
transferred from the young generation bitmap to the old generation bitmap.

The evacuation phase cannot (yet) move pages and relies completely on copying
objects.

BUG=chromium:651354

Review-Url: https://codereview.chromium.org/2796233003
Cr-Commit-Position: refs/heads/master@{#45074}
parent 8ab39ebc
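
Editor's note: the main mark-compact.cc change is collapsed below, so here is a minimal sketch of how the new evacuation phase of the minor MC could be driven, pieced together from the virtual interface and the tracer scopes added in this CL. The body and comments are illustrative assumptions, not the actual implementation.

void MinorMarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
  EvacuatePrologue();  // e.g. collect the new-space pages to evacuate from.
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    // Pages are never moved (yet); live young objects are copied in parallel.
    EvacuatePagesInParallel();
  }
  // Fixes up slots that still point at the old copies; timed under the
  // MC_EVACUATE_UPDATE_POINTERS(_TO_NEW) scopes shown in the gc-tracer diff.
  UpdatePointersAfterEvacuation();
  EvacuateEpilogue();  // e.g. release the evacuated semispace pages.
}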
......@@ -676,7 +676,6 @@ DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_NEG_IMPLICATION(minor_mc, page_promotion)
DEFINE_NEG_IMPLICATION(minor_mc, flush_code)
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
......
......@@ -529,11 +529,19 @@ void GCTracer::PrintNVP() const {
"reduce_memory=%d "
"mark=%.2f "
"mark.roots=%.2f "
"mark.old_to_new=%.2f\n",
"mark.old_to_new=%.2f "
"evacuate=%.2f "
"evacuate.copy=%.2f "
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.to_new=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_.scopes[Scope::MINOR_MC_MARK],
current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS]);
current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_COPY],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW]);
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
......
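
With the four extra fields, a minor-MC entry in --trace-gc-nvp output would look roughly like the line below. The values are made up, and the leading field names (pause/mutator/gc) are assumed from the unchanged head of the format string, which is not part of this hunk.

pause=0.8 mutator=12.3 gc=mmc reduce_memory=0 mark=0.42 mark.roots=0.10 mark.old_to_new=0.07 evacuate=0.35 evacuate.copy=0.21 evacuate.update_pointers=0.09 evacuate.update_pointers.to_new=0.06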
......@@ -34,67 +34,69 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_PREPARE_CODE_FLUSH) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MC_MINOR_MC) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
F(SCAVENGER_SEMISPACE) \
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_PREPARE_CODE_FLUSH) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MC_MINOR_MC) \
F(MINOR_MC_CLEAR_LIVENESS) \
F(MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
#define TRACE_GC(tracer, scope_id) \
......
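
Each F(NAME) entry expands into a GCTracer::Scope enumerator, so the two newly added minor-MC scopes can be timed with TRACE_GC like any existing scope. A minimal usage sketch follows; the placement inside the collector and the body are assumptions, not taken from this diff.

{
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS);
  // Hypothetical body: clear mark bitmaps and live-byte counters on the
  // new-space pages that were just evacuated.
}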
This diff is collapsed.
......@@ -305,7 +305,19 @@ class MarkCompactCollectorBase {
protected:
explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
// Marking operations for objects reachable from roots.
virtual void MarkLiveObjects() = 0;
// Mark objects reachable (transitively) from objects in the marking
// stack.
virtual void EmptyMarkingDeque() = 0;
virtual void ProcessMarkingDeque() = 0;
// Clear non-live references held in side data structures.
virtual void ClearNonLiveReferences() = 0;
virtual void EvacuatePrologue() = 0;
virtual void EvacuateEpilogue() = 0;
virtual void Evacuate() = 0;
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
......@@ -313,7 +325,8 @@ class MarkCompactCollectorBase {
template <class Evacuator, class Collector>
void CreateAndExecuteEvacuationTasks(
Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes,
const int& abandoned_pages);
Heap* heap_;
......@@ -323,7 +336,9 @@ class MarkCompactCollectorBase {
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
explicit MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap), marking_deque_(heap) {}
: MarkCompactCollectorBase(heap),
marking_deque_(heap),
page_parallel_job_semaphore_(0) {}
MarkingState marking_state(HeapObject* object) const override {
return MarkingState::External(object);
......@@ -347,10 +362,19 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
void MarkLiveObjects() override;
void ProcessMarkingDeque();
void EmptyMarkingDeque();
void ProcessMarkingDeque() override;
void EmptyMarkingDeque() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
void EvacuateEpilogue() override;
void Evacuate() override;
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
MarkingDeque marking_deque_;
base::Semaphore page_parallel_job_semaphore_;
List<Page*> new_space_evacuation_pages_;
friend class StaticYoungGenerationMarkingVisitor;
};
......@@ -557,7 +581,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
......@@ -579,9 +602,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
void ProcessMarkingDeque();
void ProcessMarkingDeque() override;
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
......@@ -599,11 +620,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Collects a list of dependent code from maps embedded in optimize code.
DependentCode* DependentCodeListFromNonLiveMaps();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
void EmptyMarkingDeque();
// This function empties the marking stack, but may leave overflowed objects
// in the heap, in which case the marking stack's overflow flag will be set.
void EmptyMarkingDeque() override;
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
......@@ -624,7 +643,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences();
void ClearNonLiveReferences() override;
void MarkDependentCodeForDeoptimization(DependentCode* list);
// Find non-live targets of simple transitions in the given list. Clear
// transitions to non-live targets and if needed trim descriptors arrays.
......@@ -663,13 +682,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void StartSweepSpaces();
void StartSweepSpace(PagedSpace* space);
void EvacuatePrologue();
void EvacuateEpilogue();
void EvacuatePagesInParallel();
void EvacuateNewSpaceAndCandidates();
void UpdatePointersAfterEvacuation();
void EvacuatePrologue() override;
void EvacuateEpilogue() override;
void Evacuate() override;
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
void ReleaseEvacuationCandidates();
......
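
The MigrationObserver* parameter threaded through CreateAndExecuteEvacuationTasks above is not defined in the hunks shown here. As a rough idea of the shape such an observer could take, it would be notified for every object the evacuator copies, e.g. so the young-generation collector can update mark bits or remembered sets for the new location. The interface below is an assumption for illustration only, not the definition from this CL.

class MigrationObserver {  // Assumed shape; not taken from this diff.
 public:
  virtual ~MigrationObserver() {}
  // Invoked after |src| has been copied to |dst| in space |dest|.
  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                    int size) = 0;
};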
......@@ -337,8 +337,13 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
Page* FreeListCategory::page() {
return Page::FromAddress(reinterpret_cast<Address>(this));
Page* FreeListCategory::page() const {
return Page::FromAddress(
reinterpret_cast<Address>(const_cast<FreeListCategory*>(this)));
}
Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
return top(type) ? top(type)->page() : nullptr;
}
FreeList* FreeListCategory::owner() {
......
......@@ -1356,6 +1356,39 @@ bool PagedSpace::ContainsSlow(Address addr) {
return false;
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::LockGuard<base::Mutex> guard(mutex());
// Check for pages that still contain free list entries. Bail out for
// categories smaller than the one required for |size_in_bytes|.
const int minimum_category =
static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
Page* page = free_list()->GetPageForCategoryType(kHuge);
if (!page && static_cast<int>(kLarge) >= minimum_category)
page = free_list()->GetPageForCategoryType(kLarge);
if (!page && static_cast<int>(kMedium) >= minimum_category)
page = free_list()->GetPageForCategoryType(kMedium);
if (!page && static_cast<int>(kSmall) >= minimum_category)
page = free_list()->GetPageForCategoryType(kSmall);
if (!page) return nullptr;
AccountUncommitted(page->size());
accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
accounting_stats_.DecreaseCapacity(page->area_size());
page->Unlink();
UnlinkFreeListCategories(page);
return page;
}
void PagedSpace::AddPage(Page* page) {
AccountCommitted(page->size());
accounting_stats_.IncreaseCapacity(page->area_size());
accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
page->set_owner(this);
RelinkFreeListCategories(page);
page->InsertAfter(anchor()->prev_page());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
......@@ -1366,11 +1399,17 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
AccountUncommitted(unused);
// Do not account for the unused space as uncommitted because the counter
// is kept in sync with the page size, which is likewise not adjusted for
// these chunks.
}
}
bool PagedSpace::Expand() {
// Always lock against the main space as we can only adjust capacity and
// pages concurrently for the main paged space.
base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
......@@ -2937,6 +2976,17 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
// only happen when we are evacuating for the young generation.
PagedSpace* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
HeapObject* object =
free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
......
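
Taken together, the slow path above means a CompactionSpace used for young-generation evacuation first tries to steal a reusable page from the corresponding main space before expanding the heap. A condensed sketch of that fallback is below; the helper name is hypothetical and access specifiers are glossed over, but the calls mirror the diff above.

// Hypothetical helper (not in this CL) mirroring PagedSpace::RawSlowAllocateRaw:
HeapObject* CompactionSpace::TryRemovePageAndAllocate(int size_in_bytes) {
  PagedSpace* main_space = heap()->paged_space(identity());
  Page* page = main_space->RemovePageSafe(size_in_bytes);  // locks main space
  if (page == nullptr) return nullptr;  // caller falls back to Expand()
  AddPage(page);                        // re-links free-list categories here
  return free_list()->Allocate(static_cast<size_t>(size_in_bytes));
}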
......@@ -190,6 +190,7 @@ class FreeListCategory {
FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
inline Page* page() const;
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
size_t available() const { return available_; }
......@@ -204,8 +205,6 @@ class FreeListCategory {
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
inline Page* page();
FreeSpace* top() { return top_; }
void set_top(FreeSpace* top) { top_ = top; }
FreeListCategory* prev() { return prev_; }
......@@ -1722,6 +1721,21 @@ class V8_EXPORT_PRIVATE FreeList {
return maximum_freed;
}
static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
return kTiny;
} else if (size_in_bytes <= kSmallListMax) {
return kSmall;
} else if (size_in_bytes <= kMediumListMax) {
return kMedium;
} else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
explicit FreeList(PagedSpace* owner);
// Adds a node on the free list. The block of size {size_in_bytes} starting
......@@ -1793,6 +1807,9 @@ class V8_EXPORT_PRIVATE FreeList {
void RemoveCategory(FreeListCategory* category);
void PrintCategories(FreeListCategoryType type);
// Returns a page containing an entry for a given type, or nullptr otherwise.
inline Page* GetPageForCategoryType(FreeListCategoryType type);
#ifdef DEBUG
size_t SumFreeLists();
bool IsVeryLong();
......@@ -1846,21 +1863,6 @@ class V8_EXPORT_PRIVATE FreeList {
FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
size_t minimum_size);
FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
return kTiny;
} else if (size_in_bytes <= kSmallListMax) {
return kSmall;
} else if (size_in_bytes <= kMediumListMax) {
return kMedium;
} else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
// The tiny categories are not used for fast allocation.
FreeListCategoryType SelectFastAllocationFreeListCategoryType(
size_t size_in_bytes) {
......@@ -1874,7 +1876,9 @@ class V8_EXPORT_PRIVATE FreeList {
return kHuge;
}
FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
FreeListCategory* top(FreeListCategoryType type) const {
return categories_[type];
}
PagedSpace* owner_;
base::AtomicNumber<size_t> wasted_bytes_;
......@@ -2150,6 +2154,11 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void AddPage(Page* page);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
......
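
Making SelectFreeListCategoryType() static and public (it was previously a private instance method, as the removed block above shows) is what lets PagedSpace::RemovePageSafe() compute the minimum viable category without going through a FreeList instance, roughly:

const FreeListCategoryType minimum =
    FreeList::SelectFreeListCategoryType(static_cast<size_t>(size_in_bytes));
// Only categories >= |minimum| can possibly satisfy the request, so smaller
// ones are skipped when looking for a page to steal.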
......@@ -21,6 +21,10 @@ namespace {
v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
int max_semi_space_size = 8) {
// Parallel evacuation messes with fragmentation in a way that causes
// objects which should be copied within the semispace to instead be
// promoted to old space.
i::FLAG_parallel_compaction = false;
i::FLAG_page_promotion = true;
i::FLAG_page_promotion_threshold = 0; // %
i::FLAG_min_semi_space_size = min_semi_space_size;
......
......@@ -471,7 +471,7 @@ TEST(SizeOfInitialHeap) {
page_count[i] = heap->paged_space(i)->CountTotalPages();
// Check that the initial heap is also below the limit.
CHECK_LT(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
CHECK_LE(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
}
// Executing the empty script gets by with the same number of pages, i.e.,
......
......@@ -1190,6 +1190,9 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
};
TEST(InternalizeExternal) {
// TODO(mlippautz): Remove once we add support for forwarding ThinStrings in
// minor MC.
if (FLAG_minor_mc) return;
FLAG_thin_strings = true;
CcTest::InitializeVM();
i::Isolate* isolate = CcTest::i_isolate();
......
......@@ -22,6 +22,12 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
EXPECT_TRUE(compaction_space != NULL);
EXPECT_TRUE(compaction_space->SetUp());
for (Page* p : *old_space) {
// Unlink free lists from the main space to avoid reusing the memory for
// compaction spaces.
old_space->UnlinkFreeListCategories(p);
}
// Cannot loop until "Available()" since we initially have 0 bytes available
// and would thus neither grow, nor be able to allocate an object.
const int kNumObjects = 10;
......