Commit bf74d43d authored by mlippautz, committed by Commit bot

[heap] MinorMC: Evacuation for young generation

In the spirit of the full MC, we evacuate and update pointers in parallel for
the young generation.

The collectors are connected during incremental marking when mark bits are
transferred from the young generation bitmap to the old generation bitmap.

The evacuation phase cannot (yet) move pages and relies completely on copying
objects.
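
For orientation, the resulting collection cycle (a sketch of
MinorMarkCompactCollector::CollectGarbage below, not verbatim) is:

  // Young generation collection as wired up in this patch.
  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
  MarkLiveObjects();          // minor marking using the external bitmaps
  ClearNonLiveReferences();   // external strings, young weak references
  Evacuate();                 // flip the semispaces, copy live objects in
                              // parallel, update pointers, rebalance
  // Afterwards: refresh the incremental marking deque and clear the
  // liveness bitmaps of all from-space pages.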

BUG=chromium:651354

Review-Url: https://codereview.chromium.org/2796233003
Cr-Commit-Position: refs/heads/master@{#45074}
parent 8ab39ebc
@@ -676,7 +676,6 @@ DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_NEG_IMPLICATION(minor_mc, page_promotion)
DEFINE_NEG_IMPLICATION(minor_mc, flush_code)
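Note: DEFINE_NEG_IMPLICATION means enabling the young generation collector now
switches both features off automatically, e.g. (hypothetical invocation):

  d8 --minor-mc script.js   # implies --no-page-promotion --no-flush-code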
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
@@ -529,11 +529,19 @@ void GCTracer::PrintNVP() const {
"reduce_memory=%d "
"mark=%.2f "
"mark.roots=%.2f "
"mark.old_to_new=%.2f\n",
"mark.old_to_new=%.2f "
"evacuate=%.2f "
"evacuate.copy=%.2f "
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.to_new=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_.scopes[Scope::MINOR_MC_MARK],
current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS]);
current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_COPY],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW]);
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
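For illustration, a minor MC record in --trace-gc-nvp output now carries the
new evacuation counters. A hypothetical line, assuming the usual
pause/mutator/gc prefix fields from the part of the format string not shown in
this hunk:

  pause=1.2 mutator=35.7 gc=mmc reduce_memory=0 mark=0.80 mark.roots=0.30
  mark.old_to_new=0.21 evacuate=0.52 evacuate.copy=0.31
  evacuate.update_pointers=0.17 evacuate.update_pointers.to_new=0.09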
@@ -34,67 +34,69 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_PREPARE_CODE_FLUSH) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MC_MINOR_MC) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
F(SCAVENGER_SEMISPACE) \
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_PREPARE_CODE_FLUSH) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MC_MINOR_MC) \
F(MINOR_MC_CLEAR_LIVENESS) \
F(MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
#define TRACE_GC(tracer, scope_id) \
@@ -304,6 +304,30 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
};
class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
public:
explicit YoungGenerationEvacuationVerifier(Heap* heap)
: EvacuationVerifier(heap) {}
void Run() override {
VerifyRoots(VISIT_ALL_IN_SCAVENGE);
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
VerifyEvacuation(heap_->map_space());
}
protected:
void VerifyPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
}
}
}
};
} // namespace
#endif // VERIFY_HEAP
@@ -440,7 +464,7 @@ void MarkCompactCollector::CollectGarbage() {
StartSweepSpaces();
EvacuateNewSpaceAndCandidates();
Evacuate();
Finish();
}
@@ -1600,6 +1624,66 @@ class ExternalStringTableCleaner : public RootVisitor {
Heap* heap_;
};
// Helper class for pruning the string table.
class YoungGenerationExternalStringTableCleaner : public RootVisitor {
public:
YoungGenerationExternalStringTableCleaner(
const MinorMarkCompactCollector& collector)
: heap_(collector.heap()), collector_(collector) {}
void VisitRootPointers(Root root, Object** start, Object** end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (o->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(o);
if (ObjectMarking::IsWhite(heap_object,
collector_.marking_state(heap_object))) {
if (o->IsExternalString()) {
heap_->FinalizeExternalString(String::cast(*p));
} else {
// The original external string may have been internalized.
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
*p = heap_->the_hole_value();
}
}
}
}
private:
Heap* heap_;
const MinorMarkCompactCollector& collector_;
};
// Marked young generation objects and all old generation objects will be
// retained.
class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit MinorMarkCompactWeakObjectRetainer(
const MinorMarkCompactCollector& collector)
: collector_(collector) {}
virtual Object* RetainAs(Object* object) {
HeapObject* heap_object = HeapObject::cast(object);
if (!collector_.heap()->InNewSpace(heap_object)) return object;
DCHECK(!ObjectMarking::IsGrey(heap_object,
collector_.marking_state(heap_object)));
if (ObjectMarking::IsBlack(heap_object,
collector_.marking_state(heap_object))) {
return object;
}
return nullptr;
}
private:
const MinorMarkCompactCollector& collector_;
};
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
@@ -1724,7 +1808,6 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
collector_->RecordRelocSlot(host, rinfo, cell);
}
// Entries that will never move.
inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
@@ -1781,6 +1864,76 @@ class ProfilingMigrationObserver final : public MigrationObserver {
}
};
class YoungGenerationMigrationObserver final : public MigrationObserver {
public:
YoungGenerationMigrationObserver(Heap* heap,
MarkCompactCollector* mark_compact_collector)
: MigrationObserver(heap),
mark_compact_collector_(mark_compact_collector) {}
inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
int size) final {
// Migrate color to old generation marking in case the object survived young
// generation garbage collection.
if (heap_->incremental_marking()->IsMarking()) {
DCHECK(ObjectMarking::IsWhite(
dst, mark_compact_collector_->marking_state(dst)));
heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst);
}
}
protected:
base::Mutex mutex_;
MarkCompactCollector* mark_compact_collector_;
};
class YoungGenerationRecordMigratedSlotVisitor final
: public RecordMigratedSlotVisitor {
public:
explicit YoungGenerationRecordMigratedSlotVisitor(
MarkCompactCollector* collector)
: RecordMigratedSlotVisitor(collector) {}
inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final {
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate() &&
IsLive(host)) {
RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
nullptr, CODE_ENTRY_SLOT,
code_entry_slot);
}
}
void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
UNREACHABLE();
}
void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final {
UNREACHABLE();
}
private:
// Only record slots for host objects that are considered live by the full
// collector.
inline bool IsLive(HeapObject* object) {
return ObjectMarking::IsBlack(object, collector_->marking_state(object));
}
inline void RecordMigratedSlot(HeapObject* host, Object* value,
Address slot) final {
if (value->IsHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate() && IsLive(host)) {
RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
}
}
}
};
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() {}
@@ -2363,11 +2516,11 @@ SlotCallbackResult MinorMarkCompactCollector::CheckAndMarkObject(
return REMOVE_SLOT;
}
static bool IsUnmarkedObject(Heap* heap, Object** p) {
static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
return heap->InNewSpace(*p) &&
!ObjectMarking::IsBlack(HeapObject::cast(*p),
MarkingState::Internal(HeapObject::cast(*p)));
MarkingState::External(HeapObject::cast(*p)));
}
void MinorMarkCompactCollector::MarkLiveObjects() {
@@ -2416,11 +2569,11 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
&IsUnmarkedObject);
&IsUnmarkedObjectForYoungGeneration);
isolate()
->global_handles()
->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
&root_visitor);
->IterateNewSpaceWeakUnmodifiedRoots<
GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor);
ProcessMarkingDeque();
}
@@ -2452,14 +2605,106 @@ void MinorMarkCompactCollector::EmptyMarkingDeque() {
}
void MinorMarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
MarkLiveObjects();
ClearNonLiveReferences();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
YoungGenerationMarkingVerifier verifier(heap());
verifier.Run();
}
#endif // VERIFY_HEAP
Evacuate();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
YoungGenerationEvacuationVerifier verifier(heap());
verifier.Run();
}
#endif // VERIFY_HEAP
heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS);
for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
heap()->new_space()->FromSpaceEnd())) {
marking_state(p).ClearLiveness();
}
}
}
void MinorMarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
// Internalized strings are always stored in old space, so there is no need
// to clean them here.
YoungGenerationExternalStringTableCleaner external_visitor(*this);
heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
heap()->external_string_table_.CleanUpNewSpaceStrings();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MinorMarkCompactWeakObjectRetainer retainer(*this);
heap()->ProcessYoungWeakReferences(&retainer);
}
}
void MinorMarkCompactCollector::EvacuatePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
new_space_evacuation_pages_.Add(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
}
void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
EvacuatePrologue();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
EvacuatePagesInParallel();
}
UpdatePointersAfterEvacuation();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
}
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
// TODO(mlippautz): Implement page promotion.
new_space_evacuation_pages_.Rewind(0);
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
EvacuateEpilogue();
}
}
void MarkCompactCollector::MarkLiveObjects() {
@@ -3316,9 +3561,55 @@ bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
return success;
}
class YoungGenerationEvacuator : public Evacuator {
public:
YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
RecordMigratedSlotVisitor* record_visitor)
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
protected:
bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
MinorMarkCompactCollector* collector_;
};
bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
intptr_t* live_bytes) {
bool success = false;
LiveObjectVisitor object_visitor;
const MarkingState state = collector_->marking_state(page);
*live_bytes = state.live_bytes();
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
success = object_visitor.VisitBlackObjects(
page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
DCHECK(success);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
break;
case kPageNewToOld:
// TODO(mlippautz): Implement page promotion.
UNREACHABLE();
break;
case kPageNewToNew:
// TODO(mlippautz): Implement page promotion.
UNREACHABLE();
break;
case kObjectsOldToOld:
UNREACHABLE();
break;
}
return success;
}
class EvacuationJobTraits {
public:
typedef int* PerPageData; // Pointer to number of aborted pages.
struct PageData {
int* abandoned_pages; // Pointer to number of aborted pages.
MarkingState marking_state;
};
typedef PageData PerPageData;
typedef Evacuator* PerTaskData;
static const bool NeedSequentialFinalization = true;
@@ -3351,7 +3642,7 @@ class EvacuationJobTraits {
p->ClearEvacuationCandidate();
// Slots have already been recorded so we just need to add it to the
// sweeper, which will happen after updating pointers.
*data += 1;
*data.abandoned_pages += 1;
}
break;
default:
@@ -3363,7 +3654,8 @@ class EvacuationJobTraits {
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes,
const int& abandoned_pages) {
// Used for trace summary.
double compaction_speed = 0;
@@ -3383,6 +3675,8 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i] = new Evacuator(collector, record_visitor);
if (profiling) evacuators[i]->AddObserver(&profiling_observer);
if (migration_observer != nullptr)
evacuators[i]->AddObserver(migration_observer);
}
job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
const Address top = heap()->new_space()->top();
@@ -3421,7 +3715,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes = 0;
for (Page* page : old_space_evacuation_pages_) {
live_bytes += MarkingState::Internal(page).live_bytes();
job.AddPage(page, &abandoned_pages);
job.AddPage(page, {&abandoned_pages, marking_state(page)});
}
const bool reduce_memory = heap()->ShouldReduceMemory();
@@ -3440,13 +3734,36 @@
}
}
job.AddPage(page, &abandoned_pages);
job.AddPage(page, {&abandoned_pages, marking_state(page)});
}
DCHECK_GE(job.NumberOfPages(), 1);
RecordMigratedSlotVisitor record_visitor(this);
CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor,
live_bytes, abandoned_pages);
CreateAndExecuteEvacuationTasks<FullEvacuator>(
this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages);
}
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
PageParallelJob<EvacuationJobTraits> job(
heap_, heap_->isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
int abandoned_pages = 0;
intptr_t live_bytes = 0;
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = marking_state(page).live_bytes();
live_bytes += live_bytes_on_page;
// TODO(mlippautz): Implement page promotion.
job.AddPage(page, {&abandoned_pages, marking_state(page)});
}
DCHECK_GE(job.NumberOfPages(), 1);
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
YoungGenerationRecordMigratedSlotVisitor record_visitor(
heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
this, &job, &record_visitor, &observer, live_bytes, abandoned_pages);
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
@@ -3681,7 +3998,7 @@ void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
swept_list_[space->identity()].Add(page);
}
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
@@ -3771,6 +4088,7 @@ class PointerUpdateJobTraits {
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
if (type == OLD_TO_NEW) {
RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
return CheckAndUpdateOldToNewSlot(heap, slot);
@@ -3877,17 +4195,23 @@ void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
class ToSpacePointerUpdateJobTraits {
public:
typedef std::pair<Address, Address> PerPageData;
struct PageData {
Address start;
Address end;
MarkingState marking_state;
};
typedef PageData PerPageData;
typedef PointersUpdatingVisitor* PerTaskData;
static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData limits) {
MemoryChunk* chunk, PerPageData page_data) {
if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// New->new promoted pages contain garbage so they require iteration
// using markbits.
ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
} else {
ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
}
return true;
}
@@ -3899,8 +4223,8 @@ class ToSpacePointerUpdateJobTraits {
private:
static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
PerPageData limits) {
for (Address cur = limits.first; cur < limits.second;) {
PerPageData page_data) {
for (Address cur = page_data.start; cur < page_data.end;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
int size = object->SizeFromMap(map);
@@ -3911,8 +4235,8 @@ class ToSpacePointerUpdateJobTraits {
static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
PerPageData limits) {
LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk));
PerPageData page_data) {
LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
Map* map = object->map();
@@ -3922,7 +4246,10 @@ }
}
};
void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
template <class MarkingStateProvider>
void UpdateToSpacePointersInParallel(
Heap* heap, base::Semaphore* semaphore,
const MarkingStateProvider& marking_state_provider) {
PageParallelJob<ToSpacePointerUpdateJobTraits> job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
@@ -3931,7 +4258,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
job.AddPage(page, std::make_pair(start, end));
job.AddPage(page, {start, end, marking_state_provider.marking_state(page)});
}
PointersUpdatingVisitor visitor;
int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
@@ -3945,7 +4272,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
*this);
// Update roots.
PointersUpdatingVisitor updating_visitor;
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
@@ -3971,6 +4299,36 @@ }
}
}
void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor;
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
*this);
// TODO(mlippautz): Iteration mode is not optimal as we process all
// global handles. Find a way to only process the ones related to new
// space.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
}
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessWeakListRoots(&evacuation_object_retainer);
// Update pointers from external string table.
heap()->UpdateNewSpaceReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
heap()->IterateEncounteredWeakCollections(&updating_visitor);
}
}
void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : old_space_evacuation_pages_) {
@@ -4086,7 +4444,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
DCHECK(p->SweepingDone());
if (p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
// Will be processed in Evacuate.
DCHECK(evacuation_candidates_.length() > 0);
continue;
}
@@ -305,7 +305,19 @@ class MarkCompactCollectorBase {
protected:
explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
// Marking operations for objects reachable from roots.
virtual void MarkLiveObjects() = 0;
// Mark objects reachable (transitively) from objects in the marking
// stack.
virtual void EmptyMarkingDeque() = 0;
virtual void ProcessMarkingDeque() = 0;
// Clear non-live references held in side data structures.
virtual void ClearNonLiveReferences() = 0;
virtual void EvacuatePrologue() = 0;
virtual void EvacuateEpilogue() = 0;
virtual void Evacuate() = 0;
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
@@ -313,7 +325,8 @@ class MarkCompactCollectorBase {
template <class Evacuator, class Collector>
void CreateAndExecuteEvacuationTasks(
Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes,
const int& abandoned_pages);
Heap* heap_;
@@ -323,7 +336,9 @@ class MarkCompactCollectorBase {
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
explicit MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap), marking_deque_(heap) {}
: MarkCompactCollectorBase(heap),
marking_deque_(heap),
page_parallel_job_semaphore_(0) {}
MarkingState marking_state(HeapObject* object) const override {
return MarkingState::External(object);
@@ -347,10 +362,19 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
void MarkLiveObjects() override;
void ProcessMarkingDeque();
void EmptyMarkingDeque();
void ProcessMarkingDeque() override;
void EmptyMarkingDeque() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
void EvacuateEpilogue() override;
void Evacuate() override;
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
MarkingDeque marking_deque_;
base::Semaphore page_parallel_job_semaphore_;
List<Page*> new_space_evacuation_pages_;
friend class StaticYoungGenerationMarkingVisitor;
};
@@ -557,7 +581,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
@@ -579,9 +602,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
void ProcessMarkingDeque();
void ProcessMarkingDeque() override;
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
@@ -599,11 +620,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Collects a list of dependent code from maps embedded in optimize code.
DependentCode* DependentCodeListFromNonLiveMaps();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
void EmptyMarkingDeque();
// This function empties the marking stack, but may leave overflowed objects
// in the heap, in which case the marking stack's overflow flag will be set.
void EmptyMarkingDeque() override;
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
@@ -624,7 +643,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences();
void ClearNonLiveReferences() override;
void MarkDependentCodeForDeoptimization(DependentCode* list);
// Find non-live targets of simple transitions in the given list. Clear
// transitions to non-live targets and if needed trim descriptors arrays.
@@ -663,13 +682,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void StartSweepSpaces();
void StartSweepSpace(PagedSpace* space);
void EvacuatePrologue();
void EvacuateEpilogue();
void EvacuatePagesInParallel();
void EvacuateNewSpaceAndCandidates();
void UpdatePointersAfterEvacuation();
void EvacuatePrologue() override;
void EvacuateEpilogue() override;
void Evacuate() override;
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
void ReleaseEvacuationCandidates();
@@ -337,8 +337,13 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
Page* FreeListCategory::page() {
return Page::FromAddress(reinterpret_cast<Address>(this));
Page* FreeListCategory::page() const {
return Page::FromAddress(
reinterpret_cast<Address>(const_cast<FreeListCategory*>(this)));
}
Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
return top(type) ? top(type)->page() : nullptr;
}
FreeList* FreeListCategory::owner() {
@@ -1356,6 +1356,39 @@ bool PagedSpace::ContainsSlow(Address addr) {
return false;
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::LockGuard<base::Mutex> guard(mutex());
// Check for pages that still contain free list entries. Bail out rather
// than taking a page from a category smaller than the one required for
// |size_in_bytes|.
const int minimum_category =
static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
Page* page = free_list()->GetPageForCategoryType(kHuge);
if (!page && static_cast<int>(kLarge) >= minimum_category)
page = free_list()->GetPageForCategoryType(kLarge);
if (!page && static_cast<int>(kMedium) >= minimum_category)
page = free_list()->GetPageForCategoryType(kMedium);
if (!page && static_cast<int>(kSmall) >= minimum_category)
page = free_list()->GetPageForCategoryType(kSmall);
if (!page) return nullptr;
AccountUncommitted(page->size());
accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
accounting_stats_.DecreaseCapacity(page->area_size());
page->Unlink();
UnlinkFreeListCategories(page);
return page;
}
void PagedSpace::AddPage(Page* page) {
AccountCommitted(page->size());
accounting_stats_.IncreaseCapacity(page->area_size());
accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
page->set_owner(this);
RelinkFreeListCategories(page);
page->InsertAfter(anchor()->prev_page());
}
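Note: the intended pairing, exercised by the compaction space slow path later
in this CL (PagedSpace::RawSlowAllocateRaw), is roughly the following sketch:

  // A CompactionSpace steals a page from the main space of the same
  // identity. RemovePageSafe() serializes on the main space's mutex;
  // AddPage() re-links the page's free list categories into |this|.
  PagedSpace* main_space = heap()->paged_space(identity());
  Page* page = main_space->RemovePageSafe(size_in_bytes);
  if (page != nullptr) AddPage(page);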
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1366,11 +1399,17 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
AccountUncommitted(unused);
// Do not account for the unused space as uncommitted because the counter
// is kept in sync with the page size, which is also not adjusted for those
// chunks.
}
}
bool PagedSpace::Expand() {
// Always lock against the main space, as capacity and pages can only be
// adjusted concurrently through the main paged space.
base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
@@ -2937,6 +2976,17 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
// only happen when we are evacuating for the young generation.
PagedSpace* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
HeapObject* object =
free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
@@ -190,6 +190,7 @@ class FreeListCategory {
FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
inline Page* page() const;
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
size_t available() const { return available_; }
@@ -204,8 +205,6 @@
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
inline Page* page();
FreeSpace* top() { return top_; }
void set_top(FreeSpace* top) { top_ = top; }
FreeListCategory* prev() { return prev_; }
@@ -1722,6 +1721,21 @@
return maximum_freed;
}
static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
return kTiny;
} else if (size_in_bytes <= kSmallListMax) {
return kSmall;
} else if (size_in_bytes <= kMediumListMax) {
return kMedium;
} else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
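Note: making this helper public and static lets PagedSpace::RemovePageSafe
(earlier in this CL) compute the minimum acceptable category. The probe order
used there can be summarized by the following sketch (FindPageForAllocation is
a made-up name, not part of this CL; category enum values increase with size):

  Page* FindPageForAllocation(FreeList* free_list, size_t size_in_bytes) {
    const int minimum =
        static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
    // Prefer pages whose free list entries are largest; never use a
    // category below |minimum| and skip the tiny categories entirely.
    for (int t = kHuge; t >= kSmall && t >= minimum; --t) {
      Page* page = free_list->GetPageForCategoryType(
          static_cast<FreeListCategoryType>(t));
      if (page != nullptr) return page;
    }
    return nullptr;
  }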
explicit FreeList(PagedSpace* owner);
// Adds a node on the free list. The block of size {size_in_bytes} starting
@@ -1793,6 +1807,9 @@
void RemoveCategory(FreeListCategory* category);
void PrintCategories(FreeListCategoryType type);
// Returns a page containing an entry for a given type, or nullptr otherwise.
inline Page* GetPageForCategoryType(FreeListCategoryType type);
#ifdef DEBUG
size_t SumFreeLists();
bool IsVeryLong();
@@ -1846,21 +1863,6 @@
FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
size_t minimum_size);
FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
return kTiny;
} else if (size_in_bytes <= kSmallListMax) {
return kSmall;
} else if (size_in_bytes <= kMediumListMax) {
return kMedium;
} else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
// The tiny categories are not used for fast allocation.
FreeListCategoryType SelectFastAllocationFreeListCategoryType(
size_t size_in_bytes) {
@@ -1874,7 +1876,9 @@
return kHuge;
}
FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
FreeListCategory* top(FreeListCategoryType type) const {
return categories_[type];
}
PagedSpace* owner_;
base::AtomicNumber<size_t> wasted_bytes_;
@@ -2150,6 +2154,11 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void AddPage(Page* page);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -21,6 +21,10 @@ namespace {
v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
int max_semi_space_size = 8) {
// Parallel evacuation messes with fragmentation in a way that objects that
// should be copied in semi space are promoted to old space because of
// fragmentation.
i::FLAG_parallel_compaction = false;
i::FLAG_page_promotion = true;
i::FLAG_page_promotion_threshold = 0; // %
i::FLAG_min_semi_space_size = min_semi_space_size;
@@ -471,7 +471,7 @@ TEST(SizeOfInitialHeap) {
page_count[i] = heap->paged_space(i)->CountTotalPages();
// Check that the initial heap is also below the limit.
CHECK_LT(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
CHECK_LE(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
}
// Executing the empty script gets by with the same number of pages, i.e.,
@@ -1190,6 +1190,9 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
};
TEST(InternalizeExternal) {
// TODO(mlippautz): Remove once we add support for forwarding ThinStrings in
// minor MC.
if (FLAG_minor_mc) return;
FLAG_thin_strings = true;
CcTest::InitializeVM();
i::Isolate* isolate = CcTest::i_isolate();
@@ -22,6 +22,12 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
EXPECT_TRUE(compaction_space != NULL);
EXPECT_TRUE(compaction_space->SetUp());
for (Page* p : *old_space) {
// Unlink free lists from the main space to avoid reusing the memory for
// compaction spaces.
old_space->UnlinkFreeListCategories(p);
}
// Cannot loop until "Available()" since we initially have 0 bytes available
// and would thus neither grow, nor be able to allocate an object.
const int kNumObjects = 10;