Commit cf37556f authored by mlippautz, committed by Commit bot

[heap] Minor MC: Implement page moving

BUG=chromium:651354

Review-Url: https://codereview.chromium.org/2855143003
Cr-Commit-Position: refs/heads/master@{#45223}
parent 540419b6
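The review carries no description beyond the title, so a short orientation: this patch teaches the minor (young-generation) mark-compact collector to move whole new-space pages instead of always evacuating objects individually. Dense pages are either promoted to old space (PAGE_NEW_OLD_PROMOTION, when the page lies entirely below the age mark) or kept in new space (PAGE_NEW_NEW_PROMOTION); a moved page keeps its external markbits and is tagged SWEEP_TO_ITERATE so it can still be iterated before it has been made iterable. The sketch below is only a simplified, self-contained model of the decision implemented in MarkCompactCollectorBase::ShouldMovePage and the evacuation loops further down; the identifiers PageStats, kPageEvacuationThreshold, and DecideEvacuationMode are illustrative stand-ins, not names from the diff.

  #include <cstddef>

  enum class EvacuationMode { kEvacuateObjects, kMoveToOldSpace, kKeepInNewSpace };

  struct PageStats {
    std::size_t live_bytes;  // marked live bytes on the page
    bool never_evacuate;     // page is pinned and may not be moved
    bool contains_age_mark;  // page straddles the new-space age mark
    bool below_age_mark;     // whole page lies below the age mark
  };

  // Hypothetical stand-in for Evacuator::PageEvacuationThreshold().
  constexpr std::size_t kPageEvacuationThreshold = 32 * 1024;

  EvacuationMode DecideEvacuationMode(const PageStats& page, bool reduce_memory,
                                      bool can_expand_old_generation) {
    // Mirrors the ShouldMovePage() heuristic: move only if memory reduction is
    // off, the page is movable and dense enough, it does not contain the age
    // mark, and old space has room to grow.
    const bool should_move = !reduce_memory && !page.never_evacuate &&
                             page.live_bytes > kPageEvacuationThreshold &&
                             !page.contains_age_mark && can_expand_old_generation;
    if (!should_move) return EvacuationMode::kEvacuateObjects;
    // Pages entirely below the age mark have already survived a GC and are
    // promoted wholesale; younger pages are moved within new space.
    return page.below_age_mark ? EvacuationMode::kMoveToOldSpace
                               : EvacuationMode::kKeepInNewSpace;
  }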
......@@ -5519,8 +5519,7 @@ bool Heap::SetUp() {
#else
concurrent_marking_ = new ConcurrentMarking(this, nullptr);
#endif
- if (FLAG_minor_mc)
-   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
+ minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
if (V8_UNLIKELY(FLAG_gc_stats)) {
......
......@@ -2409,6 +2409,7 @@ class Heap {
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
friend class MarkCompactCollector;
friend class MarkCompactCollectorBase;
friend class MinorMarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
......
......@@ -821,7 +821,23 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
(obj->IsFiller() &&
ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj))));
return dest;
} else if (heap_->InToSpace(obj)) {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
return ObjectMarking::IsBlack<kAtomicity>(obj,
MarkingState::External(obj))
? obj
: nullptr;
} else {
// The object may be on a page that was moved from new to old space.
if (Page::FromAddress(obj->address())
->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
return ObjectMarking::IsBlack<kAtomicity>(obj,
MarkingState::External(obj))
? obj
: nullptr;
}
DCHECK(ObjectMarking::IsGrey<kAtomicity>(obj, marking_state(obj)) ||
(obj->IsFiller() &&
ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj))) ||
......
......@@ -447,6 +447,8 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
MarkLiveObjects();
DCHECK(heap_->incremental_marking()->IsStopped());
......@@ -1458,6 +1460,16 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
ProcessMarkingDeque();
}
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
for (Page* p : sweep_to_iterate_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
p->ClearFlag(Page::SWEEP_TO_ITERATE);
marking_state(p).ClearLiveness();
}
}
sweep_to_iterate_pages_.clear();
}
class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
public:
explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
......@@ -2588,6 +2600,8 @@ void MinorMarkCompactCollector::EmptyMarkingDeque() {
void MinorMarkCompactCollector::CollectGarbage() {
heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
CleanupSweepToIteratePages();
MarkLiveObjects();
ClearNonLiveReferences();
#ifdef VERIFY_HEAP
......@@ -2611,6 +2625,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS);
for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
heap()->new_space()->FromSpaceEnd())) {
DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
marking_state(p).ClearLiveness();
}
}
......@@ -2618,6 +2633,56 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->account_external_memory_concurrently_freed();
}
void MinorMarkCompactCollector::MakeIterable(
Page* p, MarkingTreatmentMode marking_mode,
FreeSpaceTreatmentMode free_space_mode) {
// We have to clear the full collector's markbits for the areas that we
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
LiveObjectIterator<kBlackObjects> it(p, marking_state(p));
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
DCHECK(ObjectMarking::IsBlack(object, marking_state(object)));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
full_collector->marking_state(p).bitmap()->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
free_start = free_end + size;
}
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
full_collector->marking_state(p).bitmap()->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
if (marking_mode == MarkingTreatmentMode::CLEAR) {
marking_state(p).ClearLiveness();
p->ClearFlag(Page::SWEEP_TO_ITERATE);
}
}
void MinorMarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
......@@ -2680,7 +2745,15 @@ void MinorMarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
- // TODO(mlippautz): Implement page promotion.
+ for (Page* p : new_space_evacuation_pages_) {
+ if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
+ p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ p->SetFlag(Page::SWEEP_TO_ITERATE);
+ sweep_to_iterate_pages_.push_back(p);
+ }
+ }
new_space_evacuation_pages_.Rewind(0);
}
......@@ -3571,12 +3644,30 @@ bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
break;
case kPageNewToOld:
- // TODO(mlippautz): Implement page promotion.
- UNREACHABLE();
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_to_old_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ DCHECK(success);
+ new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, state);
+ if (heap()->ShouldZapGarbage())
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
break;
case kPageNewToNew:
- // TODO(mlippautz): Implement page promotion.
- UNREACHABLE();
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_to_new_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ DCHECK(success);
+ new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, state);
+ if (heap()->ShouldZapGarbage())
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
break;
case kObjectsOldToOld:
UNREACHABLE();
......@@ -3689,6 +3780,14 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
}
}
bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
(live_bytes > Evacuator::PageEvacuationThreshold()) &&
!p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}
void MarkCompactCollector::EvacuatePagesInParallel() {
PageParallelJob<EvacuationJobTraits> job(
heap_, heap_->isolate()->cancelable_task_manager(),
......@@ -3701,22 +3800,16 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
job.AddPage(page, {&abandoned_pages, marking_state(page)});
}
- const bool reduce_memory = heap()->ShouldReduceMemory();
- const Address age_mark = heap()->new_space()->age_mark();
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
live_bytes += live_bytes_on_page;
- if (!reduce_memory && !page->NeverEvacuate() &&
- (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) &&
- !page->Contains(age_mark) &&
- heap()->CanExpandOldGeneration(live_bytes_on_page)) {
+ if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
job.AddPage(page, {&abandoned_pages, marking_state(page)});
}
DCHECK_GE(job.NumberOfPages(), 1);
......@@ -3736,7 +3829,13 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = marking_state(page).live_bytes();
live_bytes += live_bytes_on_page;
- // TODO(mlippautz): Implement page promotion.
+ if (ShouldMovePage(page, live_bytes_on_page)) {
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ } else {
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
+ }
+ }
job.AddPage(page, {&abandoned_pages, marking_state(page)});
}
DCHECK_GE(job.NumberOfPages(), 1);
......@@ -4060,12 +4159,12 @@ template <RememberedSetType type>
class PointerUpdateJobTraits {
public:
typedef int PerPageData; // Per page data is not used in this job.
- typedef int PerTaskData; // Per task data is not used in this job.
+ typedef const MarkCompactCollectorBase* PerTaskData;
- static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
- PerPageData) {
- UpdateUntypedPointers(heap, chunk);
- UpdateTypedPointers(heap, chunk);
+ static bool ProcessPageInParallel(Heap* heap, PerTaskData task_data,
+ MemoryChunk* chunk, PerPageData) {
+ UpdateUntypedPointers(heap, chunk, task_data);
+ UpdateTypedPointers(heap, chunk, task_data);
return true;
}
static const bool NeedSequentialFinalization = false;
......@@ -4073,12 +4172,14 @@ class PointerUpdateJobTraits {
}
private:
- static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
+ static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
+ const MarkCompactCollectorBase* collector) {
base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
if (type == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
- return CheckAndUpdateOldToNewSlot(heap, slot);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk, [heap, collector](Address slot) {
+ return CheckAndUpdateOldToNewSlot(heap, slot, collector);
+ });
} else {
RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
return UpdateSlot(reinterpret_cast<Object**>(slot));
......@@ -4086,7 +4187,8 @@ class PointerUpdateJobTraits {
}
}
- static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
+ static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
+ const MarkCompactCollectorBase* collector) {
if (type == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
......@@ -4098,19 +4200,20 @@ class PointerUpdateJobTraits {
} else {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk,
- [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
+ chunk, [isolate, heap, collector](SlotType slot_type,
+ Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [heap](Object** slot) {
+ isolate, slot_type, slot, [heap, collector](Object** slot) {
return CheckAndUpdateOldToNewSlot(
- heap, reinterpret_cast<Address>(slot));
+ heap, reinterpret_cast<Address>(slot), collector);
});
});
}
}
- static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
- Address slot_address) {
+ static SlotCallbackResult CheckAndUpdateOldToNewSlot(
+ Heap* heap, Address slot_address,
+ const MarkCompactCollectorBase* collector) {
// There may be concurrent action on slots in dead objects. Concurrent
// sweeper threads may overwrite the slot content with a free space object.
// Moreover, the pointed-to object may also get concurrently overwritten
......@@ -4150,7 +4253,7 @@ class PointerUpdateJobTraits {
// markbits to determine liveness.
HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
if (ObjectMarking::IsBlack(heap_object,
- MarkingState::Internal(heap_object)))
+ collector->marking_state(heap_object)))
return KEEP_SLOT;
} else {
DCHECK(!heap->InNewSpace(slot_reference));
......@@ -4169,14 +4272,15 @@ int NumberOfPointerUpdateTasks(int pages) {
}
template <RememberedSetType type>
- void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
+ void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore,
+ const MarkCompactCollectorBase* collector) {
PageParallelJob<PointerUpdateJobTraits<type> > job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
RememberedSet<type>::IterateMemoryChunks(
heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
- job.Run(num_tasks, [](int i) { return 0; });
+ job.Run(num_tasks, [collector](int i) { return collector; });
}
class ToSpacePointerUpdateJobTraits {
......@@ -4263,14 +4367,16 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// Update roots.
PointersUpdatingVisitor updating_visitor;
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
+ UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
+ this);
}
{
Heap* heap = this->heap();
TRACE_GC(heap->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
- UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
+ UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_,
+ this);
}
{
......@@ -4299,7 +4405,8 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
// global handles. Find a way to only process the ones related to new
// space.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
+ UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
+ this);
}
{
......@@ -4357,7 +4464,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- const Sweeper::FreeSpaceTreatmentMode free_space_mode =
+ const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
......@@ -4442,8 +4549,9 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
- Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
- : Sweeper::IGNORE_FREE_SPACE);
+ Heap::ShouldZapGarbage()
+ ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
+ : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
continue;
}
......
......@@ -277,6 +277,8 @@ class LiveObjectVisitor BASE_EMBEDDED {
};
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum MarkingTreatmentMode { KEEP, CLEAR };
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
......@@ -322,6 +324,9 @@ class MarkCompactCollectorBase {
MigrationObserver* migration_observer, const intptr_t live_bytes,
const int& abandoned_pages);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes);
Heap* heap_;
};
......@@ -345,6 +350,10 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void TearDown() override;
void CollectGarbage() override;
void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
FreeSpaceTreatmentMode free_space_mode);
void CleanupSweepToIteratePages();
private:
class RootMarkingVisitor;
......@@ -368,6 +377,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
MarkingDeque marking_deque_;
base::Semaphore page_parallel_job_semaphore_;
List<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
friend class StaticYoungGenerationMarkingVisitor;
};
......@@ -382,7 +392,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class SweeperTask;
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
- enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
......
......@@ -56,10 +56,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
- space_->heap()
- ->mark_compact_collector()
- ->sweeper()
- .SweepOrWaitUntilSweepingCompleted(cur_page);
+ Heap* heap = space_->heap();
+ heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
+ cur_page);
+ if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
+ heap->minor_mark_compact_collector()->MakeIterable(
+ cur_page, MarkingTreatmentMode::CLEAR,
+ FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
......
......@@ -289,6 +289,10 @@ class MemoryChunk {
// |ANCHOR|: Flag is set if page is an anchor.
ANCHOR = 1u << 17,
// |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
// to iterate the page.
SWEEP_TO_ITERATE = 1u << 18,
};
typedef base::Flags<Flag, uintptr_t> Flags;
......
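For readers tracing how the new flag is consumed: a moved page stays tagged SWEEP_TO_ITERATE until something needs to walk it linearly. HeapObjectIterator (spaces.cc hunk above) then calls MinorMarkCompactCollector::MakeIterable, which fills the gaps between live objects with filler objects, and the next full GC clears any leftover flags via CleanupSweepToIteratePages. The following stand-alone sketch models only the gap-filling idea; LiveObject, Gap, and MakeIterableModel are illustrative names and not part of the patch.

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  struct LiveObject {
    std::uintptr_t address;  // start address of a black (live) object
    std::size_t size;        // object size in bytes
  };

  // Stand-in for Heap::CreateFillerObjectAt(); here we merely record each gap.
  struct Gap {
    std::uintptr_t start;
    std::size_t size;
  };

  // Walk the live objects of a page in address order and describe every dead
  // gap between them, mirroring the free_start/free_end loop in MakeIterable.
  std::vector<Gap> MakeIterableModel(std::uintptr_t area_start,
                                     std::uintptr_t area_end,
                                     const std::vector<LiveObject>& live_objects) {
    std::vector<Gap> fillers;
    std::uintptr_t free_start = area_start;
    for (const LiveObject& object : live_objects) {  // assumed sorted by address
      if (object.address != free_start) {
        fillers.push_back(
            {free_start, static_cast<std::size_t>(object.address - free_start)});
      }
      free_start = object.address + object.size;
    }
    if (free_start != area_end) {
      fillers.push_back(
          {free_start, static_cast<std::size_t>(area_end - free_start)});
    }
    return fillers;
  }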