Commit 4cb6ef83 authored by Michael Lippautz, committed by Commit Bot

[heap] Move sweeper to separate file

Bug: 
Change-Id: Ie516167f047e48cda47a5dbfb156ea9ae164046c
Reviewed-on: https://chromium-review.googlesource.com/789878
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49656}
parent 888acb2f
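
The change is mechanical throughout: MarkCompactCollector previously held the Sweeper as a nested class by value and exposed it by reference (sweeper().Foo()); it now owns a heap-allocated, top-level Sweeper and exposes a pointer (sweeper()->Foo()). A minimal sketch of that ownership pattern, using simplified stand-in types rather than the real V8 classes:

// Sketch only; simplified stand-ins, not the real V8 types.
class Sweeper {
 public:
  bool sweeping_in_progress() const { return sweeping_in_progress_; }

 private:
  bool sweeping_in_progress_ = false;
};

class MarkCompactCollector {
 public:
  MarkCompactCollector() : sweeper_(new Sweeper()) {}
  ~MarkCompactCollector() { delete sweeper_; }

  // The accessor now returns a pointer, so call sites become sweeper()->...
  Sweeper* sweeper() { return sweeper_; }

 private:
  Sweeper* sweeper_;  // owned; released in the destructor
};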
......@@ -1672,6 +1672,8 @@ v8_source_set("v8_base") {
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
"src/heap/worklist.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
......
......@@ -43,6 +43,7 @@
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/sweeper.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/object-macros.h"
#include "src/objects/shared-function-info.h"
......@@ -1818,7 +1819,7 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
......@@ -1934,14 +1935,14 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
if (mark_compact_collector()->sweeper().sweeping_in_progress() &&
if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
memory_allocator_->unmapper()->NumberOfDelayedChunks() >
static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
mark_compact_collector()->EnsureSweepingCompleted();
}
// TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
......@@ -1969,17 +1970,15 @@ void Heap::Scavenge() {
}
{
MarkCompactCollector::Sweeper* sweeper =
&mark_compact_collector()->sweeper();
Sweeper* sweeper = mark_compact_collector()->sweeper();
// Pause the concurrent sweeper.
MarkCompactCollector::Sweeper::PauseOrCompleteScope pause_scope(sweeper);
Sweeper::PauseOrCompleteScope pause_scope(sweeper);
// Filter out pages from the sweeper that need to be processed for old to
// new slots by the Scavenger. After processing, the Scavenger adds back
// pages that are still unswept. This way the Scavenger has exclusive
// access to the slots of a page and can completely avoid any locks on
// the page itself.
MarkCompactCollector::Sweeper::FilterSweepingPagesScope filter_scope(
sweeper, pause_scope);
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
filter_scope.FilterOldSpaceSweepingPages(
[](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
......@@ -6613,7 +6612,7 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), code_space());
mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(page);
mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(page);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = code_space()->top();
......
......@@ -2583,6 +2583,7 @@ class Heap {
friend class PagedSpace;
friend class Scavenger;
friend class StoreBuffer;
friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;
// The allocator interface.
......
......@@ -15,6 +15,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
......@@ -867,7 +868,7 @@ void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
!heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
!heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
......
......@@ -26,6 +26,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/transitions-inl.h"
......@@ -462,10 +463,12 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
black_allocation_(false),
have_code_to_deoptimize_(false),
marking_worklist_(heap),
sweeper_(heap, non_atomic_marking_state()) {
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
old_to_new_slots_ = -1;
}
MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
......@@ -608,228 +611,10 @@ void MarkCompactCollector::ClearMarkbits() {
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}
MarkCompactCollector::Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(
MarkCompactCollector::Sweeper* sweeper)
: sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_.SetValue(true);
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->AbortAndWaitForTasks();
// Complete sweeping if there's nothing more to do.
if (sweeper_->IsDoneSweeping()) {
sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(!sweeper_->sweeping_in_progress());
} else {
// Unless sweeping is complete the flag still indicates that the sweeper
// is enabled. It just cannot use tasks anymore.
DCHECK(sweeper_->sweeping_in_progress());
}
}
MarkCompactCollector::Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
sweeper_->stop_sweeper_tasks_.SetValue(false);
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
}
MarkCompactCollector::Sweeper::FilterSweepingPagesScope::
FilterSweepingPagesScope(
MarkCompactCollector::Sweeper* sweeper,
const PauseOrCompleteScope& pause_or_complete_scope)
: sweeper_(sweeper),
pause_or_complete_scope_(pause_or_complete_scope),
sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
USE(pause_or_complete_scope_);
if (!sweeping_in_progress_) return;
old_space_sweeping_list_ = std::move(sweeper_->sweeping_list_[OLD_SPACE]);
sweeper_->sweeping_list_[OLD_SPACE].clear();
}
MarkCompactCollector::Sweeper::FilterSweepingPagesScope::
~FilterSweepingPagesScope() {
DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
if (!sweeping_in_progress_) return;
sweeper_->sweeping_list_[OLD_SPACE] = std::move(old_space_sweeping_list_);
// old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
public:
SweeperTask(Isolate* isolate, Sweeper* sweeper,
base::Semaphore* pending_sweeper_tasks,
base::AtomicNumber<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
: CancelableTask(isolate),
sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start) {}
virtual ~SweeperTask() {}
private:
void RunInternal() final {
DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
const int offset = space_to_start_ - FIRST_SPACE;
const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
// Do not sweep code space concurrently.
if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
DCHECK_GE(space_id, FIRST_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
}
num_sweeping_tasks_->Decrement(1);
pending_sweeper_tasks_->Signal();
}
Sweeper* const sweeper_;
base::Semaphore* const pending_sweeper_tasks_;
base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
class MarkCompactCollector::Sweeper::IncrementalSweeperTask final
: public CancelableTask {
public:
IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
: CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
virtual ~IncrementalSweeperTask() {}
private:
void RunInternal() final {
VMState<GC> state(isolate_);
TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
sweeper_->incremental_sweeper_pending_ = false;
if (sweeper_->sweeping_in_progress()) {
if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
sweeper_->ScheduleIncrementalSweepingTask();
}
}
}
Isolate* const isolate_;
Sweeper* const sweeper_;
DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};
void MarkCompactCollector::Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_.Value());
sweeping_in_progress_ = true;
NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
[marking_state](Page* a, Page* b) {
return marking_state->live_bytes(a) <
marking_state->live_bytes(b);
});
});
}
void MarkCompactCollector::Sweeper::StartSweeperTasks() {
DCHECK_EQ(0, num_tasks_);
DCHECK_EQ(0, num_sweeping_tasks_.Value());
if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
SweeperTask* task = new SweeperTask(heap_->isolate(), this,
&pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
task_ids_[num_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
});
ScheduleIncrementalSweepingTask();
}
}
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
if (!page->SweepingDone()) {
ParallelSweepPage(page, page->owner()->identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
// thread to be done with this page.
page->WaitUntilSweepingCompleted();
}
}
}
Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
SweptList& list = swept_list_[space->identity()];
if (!list.empty()) {
auto last_page = list.back();
list.pop_back();
return last_page;
}
return nullptr;
}
void MarkCompactCollector::Sweeper::AbortAndWaitForTasks() {
if (!FLAG_concurrent_sweeping) return;
for (int i = 0; i < num_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
CancelableTaskManager::kTaskAborted) {
pending_sweeper_tasks_semaphore_.Wait();
} else {
// Aborted case.
num_sweeping_tasks_.Decrement(1);
}
}
num_tasks_ = 0;
DCHECK_EQ(0, num_sweeping_tasks_.Value());
}
void MarkCompactCollector::Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
AbortAndWaitForTasks();
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) {
swept_list_[NEW_SPACE].clear();
}
DCHECK(sweeping_list_[space].empty());
});
sweeping_in_progress_ = false;
}
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
for (Page* p : *heap_->new_space()) {
SweepOrWaitUntilSweepingCompleted(p);
}
}
}
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper().sweeping_in_progress()) return;
if (!sweeper()->sweeping_in_progress()) return;
sweeper().EnsureCompleted();
sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
......@@ -845,10 +630,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0;
}
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
......@@ -1143,7 +924,7 @@ void MarkCompactCollector::Finish() {
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
sweeper().StartSweeperTasks();
sweeper()->StartSweeperTasks();
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash();
......@@ -2365,7 +2146,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
heap()->mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
CleanupSweepToIteratePages();
}
......@@ -3576,149 +3357,6 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
int MarkCompactCollector::Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
// TODO(ulan): we don't have to clear typed old-to-old slots in code space
// because the concurrent marker doesn't mark code objects. This requires
// the write barrier for code objects to check the color of the code object.
bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
p->typed_slot_set<OLD_TO_OLD>() != nullptr;
// The free ranges map is used for filtering typed slots.
std::map<uint32_t, uint32_t> free_ranges;
// Before we sweep objects on the page, we free dead array buffers which
// requires valid mark bits.
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
// deoptimizer.
const bool rebuild_skip_list =
space->identity() == CODE_SPACE && p->skip_list() != nullptr;
SkipList* skip_list = p->skip_list();
if (rebuild_skip_list) {
skip_list->Clear();
}
intptr_t live_bytes = 0;
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
int curr_region = -1;
// Set the allocated_bytes counter to area_size. The free operations below
// will decrease the counter to actual live bytes.
p->ResetAllocatedBytes();
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject* const object = object_and_size.first;
DCHECK(marking_state_->IsBlack(object));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
free_ranges.insert(std::pair<uint32_t, uint32_t>(
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(free_end - p->address())));
}
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
live_bytes += size;
if (rebuild_skip_list) {
int new_region_start = SkipList::RegionNumber(free_end);
int new_region_end =
SkipList::RegionNumber(free_end + size - kPointerSize);
if (new_region_start != curr_region || new_region_end != curr_region) {
skip_list->AddObject(free_end, size);
curr_region = new_region_end;
}
}
free_start = free_end + size;
}
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
free_ranges.insert(std::pair<uint32_t, uint32_t>(
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(p->area_end() - p->address())));
}
}
// Clear invalid typed slots after collecting all free ranges.
if (!free_ranges.empty()) {
TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
if (old_to_new != nullptr) {
old_to_new->RemoveInvaldSlots(free_ranges);
}
TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
if (old_to_old != nullptr) {
old_to_old->RemoveInvaldSlots(free_ranges);
}
}
marking_state_->bitmap(p)->Clear();
if (free_list_mode == IGNORE_FREE_LIST) {
marking_state_->SetLiveBytes(p, 0);
// We did not free memory, so have to adjust allocated bytes here.
intptr_t freed_bytes = p->area_size() - live_bytes;
p->DecreaseAllocatedBytes(freed_bytes);
} else {
// Keep the old live bytes counter of the page until RefillFreeList, where
// the space size is refined.
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
return code->is_optimized_code() && code->marked_for_deoptimization();
......@@ -3803,12 +3441,6 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
marking_state->SetLiveBytes(chunk, new_live_size);
}
void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[space->identity()].push_back(page);
}
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
......@@ -3845,12 +3477,12 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
sweeper().AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
sweeper().AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
}
}
new_space_evacuation_pages_.clear();
......@@ -3862,7 +3494,7 @@ void MarkCompactCollector::Evacuate() {
SkipList* list = p->skip_list();
if (list != nullptr) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper().AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
......@@ -3874,7 +3506,7 @@ void MarkCompactCollector::Evacuate() {
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
FullEvacuationVerifier verifier(heap());
verifier.Run();
}
......@@ -4461,136 +4093,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
compacting_ = false;
}
void MarkCompactCollector::Sweeper::SweepSpaceFromTask(
AllocationSpace identity) {
Page* page = nullptr;
while (!stop_sweeper_tasks_.Value() &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
ParallelSweepPage(page, identity);
}
}
bool MarkCompactCollector::Sweeper::SweepSpaceIncrementallyFromTask(
AllocationSpace identity) {
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
return sweeping_list_[identity].empty();
}
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int required_freed_bytes,
int max_pages) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed = ParallelSweepPage(page, identity);
pages_freed += 1;
DCHECK_GE(freed, 0);
max_freed = Max(max_freed, freed);
if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
return max_freed;
if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
}
return max_freed;
}
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
AllocationSpace identity) {
// Early bailout for pages that are swept outside of the regular sweeping
// path. This check avoids taking the lock first, preventing deadlocks.
if (page->SweepingDone()) return 0;
int max_freed = 0;
{
base::LockGuard<base::Mutex> guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
if (page->SweepingDone()) return 0;
// If the page is a code page, the CodePageMemoryModificationScope changes
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
} else {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
if (typed_slot_set) {
typed_slot_set->FreeToBeFreedChunks();
}
SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
if (slot_set) {
slot_set->FreeToBeFreedBuckets();
}
}
{
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[identity].push_back(page);
}
return max_freed;
}
void MarkCompactCollector::Sweeper::ScheduleIncrementalSweepingTask() {
if (!incremental_sweeper_pending_) {
incremental_sweeper_pending_ = true;
IncrementalSweeperTask* task =
new IncrementalSweeperTask(heap_->isolate(), this);
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
}
}
void MarkCompactCollector::Sweeper::AddPage(
AllocationSpace space, Page* page,
MarkCompactCollector::Sweeper::AddPageMode mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
PrepareToBeSweptPage(space, page);
} else {
// Page has been temporarily removed from the sweeper. Accounting already
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
sweeping_list_[space].push_back(page);
}
void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
if (space != NEW_SPACE) {
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
}
Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
Page* page = nullptr;
if (!sweeping_list_[space].empty()) {
page = sweeping_list_[space].front();
sweeping_list_[space].pop_front();
}
return page;
}
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
......@@ -4614,7 +4116,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
sweeper().RawSweep(p, Sweeper::IGNORE_FREE_LIST,
sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
Heap::ShouldZapGarbage()
? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
: FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
......@@ -4636,7 +4138,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
unused_page_present = true;
}
sweeper().AddPage(space->identity(), p, Sweeper::REGULAR);
sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
will_be_swept++;
}
......@@ -4668,7 +4170,7 @@ void MarkCompactCollector::StartSweepSpaces() {
GCTracer::Scope::MC_SWEEP_MAP);
StartSweepSpace(heap()->map_space());
}
sweeper().StartSweeping();
sweeper()->StartSweeping();
}
}
......
......@@ -11,6 +11,7 @@
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
namespace v8 {
......@@ -243,7 +244,6 @@ class LiveObjectVisitor : AllStatic {
};
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
......@@ -617,149 +617,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
class Sweeper {
public:
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
// Pauses the sweeper tasks or completes sweeping.
class PauseOrCompleteScope final {
public:
explicit PauseOrCompleteScope(Sweeper* sweeper);
~PauseOrCompleteScope();
private:
Sweeper* const sweeper_;
};
// Temporarily filters old space sweeping lists. Requires the concurrent
// sweeper to be paused. Allows for pages to be added to the sweeper while
// in this scope. Note that the original list of sweeping pages is restored
// after exiting this scope.
class FilterSweepingPagesScope final {
public:
explicit FilterSweepingPagesScope(
Sweeper* sweeper,
const PauseOrCompleteScope& pause_or_complete_scope);
~FilterSweepingPagesScope();
template <typename Callback>
void FilterOldSpaceSweepingPages(Callback callback) {
if (!sweeping_in_progress_) return;
SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
// Iteration here is from most free space to least free space.
for (auto it = old_space_sweeping_list_.begin();
it != old_space_sweeping_list_.end(); it++) {
if (callback(*it)) {
sweeper_list->push_back(*it);
}
}
}
private:
Sweeper* const sweeper_;
SweepingList old_space_sweeping_list_;
const PauseOrCompleteScope& pause_or_complete_scope_;
bool sweeping_in_progress_;
};
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
CLEAR_TYPED_SLOTS
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
Sweeper(Heap* heap,
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
stop_sweeper_tasks_(false) {}
bool sweeping_in_progress() const { return sweeping_in_progress_; }
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
void ScheduleIncrementalSweepingTask();
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
void StartSweeping();
void StartSweeperTasks();
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
Page* GetSweptPageSafe(PagedSpace* space);
private:
class IncrementalSweeperTask;
class SweeperTask;
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
static const int kMaxSweeperTasks = kAllocationSpaces;
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
for (int i = 0; i < kAllocationSpaces; i++) {
callback(static_cast<AllocationSpace>(i));
}
}
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
for (int i = 0; i < kAllocationSpaces; i++) {
if (!sweeping_list_[i].empty()) return false;
}
return true;
}
void SweepSpaceFromTask(AllocationSpace identity);
// Incrementally sweeps one page from the given space. Returns true if
// there are no more pages to sweep in the given space.
bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
void AbortAndWaitForTasks();
Page* GetSweepingPageSafe(AllocationSpace space);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
Heap* const heap_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
int num_tasks_;
CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// The counter is actively maintained by the concurrent tasks so that the
// main thread does not have to query the semaphore to track the task count.
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
base::AtomicValue<bool> stop_sweeper_tasks_;
};
enum IterationMode {
kKeepMarking,
kClearMarkbits,
......@@ -813,7 +670,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EnsureSweepingCompleted();
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
......@@ -831,7 +688,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
Sweeper& sweeper() { return sweeper_; }
Sweeper* sweeper() { return sweeper_; }
#ifdef DEBUG
// Checks whether performing mark-compact collection.
......@@ -850,6 +707,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
private:
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
bool WillBeDeoptimized(Code* code);
......@@ -1015,7 +873,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::vector<Page*> new_space_evacuation_pages_;
std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
Sweeper sweeper_;
Sweeper* sweeper_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
......
......@@ -9,6 +9,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
namespace v8 {
......@@ -89,9 +90,9 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner()->identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper().AddPage(
heap()->mark_compact_collector()->sweeper()->AddPage(
space, reinterpret_cast<Page*>(page),
MarkCompactCollector::Sweeper::READD_TEMPORARY_REMOVED_PAGE);
Sweeper::READD_TEMPORARY_REMOVED_PAGE);
}
}
......
......@@ -15,6 +15,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
......@@ -56,7 +57,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Page* cur_page = *(current_page_++);
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
heap->mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(
cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
......@@ -412,7 +413,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
return !chunk->InNewSpace() || mc == nullptr ||
!mc->sweeper().sweeping_in_progress();
!mc->sweeper()->sweeping_in_progress();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
......@@ -1437,7 +1438,7 @@ void PagedSpace::RefillFreeList() {
size_t added = 0;
{
Page* p = nullptr;
while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
......@@ -3201,7 +3202,7 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
collector->sweeper().ParallelSweepSpace(identity(), 0);
collector->sweeper()->ParallelSweepSpace(identity(), 0);
RefillFreeList();
return free_list_.Allocate(size_in_bytes);
}
......@@ -3228,7 +3229,7 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_local() &&
!collector->sweeper().AreSweeperTasksRunning()) {
!collector->sweeper()->AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
......@@ -3240,7 +3241,7 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper().ParallelSweepSpace(
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects-inl.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_.SetValue(true);
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->AbortAndWaitForTasks();
// Complete sweeping if there's nothing more to do.
if (sweeper_->IsDoneSweeping()) {
sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(!sweeper_->sweeping_in_progress());
} else {
// Unless sweeping is complete the flag still indicates that the sweeper
// is enabled. It just cannot use tasks anymore.
DCHECK(sweeper_->sweeping_in_progress());
}
}
Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
sweeper_->stop_sweeper_tasks_.SetValue(false);
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
}
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
: sweeper_(sweeper),
pause_or_complete_scope_(pause_or_complete_scope),
sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
USE(pause_or_complete_scope_);
if (!sweeping_in_progress_) return;
old_space_sweeping_list_ = std::move(sweeper_->sweeping_list_[OLD_SPACE]);
sweeper_->sweeping_list_[OLD_SPACE].clear();
}
Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
if (!sweeping_in_progress_) return;
sweeper_->sweeping_list_[OLD_SPACE] = std::move(old_space_sweeping_list_);
// old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
class Sweeper::SweeperTask final : public CancelableTask {
public:
SweeperTask(Isolate* isolate, Sweeper* sweeper,
base::Semaphore* pending_sweeper_tasks,
base::AtomicNumber<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
: CancelableTask(isolate),
sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start) {}
virtual ~SweeperTask() {}
private:
void RunInternal() final {
DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
const int offset = space_to_start_ - FIRST_SPACE;
const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
// Do not sweep code space concurrently.
if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
DCHECK_GE(space_id, FIRST_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
}
num_sweeping_tasks_->Decrement(1);
pending_sweeper_tasks_->Signal();
}
Sweeper* const sweeper_;
base::Semaphore* const pending_sweeper_tasks_;
base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
class Sweeper::IncrementalSweeperTask final : public CancelableTask {
public:
IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
: CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
virtual ~IncrementalSweeperTask() {}
private:
void RunInternal() final {
VMState<GC> state(isolate_);
TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
sweeper_->incremental_sweeper_pending_ = false;
if (sweeper_->sweeping_in_progress()) {
if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
sweeper_->ScheduleIncrementalSweepingTask();
}
}
}
Isolate* const isolate_;
Sweeper* const sweeper_;
DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};
void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_.Value());
sweeping_in_progress_ = true;
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
[marking_state](Page* a, Page* b) {
return marking_state->live_bytes(a) <
marking_state->live_bytes(b);
});
});
}
void Sweeper::StartSweeperTasks() {
DCHECK_EQ(0, num_tasks_);
DCHECK_EQ(0, num_sweeping_tasks_.Value());
if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
SweeperTask* task = new SweeperTask(heap_->isolate(), this,
&pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
task_ids_[num_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
});
ScheduleIncrementalSweepingTask();
}
}
void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
if (!page->SweepingDone()) {
ParallelSweepPage(page, page->owner()->identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
// thread to be done with this page.
page->WaitUntilSweepingCompleted();
}
}
}
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
SweptList& list = swept_list_[space->identity()];
if (!list.empty()) {
auto last_page = list.back();
list.pop_back();
return last_page;
}
return nullptr;
}
void Sweeper::AbortAndWaitForTasks() {
if (!FLAG_concurrent_sweeping) return;
for (int i = 0; i < num_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
CancelableTaskManager::kTaskAborted) {
pending_sweeper_tasks_semaphore_.Wait();
} else {
// Aborted case.
num_sweeping_tasks_.Decrement(1);
}
}
num_tasks_ = 0;
DCHECK_EQ(0, num_sweeping_tasks_.Value());
}
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
AbortAndWaitForTasks();
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) {
swept_list_[NEW_SPACE].clear();
}
DCHECK(sweeping_list_[space].empty());
});
sweeping_in_progress_ = false;
}
void Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
for (Page* p : *heap_->new_space()) {
SweepOrWaitUntilSweepingCompleted(p);
}
}
}
bool Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0;
}
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
// TODO(ulan): we don't have to clear typed old-to-old slots in code space
// because the concurrent marker doesn't mark code objects. This requires
// the write barrier for code objects to check the color of the code object.
bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
p->typed_slot_set<OLD_TO_OLD>() != nullptr;
// The free ranges map is used for filtering typed slots.
std::map<uint32_t, uint32_t> free_ranges;
// Before we sweep objects on the page, we free dead array buffers which
// requires valid mark bits.
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
// deoptimizer.
const bool rebuild_skip_list =
space->identity() == CODE_SPACE && p->skip_list() != nullptr;
SkipList* skip_list = p->skip_list();
if (rebuild_skip_list) {
skip_list->Clear();
}
intptr_t live_bytes = 0;
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
int curr_region = -1;
// Set the allocated_bytes counter to area_size. The free operations below
// will decrease the counter to actual live bytes.
p->ResetAllocatedBytes();
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject* const object = object_and_size.first;
DCHECK(marking_state_->IsBlack(object));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
free_ranges.insert(std::pair<uint32_t, uint32_t>(
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(free_end - p->address())));
}
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
live_bytes += size;
if (rebuild_skip_list) {
int new_region_start = SkipList::RegionNumber(free_end);
int new_region_end =
SkipList::RegionNumber(free_end + size - kPointerSize);
if (new_region_start != curr_region || new_region_end != curr_region) {
skip_list->AddObject(free_end, size);
curr_region = new_region_end;
}
}
free_start = free_end + size;
}
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
free_ranges.insert(std::pair<uint32_t, uint32_t>(
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(p->area_end() - p->address())));
}
}
// Clear invalid typed slots after collecting all free ranges.
if (!free_ranges.empty()) {
TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
if (old_to_new != nullptr) {
old_to_new->RemoveInvaldSlots(free_ranges);
}
TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
if (old_to_old != nullptr) {
old_to_old->RemoveInvaldSlots(free_ranges);
}
}
marking_state_->bitmap(p)->Clear();
if (free_list_mode == IGNORE_FREE_LIST) {
marking_state_->SetLiveBytes(p, 0);
// We did not free memory, so have to adjust allocated bytes here.
intptr_t freed_bytes = p->area_size() - live_bytes;
p->DecreaseAllocatedBytes(freed_bytes);
} else {
// Keep the old live bytes counter of the page until RefillFreeList, where
// the space size is refined.
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::AddSweptPageSafe(PagedSpace* space, Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[space->identity()].push_back(page);
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
while (!stop_sweeper_tasks_.Value() &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
ParallelSweepPage(page, identity);
}
}
bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
return sweeping_list_[identity].empty();
}
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
int required_freed_bytes, int max_pages) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed = ParallelSweepPage(page, identity);
pages_freed += 1;
DCHECK_GE(freed, 0);
max_freed = Max(max_freed, freed);
if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
return max_freed;
if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
}
return max_freed;
}
int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// Early bailout for pages that are swept outside of the regular sweeping
// path. This check avoids taking the lock first, preventing deadlocks.
if (page->SweepingDone()) return 0;
int max_freed = 0;
{
base::LockGuard<base::Mutex> guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
if (page->SweepingDone()) return 0;
// If the page is a code page, the CodePageMemoryModificationScope changes
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
} else {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
if (typed_slot_set) {
typed_slot_set->FreeToBeFreedChunks();
}
SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
if (slot_set) {
slot_set->FreeToBeFreedBuckets();
}
}
{
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[identity].push_back(page);
}
return max_freed;
}
void Sweeper::ScheduleIncrementalSweepingTask() {
if (!incremental_sweeper_pending_) {
incremental_sweeper_pending_ = true;
IncrementalSweeperTask* task =
new IncrementalSweeperTask(heap_->isolate(), this);
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
}
}
void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
PrepareToBeSweptPage(space, page);
} else {
// Page has been temporarily removed from the sweeper. Accounting already
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
sweeping_list_[space].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
if (space != NEW_SPACE) {
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
}
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
Page* page = nullptr;
if (!sweeping_list_[space].empty()) {
page = sweeping_list_[space].front();
sweeping_list_[space].pop_front();
}
return page;
}
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_SWEEPER_H_
#define V8_HEAP_SWEEPER_H_
#include <deque>
#include <vector>
#include "src/base/platform/semaphore.h"
#include "src/cancelable-task.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
class MajorNonAtomicMarkingState;
class Page;
class PagedSpace;
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
class Sweeper {
public:
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
// Pauses the sweeper tasks or completes sweeping.
class PauseOrCompleteScope final {
public:
explicit PauseOrCompleteScope(Sweeper* sweeper);
~PauseOrCompleteScope();
private:
Sweeper* const sweeper_;
};
// Temporarily filters old space sweeping lists. Requires the concurrent
// sweeper to be paused. Allows for pages to be added to the sweeper while
// in this scope. Note that the original list of sweeping pages is restored
// after exiting this scope.
class FilterSweepingPagesScope final {
public:
explicit FilterSweepingPagesScope(
Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
~FilterSweepingPagesScope();
template <typename Callback>
void FilterOldSpaceSweepingPages(Callback callback) {
if (!sweeping_in_progress_) return;
SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
// Iteration here is from most free space to least free space.
for (auto it = old_space_sweeping_list_.begin();
it != old_space_sweeping_list_.end(); it++) {
if (callback(*it)) {
sweeper_list->push_back(*it);
}
}
}
private:
Sweeper* const sweeper_;
SweepingList old_space_sweeping_list_;
const PauseOrCompleteScope& pause_or_complete_scope_;
bool sweeping_in_progress_;
};
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
CLEAR_TYPED_SLOTS
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
stop_sweeper_tasks_(false) {}
bool sweeping_in_progress() const { return sweeping_in_progress_; }
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
void ScheduleIncrementalSweepingTask();
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
void StartSweeping();
void StartSweeperTasks();
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
Page* GetSweptPageSafe(PagedSpace* space);
private:
class IncrementalSweeperTask;
class SweeperTask;
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
static const int kMaxSweeperTasks = kAllocationSpaces;
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
for (int i = 0; i < kAllocationSpaces; i++) {
callback(static_cast<AllocationSpace>(i));
}
}
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
for (int i = 0; i < kAllocationSpaces; i++) {
if (!sweeping_list_[i].empty()) return false;
}
return true;
}
void SweepSpaceFromTask(AllocationSpace identity);
// Incrementally sweeps one page from the given space. Returns true if
// there are no more pages to sweep in the given space.
bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
void AbortAndWaitForTasks();
Page* GetSweepingPageSafe(AllocationSpace space);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_;
int num_tasks_;
CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// The counter is actively maintained by the concurrent tasks so that the
// main thread does not have to query the semaphore to track the task count.
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
base::AtomicValue<bool> stop_sweeper_tasks_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_SWEEPER_H_
......@@ -1043,6 +1043,8 @@
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
'heap/sweeper.cc',
'heap/sweeper.h',
'heap/worklist.h',
'intl.cc',
'intl.h',
......
......@@ -192,7 +192,7 @@ UNINITIALIZED_HEAP_TEST(Regress658718) {
heap->new_space()->Shrink();
heap->memory_allocator()->unmapper()->WaitUntilCompleted();
heap->delay_sweeper_tasks_for_testing_ = false;
heap->mark_compact_collector()->sweeper().StartSweeperTasks();
heap->mark_compact_collector()->sweeper()->StartSweeperTasks();
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
isolate->Dispose();
......