Commit 18d92181 authored by mlippautz, committed by Commit bot

[heap] Refactor Sweeper

- Additionally, allow committing late lists to an already started sweeper

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1871423002

Cr-Commit-Position: refs/heads/master@{#35432}
parent 1ef7487b
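
In the hunks below, the late-list feature from the commit message surfaces as AddLatePage(), CommitLateList(), and GetLateSweepingListSafe() on the new MarkCompactCollector::Sweeper class: pages that become sweepable after sweeping has already started are staged on a temporary list and published to the concurrent sweeper tasks in one locked step. A minimal, self-contained model of that protocol follows; it is a sketch only, with plain ints standing in for Page* and a single list instead of the per-AllocationSpace arrays used in the patch.

// Toy model of the Sweeper's late-list commit/consume protocol.
// Names mirror the diff; everything else is simplified for illustration.
#include <cassert>
#include <cstdio>
#include <mutex>
#include <vector>

class SweeperModel {
 public:
  using SweepingList = std::vector<int>;  // ints stand in for Page*

  void StartSweeping() { sweeping_in_progress_ = true; }

  // Before sweeping starts, pages go straight onto the regular list.
  void AddPage(int page) {
    assert(!sweeping_in_progress_);
    sweeping_list_.push_back(page);
  }

  // Once sweeping has started, new pages are staged on a temporary list
  // that only the main thread touches, so no lock is needed here...
  void AddLatePage(int page) {
    assert(sweeping_in_progress_);
    tmp_late_sweeping_list_.push_back(page);
  }

  // ...and are published to concurrent sweepers in one locked step.
  void CommitLateList() {
    std::lock_guard<std::mutex> guard(mutex_);
    late_sweeping_list_ = tmp_late_sweeping_list_;
    tmp_late_sweeping_list_.clear();
  }

  // Sweeper tasks pick up the committed list under the same mutex.
  SweepingList TakeLateListSafe() {
    std::lock_guard<std::mutex> guard(mutex_);
    SweepingList result;
    result.swap(late_sweeping_list_);
    return result;
  }

 private:
  std::mutex mutex_;
  bool sweeping_in_progress_ = false;
  SweepingList sweeping_list_;           // regular pages (unused in this demo)
  SweepingList tmp_late_sweeping_list_;  // staged, main thread only
  SweepingList late_sweeping_list_;      // committed, shared under mutex_
};

int main() {
  SweeperModel sweeper;
  sweeper.AddPage(1);      // known before sweeping starts
  sweeper.StartSweeping();
  sweeper.AddLatePage(2);  // arrives while sweeping is in progress
  sweeper.CommitLateList();
  for (int page : sweeper.TakeLateListSafe()) {
    std::printf("late page %d\n", page);
  }
  return 0;
}
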
@@ -1641,7 +1641,8 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), heap->code_space());
heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page);
heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
page);
Address addr = page->skip_list()->StartFor(inner_pointer);
......
@@ -1121,7 +1121,7 @@ void IncrementalMarking::SpeedUp() {
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(heap_->mark_compact_collector()->IsSweepingCompleted() ||
(heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
!FLAG_concurrent_sweeping)) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
......
@@ -12,17 +12,6 @@
namespace v8 {
namespace internal {
inline std::vector<Page*>& MarkCompactCollector::sweeping_list(Space* space) {
if (space == heap()->old_space()) {
return sweeping_list_old_space_;
} else if (space == heap()->code_space()) {
return sweeping_list_code_space_;
}
DCHECK_EQ(space, heap()->map_space());
return sweeping_list_map_space_;
}
void MarkCompactCollector::PushBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
if (marking_deque_.Push(obj)) {
......
@@ -62,9 +62,8 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
embedder_heap_tracer_(nullptr),
have_code_to_deoptimize_(false),
compacting_(false),
sweeping_in_progress_(false),
pending_sweeper_tasks_semaphore_(0),
pending_compaction_tasks_semaphore_(0) {
pending_compaction_tasks_semaphore_(0),
sweeper_(heap) {
}
#ifdef VERIFY_HEAP
@@ -460,11 +459,13 @@ void MarkCompactCollector::ClearMarkbits() {
}
}
class MarkCompactCollector::SweeperTask : public v8::Task {
class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
public:
SweeperTask(Heap* heap, AllocationSpace space_to_start)
: heap_(heap), space_to_start_(space_to_start) {}
SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
AllocationSpace space_to_start)
: sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
space_to_start_(space_to_start) {}
virtual ~SweeperTask() {}
@@ -479,33 +480,45 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
DCHECK_GE(space_id, FIRST_PAGED_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
heap_->mark_compact_collector()->SweepInParallel(
heap_->paged_space(space_id), 0);
sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
}
heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
pending_sweeper_tasks_->Signal();
}
Heap* heap_;
Sweeper* sweeper_;
base::Semaphore* pending_sweeper_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
void MarkCompactCollector::Sweeper::StartSweeping() {
sweeping_in_progress_ = true;
ForAllSweepingSpaces([this](AllocationSpace space) {
std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
[](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
});
if (FLAG_concurrent_sweeping) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
StartSweepingHelper(space);
});
}
}
void MarkCompactCollector::StartSweeperThreads() {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
void MarkCompactCollector::Sweeper::StartSweepingHelper(
AllocationSpace space_to_start) {
num_sweeping_tasks_++;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
v8::Platform::kShortRunningTask);
}
void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
if (!page->SweepingDone()) {
SweepInParallel(page, owner);
ParallelSweepPage(page, owner);
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
@@ -515,34 +528,55 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
}
}
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(space->identity()), 0);
if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
sweeper().ParallelSweepSpace(space->identity(), 0);
space->RefillFreeList();
}
}
Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
SweptList& list = swept_list_[space->identity()];
if (list.length() > 0) {
return list.RemoveLast();
}
return nullptr;
}
void MarkCompactCollector::EnsureSweepingCompleted() {
void MarkCompactCollector::Sweeper::EnsureCompleted() {
DCHECK(sweeping_in_progress_ == true);
// If sweeping is not completed or not running at all, we try to complete it
// here.
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
}
if (FLAG_concurrent_sweeping) {
pending_sweeper_tasks_semaphore_.Wait();
pending_sweeper_tasks_semaphore_.Wait();
pending_sweeper_tasks_semaphore_.Wait();
while (num_sweeping_tasks_ > 0) {
pending_sweeper_tasks_semaphore_.Wait();
num_sweeping_tasks_--;
}
}
ParallelSweepSpacesComplete();
DCHECK(sweeping_list_[NEW_SPACE].empty());
ForAllSweepingSpaces([this](AllocationSpace space) {
DCHECK_NULL(tmp_late_sweeping_list_[space]);
sweeping_list_[space].clear();
if (late_sweeping_list_[space] != nullptr) {
delete late_sweeping_list_[space];
}
});
sweeping_in_progress_ = false;
}
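
Worth noting in EnsureCompleted() above: the removed code waited on the semaphore exactly three times, once per hard-coded paged space, while the new num_sweeping_tasks_ counter records how many tasks were actually started, and the main thread waits once per started task because every SweeperTask signals the semaphore exactly once when it finishes. A standalone sketch of the same handshake, assuming C++20's std::counting_semaphore as a stand-in for v8's base::Semaphore:

#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

int main() {
  std::counting_semaphore<16> pending_sweeper_tasks(0);  // starts at 0
  int num_sweeping_tasks = 0;  // touched by the main thread only
  std::vector<std::thread> tasks;

  for (int space = 0; space < 3; space++) {  // e.g. OLD, CODE, MAP
    num_sweeping_tasks++;  // mirrors StartSweepingHelper
    tasks.emplace_back([&pending_sweeper_tasks, space] {
      std::printf("sweeping space %d\n", space);  // stand-in for the real work
      pending_sweeper_tasks.release();            // mirrors Signal()
    });
  }

  while (num_sweeping_tasks > 0) {    // mirrors EnsureCompleted
    pending_sweeper_tasks.acquire();  // mirrors Wait()
    num_sweeping_tasks--;
  }
  for (std::thread& t : tasks) t.join();
  return 0;
}
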
void MarkCompactCollector::EnsureSweepingCompleted() {
DCHECK(sweeper().sweeping_in_progress() == true);
sweeper().EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
@@ -554,8 +588,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
#endif
}
bool MarkCompactCollector::IsSweepingCompleted() {
bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
if (!pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
return false;
@@ -564,7 +597,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
return true;
}
void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -3210,28 +3242,18 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
// Sweeps a page. After sweeping the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
// Returns the size of the biggest continuous freed memory chunk in bytes.
template <SweepingMode sweeping_mode,
MarkCompactCollector::SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeSpaceTreatmentMode free_space_mode>
static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
space->identity() == CODE_SPACE);
DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
sweeping_mode == SWEEP_ONLY);
DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3293,7 +3315,6 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}
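
RawSweep keeps the old free function's design of taking its behavior knobs as non-type template parameters, so every mode combination is stamped out as a separate specialization and the checks on sweeping_mode, skip_list_mode, and free_space_mode fold away at compile time, which matters on this hot path. A toy analog of that technique (not the real sweeping logic):

#include <cstdio>

enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

// Each instantiation compiles to its own function: the comparison against
// the template parameter is a constant the compiler eliminates.
template <FreeSpaceTreatmentMode free_space_mode>
int RawSweepToy(unsigned char* area, int size) {
  int freed = 0;
  for (int i = 0; i < size; i++) {
    if (free_space_mode == ZAP_FREE_SPACE) {
      area[i] = 0xCC;  // illustrative zap pattern, not v8's actual filler
    }
    freed++;
  }
  return freed;
}

int main() {
  unsigned char area[8] = {0};
  std::printf("%d\n", RawSweepToy<ZAP_FREE_SPACE>(area, 8));
  std::printf("%d\n", RawSweepToy<IGNORE_FREE_SPACE>(area, 8));
  return 0;
}
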
void MarkCompactCollector::InvalidateCode(Code* code) {
if (heap_->incremental_marking()->IsCompacting() &&
!ShouldSkipEvacuationSlotRecording(code)) {
@@ -3390,6 +3411,11 @@ void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
}
}
void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[space->identity()].Add(page);
}
void MarkCompactCollector::SweepAbortedPages() {
// Second pass on aborted pages.
@@ -3400,26 +3426,28 @@ void MarkCompactCollector::SweepAbortedPages() {
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
switch (space->identity()) {
case OLD_SPACE:
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, p, nullptr);
Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
Sweeper::IGNORE_SKIP_LIST,
Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
break;
case CODE_SPACE:
if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, p, nullptr);
Sweeper::RawSweep<
Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
Sweeper::REBUILD_SKIP_LIST, Sweeper::ZAP_FREE_SPACE>(space, p,
nullptr);
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, p, nullptr);
Sweeper::RawSweep<
Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
Sweeper::REBUILD_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
space, p, nullptr);
}
break;
default:
UNREACHABLE();
break;
}
{
base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
swept_pages(space->identity())->Add(p);
}
sweeper().AddSweptPageSafe(space, p);
}
}
}
@@ -3463,7 +3491,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !sweeping_in_progress_) {
if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
VerifyEvacuation(heap());
}
#endif
@@ -3650,30 +3678,40 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
heap()->FreeQueuedChunks();
}
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int required_freed_bytes,
int max_pages) {
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int required_freed_bytes,
int max_pages) {
int max_freed = 0;
int max_freed_overall = 0;
int page_count = 0;
for (Page* p : sweeping_list(space)) {
max_freed = SweepInParallel(p, space);
DCHECK(max_freed >= 0);
if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
return max_freed;
}
max_freed_overall = Max(max_freed, max_freed_overall);
page_count++;
if (max_pages > 0 && page_count >= max_pages) {
break;
}
int pages_freed = 0;
ParallelSweepList(sweeping_list_[identity], identity, required_freed_bytes,
max_pages, &max_freed, &pages_freed);
if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
return max_freed;
if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
SweepingList* late_list = GetLateSweepingListSafe(identity);
if (late_list != nullptr) {
ParallelSweepList(*late_list, identity, required_freed_bytes, max_pages,
&max_freed, &pages_freed);
}
return max_freed_overall;
return max_freed;
}
void MarkCompactCollector::Sweeper::ParallelSweepList(
SweepingList& list, AllocationSpace out_space, int required_freed_bytes,
int max_pages, int* max_freed, int* pages_freed) {
for (Page* p : list) {
int freed = ParallelSweepPage(p, heap_->paged_space(out_space));
*pages_freed += 1;
DCHECK_GE(freed, 0);
*max_freed = Max(*max_freed, freed);
if ((required_freed_bytes) > 0 && (*max_freed >= required_freed_bytes))
return;
if ((max_pages > 0) && (*pages_freed >= max_pages)) return;
}
}
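
The contract documented on the old SweepInParallel carries over to ParallelSweepSpace: a required_freed_bytes greater than zero returns as soon as some page yields a contiguous block at least that large, and max_pages greater than zero caps how many pages are attempted; new here is that the committed late list is consulted only if the regular list is exhausted without hitting either bound. A condensed sketch of that control flow, with plain ints standing in for per-page freed byte counts:

#include <algorithm>
#include <cstdio>
#include <vector>

// Returns true if one of the early-exit bounds was reached, mirroring the
// early returns inside ParallelSweepList.
bool SweepListToy(const std::vector<int>& list, int required_freed_bytes,
                  int max_pages, int* max_freed, int* pages_freed) {
  for (int freed : list) {  // each element stands in for ParallelSweepPage
    (*pages_freed)++;
    *max_freed = std::max(*max_freed, freed);
    if (required_freed_bytes > 0 && *max_freed >= required_freed_bytes)
      return true;
    if (max_pages > 0 && *pages_freed >= max_pages) return true;
  }
  return false;
}

int main() {
  std::vector<int> regular_list = {100, 300};
  std::vector<int> late_list = {900};
  int max_freed = 0, pages_freed = 0;
  // The late list is only walked when the regular list did not satisfy
  // the request, just like ParallelSweepSpace.
  if (!SweepListToy(regular_list, 500, 0, &max_freed, &pages_freed)) {
    SweepListToy(late_list, 500, 0, &max_freed, &pages_freed);
  }
  std::printf("max_freed=%d pages_freed=%d\n", max_freed, pages_freed);
  return 0;
}
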
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
PagedSpace* space) {
int max_freed = 0;
if (page->mutex()->TryLock()) {
// If this page was already swept in the meantime, we can return here.
@@ -3683,18 +3721,18 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
if (space->identity() == OLD_SPACE) {
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
} else if (space->identity() == CODE_SPACE) {
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
} else {
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
}
{
base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
swept_pages(space->identity())->Add(page);
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[space->identity()].Add(page);
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
@@ -3702,6 +3740,41 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return max_freed;
}
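
ParallelSweepPage can be entered both by a concurrent SweeperTask and by the main thread (via SweepOrWaitUntilSweepingCompleted or the allocation slow path), so it claims the page with TryLock and re-checks the sweeping state under the lock in case the other party finished first. The same try-lock-then-recheck pattern in miniature, using standard library primitives rather than v8's:

#include <atomic>
#include <cstdio>
#include <mutex>

std::mutex page_mutex;                   // stands in for page->mutex()
std::atomic<bool> sweeping_done{false};  // stands in for page->SweepingDone()

int ParallelSweepPageToy() {
  int max_freed = 0;
  if (page_mutex.try_lock()) {
    // Re-check under the lock: the page may have been swept in the meantime.
    if (!sweeping_done.load()) {
      max_freed = 128;  // stand-in for RawSweep's return value
      sweeping_done.store(true);
    }
    page_mutex.unlock();
  }
  // If try_lock failed, another thread owns the page; report no progress.
  return max_freed;
}

int main() {
  std::printf("freed %d\n", ParallelSweepPageToy());
  return 0;
}
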
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
DCHECK(!sweeping_in_progress_);
PrepareToBeSweptPage(space, page);
sweeping_list_[space].push_back(page);
}
void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
Page* page) {
DCHECK(sweeping_in_progress_);
PrepareToBeSweptPage(space, page);
if (tmp_late_sweeping_list_[space] == nullptr) {
tmp_late_sweeping_list_[space] = new SweepingList();
}
tmp_late_sweeping_list_[space]->push_back(page);
}
void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
int to_sweep = page->area_size() - page->LiveBytes();
heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}
void MarkCompactCollector::Sweeper::CommitLateList(AllocationSpace space) {
DCHECK(sweeping_in_progress_);
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK_NULL(late_sweeping_list_[space]);
late_sweeping_list_[space] = tmp_late_sweeping_list_[space];
}
MarkCompactCollector::Sweeper::SweepingList*
MarkCompactCollector::Sweeper::GetLateSweepingListSafe(AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
return late_sweeping_list_[space];
}
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
@@ -3737,8 +3810,9 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, p, nullptr);
Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
space, p, nullptr);
continue;
}
@@ -3754,10 +3828,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
unused_page_present = true;
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
sweeping_list(space).push_back(p);
int to_sweep = p->area_size() - p->LiveBytes();
space->accounting_stats_.ShrinkSpace(to_sweep);
sweeper().AddPage(space->identity(), p);
will_be_swept++;
}
@@ -3765,8 +3836,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
AllocationSpaceName(space->identity()), will_be_swept);
}
std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
[](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
}
@@ -3782,7 +3851,6 @@ void MarkCompactCollector::SweepSpaces() {
#endif
{
sweeping_in_progress_ = true;
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_OLD);
@@ -3798,9 +3866,7 @@
GCTracer::Scope::MC_SWEEP_MAP);
StartSweepSpace(heap()->map_space());
}
if (FLAG_concurrent_sweeping) {
StartSweeperThreads();
}
sweeper().StartSweeping();
}
// Deallocate unmarked large objects.
@@ -3812,13 +3878,6 @@
}
}
void MarkCompactCollector::ParallelSweepSpacesComplete() {
sweeping_list(heap()->old_space()).clear();
sweeping_list(heap()->code_space()).clear();
sweeping_list(heap()->map_space()).clear();
}
Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
......
@@ -400,6 +400,81 @@ class MarkCompactCollector {
public:
class Evacuator;
class Sweeper {
public:
class SweeperTask;
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
typedef std::vector<Page*> SweepingList;
typedef List<Page*> SweptList;
template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeSpaceTreatmentMode free_space_mode>
static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
explicit Sweeper(Heap* heap)
: heap_(heap),
pending_sweeper_tasks_semaphore_(0),
sweeping_in_progress_(false),
num_sweeping_tasks_(0) {
ForAllSweepingSpaces([this](AllocationSpace space) {
late_sweeping_list_[space] = nullptr;
tmp_late_sweeping_list_[space] = nullptr;
});
}
bool sweeping_in_progress() { return sweeping_in_progress_; }
void AddPage(AllocationSpace space, Page* page);
void AddLatePage(AllocationSpace space, Page* page);
void CommitLateList(AllocationSpace space);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, PagedSpace* space);
void StartSweeping();
void StartSweepingHelper(AllocationSpace space_to_start);
void EnsureCompleted();
bool IsSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
Page* GetSweptPageSafe(PagedSpace* space);
private:
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
for (int i = 0; i < kAllocationSpaces; i++) {
callback(static_cast<AllocationSpace>(i));
}
}
SweepingList* GetLateSweepingListSafe(AllocationSpace space);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
void ParallelSweepList(SweepingList& list, AllocationSpace out_space,
int required_freed_bytes, int max_pages,
int* max_freed, int* pages_freed);
Heap* heap_;
base::Mutex mutex_;
base::Semaphore pending_sweeper_tasks_semaphore_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
SweepingList* late_sweeping_list_[kAllocationSpaces];
SweepingList* tmp_late_sweeping_list_[kAllocationSpaces];
bool sweeping_in_progress_;
int num_sweeping_tasks_;
};
enum IterationMode {
kKeepMarking,
kClearMarkbits,
@@ -451,8 +526,6 @@ class MarkCompactCollector {
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
@@ -490,38 +563,19 @@ class MarkCompactCollector {
MarkingParity marking_parity() { return marking_parity_; }
// Concurrent and parallel sweeping support. If required_freed_bytes was set
// to a value larger than 0, then sweeping returns after a block of at least
// required_freed_bytes was freed. If required_freed_bytes was set to zero
// then the whole given space is swept. It returns the size of the maximum
// continuous freed memory chunk.
int SweepInParallel(PagedSpace* space, int required_freed_bytes,
int max_pages = 0);
// Sweeps a given page concurrently to the sweeper threads. It returns the
// size of the maximum continuous freed memory chunk.
int SweepInParallel(Page* page, PagedSpace* space);
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
void EnsureSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
// Help out in sweeping the corresponding space and refill memory that has
// been regained.
//
// Note: Thread-safe.
void SweepAndRefill(CompactionSpace* space);
// If sweeper threads are not active this method will return true. If
// this is a latency issue we should be smarter here. Otherwise, it will
// return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeping_in_progress_; }
bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
@@ -562,20 +616,7 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
List<Page*>* swept_pages(AllocationSpace id) {
switch (id) {
case OLD_SPACE:
return &swept_old_space_pages_;
case CODE_SPACE:
return &swept_code_space_pages_;
case MAP_SPACE:
return &swept_map_space_pages_;
default:
UNREACHABLE();
}
return nullptr;
}
Sweeper& sweeper() { return sweeper_; }
std::vector<std::pair<void*, void*>>& wrappers_to_trace() {
return wrappers_to_trace_;
@@ -596,7 +637,6 @@ class MarkCompactCollector {
class EvacuateOldSpaceVisitor;
class EvacuateVisitorBase;
class HeapObjectVisitor;
class SweeperTask;
typedef std::vector<Page*> SweepingList;
@@ -605,8 +645,6 @@
bool WillBeDeoptimized(Code* code);
void ClearInvalidRememberedSetSlots();
void StartSweeperThreads();
void ComputeEvacuationHeuristics(int area_size,
int* target_fragmentation_percent,
int* max_evacuated_bytes);
@@ -775,8 +813,6 @@ class MarkCompactCollector {
// evacuation.
//
inline SweepingList& sweeping_list(Space* space);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
@@ -811,10 +847,6 @@
// up other pages for sweeping.
void StartSweepSpace(PagedSpace* space);
// Finalizes the parallel sweeping phase. Marks all the pages that were
// swept in parallel.
void ParallelSweepSpacesComplete();
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -838,30 +870,17 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
base::Mutex swept_pages_mutex_;
List<Page*> swept_old_space_pages_;
List<Page*> swept_code_space_pages_;
List<Page*> swept_map_space_pages_;
SweepingList sweeping_list_old_space_;
SweepingList sweeping_list_code_space_;
SweepingList sweeping_list_map_space_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_in_progress_;
// Semaphore used to synchronize sweeper tasks.
base::Semaphore pending_sweeper_tasks_semaphore_;
// Semaphore used to synchronize compaction tasks.
base::Semaphore pending_compaction_tasks_semaphore_;
bool black_allocation_;
Sweeper sweeper_;
friend class Heap;
friend class StoreBuffer;
};
......
@@ -62,8 +62,10 @@ bool HeapObjectIterator::AdvanceToNextPage() {
}
cur_page = cur_page->next_page();
if (cur_page == space_->anchor()) return false;
cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
cur_page);
cur_page->heap()
->mark_compact_collector()
->sweeper()
.SweepOrWaitUntilSweepingCompleted(cur_page);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
@@ -1052,17 +1054,14 @@ void PagedSpace::RefillFreeList() {
return;
}
MarkCompactCollector* collector = heap()->mark_compact_collector();
List<Page*>* swept_pages = collector->swept_pages(identity());
intptr_t added = 0;
{
base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
for (int i = swept_pages->length() - 1; i >= 0; --i) {
Page* p = (*swept_pages)[i];
Page* p = nullptr;
while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
if (is_local() && (p->owner() != this)) {
if (added > kCompactionMemoryWanted) break;
base::LockGuard<base::Mutex> guard(
reinterpret_cast<PagedSpace*>(p->owner())->mutex());
p->Unlink();
@@ -1071,7 +1070,7 @@ void PagedSpace::RefillFreeList() {
}
added += RelinkFreeListCategories(p);
added += p->wasted_memory();
swept_pages->Remove(i);
if (is_local() && (added > kCompactionMemoryWanted)) break;
}
}
accounting_stats_.IncreaseCapacity(added);
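
The rewritten loop above drains the sweeper's output one page at a time through GetSweptPageSafe, so the mutex that used to be held across the whole iteration (swept_pages_mutex_) is now held only for a single pop. A stripped-down sketch of that producer/consumer handoff:

#include <cstdio>
#include <mutex>
#include <vector>

std::mutex mutex_;                            // stands in for Sweeper::mutex_
std::vector<int> swept_list_ = {10, 20, 30};  // toy freed-bytes per page

// Mirrors GetSweptPageSafe: a short critical section that hands out one page.
bool GetSweptPageSafeToy(int* page) {
  std::lock_guard<std::mutex> guard(mutex_);
  if (swept_list_.empty()) return false;
  *page = swept_list_.back();
  swept_list_.pop_back();
  return true;
}

int main() {
  int added = 0, page = 0;
  while (GetSweptPageSafeToy(&page)) {  // mirrors the new RefillFreeList loop
    added += page;  // stand-in for RelinkFreeListCategories + wasted_memory
  }
  std::printf("added=%d\n", added);
  return 0;
}
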
@@ -2618,8 +2617,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
size_in_bytes, kMaxPagesToSweep);
int max_freed = collector->sweeper().ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
object = free_list_.Allocate(size_in_bytes);
......