Commit 4490a760 authored by ulan's avatar ulan Committed by Commit bot

Revert of [heap] Start sweeper tasks after evacuation. (patchset #2 id:20001...

Revert of [heap] Start sweeper tasks after evacuation. (patchset #2 id:20001 of https://chromiumcodereview.appspot.com/2428043002/ )

Reason for revert:
Performance regression on arm64: crbug.com/657776

Original issue's description:
> [heap] Start sweeper tasks after evacuation.
>
> This allows us to use more tasks for parallel evacuation.
>
> BUG=

TBR=mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review-Url: https://chromiumcodereview.appspot.com/2440693002
Cr-Commit-Position: refs/heads/master@{#40465}
parent 555a7258
......@@ -1053,7 +1053,7 @@ void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
!heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
......
......@@ -321,7 +321,7 @@ void MarkCompactCollector::CollectGarbage() {
}
#endif
StartSweepSpaces();
SweepSpaces();
EvacuateNewSpaceAndCandidates();
......@@ -451,20 +451,22 @@ void MarkCompactCollector::Sweeper::StartSweeping() {
std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
[](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
});
}
void MarkCompactCollector::Sweeper::StartSweeperTasks() {
if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
if (FLAG_concurrent_sweeping) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
v8::Platform::kShortRunningTask);
StartSweepingHelper(space);
});
}
}
void MarkCompactCollector::Sweeper::StartSweepingHelper(
AllocationSpace space_to_start) {
num_sweeping_tasks_.Increment(1);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
v8::Platform::kShortRunningTask);
}
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
if (!page->SweepingDone()) {
......@@ -479,8 +481,7 @@ void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
}
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
if (FLAG_concurrent_sweeping &&
!sweeper().IsSweepingCompleted(space->identity())) {
if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
sweeper().ParallelSweepSpace(space->identity(), 0);
space->RefillFreeList();
}
......@@ -500,11 +501,10 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces([this](AllocationSpace space) {
if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
ParallelSweepSpace(space, 0);
}
});
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
}
if (FLAG_concurrent_sweeping) {
while (num_sweeping_tasks_.Value() > 0) {
......@@ -519,12 +519,13 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
}
DCHECK(sweeping_list_[space].empty());
});
late_pages_ = false;
sweeping_in_progress_ = false;
}
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
for (Page* p : *heap_->new_space()) {
SweepOrWaitUntilSweepingCompleted(p);
}
......@@ -546,20 +547,13 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
#endif
}
bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
DCHECK(FLAG_concurrent_sweeping);
while (pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
num_sweeping_tasks_.Increment(-1);
}
return num_sweeping_tasks_.Value() != 0;
}
bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
DCHECK(FLAG_concurrent_sweeping);
if (AreSweeperTasksRunning()) return false;
base::LockGuard<base::Mutex> guard(&mutex_);
return sweeping_list_[space].empty();
return num_sweeping_tasks_.Value() == 0;
}
const char* AllocationSpaceName(AllocationSpace space) {
......@@ -838,7 +832,11 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
sweeper().StartSweeperTasks();
if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
// If we added some more pages during MC, we need to start at least one
// more task as all other tasks might already be finished.
sweeper().StartSweepingHelper(OLD_SPACE);
}
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash(
......@@ -3175,15 +3173,17 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
//
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - #cores
// - (#cores - 1)
const double kTargetCompactionTimeInMs = .5;
const int kNumSweepingTasks = 3;
double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
const int available_cores = Max(
1, static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
kNumSweepingTasks - 1);
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
......@@ -3543,12 +3543,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
for (Page* p : newspace_evacuation_candidates_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
sweeper().AddPage(p->owner()->identity(), p);
sweeper().AddLatePage(p->owner()->identity(), p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
sweeper().AddPage(p->owner()->identity(), p);
sweeper().AddLatePage(p->owner()->identity(), p);
}
}
newspace_evacuation_candidates_.Rewind(0);
......@@ -3560,7 +3560,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper().AddPage(p->owner()->identity(), p);
sweeper().AddLatePage(p->owner()->identity(), p);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
......@@ -3862,11 +3862,19 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
}
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
DCHECK(!sweeping_in_progress_);
PrepareToBeSweptPage(space, page);
sweeping_list_[space].push_back(page);
}
void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
Page* page) {
DCHECK(sweeping_in_progress_);
PrepareToBeSweptPage(space, page);
late_pages_ = true;
AddSweepingPageSafe(space, page);
}
void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
......@@ -3947,7 +3955,8 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
}
}
void MarkCompactCollector::StartSweepSpaces() {
void MarkCompactCollector::SweepSpaces() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
state_ = SWEEP_SPACES;
......
......@@ -299,25 +299,24 @@ class MarkCompactCollector {
: heap_(heap),
pending_sweeper_tasks_semaphore_(0),
sweeping_in_progress_(false),
late_pages_(false),
num_sweeping_tasks_(0) {}
bool sweeping_in_progress() { return sweeping_in_progress_; }
bool contains_late_pages() { return late_pages_; }
void AddPage(AllocationSpace space, Page* page);
void AddLatePage(AllocationSpace space, Page* page);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
void StartSweeping();
void StartSweeperTasks();
void StartSweepingHelper(AllocationSpace space_to_start);
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
bool IsSweepingCompleted(AllocationSpace space);
bool IsSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
......@@ -344,6 +343,7 @@ class MarkCompactCollector {
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool sweeping_in_progress_;
bool late_pages_;
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
};
......@@ -643,10 +643,21 @@ class MarkCompactCollector {
void AbortTransitionArrays();
// Starts sweeping of spaces by contributing on the main thread and setting
// up other pages for sweeping. Does not start sweeper tasks.
void StartSweepSpaces();
void StartSweepSpace(PagedSpace* space);
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
// a non-compacting collection.
//
// Before: Live objects are marked and non-live objects are unmarked.
//
// After: Live objects are unmarked, non-live regions have been added to
// their space's free list. Active eden semispace is compacted by
// evacuation.
//
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
void SweepSpaces();
void EvacuateNewSpacePrologue();
......@@ -669,6 +680,9 @@ class MarkCompactCollector {
void ReleaseEvacuationCandidates();
// Starts sweeping of a space by contributing on the main thread and setting
// up other pages for sweeping.
void StartSweepSpace(PagedSpace* space);
#ifdef DEBUG
friend class MarkObjectVisitor;
......
......@@ -394,14 +394,12 @@ void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
}
bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
// We cannot free memory chunks in new space while the sweeper is running
// since a sweeper thread might be stuck right before trying to lock the
// corresponding page.
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
return !chunk->InNewSpace() || !FLAG_concurrent_sweeping ||
chunk->SweepingDone();
return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping ||
mc->sweeper().IsSweepingCompleted();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
......
......@@ -413,10 +413,6 @@ class MemoryChunk {
return concurrent_sweeping_;
}
bool SweepingDone() {
return concurrent_sweeping_state().Value() == kSweepingDone;
}
// Manage live byte count, i.e., count of bytes in black objects.
inline void ResetLiveBytes();
inline void IncrementLiveBytes(int by);
......@@ -771,6 +767,10 @@ class Page : public MemoryChunk {
DCHECK(SweepingDone());
}
bool SweepingDone() {
return concurrent_sweeping_state().Value() == kSweepingDone;
}
void ResetFreeListStatistics();
size_t AvailableInFreeList();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment