Commit b17764d2 authored by Michael Achenbach, committed by Commit Bot

Revert "Reland "[Heap]: Convert Sweep to Job""

This reverts commit b16c7e5b.

Reason for revert: Suspect for lots of crashes on GPU bots, e.g.:
https://ci.chromium.org/p/v8/builders/ci/Mac%20V8%20FYI%20Release%20(Intel)/11228
https://chromium-swarm.appspot.com/task?id=4f88d01781db5a10

Original change's description:
> Reland "[Heap]: Convert Sweep to Job"
>
> This is a reland of 795c0b1c
> Reason for revert:
> TSAN failures https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20TSAN/33884
> Safe to reland as-is with fix to EagerUnmappingInCollectAllAvailableGarbage
> https://chromium-review.googlesource.com/c/v8/v8/+/2502809
>
> Original change's description:
> > [Heap]: Convert Sweep to Job
> >
> > max concurrency is inferred from queue size for OLD_SPACE & MAP_SPACE.
> > Extra Sweeper::TearDown() in MarkCompactCollector::TearDown() is needed
> > to cancel job.
> >
> > Change-Id: Iafba7d7d24e8f6e5c5a1d5c0348dea731f0ac224
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2480783
> > Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#70767}
>
> Change-Id: Id9a5baceed4664f53da39597af56a2067e4f3c6f
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2502808
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70845}

TBR=ulan@chromium.org,etiennep@chromium.org

Change-Id: Id6e9fe99f016652dd0fedbdbf65662f8e02ed67a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2505974
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70859}
parent 6bcf28ba
......@@ -434,7 +434,6 @@ void MarkCompactCollector::TearDown() {
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
}
sweeper()->TearDown();
}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
......
......@@ -19,8 +19,12 @@ namespace internal {
Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
stop_sweeper_tasks_(false),
iterability_task_semaphore_(0),
iterability_in_progress_(false),
iterability_task_started_(false),
......@@ -28,10 +32,10 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_ = true;
if (!sweeper_->sweeping_in_progress()) return;
if (sweeper_->job_handle_ && sweeper_->job_handle_->IsValid())
sweeper_->job_handle_->Cancel();
sweeper_->AbortAndWaitForTasks();
// Complete sweeping if there's nothing more to do.
if (sweeper_->IsDoneSweeping()) {
......@@ -45,6 +49,7 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
}
Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
sweeper_->stop_sweeper_tasks_ = false;
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
......@@ -73,17 +78,27 @@ Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
// old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
class Sweeper::SweeperJob final : public JobTask {
class Sweeper::SweeperTask final : public CancelableTask {
public:
SweeperJob(Isolate* isolate, Sweeper* sweeper)
: sweeper_(sweeper), tracer_(isolate->heap()->tracer()) {}
SweeperTask(Isolate* isolate, Sweeper* sweeper,
base::Semaphore* pending_sweeper_tasks,
std::atomic<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
: CancelableTask(isolate),
sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start),
tracer_(isolate->heap()->tracer()) {}
~SweeperJob() override = default;
~SweeperTask() override = default;
void Run(JobDelegate* delegate) final {
private:
void RunInternal() final {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
const int offset = delegate->GetTaskId();
DCHECK(IsValidSweepingSpace(space_to_start_));
const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
const AllocationSpace space_id = static_cast<AllocationSpace>(
FIRST_GROWABLE_PAGED_SPACE +
......@@ -91,24 +106,19 @@ class Sweeper::SweeperJob final : public JobTask {
// Do not sweep code space concurrently.
if (space_id == CODE_SPACE) continue;
DCHECK(IsValidSweepingSpace(space_id));
sweeper_->ConcurrentSweepSpace(space_id, delegate);
sweeper_->SweepSpaceFromTask(space_id);
}
(*num_sweeping_tasks_)--;
pending_sweeper_tasks_->Signal();
}
size_t GetMaxConcurrency(size_t worker_count) const override {
const size_t kPagePerTask = 2;
return std::min<size_t>(
kMaxSweeperTasks,
worker_count +
(sweeper_->ConcurrentSweepingPageCount() + kPagePerTask - 1) /
kPagePerTask);
}
private:
Sweeper* const sweeper_;
base::Semaphore* const pending_sweeper_tasks_;
std::atomic<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
GCTracer* const tracer_;
DISALLOW_COPY_AND_ASSIGN(SweeperJob);
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
class Sweeper::IncrementalSweeperTask final : public CancelableTask {
......@@ -126,7 +136,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
sweeper_->incremental_sweeper_pending_ = false;
if (sweeper_->sweeping_in_progress()) {
if (!sweeper_->IncrementalSweepSpace(CODE_SPACE)) {
if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
sweeper_->ScheduleIncrementalSweepingTask();
}
}
......@@ -137,11 +147,8 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};
// Cancels the background sweeping job, if one was started and is still valid.
// Invoked from MarkCompactCollector::TearDown() (see the change description
// above) so that no sweeper job outlives the collector.
void Sweeper::TearDown() {
if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
}
void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_);
sweeping_in_progress_ = true;
iterability_in_progress_ = true;
should_reduce_memory_ = heap_->ShouldReduceMemory();
......@@ -167,12 +174,20 @@ void Sweeper::StartSweeping() {
}
void Sweeper::StartSweeperTasks() {
DCHECK(!job_handle_ || !job_handle_->IsValid());
DCHECK_EQ(0, num_tasks_);
DCHECK_EQ(0, num_sweeping_tasks_);
if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
job_handle_ = V8::GetCurrentPlatform()->PostJob(
TaskPriority::kUserVisible,
std::make_unique<SweeperJob>(heap_->isolate(), this));
ForAllSweepingSpaces([this](AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_++;
auto task = std::make_unique<SweeperTask>(
heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
task_ids_[num_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
});
ScheduleIncrementalSweepingTask();
}
}
......@@ -197,6 +212,22 @@ void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
});
}
// Aborts all not-yet-started sweeper tasks and waits for the already-running
// ones to finish. On return, num_tasks_ is reset and no background sweeper
// task is active.
void Sweeper::AbortAndWaitForTasks() {
// Without concurrent sweeping, no background tasks were ever posted.
if (!FLAG_concurrent_sweeping) return;
for (int i = 0; i < num_tasks_; i++) {
// TryAbort only succeeds for tasks that have not started running yet.
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
TryAbortResult::kTaskAborted) {
// Task is (or was) running: wait for it to signal completion on the
// semaphore (see SweeperTask::RunInternal, which signals after it
// decrements the counter itself).
pending_sweeper_tasks_semaphore_.Wait();
} else {
// Aborted before running: the task will never decrement the counter,
// so account for it here.
num_sweeping_tasks_--;
}
}
num_tasks_ = 0;
DCHECK_EQ(0, num_sweeping_tasks_);
}
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
......@@ -207,7 +238,7 @@ void Sweeper::EnsureCompleted() {
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
AbortAndWaitForTasks();
ForAllSweepingSpaces([this](AllocationSpace space) {
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
......@@ -234,9 +265,7 @@ void Sweeper::SupportConcurrentSweeping() {
});
}
bool Sweeper::AreSweeperTasksRunning() {
return job_handle_ && job_handle_->IsValid() && !job_handle_->IsCompleted();
}
// True while any posted background sweeper task has not yet completed;
// each task decrements num_sweeping_tasks_ when it finishes (see
// SweeperTask::RunInternal).
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space,
......@@ -416,16 +445,9 @@ int Sweeper::RawSweep(
p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
// Returns how many pages are still queued for concurrent sweeping in
// OLD_SPACE and MAP_SPACE; used by SweeperJob::GetMaxConcurrency to size the
// job's worker count. Holds mutex_ while reading the sweeping lists, which
// are also accessed from background sweeping.
size_t Sweeper::ConcurrentSweepingPageCount() {
base::MutexGuard guard(&mutex_);
return sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)].size() +
sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size();
}
void Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
JobDelegate* delegate) {
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
while (!delegate->ShouldYield() &&
while (!stop_sweeper_tasks_ &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
// Typed slot sets are only recorded on code pages. Code pages
// are not swept concurrently to the application to ensure W^X.
......@@ -435,7 +457,7 @@ void Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
}
}
bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
......@@ -514,7 +536,7 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
DCHECK(!FLAG_concurrent_sweeping || !job_handle_ || !job_handle_->IsValid());
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
PrepareToBeSweptPage(space, page);
} else {
......
......@@ -81,8 +81,6 @@ class Sweeper {
bool sweeping_in_progress() const { return sweeping_in_progress_; }
void TearDown();
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(
......@@ -125,7 +123,7 @@ class Sweeper {
private:
class IncrementalSweeperTask;
class IterabilityTask;
class SweeperJob;
class SweeperTask;
static const int kNumberOfSweepingSpaces =
LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
......@@ -174,13 +172,13 @@ class Sweeper {
return is_done;
}
size_t ConcurrentSweepingPageCount();
void ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate);
void SweepSpaceFromTask(AllocationSpace identity);
// Sweeps incrementally one page from the given space. Returns true if
// there are no more pages to sweep in the given space.
bool IncrementalSweepSpace(AllocationSpace identity);
bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
void AbortAndWaitForTasks();
Page* GetSweepingPageSafe(AllocationSpace space);
......@@ -204,7 +202,9 @@ class Sweeper {
Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_;
std::unique_ptr<JobHandle> job_handle_;
int num_tasks_;
CancelableTaskManager::Id task_ids_[kNumberOfSweepingSpaces];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces];
......@@ -212,6 +212,11 @@ class Sweeper {
// Main thread can finalize sweeping, while background threads allocation slow
// path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
// the semaphore for maintaining a task counter on the main thread.
std::atomic<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
std::atomic<bool> stop_sweeper_tasks_;
// Pages that are only made iterable but have their free lists ignored.
IterabilityList iterability_list_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment