Commit f8ef845e authored by Etienne Pierre-doray, committed by Commit Bot

Reland^3 "[Heap]: Convert Sweep to Job"

This is a reland of 873e5aa3

Reason for revert: Flaky cctest/test-incremental-marking/IncrementalMarkingUsingTasks
Safe to reland as-is after
https://chromium-review.googlesource.com/c/v8/v8/+/2562121

Original change's description:
> Reland "Reland "[Heap]: Convert Sweep to Job""
>
> This is a reland of b16c7e5b
>
> Issue: ShouldYield is called multiple times.
> Fix: ConcurrentSweepSpace returns false if not done (yielding), to avoid
> calling it again.
>
> Issue: failing test-streaming-compilation
> Safe to reland after
> https://chromium-review.googlesource.com/c/v8/v8/+/2507379
>
> Original change's description:
> > Reland "[Heap]: Convert Sweep to Job"
> >
> > This is a reland of 795c0b1c
> > Reason for revert:
> > TSAN failures https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20TSAN/33884
> > Safe to reland as-is with fix to EagerUnmappingInCollectAllAvailableGarbage
> > https://chromium-review.googlesource.com/c/v8/v8/+/2502809
> >
> > Original change's description:
> > > [Heap]: Convert Sweep to Job
> > >
> > > max concurrency is inferred from queue size for OLD_SPACE & MAP_SPACE.
> > > Extra Sweeper::TearDown() in MarkCompactCollector::TearDown() is needed
> > > to cancel job.
> > >
> > > Change-Id: Iafba7d7d24e8f6e5c5a1d5c0348dea731f0ac224
> > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2480783
> > > Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> > > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > > Cr-Commit-Position: refs/heads/master@{#70767}
> >
> > Change-Id: Id9a5baceed4664f53da39597af56a2067e4f3c6f
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2502808
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#70845}
>
> Change-Id: I32de9faebdbd2f7f6d7f9a9525871fc691fb3f2c
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2507378
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71347}

Bug: v8:11198
Change-Id: Ie61b5e90e2e4984e72beb8374fa73814acedb8fe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2562749
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71463}
parent 767b5a91
...@@ -435,6 +435,7 @@ void MarkCompactCollector::TearDown() { ...@@ -435,6 +435,7 @@ void MarkCompactCollector::TearDown() {
// Marking barriers of LocalHeaps will be published in their destructors. // Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear(); marking_worklists()->Clear();
} }
sweeper()->TearDown();
} }
void MarkCompactCollector::AddEvacuationCandidate(Page* p) { void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
......
...@@ -20,12 +20,8 @@ namespace internal { ...@@ -20,12 +20,8 @@ namespace internal {
Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state) Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
: heap_(heap), : heap_(heap),
marking_state_(marking_state), marking_state_(marking_state),
num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
incremental_sweeper_pending_(false), incremental_sweeper_pending_(false),
sweeping_in_progress_(false), sweeping_in_progress_(false),
num_sweeping_tasks_(0),
stop_sweeper_tasks_(false),
iterability_task_semaphore_(0), iterability_task_semaphore_(0),
iterability_in_progress_(false), iterability_in_progress_(false),
iterability_task_started_(false), iterability_task_started_(false),
...@@ -33,10 +29,10 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state) ...@@ -33,10 +29,10 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper) Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) { : sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_ = true;
if (!sweeper_->sweeping_in_progress()) return; if (!sweeper_->sweeping_in_progress()) return;
sweeper_->AbortAndWaitForTasks(); if (sweeper_->job_handle_ && sweeper_->job_handle_->IsValid())
sweeper_->job_handle_->Cancel();
// Complete sweeping if there's nothing more to do. // Complete sweeping if there's nothing more to do.
if (sweeper_->IsDoneSweeping()) { if (sweeper_->IsDoneSweeping()) {
...@@ -50,7 +46,6 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper) ...@@ -50,7 +46,6 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
} }
Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() { Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
sweeper_->stop_sweeper_tasks_ = false;
if (!sweeper_->sweeping_in_progress()) return; if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks(); sweeper_->StartSweeperTasks();
...@@ -79,27 +74,36 @@ Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() { ...@@ -79,27 +74,36 @@ Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
// old_space_sweeping_list_ does not need to be cleared as we don't use it. // old_space_sweeping_list_ does not need to be cleared as we don't use it.
} }
class Sweeper::SweeperTask final : public CancelableTask { class Sweeper::SweeperJob final : public JobTask {
public: public:
SweeperTask(Isolate* isolate, Sweeper* sweeper, SweeperJob(Isolate* isolate, Sweeper* sweeper)
base::Semaphore* pending_sweeper_tasks, : sweeper_(sweeper), tracer_(isolate->heap()->tracer()) {}
std::atomic<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
: CancelableTask(isolate),
sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start),
tracer_(isolate->heap()->tracer()) {}
~SweeperTask() override = default; ~SweeperJob() override = default;
private: void Run(JobDelegate* delegate) final {
void RunInternal() final { if (delegate->IsJoiningThread()) {
TRACE_GC(tracer_, GCTracer::Scope::MC_SWEEP);
RunImpl(delegate);
} else {
TRACE_GC1(tracer_, GCTracer::Scope::MC_BACKGROUND_SWEEPING, TRACE_GC1(tracer_, GCTracer::Scope::MC_BACKGROUND_SWEEPING,
ThreadKind::kBackground); ThreadKind::kBackground);
DCHECK(IsValidSweepingSpace(space_to_start_)); RunImpl(delegate);
const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE; }
}
// Upper bound on the number of workers the platform may assign to this job:
// never more than kMaxSweeperTasks, and otherwise the current worker_count
// plus one extra worker per kPagePerTask remaining pages (rounded up), so
// the allowed concurrency shrinks as the sweeping queue drains.
// NOTE(review): ConcurrentSweepingPageCount() appears to count only the
// OLD_SPACE and MAP_SPACE queues (see its definition in this diff);
// CODE_SPACE is swept incrementally on the main thread instead.
size_t GetMaxConcurrency(size_t worker_count) const override {
const size_t kPagePerTask = 2;
return std::min<size_t>(
kMaxSweeperTasks,
worker_count +
(sweeper_->ConcurrentSweepingPageCount() + kPagePerTask - 1) /
kPagePerTask);
}
private:
void RunImpl(JobDelegate* delegate) {
const int offset = delegate->GetTaskId();
for (int i = 0; i < kNumberOfSweepingSpaces; i++) { for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
const AllocationSpace space_id = static_cast<AllocationSpace>( const AllocationSpace space_id = static_cast<AllocationSpace>(
FIRST_GROWABLE_PAGED_SPACE + FIRST_GROWABLE_PAGED_SPACE +
...@@ -107,19 +111,13 @@ class Sweeper::SweeperTask final : public CancelableTask { ...@@ -107,19 +111,13 @@ class Sweeper::SweeperTask final : public CancelableTask {
// Do not sweep code space concurrently. // Do not sweep code space concurrently.
if (space_id == CODE_SPACE) continue; if (space_id == CODE_SPACE) continue;
DCHECK(IsValidSweepingSpace(space_id)); DCHECK(IsValidSweepingSpace(space_id));
sweeper_->SweepSpaceFromTask(space_id); if (!sweeper_->ConcurrentSweepSpace(space_id, delegate)) return;
} }
(*num_sweeping_tasks_)--;
pending_sweeper_tasks_->Signal();
} }
Sweeper* const sweeper_; Sweeper* const sweeper_;
base::Semaphore* const pending_sweeper_tasks_;
std::atomic<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
GCTracer* const tracer_; GCTracer* const tracer_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask); DISALLOW_COPY_AND_ASSIGN(SweeperJob);
}; };
class Sweeper::IncrementalSweeperTask final : public CancelableTask { class Sweeper::IncrementalSweeperTask final : public CancelableTask {
...@@ -137,7 +135,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask { ...@@ -137,7 +135,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
sweeper_->incremental_sweeper_pending_ = false; sweeper_->incremental_sweeper_pending_ = false;
if (sweeper_->sweeping_in_progress()) { if (sweeper_->sweeping_in_progress()) {
if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) { if (!sweeper_->IncrementalSweepSpace(CODE_SPACE)) {
sweeper_->ScheduleIncrementalSweepingTask(); sweeper_->ScheduleIncrementalSweepingTask();
} }
} }
...@@ -148,8 +146,11 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask { ...@@ -148,8 +146,11 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask); DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
}; };
// Cancels the concurrent sweeping job, if one was started and its handle is
// still valid, so that no background sweeper work can run after teardown.
// Called from MarkCompactCollector::TearDown() (see the first hunk of this
// diff) to make sure the job does not outlive the collector.
void Sweeper::TearDown() {
if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
}
void Sweeper::StartSweeping() { void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_);
sweeping_in_progress_ = true; sweeping_in_progress_ = true;
iterability_in_progress_ = true; iterability_in_progress_ = true;
should_reduce_memory_ = heap_->ShouldReduceMemory(); should_reduce_memory_ = heap_->ShouldReduceMemory();
...@@ -175,20 +176,12 @@ void Sweeper::StartSweeping() { ...@@ -175,20 +176,12 @@ void Sweeper::StartSweeping() {
} }
void Sweeper::StartSweeperTasks() { void Sweeper::StartSweeperTasks() {
DCHECK_EQ(0, num_tasks_); DCHECK(!job_handle_ || !job_handle_->IsValid());
DCHECK_EQ(0, num_sweeping_tasks_);
if (FLAG_concurrent_sweeping && sweeping_in_progress_ && if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) { !heap_->delay_sweeper_tasks_for_testing_) {
ForAllSweepingSpaces([this](AllocationSpace space) { job_handle_ = V8::GetCurrentPlatform()->PostJob(
DCHECK(IsValidSweepingSpace(space)); TaskPriority::kUserVisible,
num_sweeping_tasks_++; std::make_unique<SweeperJob>(heap_->isolate(), this));
auto task = std::make_unique<SweeperTask>(
heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
task_ids_[num_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
});
ScheduleIncrementalSweepingTask(); ScheduleIncrementalSweepingTask();
} }
} }
...@@ -213,22 +206,6 @@ void Sweeper::MergeOldToNewRememberedSetsForSweptPages() { ...@@ -213,22 +206,6 @@ void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
}); });
} }
void Sweeper::AbortAndWaitForTasks() {
if (!FLAG_concurrent_sweeping) return;
for (int i = 0; i < num_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
TryAbortResult::kTaskAborted) {
pending_sweeper_tasks_semaphore_.Wait();
} else {
// Aborted case.
num_sweeping_tasks_--;
}
}
num_tasks_ = 0;
DCHECK_EQ(0, num_sweeping_tasks_);
}
void Sweeper::EnsureCompleted() { void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return; if (!sweeping_in_progress_) return;
...@@ -239,7 +216,7 @@ void Sweeper::EnsureCompleted() { ...@@ -239,7 +216,7 @@ void Sweeper::EnsureCompleted() {
ForAllSweepingSpaces( ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
AbortAndWaitForTasks(); if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
ForAllSweepingSpaces([this](AllocationSpace space) { ForAllSweepingSpaces([this](AllocationSpace space) {
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty()); CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
...@@ -266,7 +243,9 @@ void Sweeper::SupportConcurrentSweeping() { ...@@ -266,7 +243,9 @@ void Sweeper::SupportConcurrentSweeping() {
}); });
} }
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; } bool Sweeper::AreSweeperTasksRunning() {
return job_handle_ && job_handle_->IsValid() && !job_handle_->IsCompleted();
}
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory( V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space, Address free_start, Address free_end, Page* page, Space* space,
...@@ -446,19 +425,27 @@ int Sweeper::RawSweep( ...@@ -446,19 +425,27 @@ int Sweeper::RawSweep(
p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes)); p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
} }
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) { size_t Sweeper::ConcurrentSweepingPageCount() {
Page* page = nullptr; base::MutexGuard guard(&mutex_);
while (!stop_sweeper_tasks_ && return sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)].size() +
((page = GetSweepingPageSafe(identity)) != nullptr)) { sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size();
}
bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
JobDelegate* delegate) {
while (!delegate->ShouldYield()) {
Page* page = GetSweepingPageSafe(identity);
if (page == nullptr) return true;
// Typed slot sets are only recorded on code pages. Code pages // Typed slot sets are only recorded on code pages. Code pages
// are not swept concurrently to the application to ensure W^X. // are not swept concurrently to the application to ensure W^X.
DCHECK(!page->typed_slot_set<OLD_TO_NEW>() && DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
!page->typed_slot_set<OLD_TO_OLD>()); !page->typed_slot_set<OLD_TO_OLD>());
ParallelSweepPage(page, identity); ParallelSweepPage(page, identity);
} }
return false;
} }
bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) { bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
if (Page* page = GetSweepingPageSafe(identity)) { if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity); ParallelSweepPage(page, identity);
} }
...@@ -537,7 +524,7 @@ void Sweeper::AddPage(AllocationSpace space, Page* page, ...@@ -537,7 +524,7 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) { Sweeper::AddPageMode mode) {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space)); DCHECK(IsValidSweepingSpace(space));
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning()); DCHECK(!FLAG_concurrent_sweeping || !job_handle_ || !job_handle_->IsValid());
if (mode == Sweeper::REGULAR) { if (mode == Sweeper::REGULAR) {
PrepareToBeSweptPage(space, page); PrepareToBeSweptPage(space, page);
} else { } else {
......
...@@ -81,6 +81,8 @@ class Sweeper { ...@@ -81,6 +81,8 @@ class Sweeper {
bool sweeping_in_progress() const { return sweeping_in_progress_; } bool sweeping_in_progress() const { return sweeping_in_progress_; }
void TearDown();
void AddPage(AllocationSpace space, Page* page, AddPageMode mode); void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace( int ParallelSweepSpace(
...@@ -123,7 +125,7 @@ class Sweeper { ...@@ -123,7 +125,7 @@ class Sweeper {
private: private:
class IncrementalSweeperTask; class IncrementalSweeperTask;
class IterabilityTask; class IterabilityTask;
class SweeperTask; class SweeperJob;
static const int kNumberOfSweepingSpaces = static const int kNumberOfSweepingSpaces =
LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1; LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
...@@ -172,13 +174,15 @@ class Sweeper { ...@@ -172,13 +174,15 @@ class Sweeper {
return is_done; return is_done;
} }
void SweepSpaceFromTask(AllocationSpace identity); size_t ConcurrentSweepingPageCount();
// Concurrently sweeps many page from the given space. Returns true if there
// are no more pages to sweep in the given space.
bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate);
// Sweeps incrementally one page from the given space. Returns true if // Sweeps incrementally one page from the given space. Returns true if
// there are no more pages to sweep in the given space. // there are no more pages to sweep in the given space.
bool SweepSpaceIncrementallyFromTask(AllocationSpace identity); bool IncrementalSweepSpace(AllocationSpace identity);
void AbortAndWaitForTasks();
Page* GetSweepingPageSafe(AllocationSpace space); Page* GetSweepingPageSafe(AllocationSpace space);
...@@ -202,9 +206,7 @@ class Sweeper { ...@@ -202,9 +206,7 @@ class Sweeper {
Heap* const heap_; Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_; MajorNonAtomicMarkingState* marking_state_;
int num_tasks_; std::unique_ptr<JobHandle> job_handle_;
CancelableTaskManager::Id task_ids_[kNumberOfSweepingSpaces];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_; base::Mutex mutex_;
SweptList swept_list_[kNumberOfSweepingSpaces]; SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces]; SweepingList sweeping_list_[kNumberOfSweepingSpaces];
...@@ -212,11 +214,6 @@ class Sweeper { ...@@ -212,11 +214,6 @@ class Sweeper {
// Main thread can finalize sweeping, while background threads allocation slow // Main thread can finalize sweeping, while background threads allocation slow
// path checks this flag to see whether it could support concurrent sweeping. // path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_; std::atomic<bool> sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
// the semaphore for maintaining a task counter on the main thread.
std::atomic<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
std::atomic<bool> stop_sweeper_tasks_;
// Pages that are only made iterable but have their free lists ignored. // Pages that are only made iterable but have their free lists ignored.
IterabilityList iterability_list_; IterabilityList iterability_list_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment