Commit 98c3f98f authored by Shu-yu Guo, committed by Commit Bot

Revert "[Heap] ScavengerCollector use Jobs."

This reverts commit 9e8c54f8.

Reason for revert: Data races (https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20TSAN/33100)

Original change's description:
> [Heap] ScavengerCollector use Jobs.
> 
> No yielding is necessary since the main thread Join()s.
> 
> max concurrency is determined based on either
> remaining_memory_chunks_ or global pool size
> (copied_list_ + promotion_list_)
> 
> Change-Id: Ie30fa86c44d3224b04df5d79569bce126ce7d96b
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2354390
> Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69746}

TBR=ulan@chromium.org,etiennep@chromium.org

Change-Id: I206397c6086cf18d5e08cf615bdaaf0f34c6d36b
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2398852
Reviewed-by: Shu-yu Guo <syg@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69748}
parent 08c26b80
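
Editorial note on the reverted heuristic: the original change's description says max concurrency is derived from remaining_memory_chunks_ or the global pool size (copied_list_ + promotion_list_), which matches the JobTask::GetMaxConcurrency() deleted in the diff below. A minimal standalone sketch of that shape follows; the class and member names here (MaxConcurrencySketch, remaining_chunks_, copied_entries_, promoted_entries_) are illustrative stand-ins, not V8 code.

#include <algorithm>
#include <atomic>
#include <cstddef>

// Sketch: bound a parallel job's concurrency by outstanding work, as the
// reverted change did. Fields stand in for JobTask's remaining chunk counter
// and the scavenger worklists' global pool sizes.
class MaxConcurrencySketch {
 public:
  static constexpr size_t kMaxTasks = 8;

  size_t GetMaxConcurrency(size_t worker_count) const {
    // Account for local segments held by active workers in addition to the
    // globally visible pool entries (copied + promoted).
    const size_t global_pool =
        copied_entries_.load(std::memory_order_relaxed) +
        promoted_entries_.load(std::memory_order_relaxed);
    return std::min<size_t>(
        kMaxTasks,
        std::max<size_t>(remaining_chunks_.load(std::memory_order_relaxed),
                         worker_count + global_pool));
  }

 private:
  std::atomic<size_t> remaining_chunks_{0};
  std::atomic<size_t> copied_entries_{0};
  std::atomic<size_t> promoted_entries_{0};
};
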
@@ -38,10 +38,6 @@ bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
   return promotion_list_->Pop(task_id_, entry);
 }
 
-void Scavenger::PromotionList::View::FlushToGlobal() {
-  promotion_list_->FlushToGlobal(task_id_);
-}
-
 bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
   return promotion_list_->IsGlobalPoolEmpty();
 }
@@ -82,16 +78,6 @@ bool Scavenger::PromotionList::Pop(int task_id,
   return large_object_promotion_list_.Pop(task_id, entry);
 }
 
-void Scavenger::PromotionList::FlushToGlobal(int task_id) {
-  regular_object_promotion_list_.FlushToGlobal(task_id);
-  large_object_promotion_list_.FlushToGlobal(task_id);
-}
-
-size_t Scavenger::PromotionList::GlobalPoolSize() const {
-  return regular_object_promotion_list_.GlobalPoolSize() +
-         large_object_promotion_list_.GlobalPoolSize();
-}
-
 bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
   return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
          large_object_promotion_list_.IsGlobalPoolEmpty();
@@ -25,6 +25,65 @@
 namespace v8 {
 namespace internal {
 
+class PageScavengingItem final : public ItemParallelJob::Item {
+ public:
+  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+  ~PageScavengingItem() override = default;
+
+  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
+
+ private:
+  MemoryChunk* const chunk_;
+};
+
+class ScavengingTask final : public ItemParallelJob::Task {
+ public:
+  ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
+      : ItemParallelJob::Task(heap->isolate()),
+        heap_(heap),
+        scavenger_(scavenger),
+        barrier_(barrier) {}
+
+  void RunInParallel(Runner runner) final {
+    if (runner == Runner::kForeground) {
+      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+      ProcessItems();
+    } else {
+      TRACE_BACKGROUND_GC(
+          heap_->tracer(),
+          GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+      ProcessItems();
+    }
+  }
+
+ private:
+  void ProcessItems() {
+    double scavenging_time = 0.0;
+    {
+      barrier_->Start();
+      TimedScope scope(&scavenging_time);
+      PageScavengingItem* item = nullptr;
+      while ((item = GetItem<PageScavengingItem>()) != nullptr) {
+        item->Process(scavenger_);
+        item->MarkFinished();
+      }
+      do {
+        scavenger_->Process(barrier_);
+      } while (!barrier_->Wait());
+      scavenger_->Process();
+    }
+    if (FLAG_trace_parallel_scavenge) {
+      PrintIsolate(heap_->isolate(),
+                   "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
+                   static_cast<void*>(this), scavenging_time,
+                   scavenger_->bytes_copied(), scavenger_->bytes_promoted());
+    }
+  }
+
+  Heap* const heap_;
+  Scavenger* const scavenger_;
+  OneshotBarrier* const barrier_;
+};
+
 class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
  public:
   IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
@@ -160,64 +219,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
   }
 };
 
-ScavengerCollector::JobTask::JobTask(
-    ScavengerCollector* outer,
-    std::vector<std::unique_ptr<Scavenger>>* scavengers,
-    std::vector<std::pair<bool, MemoryChunk*>> memory_chunks,
-    Scavenger::CopiedList* copied_list,
-    Scavenger::PromotionList* promotion_list)
-    : outer_(outer),
-      scavengers_(scavengers),
-      memory_chunks_(std::move(memory_chunks)),
-      remaining_memory_chunks_(memory_chunks_.size()),
-      copied_list_(copied_list),
-      promotion_list_(promotion_list) {}
-
-void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
-  TRACE_BACKGROUND_GC(
-      outer_->heap_->tracer(),
-      GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
-  Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
-  ConcurrentScavengePages(scavenger);
-  scavenger->Process(delegate);
-}
-
-size_t ScavengerCollector::JobTask::GetMaxConcurrency(
-    size_t worker_count) const {
-  // We need to account for local segments held by worker_count in addition to
-  // GlobalPoolSize() of copied_list_ and promotion_list_.
-  return std::min<size_t>(
-      kMaxScavengerTasks,
-      std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
-                       worker_count + copied_list_->GlobalPoolSize() +
-                           promotion_list_->GlobalPoolSize()));
-}
-
-void ScavengerCollector::JobTask::ConcurrentScavengePages(
-    Scavenger* scavenger) {
-  if (remaining_memory_chunks_.load(std::memory_order_relaxed) == 0) return;
-  for (size_t i = 0; i < memory_chunks_.size(); ++i) {
-    auto& work_item = memory_chunks_[i];
-    // memory_order_relaxed is sufficient as the work item's state itself hasn't
-    // been modified since the beginning of its associated job. This is only
-    // atomically acquiring the right to work on it.
-    if (reinterpret_cast<std::atomic<bool>*>(&work_item.first)
-            ->exchange(true, std::memory_order_relaxed) == false) {
-      scavenger->ScavengePage(work_item.second);
-      // memory_order_relaxed is sufficient since this is not synchronized with
-      // other state.
-      if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
-          1) {
-        return;
-      }
-    }
-  }
-}
-
 ScavengerCollector::ScavengerCollector(Heap* heap)
-    : isolate_(heap->isolate()), heap_(heap) {}
+    : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
 
 // Remove this crashkey after chromium:1010312 is fixed.
 class ScopedFullHeapCrashKey {
@@ -243,11 +246,23 @@ void ScavengerCollector::CollectGarbage() {
   }
 
   DCHECK(surviving_new_large_objects_.empty());
-  std::vector<std::unique_ptr<Scavenger>> scavengers;
+  ItemParallelJob job(isolate_->cancelable_task_manager(),
+                      &parallel_scavenge_semaphore_);
+  const int kMainThreadId = 0;
+  Scavenger* scavengers[kMaxScavengerTasks];
+  const bool is_logging = isolate_->LogObjectRelocation();
+  const int num_scavenge_tasks = NumberOfScavengeTasks();
+  OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
   Worklist<MemoryChunk*, 64> empty_chunks;
-  Scavenger::CopiedList copied_list(kMaxScavengerTasks);
-  Scavenger::PromotionList promotion_list(kMaxScavengerTasks);
-  EphemeronTableList ephemeron_table_list(kMaxScavengerTasks);
+  Scavenger::CopiedList copied_list(num_scavenge_tasks);
+  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
+  EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
+  for (int i = 0; i < num_scavenge_tasks; i++) {
+    scavengers[i] =
+        new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
+                      &promotion_list, &ephemeron_table_list, i);
+    job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
+  }
 
   {
     Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -274,20 +289,12 @@ void ScavengerCollector::CollectGarbage() {
         return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
       });
 
-  const bool is_logging = isolate_->LogObjectRelocation();
-  for (int i = 0; i < kMaxScavengerTasks; ++i) {
-    scavengers.emplace_back(
-        new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
-                      &promotion_list, &ephemeron_table_list, i));
-  }
-
-  std::vector<std::pair<bool, MemoryChunk*>> memory_chunks;
   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
-      heap_, [&memory_chunks](MemoryChunk* chunk) {
-        memory_chunks.emplace_back(false, chunk);
+      heap_, [&job](MemoryChunk* chunk) {
+        job.AddItem(new PageScavengingItem(chunk));
       });
 
-  RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());
+  RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
 
   {
     // Identify weak unmodified handles. Requires an unmodified graph.
@@ -312,24 +319,18 @@ void ScavengerCollector::CollectGarbage() {
     heap_->IterateRoots(&root_scavenge_visitor, options);
     isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
         &root_scavenge_visitor);
-    scavengers[kMainThreadId]->Flush();
   }
 
   {
     // Parallel phase scavenging all copied and promoted objects.
     TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
-    V8::GetCurrentPlatform()
-        ->PostJob(v8::TaskPriority::kUserBlocking,
-                  std::make_unique<JobTask>(this, &scavengers,
-                                            std::move(memory_chunks),
-                                            &copied_list, &promotion_list))
-        ->Join();
+    job.Run();
     DCHECK(copied_list.IsEmpty());
     DCHECK(promotion_list.IsEmpty());
   }
 
   if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
-    IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
-                            kMainThreadId);
+    IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
+                            num_scavenge_tasks, kMainThreadId);
     DCHECK(copied_list.IsEmpty());
     DCHECK(promotion_list.IsEmpty());
   }
@@ -356,10 +357,10 @@ void ScavengerCollector::CollectGarbage() {
 
   DCHECK(surviving_new_large_objects_.empty());
 
-  for (auto& scavenger : scavengers) {
-    scavenger->Finalize();
+  for (int i = 0; i < num_scavenge_tasks; i++) {
+    scavengers[i]->Finalize();
+    delete scavengers[i];
   }
-  scavengers.clear();
 
   HandleSurvivingNewLargeObjects();
 }
@@ -419,24 +420,23 @@ void ScavengerCollector::CollectGarbage() {
 }
 
 void ScavengerCollector::IterateStackAndScavenge(
-    RootScavengeVisitor* root_scavenge_visitor,
-    std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
+    RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
+    int num_scavenge_tasks, int main_thread_id) {
   // Scan the stack, scavenge the newly discovered objects, and report
   // the survival statistics before and after the stack scanning.
   // This code is not intended for production.
   TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
   size_t survived_bytes_before = 0;
-  for (auto& scavenger : *scavengers) {
+  for (int i = 0; i < num_scavenge_tasks; i++) {
     survived_bytes_before +=
-        scavenger->bytes_copied() + scavenger->bytes_promoted();
+        scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
   }
   heap_->IterateStackRoots(root_scavenge_visitor);
-  (*scavengers)[main_thread_id]->Process();
+  scavengers[main_thread_id]->Process();
   size_t survived_bytes_after = 0;
-  for (auto& scavenger : *scavengers) {
+  for (int i = 0; i < num_scavenge_tasks; i++) {
     survived_bytes_after +=
-        scavenger->bytes_copied() + scavenger->bytes_promoted();
+        scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
   }
   TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                "V8.GCScavengerStackScanning", "survived_bytes_before",
@@ -590,9 +590,10 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
   AddPageToSweeperIfNecessary(page);
 }
 
-void Scavenger::Process(JobDelegate* delegate) {
+void Scavenger::Process(OneshotBarrier* barrier) {
   ScavengeVisitor scavenge_visitor(this);
+  const bool have_barrier = barrier != nullptr;
 
   bool done;
   size_t objects = 0;
   do {
@@ -602,9 +603,9 @@ void Scavenger::Process(JobDelegate* delegate) {
            copied_list_.Pop(&object_and_size)) {
       scavenge_visitor.Visit(object_and_size.first);
       done = false;
-      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
+      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
         if (!copied_list_.IsGlobalPoolEmpty()) {
-          delegate->NotifyConcurrencyIncrease();
+          barrier->NotifyAll();
         }
       }
     }
@@ -614,9 +615,9 @@ void Scavenger::Process(JobDelegate* delegate) {
       HeapObject target = entry.heap_object;
       IterateAndScavengePromotedObject(target, entry.map, entry.size);
       done = false;
-      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
+      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
         if (!promotion_list_.IsGlobalPoolEmpty()) {
-          delegate->NotifyConcurrencyIncrease();
+          barrier->NotifyAll();
         }
       }
     }
@@ -704,11 +705,6 @@ void Scavenger::Finalize() {
   }
 }
 
-void Scavenger::Flush() {
-  copied_list_.FlushToGlobal();
-  promotion_list_.FlushToGlobal();
-}
-
 void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
   ephemeron_table_list_.Push(table);
 }
@@ -33,7 +33,38 @@ constexpr int kEphemeronTableListSegmentSize = 128;
 using EphemeronTableList =
     Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
 
 class ScavengerCollector;
 
+class ScavengerCollector {
+ public:
+  static const int kMaxScavengerTasks = 8;
+  static const int kMaxWaitTimeMs = 2;
+
+  explicit ScavengerCollector(Heap* heap);
+
+  void CollectGarbage();
+
+ private:
+  void MergeSurvivingNewLargeObjects(
+      const SurvivingNewLargeObjectsMap& objects);
+
+  int NumberOfScavengeTasks();
+
+  void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
+  void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
+  void ClearOldEphemerons();
+  void HandleSurvivingNewLargeObjects();
+  void SweepArrayBufferExtensions();
+
+  void IterateStackAndScavenge(RootScavengeVisitor* root_scavenge_visitor,
+                               Scavenger** scavengers, int num_scavenge_tasks,
+                               int main_thread_id);
+
+  Isolate* const isolate_;
+  Heap* const heap_;
+  base::Semaphore parallel_scavenge_semaphore_;
+  SurvivingNewLargeObjectsMap surviving_new_large_objects_;
+
+  friend class Scavenger;
+};
+
 class Scavenger {
  public:
@@ -57,7 +88,6 @@ class Scavenger {
     inline bool Pop(struct PromotionListEntry* entry);
     inline bool IsGlobalPoolEmpty();
     inline bool ShouldEagerlyProcessPromotionList();
-    inline void FlushToGlobal();
 
    private:
     PromotionList* promotion_list_;
@@ -72,12 +102,10 @@ class Scavenger {
     inline void PushLargeObject(int task_id, HeapObject object, Map map,
                                 int size);
     inline bool IsEmpty();
-    inline size_t GlobalPoolSize() const;
     inline size_t LocalPushSegmentSize(int task_id);
     inline bool Pop(int task_id, struct PromotionListEntry* entry);
     inline bool IsGlobalPoolEmpty();
     inline bool ShouldEagerlyProcessPromotionList(int task_id);
-    inline void FlushToGlobal(int task_id);
 
    private:
     static const int kRegularObjectPromotionListSegmentSize = 256;
@@ -106,11 +134,10 @@ class Scavenger {
   // Processes remaining work (=objects) after single objects have been
   // manually scavenged using ScavengeObject or CheckAndScavengeObject.
-  void Process(JobDelegate* delegate = nullptr);
+  void Process(OneshotBarrier* barrier = nullptr);
 
   // Finalize the Scavenger. Needs to be called from the main thread.
   void Finalize();
-  void Flush();
 
   void AddEphemeronHashTable(EphemeronHashTable table);
@@ -249,63 +276,6 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
 
   Scavenger* const scavenger_;
 };
 
-class ScavengerCollector {
- public:
-  static const int kMaxScavengerTasks = 8;
-  static const int kMainThreadId = 0;
-
-  explicit ScavengerCollector(Heap* heap);
-
-  void CollectGarbage();
-
- private:
-  class JobTask : public v8::JobTask {
-   public:
-    explicit JobTask(ScavengerCollector* outer,
-                     std::vector<std::unique_ptr<Scavenger>>* scavengers,
-                     std::vector<std::pair<bool, MemoryChunk*>> memory_chunks,
-                     Scavenger::CopiedList* copied_list,
-                     Scavenger::PromotionList* promotion_list);
-
-    void Run(JobDelegate* delegate) override;
-    size_t GetMaxConcurrency(size_t worker_count) const override;
-
-   private:
-    void ConcurrentScavengePages(Scavenger* scavenger);
-
-    ScavengerCollector* outer_;
-    std::vector<std::unique_ptr<Scavenger>>* scavengers_;
-    std::vector<std::pair<bool, MemoryChunk*>> memory_chunks_;
-    std::atomic<size_t> remaining_memory_chunks_{0};
-    Scavenger::CopiedList* copied_list_;
-    Scavenger::PromotionList* promotion_list_;
-  };
-
-  void MergeSurvivingNewLargeObjects(
-      const SurvivingNewLargeObjectsMap& objects);
-
-  int NumberOfScavengeTasks();
-
-  void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
-  void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
-  void ClearOldEphemerons();
-  void HandleSurvivingNewLargeObjects();
-  void SweepArrayBufferExtensions();
-
-  void IterateStackAndScavenge(
-      RootScavengeVisitor* root_scavenge_visitor,
-      std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id);
-
-  Isolate* const isolate_;
-  Heap* const heap_;
-  SurvivingNewLargeObjectsMap surviving_new_large_objects_;
-
-  friend class Scavenger;
-};
-
 }  // namespace internal
 }  // namespace v8
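
For context on the code removed by this revert: ConcurrentScavengePages() claimed pages with an atomic exchange over a plain bool stored in a std::pair, via reinterpret_cast. Below is a standalone sketch of the same claiming pattern that declares the flag as std::atomic<bool> outright. WorkItem, Chunk and ScavengeChunk are illustrative names, not V8 APIs, and this sketch is not asserted to be the code path the TSAN bot flagged.

#include <atomic>
#include <cstddef>
#include <vector>

// Sketch of the atomic page-claiming loop: each worker scans the shared
// items and claims one via exchange(), so every chunk is processed by
// exactly one worker.
struct Chunk {};

struct WorkItem {
  std::atomic<bool> claimed{false};
  Chunk* chunk = nullptr;
};

inline void ScavengeChunk(Chunk*) { /* process one page */ }

void ClaimAndProcess(std::vector<WorkItem>& items,
                     std::atomic<size_t>& remaining) {
  if (remaining.load(std::memory_order_relaxed) == 0) return;
  for (WorkItem& item : items) {
    // Relaxed ordering suffices for the claim itself: the exchange only
    // arbitrates ownership; it publishes no other data.
    if (!item.claimed.exchange(true, std::memory_order_relaxed)) {
      ScavengeChunk(item.chunk);
      // Stop scanning once the last chunk has been handed out.
      if (remaining.fetch_sub(1, std::memory_order_relaxed) <= 1) return;
    }
  }
}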