Commit fef977e7 authored by Leon Bettscheider, committed by V8 LUCI CQ

[heap] Pass GarbageCollector to ScheduleJob

This CL removes the marking_worklists parameter from the
ConcurrentMarking constructor, and instead sets marking_worklists_
in ScheduleJob based on the new GarbageCollector parameter.

We will later use the ConcurrentMarking class for both major and minor
marking. This CL does preparatory work for that by allowing the mode of
operation (minor/major) to be selected through ScheduleJob (a short
sketch of the new call pattern follows the commit metadata below).

Bug: v8:13012
Change-Id: I44a35155cf19e1df139a6a4e5bc5cbedbc3e00aa
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3850289
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82910}
parent 0d591e91
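
For orientation, here is a minimal standalone sketch of the scheduling contract this CL introduces: the collector type is remembered at ScheduleJob() time, checked when the job is rescheduled, reused by Resume(), and cleared in Join(). It is illustrative only; std::optional stands in for v8::base::Optional, assert() for DCHECK, a plain bool for the platform job handle, and the class and member names (ConcurrentMarkingSketch, job_running_) are invented for the example rather than taken from V8.

// Standalone sketch of the scheduling contract (not V8 code). C++17.
#include <cassert>
#include <optional>

enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };

class ConcurrentMarkingSketch {
 public:
  void ScheduleJob(GarbageCollector collector) {
    assert(IsStopped());
    garbage_collector_ = collector;  // remembered so Resume()/reschedule can reuse it
    job_running_ = true;             // stands in for PostJob(...)
  }

  void RescheduleJobIfNeeded(GarbageCollector collector) {
    if (IsStopped()) {
      // If marking was merely paused, the remembered collector must match.
      assert(!garbage_collector_.has_value() || collector == *garbage_collector_);
      ScheduleJob(collector);
    } else {
      assert(collector == *garbage_collector_);
      // A real implementation would bump priority/concurrency of the live job here.
    }
  }

  void Resume() {
    // PauseScope's destructor resumes with the collector stored at schedule time.
    assert(garbage_collector_.has_value());
    RescheduleJobIfNeeded(*garbage_collector_);
  }

  void Join() {
    job_running_ = false;
    garbage_collector_.reset();  // the next GC cycle may use a different collector
  }

  bool IsStopped() const { return !job_running_; }

 private:
  std::optional<GarbageCollector> garbage_collector_;
  bool job_running_ = false;
};

int main() {
  ConcurrentMarkingSketch cm;
  cm.ScheduleJob(GarbageCollector::MARK_COMPACTOR);
  cm.RescheduleJobIfNeeded(GarbageCollector::MARK_COMPACTOR);  // same collector: OK
  cm.Join();
  return 0;
}

Remembering the collector on the object is what lets PauseScope's destructor resume the job without its caller having to re-specify whether major or minor marking was in progress.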
@@ -615,12 +615,8 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
   const bool should_keep_ages_unchanged_;
 };
-ConcurrentMarking::ConcurrentMarking(Heap* heap,
-                                     MarkingWorklists* marking_worklists,
-                                     WeakObjects* weak_objects)
-    : heap_(heap),
-      marking_worklists_(marking_worklists),
-      weak_objects_(weak_objects) {
+ConcurrentMarking::ConcurrentMarking(Heap* heap, WeakObjects* weak_objects)
+    : heap_(heap), weak_objects_(weak_objects) {
 #ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
   // Concurrent marking requires atomic object field writes.
   CHECK(!FLAG_concurrent_marking);
@@ -780,10 +776,15 @@ size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
                     weak_objects_->current_ephemerons.Size()}));
 }
-void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
+void ConcurrentMarking::ScheduleJob(GarbageCollector garbage_collector,
+                                    TaskPriority priority) {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   DCHECK(!heap_->IsTearingDown());
-  DCHECK(!job_handle_ || !job_handle_->IsValid());
+  DCHECK(IsStopped());
+  garbage_collector_ = garbage_collector;
+  // TODO(v8:13012): Set marking_worklists_ based on GarbageCollector later.
+  marking_worklists_ = heap_->mark_compact_collector()->marking_worklists();
   job_handle_ = V8::GetCurrentPlatform()->PostJob(
       priority, std::make_unique<JobTask>(
@@ -793,18 +794,25 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
   DCHECK(job_handle_->IsValid());
 }
-void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
+bool ConcurrentMarking::IsWorkLeft() {
+  return !marking_worklists_->shared()->IsEmpty() ||
+         !weak_objects_->current_ephemerons.IsEmpty() ||
+         !weak_objects_->discovered_ephemerons.IsEmpty();
+}
+void ConcurrentMarking::RescheduleJobIfNeeded(
+    GarbageCollector garbage_collector, TaskPriority priority) {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   if (heap_->IsTearingDown()) return;
-  if (marking_worklists_->shared()->IsEmpty() &&
-      weak_objects_->current_ephemerons.IsEmpty() &&
-      weak_objects_->discovered_ephemerons.IsEmpty()) {
-    return;
-  }
-  if (!job_handle_ || !job_handle_->IsValid()) {
-    ScheduleJob(priority);
+  if (IsStopped()) {
+    // This DCHECK is for the case that concurrent marking was paused.
+    DCHECK_IMPLIES(garbage_collector_.has_value(),
+                   garbage_collector == garbage_collector_);
+    ScheduleJob(garbage_collector, priority);
   } else {
+    DCHECK_EQ(garbage_collector, garbage_collector_);
+    if (!IsWorkLeft()) return;
     if (priority != TaskPriority::kUserVisible)
       job_handle_->UpdatePriority(priority);
     job_handle_->NotifyConcurrencyIncrease();
@@ -815,6 +823,7 @@ void ConcurrentMarking::Join() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   if (!job_handle_ || !job_handle_->IsValid()) return;
   job_handle_->Join();
+  garbage_collector_.reset();
 }
 bool ConcurrentMarking::Pause() {
@@ -826,11 +835,16 @@ bool ConcurrentMarking::Pause() {
 }
 bool ConcurrentMarking::IsStopped() {
-  if (!FLAG_concurrent_marking) return true;
+  if (!FLAG_concurrent_marking && !FLAG_parallel_marking) return true;
   return !job_handle_ || !job_handle_->IsValid();
 }
+void ConcurrentMarking::Resume() {
+  DCHECK(garbage_collector_.has_value());
+  RescheduleJobIfNeeded(garbage_collector_.value());
+}
 void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
   DCHECK(!job_handle_ || !job_handle_->IsValid());
   for (size_t i = 1; i < task_state_.size(); i++) {
@@ -891,7 +905,11 @@ ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
 }
 ConcurrentMarking::PauseScope::~PauseScope() {
-  if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
+  if (resume_on_exit_) {
+    DCHECK_EQ(concurrent_marking_->garbage_collector_,
+              GarbageCollector::MARK_COMPACTOR);
+    concurrent_marking_->Resume();
+  }
 }
 }  // namespace internal
......
@@ -9,6 +9,7 @@
 #include "include/v8-platform.h"
 #include "src/base/atomic-utils.h"
+#include "src/base/optional.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
 #include "src/heap/marking-visitor.h"
@@ -53,13 +54,13 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     const bool resume_on_exit_;
   };
-  ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
-                    WeakObjects* weak_objects);
+  ConcurrentMarking(Heap* heap, WeakObjects* weak_objects);
   // Schedules asynchronous job to perform concurrent marking at |priority|.
   // Objects in the heap should not be moved while these are active (can be
   // stopped safely via Stop() or PauseScope).
-  void ScheduleJob(TaskPriority priority = TaskPriority::kUserVisible);
+  void ScheduleJob(GarbageCollector garbage_collector,
+                   TaskPriority priority = TaskPriority::kUserVisible);
   // Waits for scheduled job to complete.
   void Join();
@@ -71,6 +72,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   // not already running, otherwise adjusts the number of workers running job
   // and the priority if different from the default kUserVisible.
   void RescheduleJobIfNeeded(
+      GarbageCollector garbage_collector,
       TaskPriority priority = TaskPriority::kUserVisible);
   // Flushes native context sizes to the given table of the main thread.
   void FlushNativeContexts(NativeContextStats* main_stats);
@@ -104,14 +106,19 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   void Run(JobDelegate* delegate, base::EnumSet<CodeFlushMode> code_flush_mode,
            unsigned mark_compact_epoch, bool should_keep_ages_unchanged);
   size_t GetMaxConcurrency(size_t worker_count);
+  bool IsWorkLeft();
+  void Resume();
   std::unique_ptr<JobHandle> job_handle_;
   Heap* const heap_;
-  MarkingWorklists* const marking_worklists_;
+  base::Optional<GarbageCollector> garbage_collector_;
+  MarkingWorklists* marking_worklists_;
   WeakObjects* const weak_objects_;
   std::vector<std::unique_ptr<TaskState>> task_state_;
   std::atomic<size_t> total_marked_bytes_{0};
   std::atomic<bool> another_ephemeron_iteration_{false};
+  friend class Heap;
 };
 }  // namespace internal
......
@@ -2466,7 +2466,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
     if (FLAG_concurrent_marking &&
         client->heap()->incremental_marking()->IsMarking()) {
-      client->heap()->concurrent_marking()->RescheduleJobIfNeeded();
+      client->heap()->concurrent_marking()->Resume();
     }
   });
@@ -5378,11 +5378,10 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
       new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
-    concurrent_marking_.reset(new ConcurrentMarking(
-        this, mark_compact_collector_->marking_worklists(),
-        mark_compact_collector_->weak_objects()));
+    concurrent_marking_.reset(
+        new ConcurrentMarking(this, mark_compact_collector_->weak_objects()));
   } else {
-    concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
+    concurrent_marking_.reset(new ConcurrentMarking(this, nullptr));
   }
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
......
@@ -326,7 +326,7 @@ void IncrementalMarking::StartMarkingMajor() {
   }
   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
-    heap_->concurrent_marking()->ScheduleJob();
+    heap_->concurrent_marking()->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
   }
   // Ready to start incremental marking.
@@ -872,7 +872,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
       heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
   TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
                  ThreadKind::kMain);
-  DCHECK(IsMarking());
+  DCHECK(IsMajorMarking());
   double start = heap_->MonotonicallyIncreasingTimeInMs();
   size_t bytes_to_process = 0;
@@ -930,7 +930,8 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
   if (FLAG_concurrent_marking) {
     local_marking_worklists()->ShareWork();
-    heap_->concurrent_marking()->RescheduleJobIfNeeded();
+    heap_->concurrent_marking()->RescheduleJobIfNeeded(
+        GarbageCollector::MARK_COMPACTOR);
   }
   const double current_time = heap_->MonotonicallyIncreasingTimeInMs();
......
@@ -2456,7 +2456,8 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
       "Marking of Code objects require write access to Code page headers");
   if (parallel_marking_)
     heap_->concurrent_marking()->RescheduleJobIfNeeded(
-        TaskPriority::kUserBlocking);
+        GarbageCollector::MARK_COMPACTOR, TaskPriority::kUserBlocking);
   while (local_marking_worklists()->Pop(&object) ||
          local_marking_worklists()->PopOnHold(&object)) {
     // Left trimming may result in grey or black filler objects on the marking
@@ -2700,7 +2701,7 @@ void MarkCompactCollector::MarkLiveObjects() {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FULL_CLOSURE_PARALLEL);
     parallel_marking_ = true;
     heap_->concurrent_marking()->RescheduleJobIfNeeded(
-        TaskPriority::kUserBlocking);
+        GarbageCollector::MARK_COMPACTOR, TaskPriority::kUserBlocking);
     MarkTransitiveClosure();
     {
       TRACE_GC(heap()->tracer(),
......
@@ -38,13 +38,12 @@ TEST(ConcurrentMarking) {
         MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
   }
-  MarkingWorklists marking_worklists;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking =
-      new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
-  PublishSegment(*marking_worklists.shared(),
+      new ConcurrentMarking(heap, &weak_objects);
+  PublishSegment(*collector->marking_worklists()->shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
+  concurrent_marking->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
   concurrent_marking->Join();
   delete concurrent_marking;
 }
@@ -61,17 +60,16 @@ TEST(ConcurrentMarkingReschedule) {
         MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
   }
-  MarkingWorklists marking_worklists;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking =
-      new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
-  PublishSegment(*marking_worklists.shared(),
+      new ConcurrentMarking(heap, &weak_objects);
+  PublishSegment(*collector->marking_worklists()->shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
+  concurrent_marking->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
   concurrent_marking->Join();
-  PublishSegment(*marking_worklists.shared(),
+  PublishSegment(*collector->marking_worklists()->shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleJobIfNeeded();
+  concurrent_marking->RescheduleJobIfNeeded(GarbageCollector::MARK_COMPACTOR);
   concurrent_marking->Join();
   delete concurrent_marking;
 }
@@ -88,19 +86,18 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
         MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
   }
-  MarkingWorklists marking_worklists;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking =
-      new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
+      new ConcurrentMarking(heap, &weak_objects);
   for (int i = 0; i < 5000; i++)
-    PublishSegment(*marking_worklists.shared(),
+    PublishSegment(*collector->marking_worklists()->shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
+  concurrent_marking->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
   concurrent_marking->Pause();
   for (int i = 0; i < 5000; i++)
-    PublishSegment(*marking_worklists.shared(),
+    PublishSegment(*collector->marking_worklists()->shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleJobIfNeeded();
+  concurrent_marking->RescheduleJobIfNeeded(GarbageCollector::MARK_COMPACTOR);
   concurrent_marking->Join();
   delete concurrent_marking;
 }
......