Commit 4320ed7d authored by Leon Bettscheider, committed by V8 LUCI CQ

[heap] Enable MinorMC concurrent marking

This CL implements ConcurrentMarking::RunMinor which uses
YoungGenerationConcurrentMarkingVisitor (go/YGCMV).
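
The body of RunMinor (shown in full in the diff below) is a budgeted work
loop: pop objects from the marking worklist, defer any object that lies
inside the new-space area the mutator is still allocating into, visit
everything else with the young-generation visitor, and check for preemption
roughly every 64 KB or 1000 objects. The following is a minimal, standalone
model of that loop structure; the std::deque worklist and the fake address
window are assumptions for illustration, not V8 code.

// Standalone model of the budgeted marking loop in ConcurrentMarking::RunMinor.
// All types here (FakeObject, the worklists, the visit cost) are illustrative
// assumptions, not V8 APIs.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>

struct FakeObject {
  uintptr_t address;
  size_t size;
};

int main() {
  constexpr size_t kBytesUntilInterruptCheck = 64 * 1024;
  constexpr int kObjectsUntilInterruptCheck = 1000;

  // Pretend [top, limit) is the new-space area the mutator is still bump-
  // allocating into; objects there are deferred ("pushed on hold") instead
  // of being visited concurrently.
  const uintptr_t new_space_top = 0x1000, new_space_limit = 0x2000;

  std::deque<FakeObject> worklist = {{0x0800, 64}, {0x1800, 32}, {0x3000, 128}};
  std::deque<FakeObject> on_hold;
  size_t marked_bytes = 0;
  bool should_yield = false;  // stands in for JobDelegate::ShouldYield()

  bool done = false;
  while (!done) {
    size_t current_marked_bytes = 0;
    int objects_processed = 0;
    while (current_marked_bytes < kBytesUntilInterruptCheck &&
           objects_processed < kObjectsUntilInterruptCheck) {
      if (worklist.empty()) {
        done = true;
        break;
      }
      FakeObject object = worklist.front();
      worklist.pop_front();
      objects_processed++;
      if (object.address >= new_space_top && object.address < new_space_limit) {
        on_hold.push_back(object);  // revisit later, outside the hot allocation area
      } else {
        current_marked_bytes += object.size;  // "visit" the object
      }
    }
    marked_bytes += current_marked_bytes;
    if (should_yield) break;  // preempted by the job scheduler
  }
  std::cout << "marked " << marked_bytes << " bytes, " << on_hold.size()
            << " objects deferred\n";
}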

This CL also implements Teardown and FinishConcurrentMarking, and
schedules minor concurrent marking in
IncrementalMarking::StartMarkingMinor.

Additionally, this CL opts out of ConcurrentMarking::PauseScope in
Heap::MinorMarkCompact if concurrent MinorMC is active, because
concurrent marking will subsequently be finalized in
FinishConcurrentMarking.
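
Taken together, these changes set up the following lifecycle:
StartMarkingMinor schedules the concurrent job, and the minor collector
later calls FinishConcurrentMarking, which joins the job and flushes its
per-task data. Because the tasks are joined there, Heap::MinorMarkCompact
no longer needs to pause them up front. The sketch below is a minimal,
standalone model of that schedule/join pattern; the std::async usage and
all names are assumptions for illustration, V8 itself schedules the work
through its platform's job API.

// Standalone model of the schedule -> join -> flush lifecycle that replaces
// pausing concurrent markers during a minor GC. Illustrative only.
#include <atomic>
#include <cstddef>
#include <future>
#include <iostream>

struct ConcurrentMarker {
  std::future<size_t> job;
  std::atomic<bool> stop{false};

  void ScheduleJob() {
    job = std::async(std::launch::async, [this] {
      size_t marked = 0;
      while (!stop.load(std::memory_order_relaxed) && marked < 1'000'000) {
        marked += 64;  // pretend to mark one object
      }
      return marked;
    });
  }

  // Analogous to FinishConcurrentMarking: join the task and consume its
  // results; after this, no marker can still be running, so nothing needs
  // to be paused.
  size_t Join() {
    stop.store(true, std::memory_order_relaxed);
    return job.get();
  }
};

int main() {
  ConcurrentMarker marker;
  marker.ScheduleJob();           // as in IncrementalMarking::StartMarkingMinor
  size_t marked = marker.Join();  // as in FinishConcurrentMarking
  std::cout << "concurrently marked " << marked << " bytes\n";
}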

Bug: v8:13012
Change-Id: I78fe18416e564565c6421243ff40dec7561fb20a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3850292
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82978}
parent 633cc57f
......@@ -633,7 +633,9 @@ class ConcurrentMarking::JobTaskMinor : public v8::JobTask {
// TRACE_GC is not needed here because the caller opens the right scope.
concurrent_marking_->RunMinor(delegate);
} else {
// TODO(v8:13012): TRACE_GC_EPOCH for MinorMC here.
TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
concurrent_marking_->RunMinor(delegate);
}
}
......@@ -796,7 +798,83 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate,
}
void ConcurrentMarking::RunMinor(JobDelegate* delegate) {
// TODO(v8:13012): Implement
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterruptCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
TaskState* task_state = task_state_[task_id].get();
MarkingWorklists::Local local_marking_worklists(
marking_worklists_, MarkingWorklists::Local::kNoCppMarkingState);
YoungGenerationConcurrentMarkingVisitor visitor(
heap_, &local_marking_worklists, &task_state->memory_chunk_data);
double time_ms;
size_t marked_bytes = 0;
Isolate* isolate = heap_->isolate();
if (v8_flags.trace_concurrent_marking) {
isolate->PrintWithTimestamp("Starting minor concurrent marking task %d\n",
task_id);
}
{
TimedScope scope(&time_ms);
bool done = false;
CodePageHeaderModificationScope rwx_write_scope(
"Marking a Code object requires write access to the Code page header");
while (!done) {
size_t current_marked_bytes = 0;
int objects_processed = 0;
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterruptCheck) {
HeapObject object;
if (!local_marking_worklists.Pop(&object)) {
done = true;
break;
}
objects_processed++;
Address new_space_top = kNullAddress;
Address new_space_limit = kNullAddress;
Address new_large_object = kNullAddress;
if (heap_->new_space()) {
// The order of the two loads is important.
new_space_top = heap_->new_space()->original_top_acquire();
new_space_limit = heap_->new_space()->original_limit_relaxed();
}
if (heap_->new_lo_space()) {
new_large_object = heap_->new_lo_space()->pending_object();
}
Address addr = object.address();
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
local_marking_worklists.PushOnHold(object);
} else {
Map map = object.map(isolate, kAcquireLoad);
current_marked_bytes += visitor.Visit(map, object);
}
}
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
if (delegate->ShouldYield()) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ConcurrentMarking::RunMinor Preempted");
break;
}
}
local_marking_worklists.Publish();
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
}
if (v8_flags.trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Minor task %d concurrently marked %dKB in %.2fms\n", task_id,
static_cast<int>(marked_bytes / KB), time_ms);
}
}
size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
......
......@@ -93,6 +93,9 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
bool another_ephemeron_iteration() {
return another_ephemeron_iteration_.load();
}
base::Optional<GarbageCollector> garbage_collector() const {
return garbage_collector_;
}
private:
struct TaskState {
......
......@@ -2654,6 +2654,7 @@ void Heap::MinorMarkCompact() {
DCHECK(v8_flags.minor_mc);
CHECK_EQ(NOT_IN_GC, gc_state());
DCHECK(new_space());
DCHECK(!incremental_marking()->IsMajorMarking());
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
......@@ -2665,16 +2666,6 @@ void Heap::MinorMarkCompact() {
OptionalAlwaysAllocateScope always_allocate_shared_heap(
isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
: nullptr);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
// Young generation garbage collection is orthogonal from full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
minor_mark_compact_collector_->Prepare();
minor_mark_compact_collector_->CollectGarbage();
......
......@@ -358,6 +358,7 @@ void IncrementalMarking::StartMarkingMinor() {
is_marking_ = true;
heap_->SetIsMarkingFlag(true);
heap_->SetIsMinorMarkingFlag(true);
MarkingBarrier::ActivateAll(heap(), false, MarkingBarrierType::kMinor);
......@@ -366,9 +367,10 @@ void IncrementalMarking::StartMarkingMinor() {
MarkRoots();
}
local_marking_worklists()->Publish();
// TODO(v8:13012): Schedule concurrent marking.
if (v8_flags.concurrent_marking && !heap_->IsTearingDown()) {
heap_->concurrent_marking()->ScheduleJob(
GarbageCollector::MINOR_MARK_COMPACTOR);
}
if (v8_flags.trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
......@@ -588,6 +590,7 @@ bool IncrementalMarking::Stop() {
is_marking_ = false;
heap_->SetIsMarkingFlag(false);
heap_->SetIsMinorMarkingFlag(false);
is_compacting_ = false;
FinishBlackAllocation();
......
......@@ -521,7 +521,7 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
if (heap()->incremental_marking()->IsMarking()) {
if (heap()->incremental_marking()->IsMajorMarking()) {
local_marking_worklists()->Publish();
heap()->main_thread_local_heap()->marking_barrier()->Publish();
// Marking barriers of LocalHeaps will be published in their destructors.
......@@ -1054,6 +1054,8 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::FinishConcurrentMarking() {
// FinishConcurrentMarking is called for both concurrent and parallel
// marking. It is safe to call this function when tasks are already finished.
DCHECK_EQ(heap()->concurrent_marking()->garbage_collector(),
GarbageCollector::MARK_COMPACTOR);
if (v8_flags.parallel_marking || v8_flags.concurrent_marking) {
heap()->concurrent_marking()->Join();
heap()->concurrent_marking()->FlushMemoryChunkData(
......@@ -5672,7 +5674,24 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() = default;
void MinorMarkCompactCollector::SetUp() {}
void MinorMarkCompactCollector::TearDown() {}
void MinorMarkCompactCollector::TearDown() {
if (heap()->incremental_marking()->IsMinorMarking()) {
local_marking_worklists()->Publish();
heap()->main_thread_local_heap()->marking_barrier()->Publish();
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
}
}
void MinorMarkCompactCollector::FinishConcurrentMarking() {
if (v8_flags.concurrent_marking) {
DCHECK_EQ(heap()->concurrent_marking()->garbage_collector(),
GarbageCollector::MINOR_MARK_COMPACTOR);
heap()->concurrent_marking()->Join();
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
}
}
// static
constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
......@@ -6324,6 +6343,10 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
// TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FINISH_INCREMENTAL.
if (heap_->incremental_marking()->Stop()) {
MarkingBarrier::PublishAll(heap());
// TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FULL_CLOSURE_PARALLEL_JOIN.
// TODO(v8:13012): Instead of finishing concurrent marking here, we could
// continue running it to replace parallel marking.
FinishConcurrentMarking();
was_marked_incrementally = true;
}
}
......
......@@ -840,6 +840,7 @@ class MinorMarkCompactCollector final : public CollectorBase {
void Evacuate();
void EvacuatePagesInParallel();
void UpdatePointersAfterEvacuation();
void FinishConcurrentMarking();
void SweepArrayBufferExtensions();
......
......@@ -662,12 +662,15 @@
F(MINOR_MC_BACKGROUND_MARKING) \
F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL)
#define TRACER_YOUNG_EPOCH_SCOPES(F) \
F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
F(MINOR_MARK_COMPACTOR) \
F(MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(SCAVENGER) \
F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL) \
#define TRACER_YOUNG_EPOCH_SCOPES(F) \
F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
F(MINOR_MARK_COMPACTOR) \
F(MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
F(MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
F(MINOR_MC_BACKGROUND_MARKING) \
F(SCAVENGER) \
F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS)
#endif // V8_INIT_HEAP_SYMBOLS_H_