Commit 349d4513 authored by Adam Klein, committed by V8 LUCI CQ

Revert "[heap] Refactor atomic marking phase"

This reverts commit a3f66927.

Reason for revert: test failures on TSAN/no-concurrent-marking bot:
https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20TSAN%20-%20no-concurrent-marking/8549/overview

Original change's description:
> [heap] Refactor atomic marking phase
>
> The atomic marking phase was organized into many distinct smaller
> phases. In particular, before http://crrev.com/c/3584115 the marking
> phase was split into two large separate phases.
>
> This CL reorganizes marking into two phases that perform regular V8
> heap marking, Oilpan marking, and ephemeron processing:
> - A parallel phase that likely drains all marking worklists;
> - A single-threaded final phase to catch any leftovers.
>
> This avoids artificially splitting marking into phases and also avoids
> repeatedly starting and joining jobs.
>
> Change-Id: I5cccfc5777837d9ece10d8f4925781bf2d07d9da
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3602507
> Reviewed-by: Omer Katz <omerkatz@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80265}

Change-Id: I4838e9316bd30f8a0b78fa6a27820d3457e1e579
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3614972
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Auto-Submit: Adam Klein <adamk@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80267}
parent 48123d6b
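For orientation before the diff: the structure the original CL describes (and this revert removes) is a parallel phase that likely drains all marking worklists, followed by a single-threaded final phase for leftovers. Below is a minimal standalone sketch of that shape only — not V8's implementation; `Worklist`, `ParallelDrain`, and `FinalizeSingleThreaded` are invented names for illustration.

```cpp
// Hypothetical sketch of a two-phase atomic marking pause (not V8 code).
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

// A toy global marking worklist guarded by a mutex.
struct Worklist {
  std::mutex lock;
  std::deque<int> items;
  bool Pop(int* out) {
    std::lock_guard<std::mutex> guard(lock);
    if (items.empty()) return false;
    *out = items.front();
    items.pop_front();
    return true;
  }
  void Push(int item) {
    std::lock_guard<std::mutex> guard(lock);
    items.push_back(item);
  }
};

// Phase 1: parallel drain -- several workers pop until the list looks empty.
void ParallelDrain(Worklist& worklist, int num_workers) {
  std::vector<std::thread> workers;
  for (int i = 0; i < num_workers; ++i) {
    workers.emplace_back([&worklist] {
      int item;
      while (worklist.Pop(&item)) {
        // "Mark" the object; real marking may push newly discovered work.
      }
    });
  }
  for (auto& worker : workers) worker.join();  // started once, joined once
}

// Phase 2: single-threaded finalization catches leftovers (e.g. work
// republished by ephemeron processing) without data races.
void FinalizeSingleThreaded(Worklist& worklist) {
  int item;
  while (worklist.Pop(&item)) {
    // Process leftover entries on the main thread only.
  }
}

int main() {
  Worklist worklist;
  for (int i = 0; i < 1000; ++i) worklist.Push(i);
  ParallelDrain(worklist, 4);        // likely drains everything
  FinalizeSingleThreaded(worklist);  // guaranteed to finish the rest
}
```

The point of the shape is the single start/join: workers run once across the whole closure instead of being restarted for each sub-phase.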
@@ -196,8 +196,6 @@ void Worklist<EntryType, SegmentSize>::Merge(
 template <typename EntryType, uint16_t SegmentSize>
 void Worklist<EntryType, SegmentSize>::Swap(
     Worklist<EntryType, SegmentSize>* other) {
-  v8::base::MutexGuard guard1(&lock_);
-  v8::base::MutexGuard guard2(&other->lock_);
   Segment* top = top_;
   set_top(other->top_);
   other->set_top(top);
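The two `MutexGuard` lines removed above locked both worklists before exchanging their tops. As a hedged illustration of why a locked swap needs both mutexes, and one deadlock-free way to take them, here is a minimal sketch with invented names, not V8's `Worklist`:

```cpp
// Hypothetical locked swap (not V8 code): both sides must be held so no
// thread observes a half-swapped pair of worklists.
#include <deque>
#include <mutex>
#include <utility>

struct LockedWorklist {
  std::mutex lock;
  std::deque<int> segments;

  void Swap(LockedWorklist* other) {
    // std::scoped_lock acquires both mutexes without deadlock; the removed
    // V8 code took two guards in sequence instead.
    std::scoped_lock guard(lock, other->lock);
    std::swap(segments, other->segments);
  }
};
```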
@@ -604,10 +604,6 @@ bool CppHeap::AdvanceTracing(double max_duration) {
           : v8::base::TimeDelta::FromMillisecondsD(max_duration);
   const size_t marked_bytes_limit = in_atomic_pause_ ? SIZE_MAX : 0;
   DCHECK_NOT_NULL(marker_);
-  if (in_atomic_pause_) {
-    marker_->NotifyConcurrentMarkingOfWorkIfNeeded(
-        cppgc::TaskPriority::kUserBlocking);
-  }
   // TODO(chromium:1056170): Replace when unified heap transitions to
   // bytes-based deadline.
   marking_done_ =
@@ -238,13 +238,6 @@ void ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
   }
 }
 
-void ConcurrentMarkerBase::NotifyOfWorkIfNeeded(cppgc::TaskPriority priority) {
-  if (HasWorkForConcurrentMarking(marking_worklists_)) {
-    concurrent_marking_handle_->UpdatePriority(priority);
-    concurrent_marking_handle_->NotifyConcurrencyIncrease();
-  }
-}
-
 void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
   if (!concurrent_marking_handle_->UpdatePriorityEnabled()) return;
   if (concurrent_marking_priority_increased_) return;
@@ -30,7 +30,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
   bool Cancel();
 
   void NotifyIncrementalMutatorStepCompleted();
-  void NotifyOfWorkIfNeeded(cppgc::TaskPriority priority);
 
   bool IsActive() const;
@@ -462,13 +462,6 @@ bool MarkerBase::JoinConcurrentMarkingIfNeeded() {
   return true;
 }
 
-void MarkerBase::NotifyConcurrentMarkingOfWorkIfNeeded(
-    cppgc::TaskPriority priority) {
-  if (concurrent_marker_->IsActive()) {
-    concurrent_marker_->NotifyOfWorkIfNeeded(priority);
-  }
-}
-
 bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
                                           size_t marked_bytes_limit) {
   bool is_done = false;
@@ -8,7 +8,6 @@
 #include <memory>
 
 #include "include/cppgc/heap.h"
-#include "include/cppgc/platform.h"
 #include "include/cppgc/visitor.h"
 #include "src/base/macros.h"
 #include "src/base/platform/time.h"
@@ -113,7 +112,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
   void ProcessWeakness();
 
   bool JoinConcurrentMarkingIfNeeded();
-  void NotifyConcurrentMarkingOfWorkIfNeeded(cppgc::TaskPriority);
 
   inline void WriteBarrierForInConstructionObject(HeapObjectHeader&);
@@ -908,10 +908,11 @@ void GCTracer::PrintNVP() const {
         "mark=%.1f "
         "mark.finish_incremental=%.1f "
         "mark.roots=%.1f "
-        "mark.full_closure_parallel=%.1f "
-        "mark.full_closure=%.1f "
-        "mark.ephemeron.marking=%.1f "
-        "mark.ephemeron.linear=%.1f "
+        "mark.main=%.1f "
+        "mark.weak_closure=%.1f "
+        "mark.weak_closure.ephemeron=%.1f "
+        "mark.weak_closure.ephemeron.marking=%.1f "
+        "mark.weak_closure.ephemeron.linear=%.1f "
         "mark.embedder_prologue=%.1f "
         "mark.embedder_tracing=%.1f "
         "prologue=%.1f "
@@ -995,8 +996,9 @@ void GCTracer::PrintNVP() const {
         current_scope(Scope::MC_MARK),
         current_scope(Scope::MC_MARK_FINISH_INCREMENTAL),
         current_scope(Scope::MC_MARK_ROOTS),
-        current_scope(Scope::MC_MARK_FULL_CLOSURE_PARALLEL),
-        current_scope(Scope::MC_MARK_FULL_CLOSURE),
+        current_scope(Scope::MC_MARK_MAIN),
+        current_scope(Scope::MC_MARK_WEAK_CLOSURE),
+        current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON),
         current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING),
         current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR),
         current_scope(Scope::MC_MARK_EMBEDDER_PROLOGUE),
@@ -2018,7 +2018,7 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
   marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
 }
 
-bool MarkCompactCollector::MarkTransitiveClosureUntilFixpoint() {
+bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
   int iterations = 0;
   int max_iterations = FLAG_ephemeron_fixpoint_iterations;
@@ -2042,7 +2042,14 @@ bool MarkCompactCollector::MarkTransitiveClosureUntilFixpoint() {
     {
       TRACE_GC(heap()->tracer(),
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
+
+      if (FLAG_parallel_marking) {
+        heap_->concurrent_marking()->RescheduleJobIfNeeded(
+            TaskPriority::kUserBlocking);
+      }
+
       another_ephemeron_iteration_main_thread = ProcessEphemerons();
+      FinishConcurrentMarking();
     }
     CHECK(
@@ -2057,6 +2064,10 @@ bool MarkCompactCollector::MarkTransitiveClosureUntilFixpoint() {
              !local_marking_worklists()->IsWrapperEmpty() ||
              !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
 
   CHECK(local_marking_worklists()->IsEmpty());
+
+  CHECK(local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+  CHECK(local_weak_objects()
+            ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
   return true;
 }
@@ -2098,7 +2109,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
   return another_ephemeron_iteration;
 }
 
-void MarkCompactCollector::MarkTransitiveClosureLinear() {
+void MarkCompactCollector::ProcessEphemeronsLinear() {
   TRACE_GC(heap()->tracer(),
            GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
   CHECK(heap()->concurrent_marking()->IsStopped());
@@ -2222,9 +2233,6 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
   PtrComprCageBase cage_base(isolate);
   CodePageHeaderModificationScope rwx_write_scope(
       "Marking of Code objects require write access to Code page headers");
-  if (parallel_marking_)
-    heap_->concurrent_marking()->RescheduleJobIfNeeded(
-        TaskPriority::kUserBlocking);
   while (local_marking_worklists()->Pop(&object) ||
          local_marking_worklists()->PopOnHold(&object)) {
     // Left trimming may result in grey or black filler objects on the marking
@@ -2291,7 +2299,19 @@ bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
   return false;
 }
 
-void MarkCompactCollector::VerifyEphemeronMarking() {
+void MarkCompactCollector::ProcessEphemeronMarking() {
+  DCHECK(local_marking_worklists()->IsEmpty());
+
+  // Incremental marking might leave ephemerons in main task's local
+  // buffer, flush it into global pool.
+  local_weak_objects()->next_ephemerons_local.Publish();
+
+  if (!ProcessEphemeronsUntilFixpoint()) {
+    // Fixpoint iteration needed too many iterations and was cancelled. Use the
+    // guaranteed linear algorithm.
+    ProcessEphemeronsLinear();
+  }
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Ephemeron ephemeron;
@@ -2303,19 +2323,10 @@ void MarkCompactCollector::VerifyEphemeronMarking() {
       CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
     }
   }
-#endif  // VERIFY_HEAP
-}
-
-void MarkCompactCollector::MarkTransitiveClosure() {
-  // Incremental marking might leave ephemerons in main task's local
-  // buffer, flush it into global pool.
-  local_weak_objects()->next_ephemerons_local.Publish();
+#endif
 
-  if (!MarkTransitiveClosureUntilFixpoint()) {
-    // Fixpoint iteration needed too many iterations and was cancelled. Use the
-    // guaranteed linear algorithm.
-    MarkTransitiveClosureLinear();
-  }
-
   CHECK(local_marking_worklists()->IsEmpty());
   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
 }
 
 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
@@ -2396,32 +2407,50 @@ void MarkCompactCollector::MarkLiveObjects() {
     MarkObjectsFromClientHeaps();
   }
 
-  if (FLAG_parallel_marking) {
-    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FULL_CLOSURE_PARALLEL);
-    parallel_marking_ = true;
-    heap_->concurrent_marking()->RescheduleJobIfNeeded(
-        TaskPriority::kUserBlocking);
-    MarkTransitiveClosure();
-    {
-      TRACE_GC(heap()->tracer(),
-               GCTracer::Scope::MC_MARK_FULL_CLOSURE_PARALLEL_JOIN);
-      FinishConcurrentMarking();
-    }
-    parallel_marking_ = false;
-  }
-
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
+    if (FLAG_parallel_marking) {
+      heap_->concurrent_marking()->RescheduleJobIfNeeded(
+          TaskPriority::kUserBlocking);
+    }
+    DrainMarkingWorklist();
+
+    FinishConcurrentMarking();
+    DrainMarkingWorklist();
+  }
+
   {
-    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FULL_CLOSURE);
-    // Complete the transitive closure single-threaded to avoid races with
-    // multiple threads when processing weak maps and embedder heaps.
-    MarkTransitiveClosure();
-    CHECK(local_marking_worklists()->IsEmpty());
-    CHECK(
-        local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
-    CHECK(local_weak_objects()
-              ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
-    CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
-    VerifyEphemeronMarking();
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
+
+    DCHECK(local_marking_worklists()->IsEmpty());
+
+    // Mark objects reachable through the embedder heap. This phase is
+    // opportunistic as it may not discover graphs that are only reachable
+    // through ephemerons.
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
+      do {
+        // PerformWrapperTracing() also empties the work items collected by
+        // concurrent markers. As a result this call needs to happen at least
+        // once.
+        PerformWrapperTracing();
+        DrainMarkingWorklist();
+      } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
+               !local_marking_worklists()->IsWrapperEmpty());
+      DCHECK(local_marking_worklists()->IsWrapperEmpty());
+      DCHECK(local_marking_worklists()->IsEmpty());
+    }
+
+    // The objects reachable from the roots are marked, yet unreachable objects
+    // are unmarked. Mark objects reachable due to embedder heap tracing or
+    // harmony weak maps.
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
+      ProcessEphemeronMarking();
+      DCHECK(local_marking_worklists()->IsEmpty());
+    }
   }
 
   if (was_marked_incrementally) {
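The restored `MC_MARK_EMBEDDER_TRACING_CLOSURE` loop above alternates wrapper tracing with worklist draining because each side can produce work for the other. A minimal standalone sketch of that handshake follows; the names and the toy "discovery" rules are invented for illustration and are not V8's API:

```cpp
// Hypothetical two-heap tracing handshake (not V8 code).
#include <deque>

struct TwoHeapSession {
  std::deque<int> v8_worklist;       // objects for the V8 marker
  std::deque<int> wrapper_worklist;  // wrappers for the embedder tracer

  void PerformWrapperTracing() {
    // Embedder tracing may discover V8 objects and push them back.
    while (!wrapper_worklist.empty()) {
      int wrapper = wrapper_worklist.front();
      wrapper_worklist.pop_front();
      if (wrapper >= 2) v8_worklist.push_back(wrapper / 2);
    }
  }

  void DrainMarkingWorklist() {
    // V8 marking may discover wrappers and hand them to the embedder.
    while (!v8_worklist.empty()) {
      int object = v8_worklist.front();
      v8_worklist.pop_front();
      if (object >= 3 && object % 3 == 0) wrapper_worklist.push_back(object - 1);
    }
  }

  void MarkEmbedderClosure() {
    // Wrapper tracing must run at least once (cf. the comment in the diff:
    // it also empties work collected by concurrent markers), hence do-while.
    do {
      PerformWrapperTracing();
      DrainMarkingWorklist();
    } while (!wrapper_worklist.empty() || !v8_worklist.empty());
  }
};

int main() {
  TwoHeapSession session;
  session.v8_worklist = {9, 27, 4};
  session.MarkEmbedderClosure();  // terminates: discovered items strictly shrink
}
```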
@@ -613,8 +613,7 @@
   void UpdatePointersInClientHeap(Isolate* client);
 
   // Marks object reachable from harmony weak maps and wrapper tracing.
-  void MarkTransitiveClosure();
-  void VerifyEphemeronMarking();
+  void ProcessEphemeronMarking();
 
   // If the call-site of the top optimized code was not prepared for
   // deoptimization, then treat embedded pointers in the code as strong as
@@ -629,21 +628,19 @@
   // Returns true if value was actually marked.
   bool ProcessEphemeron(HeapObject key, HeapObject value);
 
-  // Marks the transitive closure by draining the marking worklist iteratively,
-  // applying ephemerons semantics and invoking embedder tracing until a
-  // fixpoint is reached. Returns false if too many iterations have been tried
-  // and the linear approach should be used.
-  bool MarkTransitiveClosureUntilFixpoint();
+  // Marks ephemerons and drains marking worklist iteratively
+  // until a fixpoint is reached. Returns false if too many iterations have been
+  // tried and the linear approach should be used.
+  bool ProcessEphemeronsUntilFixpoint();
 
-  // Marks the transitive closure applying ephemeron semantics and invoking
-  // embedder tracing with a linear algorithm for ephemerons. Only used if
-  // fixpoint iteration doesn't finish within a few iterations.
-  void MarkTransitiveClosureLinear();
-
   // Drains ephemeron and marking worklists. Single iteration of the
   // fixpoint iteration.
   bool ProcessEphemerons();
 
+  // Mark ephemerons and drain marking worklist with a linear algorithm.
+  // Only used if fixpoint iteration doesn't finish within a few iterations.
+  void ProcessEphemeronsLinear();
+
   // Perform Wrapper Tracing if in use.
   void PerformWrapperTracing();
@@ -748,7 +745,6 @@
   bool compacting_ = false;
   bool black_allocation_ = false;
   bool have_code_to_deoptimize_ = false;
-  bool parallel_marking_ = false;
 
   MarkingWorklists marking_worklists_;
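The restored header comments above spell out the ephemeron contract: `ProcessEphemeronsUntilFixpoint` returns false once too many iterations have been tried, and the caller switches to the guaranteed linear algorithm. A minimal sketch of that fixpoint-with-fallback pattern follows, assuming toy `Object`/`Ephemeron` types and an invented `kMaxIterations` bound (V8 uses `FLAG_ephemeron_fixpoint_iterations`); none of this is V8's implementation:

```cpp
// Hypothetical ephemeron fixpoint iteration (not V8 code).
#include <set>
#include <vector>

using Object = int;
// An ephemeron keeps `value` alive only if `key` is alive (marked).
struct Ephemeron { Object key; Object value; };

// One fixpoint iteration: mark values of ephemerons whose key is already
// marked. Returns true if anything new was marked (more work may follow,
// because a newly marked value can itself be another ephemeron's key).
bool ProcessEphemerons(std::set<Object>& marked,
                       const std::vector<Ephemeron>& ephemerons) {
  bool marked_new = false;
  for (const Ephemeron& e : ephemerons) {
    if (marked.count(e.key) && !marked.count(e.value)) {
      marked.insert(e.value);
      marked_new = true;
    }
  }
  return marked_new;
}

// Fixpoint driver: returns false if it gives up after kMaxIterations, in
// which case the caller should fall back to the linear algorithm.
bool ProcessEphemeronsUntilFixpoint(std::set<Object>& marked,
                                    const std::vector<Ephemeron>& ephemerons) {
  const int kMaxIterations = 10;  // invented bound for the sketch
  for (int i = 0; i < kMaxIterations; ++i) {
    if (!ProcessEphemerons(marked, ephemerons)) return true;  // fixpoint
  }
  return false;  // too many iterations; use the linear fallback
}
```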
@@ -42,7 +42,7 @@ class TransitionArray;
   /* Keep track of all ephemerons for concurrent marking tasks. Only store \
      ephemerons in these worklists if both (key, value) are unreachable at \
      the moment. \
-     MarkCompactCollector::MarkTransitiveClosureUntilFixpoint drains/fills \
+     MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains/fills \
      these worklists. current_ephemerons is used as draining worklist in \
      the current fixpoint iteration. */ \
   F(Ephemeron, current_ephemerons, CurrentEphemerons) \
@@ -566,11 +566,12 @@
   F(MC_MARK_CLIENT_HEAPS)                   \
   F(MC_MARK_EMBEDDER_PROLOGUE)              \
   F(MC_MARK_EMBEDDER_TRACING)               \
+  F(MC_MARK_EMBEDDER_TRACING_CLOSURE)       \
   F(MC_MARK_FINISH_INCREMENTAL)             \
-  F(MC_MARK_FULL_CLOSURE_PARALLEL)          \
-  F(MC_MARK_FULL_CLOSURE_PARALLEL_JOIN)     \
+  F(MC_MARK_MAIN)                           \
   F(MC_MARK_ROOTS)                          \
-  F(MC_MARK_FULL_CLOSURE)                   \
+  F(MC_MARK_WEAK_CLOSURE)                   \
+  F(MC_MARK_WEAK_CLOSURE_EPHEMERON)         \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING) \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR)  \
   F(MC_SWEEP_CODE)                          \