Commit 4085827c authored by Leon Bettscheider, committed by V8 LUCI CQ

[heap] Enable MinorMC incremental marking on soft limit

This CL adds a soft limit (via AllocationObserver) to run
incremental marking for MinorMC.

Once the soft limit is triggered, roots are marked.
This is a stepping stone for concurrent marking
(YoungGenerationConcurrentMarkingVisitor, go/YGCMV) integration.

Bug: v8:13012
Change-Id: I5bc9aeb80511159561845deb494023ade3fb7365
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3824339
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Cr-Commit-Position: refs/heads/main@{#82695}
parent 5b78f174
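
For orientation, below is a minimal, self-contained C++ sketch of the soft-limit pattern this CL applies: an allocation observer is notified as new-space allocation progresses and starts minor incremental marking once usage crosses a percentage of capacity, analogous to MinorMCTaskObserver, Heap::MinorMCTaskTriggerSize(), and Heap::StartMinorMCIncrementalMarkingIfNeeded() in the diff below. The ToyHeap type, its members, and the concrete numbers are illustrative assumptions, not V8 API.

#include <cstddef>
#include <cstdio>

// Toy stand-in for the heap; names mirror the CL but this is not V8 code.
class ToyHeap {
 public:
  ToyHeap(std::size_t new_space_capacity, int trigger_percent)
      : new_space_capacity_(new_space_capacity),
        trigger_percent_(trigger_percent) {}

  // Soft limit, analogous to Heap::MinorMCTaskTriggerSize():
  // a fixed percentage of the current new-space capacity.
  std::size_t MinorMCTaskTriggerSize() const {
    return new_space_capacity_ * trigger_percent_ / 100;
  }

  // Analogous to Heap::StartMinorMCIncrementalMarkingIfNeeded(): start
  // marking only once, and only after the soft limit has been crossed.
  void StartMinorMCIncrementalMarkingIfNeeded() {
    if (!marking_ && new_space_size_ >= MinorMCTaskTriggerSize()) {
      marking_ = true;  // the real code calls Heap::StartIncrementalMarking()
      std::printf("soft limit (%zu bytes) reached at %zu bytes: mark roots\n",
                  MinorMCTaskTriggerSize(), new_space_size_);
    }
  }

  // Stand-in for the AllocationObserver step: each allocation advances the
  // observed size and re-checks the soft limit.
  void Allocate(std::size_t bytes) {
    new_space_size_ += bytes;
    StartMinorMCIncrementalMarkingIfNeeded();
  }

 private:
  std::size_t new_space_capacity_;
  int trigger_percent_;            // like --minor-mc-task-trigger (default 80)
  std::size_t new_space_size_ = 0;
  bool marking_ = false;
};

int main() {
  ToyHeap heap(/*new_space_capacity=*/1000, /*trigger_percent=*/80);
  for (int i = 0; i < 10; ++i) heap.Allocate(100);  // crosses 80% at 800 bytes
  return 0;
}
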
......@@ -1179,7 +1179,7 @@ DEFINE_BOOL(huge_max_old_generation_size, true,
"the physical memory bigger than 16 GB")
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
DEFINE_BOOL(separate_gc_phases, false,
"yound and full garbage collection phases are not overlapping")
"young and full garbage collection phases are not overlapping")
DEFINE_BOOL(global_gc_scheduling, true,
"enable GC scheduling based on global memory")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
......@@ -1255,6 +1255,8 @@ DEFINE_INT(incremental_marking_hard_trigger, 0,
"threshold for starting incremental marking immediately in percent "
"of available space: limit - size")
DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
DEFINE_INT(minor_mc_task_trigger, 80,
"minormc task trigger in percent of the current heap limit")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(scavenge_task, true, "schedule scavenge tasks")
DEFINE_INT(scavenge_task_trigger, 80,
......@@ -1326,6 +1328,8 @@ DEFINE_GENERIC_IMPLICATION(
TracingFlags::gc_stats.store(
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
DEFINE_NEG_NEG_IMPLICATION(incremental_marking, concurrent_marking)
DEFINE_IMPLICATION(concurrent_marking, incremental_marking)
DEFINE_NEG_IMPLICATION(track_retaining_path, parallel_marking)
DEFINE_NEG_IMPLICATION(track_retaining_path, concurrent_marking)
DEFINE_BOOL(track_detached_contexts, true,
......@@ -1924,6 +1928,11 @@ DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_IMPLICATION(minor_mc, separate_gc_phases)
DEFINE_BOOL(concurrent_minor_mc, false,
"perform young generation mark compact GCs concurrently")
DEFINE_NEG_NEG_IMPLICATION(concurrent_marking, concurrent_minor_mc)
DEFINE_IMPLICATION(concurrent_minor_mc, minor_mc)
DEFINE_IMPLICATION(concurrent_minor_mc, concurrent_marking)
//
// Dev shell flags
......
......@@ -108,7 +108,8 @@ constexpr int GCTracer::Scope::IncrementalOffset(ScopeId id) {
constexpr bool GCTracer::Event::IsYoungGenerationEvent(Type type) {
DCHECK_NE(START, type);
return type == SCAVENGER || type == MINOR_MARK_COMPACTOR;
return type == SCAVENGER || type == MINOR_MARK_COMPACTOR ||
type == INCREMENTAL_MINOR_MARK_COMPACTOR;
}
CollectionEpoch GCTracer::CurrentEpoch(Scope::ScopeId id) const {
......@@ -124,7 +125,8 @@ bool GCTracer::IsConsistentWithCollector(GarbageCollector collector) const {
return (collector == GarbageCollector::SCAVENGER &&
current_.type == Event::SCAVENGER) ||
(collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
current_.type == Event::MINOR_MARK_COMPACTOR) ||
(current_.type == Event::MINOR_MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MINOR_MARK_COMPACTOR)) ||
(collector == GarbageCollector::MARK_COMPACTOR &&
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR));
......
......@@ -78,6 +78,7 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
case INCREMENTAL_MARK_COMPACTOR:
return (short_name) ? "ms" : "Mark-sweep";
case MINOR_MARK_COMPACTOR:
case INCREMENTAL_MINOR_MARK_COMPACTOR:
return (short_name) ? "mmc" : "Minor Mark-Compact";
case START:
return (short_name) ? "st" : "Start";
......@@ -275,7 +276,9 @@ void GCTracer::StartCycle(GarbageCollector collector,
type = Event::SCAVENGER;
break;
case GarbageCollector::MINOR_MARK_COMPACTOR:
type = Event::MINOR_MARK_COMPACTOR;
type = marking == MarkingType::kIncremental
? Event::INCREMENTAL_MINOR_MARK_COMPACTOR
: Event::MINOR_MARK_COMPACTOR;
break;
case GarbageCollector::MARK_COMPACTOR:
type = marking == MarkingType::kIncremental
......@@ -302,7 +305,9 @@ void GCTracer::StartCycle(GarbageCollector collector,
break;
case MarkingType::kIncremental:
// The current event will be updated later.
DCHECK(!Heap::IsYoungGenerationCollector(collector));
DCHECK_IMPLIES(Heap::IsYoungGenerationCollector(collector),
(FLAG_minor_mc &&
collector == GarbageCollector::MINOR_MARK_COMPACTOR));
DCHECK(!IsInObservablePause());
break;
}
......
......@@ -120,7 +120,8 @@ class V8_EXPORT_PRIVATE GCTracer {
MARK_COMPACTOR = 1,
INCREMENTAL_MARK_COMPACTOR = 2,
MINOR_MARK_COMPACTOR = 3,
START = 4
START = 4,
INCREMENTAL_MINOR_MARK_COMPACTOR = 5,
};
// Returns true if the event corresponds to a young generation GC.
......
......@@ -179,7 +179,7 @@ void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
set_basic_block_profiling_data(*list);
}
class ScavengeTaskObserver : public AllocationObserver {
class ScavengeTaskObserver final : public AllocationObserver {
public:
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
......@@ -192,6 +192,19 @@ class ScavengeTaskObserver : public AllocationObserver {
Heap* heap_;
};
class MinorMCTaskObserver final : public AllocationObserver {
public:
MinorMCTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_->StartMinorMCIncrementalMarkingIfNeeded();
}
private:
Heap* heap_;
};
Heap::Heap()
: isolate_(isolate()),
heap_allocator_(this),
......@@ -1552,6 +1565,21 @@ void Heap::ScheduleScavengeTaskIfNeeded() {
scavenge_job_->ScheduleTaskIfNeeded(this);
}
size_t Heap::MinorMCTaskTriggerSize() const {
return new_space()->Capacity() * FLAG_minor_mc_task_trigger / 100;
}
void Heap::StartMinorMCIncrementalMarkingIfNeeded() {
if (FLAG_concurrent_minor_mc && !IsTearingDown() &&
!incremental_marking()->IsMarking() &&
incremental_marking()->CanBeStarted() && V8_LIKELY(!FLAG_gc_global) &&
(new_space()->Size() >= MinorMCTaskTriggerSize())) {
StartIncrementalMarking(Heap::kNoGCFlags, GarbageCollectionReason::kTask,
kNoGCCallbackFlags,
GarbageCollector::MINOR_MARK_COMPACTOR);
}
}
void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
......@@ -1769,13 +1797,25 @@ bool Heap::CollectGarbage(AllocationSpace space,
DCHECK(AllowGarbageCollection::IsAllowed());
GarbageCollector collector;
const char* collector_reason = nullptr;
if (gc_reason == GarbageCollectionReason::kFinalizeMinorMC) {
collector = GarbageCollector::MINOR_MARK_COMPACTOR;
collector_reason = "finalize MinorMC";
} else {
collector = SelectGarbageCollector(space, &collector_reason);
}
if (collector == GarbageCollector::MARK_COMPACTOR &&
incremental_marking()->IsMinorMarking()) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kFinalizeMinorMC);
}
// Ensure that all pending phantom callbacks are invoked.
isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
GCType gc_type = GetGCTypeFromGarbageCollector(collector);
{
GCCallbacksScope scope(this);
// Temporary override any embedder stack state as callbacks may create
......@@ -1973,7 +2013,8 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags) {
GCCallbackFlags gc_callback_flags,
GarbageCollector collector) {
DCHECK(incremental_marking()->IsStopped());
// Sweeping needs to be completed such that markbits are all cleared before
......@@ -1993,12 +2034,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
#endif
// Now that sweeping is completed, we can start the next full GC cycle.
tracer()->StartCycle(GarbageCollector::MARK_COMPACTOR, gc_reason, nullptr,
tracer()->StartCycle(collector, gc_reason, nullptr,
GCTracer::MarkingType::kIncremental);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
incremental_marking()->Start(gc_reason);
incremental_marking()->Start(collector, gc_reason);
}
void Heap::CompleteSweepingFull() {
......@@ -2222,8 +2264,13 @@ size_t Heap::PerformGarbageCollection(
CompleteSweepingFull();
}
#endif // VERIFY_HEAP
tracer()->StartCycle(collector, gc_reason, collector_reason,
GCTracer::MarkingType::kAtomic);
if (!FLAG_minor_mc || incremental_marking_->IsStopped()) {
// If FLAG_minor_mc is false, then the young GC is Scavenger, which may
// interrupt an incremental full GC. If MinorMC incremental marking was
// running before, there is already an active GCTracer cycle.
tracer()->StartCycle(collector, gc_reason, collector_reason,
GCTracer::MarkingType::kAtomic);
}
} else {
DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
CompleteSweepingFull();
......@@ -4350,6 +4397,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "unknown";
case GarbageCollectionReason::kBackgroundAllocationFailure:
return "background allocation failure";
case GarbageCollectionReason::kFinalizeMinorMC:
return "finalize MinorMC";
}
UNREACHABLE();
}
......@@ -5828,6 +5877,10 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
scavenge_task_observer_.reset(new ScavengeTaskObserver(
this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
new_space()->AddAllocationObserver(scavenge_task_observer_.get());
minor_mc_task_observer_.reset(
new MinorMCTaskObserver(this, MinorMCTaskTriggerSize()));
new_space()->AddAllocationObserver(minor_mc_task_observer_.get());
}
SetGetExternallyAllocatedMemoryInBytesCallback(
......@@ -6093,11 +6146,14 @@ void Heap::TearDown() {
if (new_space()) {
new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
new_space()->RemoveAllocationObserver(minor_mc_task_observer_.get());
}
scavenge_task_observer_.reset();
scavenge_job_.reset();
minor_mc_task_observer_.reset();
if (need_to_remove_stress_concurrent_allocation_observer_) {
RemoveAllocationObserversFromAllSpaces(
stress_concurrent_allocation_observer_.get(),
......
......@@ -177,6 +177,7 @@ enum class GarbageCollectionReason : int {
kGlobalAllocationLimit = 23,
kMeasureMemory = 24,
kBackgroundAllocationFailure = 25,
kFinalizeMinorMC = 26,
kLastReason = kBackgroundAllocationFailure,
};
......@@ -870,17 +871,17 @@ class Heap {
inline Address NewSpaceTop();
NewSpace* new_space() { return new_space_; }
OldSpace* old_space() { return old_space_; }
OldSpace* shared_old_space() { return shared_old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
NewSpace* new_space() const { return new_space_; }
OldSpace* old_space() const { return old_space_; }
OldSpace* shared_old_space() const { return shared_old_space_; }
CodeSpace* code_space() const { return code_space_; }
MapSpace* map_space() const { return map_space_; }
inline PagedSpace* space_for_maps();
OldLargeObjectSpace* lo_space() { return lo_space_; }
OldLargeObjectSpace* shared_lo_space() { return shared_lo_space_; }
CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }
OldLargeObjectSpace* lo_space() const { return lo_space_; }
OldLargeObjectSpace* shared_lo_space() const { return shared_lo_space_; }
CodeLargeObjectSpace* code_lo_space() const { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() const { return new_lo_space_; }
ReadOnlySpace* read_only_space() const { return read_only_space_; }
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
......@@ -1104,7 +1105,8 @@ class Heap {
// stopped.
V8_EXPORT_PRIVATE void StartIncrementalMarking(
int gc_flags, GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags,
GarbageCollector collector = GarbageCollector::MARK_COMPACTOR);
V8_EXPORT_PRIVATE void StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags,
......@@ -2063,6 +2065,9 @@ class Heap {
// ===========================================================================
void ScheduleScavengeTaskIfNeeded();
void StartMinorMCIncrementalMarkingIfNeeded();
size_t MinorMCTaskTriggerSize() const;
bool MinorMCSizeTaskTriggerReached() const;
// ===========================================================================
// Allocation methods. =======================================================
......@@ -2334,6 +2339,7 @@ class Heap {
std::unique_ptr<ObjectStats> dead_object_stats_;
std::unique_ptr<ScavengeJob> scavenge_job_;
std::unique_ptr<AllocationObserver> scavenge_task_observer_;
std::unique_ptr<AllocationObserver> minor_mc_task_observer_;
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<AllocationTrackerForDebugging>
......@@ -2473,6 +2479,7 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactCollectorBase;
friend class MinorMarkCompactCollector;
friend class MinorMCTaskObserver;
friend class NewLargeObjectSpace;
friend class NewSpace;
friend class ObjectStatsCollector;
......
......@@ -52,7 +52,8 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
: heap_(heap),
collector_(heap->mark_compact_collector()),
major_collector_(heap->mark_compact_collector()),
minor_collector_(heap->minor_mark_compact_collector()),
weak_objects_(weak_objects),
incremental_marking_job_(heap),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
......@@ -63,10 +64,16 @@ IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
HeapObject obj) {
// TODO(v8:13012): Add scope for MinorMC.
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
marking_state()->WhiteToGrey(obj);
collector_->VisitObject(obj);
if (IsMajorMarking()) {
major_collector_->VisitObject(obj);
} else {
// Not covered by tests.
minor_collector_->VisitObject(obj);
}
}
void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
......@@ -122,8 +129,9 @@ bool IncrementalMarking::IsBelowActivationThresholds() const {
heap_->EmbedderSizeOfObjects() <= kEmbedderActivationThreshold;
}
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
DCHECK(!collector_->sweeping_in_progress());
void IncrementalMarking::Start(GarbageCollector garbage_collector,
GarbageCollectionReason gc_reason) {
DCHECK(!major_collector_->sweeping_in_progress());
DCHECK(!heap_->IsShared());
if (FLAG_trace_incremental_marking) {
......@@ -173,11 +181,18 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
schedule_update_time_ms_ = start_time_ms_;
bytes_marked_concurrently_ = 0;
StartMarking();
heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
&new_generation_observer_);
incremental_marking_job()->ScheduleTask();
if (garbage_collector == GarbageCollector::MARK_COMPACTOR) {
current_collector_ = CurrentCollector::kMajorMC;
StartMarkingMajor();
heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
&new_generation_observer_);
incremental_marking_job()->ScheduleTask();
} else {
current_collector_ = CurrentCollector::kMinorMC;
// Allocation observers are not currently used by MinorMC because we don't
// do incremental marking.
StartMarkingMinor();
}
}
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
......@@ -217,10 +232,14 @@ class IncrementalMarking::IncrementalMarkingRootMarkingVisitor final
if (heap_object.InSharedHeap()) return;
if (incremental_marking_->WhiteToGreyAndPush(heap_object)) {
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, heap_object);
if (incremental_marking_->IsMajorMarking()) {
if (incremental_marking_->WhiteToGreyAndPush(heap_object)) {
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, heap_object);
}
}
} else if (Heap::InYoungGeneration(heap_object)) {
incremental_marking_->WhiteToGreyAndPush(heap_object);
}
}
......@@ -233,15 +252,27 @@ void IncrementalMarking::MarkRoots() {
CodePageHeaderModificationScope rwx_write_scope(
"Marking of builtins table entries require write access to Code page "
"header");
heap_->IterateRoots(
&visitor,
base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
SkipRoot::kWeak});
if (IsMajorMarking()) {
heap_->IterateRoots(
&visitor,
base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
SkipRoot::kWeak});
} else {
heap_->IterateRoots(
&visitor, base::EnumSet<SkipRoot>{
SkipRoot::kStack, SkipRoot::kMainThreadHandles,
SkipRoot::kWeak, SkipRoot::kExternalStringTable,
SkipRoot::kGlobalHandles, SkipRoot::kOldGeneration});
heap()->isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
&visitor);
// TODO(v8:13012): Do PageMarkingItem processing.
}
}
void IncrementalMarking::MarkRootsForTesting() { MarkRoots(); }
void IncrementalMarking::StartMarking() {
void IncrementalMarking::StartMarkingMajor() {
if (heap_->isolate()->serializer_enabled()) {
// Black allocation currently starts when we start incremental marking,
// but we cannot enable black allocation while deserializing. Hence, we
......@@ -259,7 +290,7 @@ void IncrementalMarking::StartMarking() {
heap_->InvokeIncrementalMarkingPrologueCallbacks();
is_compacting_ = collector_->StartCompaction(
is_compacting_ = major_collector_->StartCompaction(
MarkCompactCollector::StartCompactionMode::kIncremental);
#ifdef V8_COMPRESS_POINTERS
......@@ -275,7 +306,8 @@ void IncrementalMarking::StartMarking() {
heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
}
collector_->StartMarking();
major_collector_->StartMarking();
current_local_marking_worklists = major_collector_->local_marking_worklists();
is_marking_ = true;
heap_->SetIsMarkingFlag(true);
......@@ -313,6 +345,38 @@ void IncrementalMarking::StartMarking() {
heap_->InvokeIncrementalMarkingEpilogueCallbacks();
}
void IncrementalMarking::StartMarkingMinor() {
// Removed serializer_enabled() check because we don't do black allocation.
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] (MinorMC) Start marking\n");
}
minor_collector_->StartMarking();
current_local_marking_worklists = minor_collector_->local_marking_worklists();
is_marking_ = true;
heap_->SetIsMarkingFlag(true);
MarkingBarrier::ActivateAll(heap(), false, MarkingBarrierType::kMinor);
GlobalHandles::EnableMarkingBarrier(heap()->isolate());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
MarkRoots();
}
local_marking_worklists()->Publish();
// TODO(v8:13012): Schedule concurrent marking.
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] (MinorMC) Running\n");
}
}
void IncrementalMarking::StartBlackAllocation() {
DCHECK(!black_allocation_);
DCHECK(IsMarking());
......@@ -363,24 +427,21 @@ void IncrementalMarking::FinishBlackAllocation() {
void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
if (!IsMarking()) return;
DCHECK(!FLAG_separate_gc_phases);
DCHECK(IsMajorMarking());
Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
collector_->local_marking_worklists()->Publish();
major_collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate());
collector_->marking_worklists()->Update([
#ifdef DEBUG
// this is referred inside DCHECK.
this,
#endif
minor_marking_state, cage_base,
filler_map](
HeapObject obj,
HeapObject* out) -> bool {
major_collector_->marking_worklists()->Update([this, minor_marking_state,
cage_base, filler_map](
HeapObject obj,
HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
......@@ -395,6 +456,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
return false;
}
HeapObject dest = map_word.ToForwardingAddress();
USE(this);
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
if (dest.InSharedHeap()) {
// Object got promoted into the shared heap. Drop it from the client
......@@ -443,7 +505,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
}
});
collector_->local_weak_objects()->Publish();
major_collector_->local_weak_objects()->Publish();
weak_objects_->UpdateAfterScavenge();
}
......@@ -511,12 +573,14 @@ bool IncrementalMarking::Stop() {
std::max(0, old_generation_size_mb - old_generation_limit_mb));
}
for (SpaceIterator it(heap_); it.HasNext();) {
Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
} else {
space->RemoveAllocationObserver(&old_generation_observer_);
if (IsMajorMarking()) {
for (SpaceIterator it(heap_); it.HasNext();) {
Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
} else {
space->RemoveAllocationObserver(&old_generation_observer_);
}
}
}
......@@ -537,6 +601,7 @@ bool IncrementalMarking::Stop() {
}
}
background_live_bytes_.clear();
current_collector_ = CurrentCollector::kNone;
return true;
}
......@@ -822,7 +887,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
collector_->marking_worklists()->Print();
major_collector_->marking_worklists()->Print();
}
#endif
if (FLAG_trace_incremental_marking) {
......@@ -847,7 +912,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
std::tie(v8_bytes_processed, std::ignore) =
collector_->ProcessMarkingWorklist(bytes_to_process);
major_collector_->ProcessMarkingWorklist(bytes_to_process);
if (heap_->local_embedder_heap_tracer()->InUse()) {
embedder_deadline =
std::min(max_step_size_in_ms,
......
......@@ -7,6 +7,7 @@
#include "src/base/logging.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
......@@ -35,6 +36,8 @@ enum class StepOrigin {
kTask
};
enum class CurrentCollector { kNone, kMinorMC, kMajorMC };
class V8_EXPORT_PRIVATE IncrementalMarking final {
public:
class V8_NODISCARD PauseBlackAllocationScope {
......@@ -103,7 +106,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool CanBeStarted() const;
void Start(GarbageCollectionReason gc_reason);
void Start(GarbageCollector garbage_collector,
GarbageCollectionReason gc_reason);
// Returns true if incremental marking was running and false otherwise.
bool Stop();
......@@ -140,7 +144,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool black_allocation() { return black_allocation_; }
MarkingWorklists::Local* local_marking_worklists() const {
return collector_->local_marking_worklists();
return current_local_marking_worklists;
}
bool IsBelowActivationThresholds() const;
......@@ -155,7 +159,12 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// Performs incremental marking step for unit tests.
void AdvanceForTesting(double max_step_size_in_ms);
bool is_minor() const { return false; }
bool IsMinorMarking() const {
return IsMarking() && current_collector_ == CurrentCollector::kMinorMC;
}
bool IsMajorMarking() const {
return IsMarking() && current_collector_ == CurrentCollector::kMajorMC;
}
private:
class IncrementalMarkingRootMarkingVisitor;
......@@ -172,7 +181,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
IncrementalMarking* incremental_marking_;
};
void StartMarking();
void StartMarkingMajor();
void StartMarkingMinor();
void EmbedderStep(double expected_duration_ms, double* duration_ms);
......@@ -224,9 +234,16 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
double CurrentTimeToMarkingTask() const;
Heap* const heap_;
MarkCompactCollector* const collector_;
CurrentCollector current_collector_{CurrentCollector::kNone};
MarkCompactCollector* const major_collector_;
MinorMarkCompactCollector* const minor_collector_;
WeakObjects* weak_objects_;
MarkingWorklists::Local* current_local_marking_worklists;
double start_time_ms_ = 0.0;
size_t initial_old_generation_size_ = 0;
size_t old_generation_allocation_counter_ = 0;
......
......@@ -63,10 +63,11 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
if (heap_->incremental_marking()->IsMarking()) {
marking_barrier_->Activate(heap_->incremental_marking()->IsCompacting(),
heap_->incremental_marking()->is_minor()
? MarkingBarrierType::kMinor
: MarkingBarrierType::kMajor);
marking_barrier_->Activate(
heap_->incremental_marking()->IsCompacting(),
heap_->incremental_marking()->IsMinorMarking()
? MarkingBarrierType::kMinor
: MarkingBarrierType::kMajor);
}
}
});
......
......@@ -5772,6 +5772,12 @@ void MinorMarkCompactCollector::StartMarking() {
std::make_unique<MarkingWorklists::Local>(&marking_worklists_);
main_marking_visitor_ = std::make_unique<YoungGenerationMainMarkingVisitor>(
heap()->isolate(), marking_state(), local_marking_worklists());
#ifdef VERIFY_HEAP
for (Page* page : *heap()->new_space()) {
CHECK(page->marking_bitmap<AccessMode::NON_ATOMIC>()->IsClean());
}
#endif // VERIFY_HEAP
}
void MinorMarkCompactCollector::Finish() {
......@@ -5782,11 +5788,6 @@ void MinorMarkCompactCollector::Finish() {
void MinorMarkCompactCollector::CollectGarbage() {
DCHECK(!heap()->mark_compact_collector()->in_use());
#ifdef VERIFY_HEAP
for (Page* page : *heap()->new_space()) {
CHECK(page->marking_bitmap<AccessMode::NON_ATOMIC>()->IsClean());
}
#endif // VERIFY_HEAP
// Minor MC does not support processing the ephemeron remembered set.
DCHECK(heap()->ephemeron_remembered_set_.empty());
......@@ -6235,6 +6236,15 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
PostponeInterruptsScope postpone(isolate());
bool was_marked_incrementally = false;
{
// TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FINISH_INCREMENTAL.
if (heap_->incremental_marking()->Stop()) {
MarkingBarrier::PublishAll(heap());
was_marked_incrementally = true;
}
}
RootMarkingVisitor root_visitor(this);
MarkRootSetInParallel(&root_visitor);
......@@ -6255,6 +6265,11 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
if (FLAG_minor_mc_trace_fragmentation) {
TraceFragmentation();
}
if (was_marked_incrementally) {
MarkingBarrier::DeactivateAll(heap());
GlobalHandles::DisableMarkingBarrier(heap()->isolate());
}
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
......
......@@ -179,6 +179,13 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
if (marking->IsMinorMarking()) {
// If minor incremental marking is running, we need to finalize it first
// because of the AdvanceForTesting call in this function which is currently
// only possible for MajorMC.
heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kFinalizeMinorMC);
}
if (marking->IsStopped()) {
heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
......
......@@ -6508,7 +6508,8 @@ HEAP_TEST(Regress670675) {
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
marking->Start(GarbageCollector::MARK_COMPACTOR,
i::GarbageCollectionReason::kTesting);
}
size_t array_length = 128 * KB;
size_t n = heap->OldGenerationSpaceAvailable() / array_length;
......
......@@ -122,7 +122,8 @@ TEST_WITH_PLATFORM(IncrementalMarkingUsingTasks, MockPlatform) {
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
marking->Start(GarbageCollector::MARK_COMPACTOR,
i::GarbageCollectionReason::kTesting);
}
CHECK(platform.PendingTask());
while (platform.PendingTask()) {
......
......@@ -474,7 +474,8 @@ TEST_F(EmbedderTracingTest, FinalizeTracingWhenMarking) {
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(GarbageCollectionReason::kTesting);
marking->Start(GarbageCollector::MARK_COMPACTOR,
GarbageCollectionReason::kTesting);
}
// Sweeping is not runing so we should immediately start marking.
......
......@@ -388,7 +388,8 @@ TEST_F(HeapTest, Regress978156) {
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
marking->Start(GarbageCollector::MARK_COMPACTOR,
i::GarbageCollectionReason::kTesting);
}
MarkingState* marking_state = marking->marking_state();
// 6. Mark the filler black to access its two markbits. This triggers
......