Commit f146ada4 authored by Omer Katz, committed by V8 LUCI CQ

heap: Avoid copying objects in MinorMC

This CL replaces evacuation in MinorMC with always promoting pages.
Pages in new space are promoted first within new space and then to old
space upon a second GC.
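
A minimal sketch of this two-step policy, using hypothetical types rather than V8's own (the actual implementation tracks page age via the NEW_SPACE_BELOW_AGE_MARK page flag, as the diff below shows):

    // Sketch only: a live page is kept whole. A page that already survived
    // one young-generation GC (it lies below the age mark) moves to old
    // space; a younger page stays in new space for one more cycle.
    struct Page {
      bool below_age_mark = false;  // survived one young-generation GC
    };

    enum class Space { kNewSpace, kOldSpace };

    Space PromotePage(Page& page) {
      if (page.below_age_mark) return Space::kOldSpace;
      page.below_age_mark = true;  // age the page within new space
      return Space::kNewSpace;
    }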

This implementation should not yet be used in production and is guarded
behind a runtime flag.
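
Assuming a locally built d8 shell, the new mode would be enabled together with MinorMC itself; both flag names are taken from the diff below:

    d8 --minor-mc --minor-mc-sweeping script.js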

If every page in new space has at least one live object on it, all
pages will be promoted in place and new space will still be out of
memory, immediately triggering a second young-generation GC that
promotes all objects to old space.
Further CLs will mitigate this issue.
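
A sketch of the failure mode just described, under the assumption that a page can only be released when it carries no live bytes:

    #include <algorithm>
    #include <vector>

    struct Page { int live_bytes = 0; };

    // Sketch only: if every page keeps at least one live object, promoting
    // pages in place frees nothing, so the allocation that triggered the GC
    // fails again and immediately starts a second young-generation GC.
    bool NewSpaceStillFull(const std::vector<Page>& pages) {
      return std::all_of(pages.begin(), pages.end(),
                         [](const Page& p) { return p.live_bytes > 0; });
    }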

Bug: v8:12612
Change-Id: I329ea01d6d15c0942db2eeeea87c5cefcac75502
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3448385
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79099}
parent f219f698
@@ -1853,6 +1853,8 @@ DEFINE_NEG_NEG_IMPLICATION(text_is_readable, partial_constant_pool)
 DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
             "trace parallel marking for the young generation")
 DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+DEFINE_BOOL(minor_mc_sweeping, false,
+            "perform sweeping in young generation mark compact GCs")
 //
 // Dev shell flags
...
@@ -459,7 +459,9 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
     DCHECK(obj.IsHeapObject());
     // Only pointers to from space have to be updated.
     if (Heap::InFromPage(obj)) {
+      DCHECK_IMPLIES(FLAG_minor_mc_sweeping, minor_marking_state->IsWhite(obj));
       MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
+      DCHECK_IMPLIES(FLAG_minor_mc_sweeping, !map_word.IsForwardingAddress());
       if (!map_word.IsForwardingAddress()) {
         // There may be objects on the marking deque that do not exist
         // anymore, e.g. left trimmed objects or objects from the root set
...
@@ -5861,7 +5861,7 @@ void MinorMarkCompactCollector::Evacuate() {
     EvacuatePagesInParallel();
   }
-  UpdatePointersAfterEvacuation();
+  if (!FLAG_minor_mc_sweeping) UpdatePointersAfterEvacuation();
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
@@ -5924,6 +5924,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
   *live_bytes = marking_state->live_bytes(chunk);
   switch (ComputeEvacuationMode(chunk)) {
     case kObjectsNewToOld:
+      DCHECK(!FLAG_minor_mc_sweeping);
       LiveObjectVisitor::VisitGreyObjectsNoFail(
           chunk, marking_state, &new_space_visitor_,
           LiveObjectVisitor::kClearMarkbits);
...
@@ -5977,7 +5978,8 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
     if (live_bytes_on_page == 0) continue;
     live_bytes += live_bytes_on_page;
-    if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
+    if (FLAG_minor_mc_sweeping ||
+        ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
       } else {
...
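
Taken together, the hunks above reduce the page-selection logic to the following rule (a sketch reusing the names from the diff, so not compilable in isolation): with --minor-mc-sweeping every page carrying live bytes is moved as a whole, bypassing the usual ShouldMovePage heuristic.

    // Sketch only: the flag short-circuits the heuristic, so no page is
    // ever evacuated object by object.
    bool ShouldMoveWholePage(Page* page, intptr_t live_bytes_on_page) {
      return FLAG_minor_mc_sweeping ||
             ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo);
    }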