Commit ea8d75a1 authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Add support for aborting compaction when finalizing with stack

Adds support for aborting compaction when finalizing with stack:
- never_compact_with_stack: All pages are aborted;
- never_compact_code_space_with_stack: Only code space pages are
  aborted.

These flags allow simulating a worst case where the stack cannot be
considered precise, or where an evacuation candidate is referred to
from a stack slot for which V8 has no information.
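
For illustration only, these modes could be exercised with a d8 run
along the lines of the following; the build output path and test
script are placeholders and not part of this change, while the flag
names come from the definitions added below:

  # Abort every old-space evacuation candidate when the full GC
  # finalizes with a stack.
  out/x64.release/d8 --expose-gc --never_compact_with_stack test.js

  # Abort only code-space evacuation candidates.
  out/x64.release/d8 --expose-gc --never_compact_code_space_with_stack test.js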

Bug: v8:12251
Change-Id: Ice24ac87a985b8ecf7b5cbb5c106ad4a3ae1944b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3173682
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77792}
parent fd9e1262
@@ -1340,6 +1340,11 @@ DEFINE_BOOL(allocation_buffer_parking, true, "allocation buffer parking")
DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(never_compact_with_stack, false,
"Never perform compaction when finalizing a full GC with stack")
DEFINE_BOOL(
never_compact_code_space_with_stack, false,
"Never perform code space compaction when finalizing a full GC with stack")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
DEFINE_BOOL(flush_baseline_code, false,
"flush of baseline code when it has not been executed recently")
@@ -34,6 +34,11 @@ void LocalEmbedderHeapTracer::TracePrologue(
void LocalEmbedderHeapTracer::TraceEpilogue() {
if (!InUse()) return;
// Resetting to state unknown as there may be follow up garbage collections
// triggered from callbacks that have a different stack state.
embedder_stack_state_ =
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
EmbedderHeapTracer::TraceSummary summary;
remote_tracer_->TraceEpilogue(&summary);
if (summary.allocated_size == SIZE_MAX) return;
@@ -56,10 +61,6 @@ void LocalEmbedderHeapTracer::EnterFinalPause() {
if (!InUse()) return;
remote_tracer_->EnterFinalPause(embedder_stack_state_);
// Resetting to state unknown as there may be follow up garbage collections
// triggered from callbacks that have a different stack state.
embedder_stack_state_ =
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
}
bool LocalEmbedderHeapTracer::Trace(double deadline) {
@@ -135,6 +135,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void NotifyEmptyEmbedderStack();
EmbedderHeapTracer::EmbedderStackState embedder_stack_state() const {
return embedder_stack_state_;
}
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -497,6 +497,11 @@ void Heap::SetGCState(HeapState state) {
gc_state_.store(state, std::memory_order_relaxed);
}
bool Heap::IsGCWithoutStack() const {
return local_embedder_heap_tracer()->embedder_stack_state() ==
cppgc::EmbedderStackState::kNoHeapPointers;
}
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_,
@@ -689,6 +689,8 @@ class Heap {
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
bool IsGCWithoutStack() const;
// If an object has an AllocationMemento trailing it, return it, otherwise
// return a null AllocationMemento.
template <FindMementoMode mode>
@@ -533,7 +533,9 @@ bool MarkCompactCollector::StartCompaction() {
CollectEvacuationCandidates(heap()->old_space());
if (FLAG_compact_code_space) {
if (FLAG_compact_code_space &&
(heap()->IsGCWithoutStack() ||
!FLAG_never_compact_code_space_with_stack)) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
@@ -917,7 +919,9 @@ void MarkCompactCollector::Prepare() {
heap_->local_embedder_heap_tracer()->TracePrologue(
heap_->flags_for_embedder_tracer());
}
if (!FLAG_never_compact) {
const bool should_compact =
heap()->IsGCWithoutStack() || !FLAG_never_compact_with_stack;
if (!FLAG_never_compact && should_compact) {
StartCompaction();
}
StartMarking();
@@ -3124,7 +3128,8 @@ void MarkCompactCollector::EvacuatePrologue() {
}
void MarkCompactCollector::EvacuateEpilogue() {
aborted_evacuation_candidates_.clear();
aborted_evacuation_candidates_due_to_oom_.clear();
aborted_evacuation_candidates_due_to_flags_.clear();
// New space.
if (heap()->new_space()) {
@@ -3390,8 +3395,8 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
collector_->ReportAbortedEvacuationCandidate(failed_object.address(),
chunk);
collector_->ReportAbortedEvacuationCandidateDueToOOM(
failed_object.address(), static_cast<Page*>(chunk));
}
}
break;
@@ -3547,7 +3552,24 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
if (!heap()->IsGCWithoutStack()) {
if (FLAG_never_compact_with_stack ||
FLAG_never_compact_code_space_with_stack) {
for (Page* page : old_space_evacuation_pages_) {
if (FLAG_never_compact_with_stack ||
page->owner_identity() == CODE_SPACE) {
ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
// Set this flag early on in this case to allow filtering such pages
// below.
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
}
}
}
}
for (Page* page : old_space_evacuation_pages_) {
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
live_bytes += non_atomic_marking_state()->live_bytes(page);
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
@@ -4294,52 +4316,71 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
Address failed_start, MemoryChunk* chunk) {
void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
Address failed_start, Page* page) {
base::MutexGuard guard(&mutex_);
aborted_evacuation_candidates_due_to_oom_.push_back(
std::make_pair(failed_start, page));
}
aborted_evacuation_candidates_.push_back(
std::make_pair(failed_start, static_cast<Page*>(chunk)));
void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
Address failed_start, Page* page) {
base::MutexGuard guard(&mutex_);
aborted_evacuation_candidates_due_to_flags_.push_back(
std::make_pair(failed_start, page));
}
namespace {
void ReRecordPage(
Heap* heap,
v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
Address failed_start, Page* page) {
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place.
// Remove outdated slots.
RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
failed_start);
// Remove invalidated slots.
if (failed_start > page->area_start()) {
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::OldToNew(page);
old_to_new_cleanup.Free(page->area_start(), failed_start);
}
// Recompute live bytes.
LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
// Re-record slots.
EvacuateRecordOnlyVisitor record_visitor(heap);
LiveObjectVisitor::VisitBlackObjectsNoFail(
page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
// Array buffers will be processed during pointer updating.
}
} // namespace
size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
aborted_evacuation_candidates_.empty());
for (auto start_and_page : aborted_evacuation_candidates_) {
Address failed_start = start_and_page.first;
Page* page = start_and_page.second;
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place.
// Remove outdated slots.
RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
failed_start);
// Remove invalidated slots.
if (failed_start > page->area_start()) {
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::OldToNew(page);
old_to_new_cleanup.Free(page->area_start(), failed_start);
}
// Recompute live bytes.
LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
// Re-record slots.
EvacuateRecordOnlyVisitor record_visitor(heap());
LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
&record_visitor,
LiveObjectVisitor::kKeepMarking);
// Array buffers will be processed during pointer updating.
}
const int aborted_pages =
static_cast<int>(aborted_evacuation_candidates_.size());
int aborted_pages_verified = 0;
aborted_evacuation_candidates_due_to_oom_.empty());
for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
start_and_page.second);
}
for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
start_and_page.second);
}
const size_t aborted_pages =
aborted_evacuation_candidates_due_to_oom_.size() +
aborted_evacuation_candidates_due_to_flags_.size();
size_t aborted_pages_verified = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
// After clearing the evacuation candidate flag the page is again in a
@@ -8,6 +8,7 @@
#include <atomic>
#include <vector>
#include "include/v8-internal.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
@@ -720,8 +721,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void ReleaseEvacuationCandidates();
// Returns number of aborted pages.
size_t PostProcessEvacuationCandidates();
void ReportAbortedEvacuationCandidate(Address failed_start,
MemoryChunk* chunk);
void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
Page* page);
void ReportAbortedEvacuationCandidateDueToFlags(Address failed_start,
Page* page);
static const int kEphemeronChunkSize = 8 * KB;
@@ -774,7 +777,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Pages that are actually processed during evacuation.
std::vector<Page*> old_space_evacuation_pages_;
std::vector<Page*> new_space_evacuation_pages_;
std::vector<std::pair<Address, Page*>> aborted_evacuation_candidates_;
std::vector<std::pair<Address, Page*>>
aborted_evacuation_candidates_due_to_oom_;
std::vector<std::pair<Address, Page*>>
aborted_evacuation_candidates_due_to_flags_;
Sweeper* sweeper_;
@@ -169,7 +169,7 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
}
}
TEST_F(LocalEmbedderHeapTracerWithIsolate, EnterFinalPauseStackStateResets) {
TEST_F(LocalEmbedderHeapTracerWithIsolate, TraceEpilogueStackStateResets) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(isolate());
local_tracer.SetRemoteTracer(&remote_tracer);
@@ -179,6 +179,8 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, EnterFinalPauseStackStateResets) {
remote_tracer,
EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
local_tracer.EnterFinalPause();
EXPECT_CALL(remote_tracer, TraceEpilogue(_));
local_tracer.TraceEpilogue();
EXPECT_CALL(
remote_tracer,
EnterFinalPause(