Commit ea8d75a1 authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Add support for aborting compaction when finalizing with stack

Adds support for aborting compaction when finalizing with stack:
- never_compact_with_stack: all evacuation candidate pages are aborted;
- never_compact_code_space_with_stack: only code space evacuation
  candidate pages are aborted.

These flags allow simulating a worst case where the stack cannot be
considered precise, or where an evacuation candidate is referred to
from a stack slot that V8 has no information about.
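
For illustration, a minimal standalone sketch of how the two flags gate
candidate selection. This is not V8 code: Flags, ShouldStartCompaction and
ShouldCompactCodeSpace are made-up names that mirror the checks added to
MarkCompactCollector::Prepare() and StartCompaction() in the diff below; the
later abort path in EvacuatePagesInParallel() is not modeled here.

#include <cstdio>

// Stand-ins for the V8 flags touched by this change.
struct Flags {
  bool never_compact = false;
  bool never_compact_with_stack = false;
  bool never_compact_code_space_with_stack = false;
  bool compact_code_space = true;
};

// gc_without_stack mirrors Heap::IsGCWithoutStack() introduced in this change.
bool ShouldStartCompaction(const Flags& f, bool gc_without_stack) {
  if (f.never_compact) return false;
  // Skip compaction entirely when finalizing with stack and
  // never_compact_with_stack is set.
  return gc_without_stack || !f.never_compact_with_stack;
}

bool ShouldCompactCodeSpace(const Flags& f, bool gc_without_stack) {
  if (!f.compact_code_space) return false;
  // Skip only code space compaction when finalizing with stack and
  // never_compact_code_space_with_stack is set.
  return gc_without_stack || !f.never_compact_code_space_with_stack;
}

int main() {
  Flags f;
  f.never_compact_code_space_with_stack = true;
  // Finalizing with a potentially conservative stack: code space is excluded,
  // old space compaction still runs. Prints "compact: 1, compact code space: 0".
  std::printf("compact: %d, compact code space: %d\n",
              ShouldStartCompaction(f, /*gc_without_stack=*/false),
              ShouldCompactCodeSpace(f, /*gc_without_stack=*/false));
}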

Bug: v8:12251
Change-Id: Ice24ac87a985b8ecf7b5cbb5c106ad4a3ae1944b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3173682
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77792}
parent fd9e1262
@@ -1340,6 +1340,11 @@ DEFINE_BOOL(allocation_buffer_parking, true, "allocation buffer parking")
 DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
 DEFINE_BOOL(never_compact, false,
             "Never perform compaction on full GC - testing only")
+DEFINE_BOOL(never_compact_with_stack, false,
+            "Never perform compaction when finalizing a full GC with stack")
+DEFINE_BOOL(
+    never_compact_code_space_with_stack, false,
+    "Never perform code space compaction when finalizing a full GC with stack")
 DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
 DEFINE_BOOL(flush_baseline_code, false,
             "flush of baseline code when it has not been executed recently")
......
@@ -34,6 +34,11 @@ void LocalEmbedderHeapTracer::TracePrologue(
 void LocalEmbedderHeapTracer::TraceEpilogue() {
   if (!InUse()) return;
 
+  // Resetting to state unknown as there may be follow up garbage collections
+  // triggered from callbacks that have a different stack state.
+  embedder_stack_state_ =
+      EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
+
   EmbedderHeapTracer::TraceSummary summary;
   remote_tracer_->TraceEpilogue(&summary);
   if (summary.allocated_size == SIZE_MAX) return;
@@ -56,10 +61,6 @@ void LocalEmbedderHeapTracer::EnterFinalPause() {
   if (!InUse()) return;
 
   remote_tracer_->EnterFinalPause(embedder_stack_state_);
-  // Resetting to state unknown as there may be follow up garbage collections
-  // triggered from callbacks that have a different stack state.
-  embedder_stack_state_ =
-      EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
 }
 
 bool LocalEmbedderHeapTracer::Trace(double deadline) {
......
@@ -135,6 +135,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
   void NotifyEmptyEmbedderStack();
 
+  EmbedderHeapTracer::EmbedderStackState embedder_stack_state() const {
+    return embedder_stack_state_;
+  }
+
  private:
   static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
......
@@ -497,6 +497,11 @@ void Heap::SetGCState(HeapState state) {
   gc_state_.store(state, std::memory_order_relaxed);
 }
 
+bool Heap::IsGCWithoutStack() const {
+  return local_embedder_heap_tracer()->embedder_stack_state() ==
+         cppgc::EmbedderStackState::kNoHeapPointers;
+}
+
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
   PrintIsolate(isolate_,
......
@@ -689,6 +689,8 @@ class Heap {
   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
 
+  bool IsGCWithoutStack() const;
+
   // If an object has an AllocationMemento trailing it, return it, otherwise
   // return a null AllocationMemento.
   template <FindMementoMode mode>
......
@@ -533,7 +533,9 @@ bool MarkCompactCollector::StartCompaction() {
     CollectEvacuationCandidates(heap()->old_space());
 
-    if (FLAG_compact_code_space) {
+    if (FLAG_compact_code_space &&
+        (heap()->IsGCWithoutStack() ||
+         !FLAG_never_compact_code_space_with_stack)) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
@@ -917,7 +919,9 @@ void MarkCompactCollector::Prepare() {
       heap_->local_embedder_heap_tracer()->TracePrologue(
           heap_->flags_for_embedder_tracer());
     }
-    if (!FLAG_never_compact) {
+    const bool should_compact =
+        heap()->IsGCWithoutStack() || !FLAG_never_compact_with_stack;
+    if (!FLAG_never_compact && should_compact) {
       StartCompaction();
     }
     StartMarking();
@@ -3124,7 +3128,8 @@ void MarkCompactCollector::EvacuatePrologue() {
 }
 
 void MarkCompactCollector::EvacuateEpilogue() {
-  aborted_evacuation_candidates_.clear();
+  aborted_evacuation_candidates_due_to_oom_.clear();
+  aborted_evacuation_candidates_due_to_flags_.clear();
 
   // New space.
   if (heap()->new_space()) {
@@ -3390,8 +3395,8 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
       } else {
         // Aborted compaction page. Actual processing happens on the main
         // thread for simplicity reasons.
-        collector_->ReportAbortedEvacuationCandidate(failed_object.address(),
-                                                     chunk);
+        collector_->ReportAbortedEvacuationCandidateDueToOOM(
+            failed_object.address(), static_cast<Page*>(chunk));
       }
     }
     break;
@@ -3547,7 +3552,24 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     evacuation_items.emplace_back(ParallelWorkItem{}, page);
   }
 
+  if (!heap()->IsGCWithoutStack()) {
+    if (FLAG_never_compact_with_stack ||
+        FLAG_never_compact_code_space_with_stack) {
+      for (Page* page : old_space_evacuation_pages_) {
+        if (FLAG_never_compact_with_stack ||
+            page->owner_identity() == CODE_SPACE) {
+          ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
+          // Set this flag early on in this case to allow filtering such pages
+          // below.
+          page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        }
+      }
+    }
+  }
+
   for (Page* page : old_space_evacuation_pages_) {
+    if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
     live_bytes += non_atomic_marking_state()->live_bytes(page);
     evacuation_items.emplace_back(ParallelWorkItem{}, page);
   }
@@ -4294,21 +4316,26 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   }
 }
 
-void MarkCompactCollector::ReportAbortedEvacuationCandidate(
-    Address failed_start, MemoryChunk* chunk) {
+void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
+    Address failed_start, Page* page) {
   base::MutexGuard guard(&mutex_);
+  aborted_evacuation_candidates_due_to_oom_.push_back(
+      std::make_pair(failed_start, page));
+}
 
-  aborted_evacuation_candidates_.push_back(
-      std::make_pair(failed_start, static_cast<Page*>(chunk)));
+void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
+    Address failed_start, Page* page) {
+  base::MutexGuard guard(&mutex_);
+  aborted_evacuation_candidates_due_to_flags_.push_back(
+      std::make_pair(failed_start, page));
 }
 
-size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
-  CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
-                aborted_evacuation_candidates_.empty());
-  for (auto start_and_page : aborted_evacuation_candidates_) {
-    Address failed_start = start_and_page.first;
-    Page* page = start_and_page.second;
+namespace {
+
+void ReRecordPage(
+    Heap* heap,
+    v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
+    Address failed_start, Page* page) {
   page->SetFlag(Page::COMPACTION_WAS_ABORTED);
   // Aborted compaction page. We have to record slots here, since we
   // might not have recorded them in first place.
@@ -4329,17 +4356,31 @@ size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
   }
   // Recompute live bytes.
-  LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
+  LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
   // Re-record slots.
-  EvacuateRecordOnlyVisitor record_visitor(heap());
-  LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
-                                             &record_visitor,
-                                             LiveObjectVisitor::kKeepMarking);
+  EvacuateRecordOnlyVisitor record_visitor(heap);
+  LiveObjectVisitor::VisitBlackObjectsNoFail(
+      page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
   // Array buffers will be processed during pointer updating.
 }
-  const int aborted_pages =
-      static_cast<int>(aborted_evacuation_candidates_.size());
-  int aborted_pages_verified = 0;
+
+}  // namespace
+
+size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
+  CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
+                aborted_evacuation_candidates_due_to_oom_.empty());
+  for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
+    ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
+                 start_and_page.second);
+  }
+  for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
+    ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
+                 start_and_page.second);
+  }
+  const size_t aborted_pages =
+      aborted_evacuation_candidates_due_to_oom_.size() +
+      aborted_evacuation_candidates_due_to_flags_.size();
+  size_t aborted_pages_verified = 0;
   for (Page* p : old_space_evacuation_pages_) {
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
       // After clearing the evacuation candidate flag the page is again in a
......
@@ -8,6 +8,7 @@
 #include <atomic>
 #include <vector>
 
+#include "include/v8-internal.h"
 #include "src/heap/concurrent-marking.h"
 #include "src/heap/marking-visitor.h"
 #include "src/heap/marking-worklist.h"
@@ -720,8 +721,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   void ReleaseEvacuationCandidates();
   // Returns number of aborted pages.
   size_t PostProcessEvacuationCandidates();
-  void ReportAbortedEvacuationCandidate(Address failed_start,
-                                        MemoryChunk* chunk);
+  void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
+                                                Page* page);
+  void ReportAbortedEvacuationCandidateDueToFlags(Address failed_start,
+                                                  Page* page);
 
   static const int kEphemeronChunkSize = 8 * KB;
@@ -774,7 +777,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Pages that are actually processed during evacuation.
   std::vector<Page*> old_space_evacuation_pages_;
   std::vector<Page*> new_space_evacuation_pages_;
-  std::vector<std::pair<Address, Page*>> aborted_evacuation_candidates_;
+  std::vector<std::pair<Address, Page*>>
+      aborted_evacuation_candidates_due_to_oom_;
+  std::vector<std::pair<Address, Page*>>
+      aborted_evacuation_candidates_due_to_flags_;
 
   Sweeper* sweeper_;
......
@@ -169,7 +169,7 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
   }
 }
 
-TEST_F(LocalEmbedderHeapTracerWithIsolate, EnterFinalPauseStackStateResets) {
+TEST_F(LocalEmbedderHeapTracerWithIsolate, TraceEpilogueStackStateResets) {
   StrictMock<MockEmbedderHeapTracer> remote_tracer;
   LocalEmbedderHeapTracer local_tracer(isolate());
   local_tracer.SetRemoteTracer(&remote_tracer);
@@ -179,6 +179,8 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, EnterFinalPauseStackStateResets) {
       remote_tracer,
       EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
   local_tracer.EnterFinalPause();
+  EXPECT_CALL(remote_tracer, TraceEpilogue(_));
+  local_tracer.TraceEpilogue();
   EXPECT_CALL(
       remote_tracer,
       EnterFinalPause(
......