Commit 1054ee7f authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Improve ephemeron processing

Refactor the code so that the linear algorithm is executed outside
the method for the fixpoint iteration. Also add a CHECK that verifies
that iterating the ephemerons one more time marks no further objects.

Also force another iteration when ProcessMarkingWorklist() processed
at least one object. In that case we need to re-process all
ephemerons, otherwise ephemeron semantics might be broken.

Bug: chromium:1252918
Change-Id: I9123dda50e34227a04825fd8b3172368286cc76f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3190100
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77131}
parent bbd44150
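The sketch below is a minimal, self-contained C++ model of the ephemeron fixpoint described above. It is an illustration only, not V8 code: the object graph, the worklists, and the helper names (strong_edges, DrainMarkingWorklist, and so on) are invented stand-ins. It shows why draining the marking worklist must force another ephemeron pass even when no ephemeron fired directly, which is the case this commit fixes.

// Toy model of ephemeron fixpoint marking (illustration only, not V8 code).
#include <cstddef>
#include <cstdio>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using ObjectId = int;

struct Ephemeron {
  ObjectId key;
  ObjectId value;
};

// Strong edges: object -> objects it references strongly.
std::unordered_map<ObjectId, std::vector<ObjectId>> strong_edges;
std::unordered_set<ObjectId> marked;
std::vector<ObjectId> marking_worklist;
std::vector<Ephemeron> next_ephemerons;  // ephemerons to retry in the next pass

// Marks the value if the key is already marked; returns true if it marked
// something new. Unresolved ephemerons are pushed back for the next pass.
bool ProcessEphemeron(const Ephemeron& e) {
  if (marked.count(e.key) && !marked.count(e.value)) {
    marked.insert(e.value);
    marking_worklist.push_back(e.value);
    return true;
  }
  if (!marked.count(e.key)) next_ephemerons.push_back(e);
  return false;
}

// Drains the marking worklist and returns the number of objects processed,
// mirroring the objects_processed counter the patch introduces.
std::size_t DrainMarkingWorklist() {
  std::size_t objects_processed = 0;
  while (!marking_worklist.empty()) {
    ObjectId obj = marking_worklist.back();
    marking_worklist.pop_back();
    ++objects_processed;
    for (ObjectId target : strong_edges[obj]) {
      if (marked.insert(target).second) marking_worklist.push_back(target);
    }
  }
  return objects_processed;
}

void ProcessEphemeronsUntilFixpoint() {
  bool another_iteration;
  do {
    another_iteration = false;
    std::vector<Ephemeron> current;
    current.swap(next_ephemerons);
    for (const Ephemeron& e : current) {
      if (ProcessEphemeron(e)) another_iteration = true;
    }
    // Draining may mark new ephemeron keys. Even if no ephemeron fired in
    // this pass, processing any object forces another pass; otherwise a
    // value whose key just became reachable could be missed.
    if (DrainMarkingWorklist() > 0) another_iteration = true;
  } while (another_iteration);
}

int main() {
  // Root 1 strongly references 2; ephemeron (key 2 -> value 3).
  strong_edges[1] = {2};
  marked.insert(1);
  marking_worklist.push_back(1);
  next_ephemerons.push_back({2, 3});
  ProcessEphemeronsUntilFixpoint();
  // 3 must be marked: key 2 became reachable while draining the worklist.
  std::printf("3 marked: %s\n", marked.count(3) ? "yes" : "no");
  return 0;
}

Without the "DrainMarkingWorklist() > 0" check, the loop in this toy model would stop after the first pass and object 3 would never be marked, even though its key becomes reachable during that pass.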
@@ -449,7 +449,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
task_id);
}
bool ephemeron_marked = false;
bool another_ephemeron_iteration = false;
{
TimedScope scope(&time_ms);
@@ -459,7 +459,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
another_ephemeron_iteration = true;
}
}
}
@@ -512,6 +512,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
current_marked_bytes += visited_size;
}
}
if (objects_processed > 0) another_ephemeron_iteration = true;
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
@@ -527,7 +528,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
another_ephemeron_iteration = true;
}
}
}
@@ -548,8 +549,8 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
if (ephemeron_marked) {
set_ephemeron_marked(true);
if (another_ephemeron_iteration) {
set_another_ephemeron_iteration(true);
}
}
if (FLAG_trace_concurrent_marking) {
......
@@ -91,10 +91,12 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
size_t TotalMarkedBytes();
void set_ephemeron_marked(bool ephemeron_marked) {
ephemeron_marked_.store(ephemeron_marked);
void set_another_ephemeron_iteration(bool another_ephemeron_iteration) {
another_ephemeron_iteration_.store(another_ephemeron_iteration);
}
bool another_ephemeron_iteration() {
return another_ephemeron_iteration_.load();
}
bool ephemeron_marked() { return ephemeron_marked_.load(); }
private:
struct TaskState {
@@ -115,7 +117,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};
std::atomic<bool> another_ephemeron_iteration_{false};
};
} // namespace internal
......
@@ -944,7 +944,8 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
// This ignores the case where the embedder finds new V8-side objects. The
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
std::tie(v8_bytes_processed, std::ignore) =
collector_->ProcessMarkingWorklist(bytes_to_process);
StepResult v8_result = local_marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining;
......
@@ -1714,24 +1714,24 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
marking_visitor_->Visit(obj.map(), obj);
}
void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
bool work_to_do = true;
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
int iterations = 0;
int max_iterations = FLAG_ephemeron_fixpoint_iterations;
while (work_to_do) {
bool another_ephemeron_iteration_main_thread;
do {
PerformWrapperTracing();
if (iterations >= max_iterations) {
// Give up fixpoint iteration and switch to linear algorithm.
ProcessEphemeronsLinear();
break;
return false;
}
// Move ephemerons from next_ephemerons into current_ephemerons to
// drain them in this iteration.
weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
heap()->concurrent_marking()->set_ephemeron_marked(false);
heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
{
TRACE_GC(heap()->tracer(),
@@ -1742,47 +1742,54 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
TaskPriority::kUserBlocking);
}
work_to_do = ProcessEphemerons();
another_ephemeron_iteration_main_thread = ProcessEphemerons();
FinishConcurrentMarking();
}
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
work_to_do = work_to_do || !local_marking_worklists()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
} while (another_ephemeron_iteration_main_thread ||
heap()->concurrent_marking()->another_ephemeron_iteration() ||
!local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
return true;
}
bool MarkCompactCollector::ProcessEphemerons() {
Ephemeron ephemeron;
bool ephemeron_marked = false;
bool another_ephemeron_iteration = false;
// Drain current_ephemerons and push ephemerons where key and value are still
// unreachable into next_ephemerons.
while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
another_ephemeron_iteration = true;
}
}
// Drain marking worklist and push discovered ephemerons into
// discovered_ephemerons.
DrainMarkingWorklist();
size_t objects_processed;
std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
// As soon as a single object was processed and potentially marked another
// object, we need another iteration. Otherwise we might fail to apply
// ephemeron semantics to it.
if (objects_processed > 0) another_ephemeron_iteration = true;
// Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
// before) and push ephemerons where key and value are still unreachable into
// next_ephemerons.
while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
another_ephemeron_iteration = true;
}
}
@@ -1790,7 +1797,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
return ephemeron_marked;
return another_ephemeron_iteration;
}
void MarkCompactCollector::ProcessEphemeronsLinear() {
@@ -1876,6 +1883,12 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(local_marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
// Flush local ephemerons for main task to global pool.
weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
}
void MarkCompactCollector::PerformWrapperTracing() {
@@ -1897,9 +1910,11 @@ void MarkCompactCollector::PerformWrapperTracing() {
void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
size_t objects_processed = 0;
bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
while (local_marking_worklists()->Pop(&object) ||
@@ -1939,18 +1954,19 @@ size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
map, object, visited_size);
}
bytes_processed += visited_size;
objects_processed++;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
break;
}
}
return bytes_processed;
return std::make_pair(bytes_processed, objects_processed);
}
// Generate definitions for use in other files.
template size_t MarkCompactCollector::ProcessMarkingWorklist<
template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
size_t bytes_to_process);
template size_t MarkCompactCollector::ProcessMarkingWorklist<
template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
@@ -1975,7 +1991,23 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
// buffer, flush it into global pool.
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
ProcessEphemeronsUntilFixpoint();
if (!ProcessEphemeronsUntilFixpoint()) {
// Fixpoint iteration needed too many iterations and was cancelled. Use the
// guaranteed linear algorithm.
ProcessEphemeronsLinear();
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Ephemeron ephemeron;
weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
}
}
#endif
CHECK(local_marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
......
@@ -586,7 +586,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// is drained until it is empty.
template <MarkingWorklistProcessingMode mode =
MarkingWorklistProcessingMode::kDefault>
size_t ProcessMarkingWorklist(size_t bytes_to_process);
std::pair<size_t, size_t> ProcessMarkingWorklist(size_t bytes_to_process);
private:
void ComputeEvacuationHeuristics(size_t area_size,
@@ -632,8 +632,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool ProcessEphemeron(HeapObject key, HeapObject value);
// Marks ephemerons and drains marking worklist iteratively
// until a fixpoint is reached.
void ProcessEphemeronsUntilFixpoint();
// until a fixpoint is reached. Returns false if too many iterations have been
// tried and the linear approach should be used.
bool ProcessEphemeronsUntilFixpoint();
// Drains ephemeron and marking worklists. Single iteration of the
// fixpoint iteration.
......
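As a usage note, ProcessMarkingWorklist() now reports both bytes and objects processed. The stub below is a simplified, standalone illustration of how the two call sites in this change consume the returned pair; the function body here is a placeholder, not the real worklist drain.

// Standalone illustration of consuming a pair-returning worklist drain
// (stubbed; not the real MarkCompactCollector implementation).
#include <cstddef>
#include <tuple>
#include <utility>

std::pair<std::size_t, std::size_t> ProcessMarkingWorklist(
    std::size_t bytes_to_process) {
  std::size_t bytes_processed = 0;
  std::size_t objects_processed = 0;
  // ... drain objects here, updating both counters, until the byte budget
  // (if any) is exhausted ...
  (void)bytes_to_process;
  return std::make_pair(bytes_processed, objects_processed);
}

int main() {
  // Incremental marking step: only the byte count matters.
  std::size_t v8_bytes_processed;
  std::tie(v8_bytes_processed, std::ignore) = ProcessMarkingWorklist(4096);

  // Ephemeron fixpoint: only whether any object was processed matters.
  std::size_t objects_processed;
  std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
  bool another_ephemeron_iteration = objects_processed > 0;

  (void)v8_bytes_processed;
  (void)another_ephemeron_iteration;
  return 0;
}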