Commit 28133adc authored by Ulan Degenbaev, committed by Commit Bot

[heap] Split marking worklist into global worklist and local worklists

This is the first step in refactoring Worklist to allow an arbitrary
number of local worklists with private segments:
- Introduce MarkingWorklistImpl<> which will eventually replace
  (and will be renamed to) Worklist.
- MarkingWorklistImpl<> owns the global pool of segments but does not
  keep track of private segments.
- MarkingWorklistImpl<>::Local owns private segments and can be
  constructed dynamically on background threads.
- Rename the existing MarkingWorklistsHolder to MarkingWorklists.
- Rename the existing MarkingWorklists to MarkingWorklists::Local.
- Rename the existing marking_worklists_holder to marking_worklists.
- Rename the existing marking_worklists to local_marking_worklists.

Design doc: https://bit.ly/2XMtjLi
Bug: v8:10315

Change-Id: I9da34883ad34f4572fccd40c51e51eaf50c617bc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2343330
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69330}
parent 45928320
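
Before the diff, a minimal sketch of the ownership split the commit message describes. This is illustrative only: the real MarkingWorklistImpl<> in src/heap/marking-worklist.h manages fixed-size segments and synchronization differently, and every name below other than MarkingWorklistImpl, Local, Push, Pop, and Publish is an assumption, not V8 code.

```cpp
#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>

// Simplified model of the split: the global object owns only the pool of
// published segments; each Local owns a private segment nobody else sees.
template <typename EntryType, size_t kSegmentCapacity>
class MarkingWorklistImpl {
 public:
  using Segment = std::vector<EntryType>;  // assumption: segment = vector

  class Local {
   public:
    explicit Local(MarkingWorklistImpl* global) : global_(global) {}

    void Push(EntryType entry) {
      private_segment_.push_back(std::move(entry));
      if (private_segment_.size() == kSegmentCapacity) Publish();
    }

    bool Pop(EntryType* entry) {
      if (private_segment_.empty() &&
          !global_->TakeSegment(&private_segment_)) {
        return false;  // private segment and global pool are both empty
      }
      *entry = std::move(private_segment_.back());
      private_segment_.pop_back();
      return true;
    }

    // Hands the private segment to the global pool so Locals on other
    // threads can pick it up.
    void Publish() {
      if (private_segment_.empty()) return;
      global_->PublishSegment(std::move(private_segment_));
      private_segment_.clear();
    }

   private:
    MarkingWorklistImpl* const global_;
    Segment private_segment_;  // owned here; invisible to the global object
  };

  void PublishSegment(Segment segment) {
    std::lock_guard<std::mutex> guard(mutex_);
    pool_.push_back(std::move(segment));
  }

  bool TakeSegment(Segment* out) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (pool_.empty()) return false;
    *out = std::move(pool_.back());
    pool_.pop_back();
    return true;
  }

 private:
  std::mutex mutex_;                 // assumption; the real pool may be lock-free
  std::vector<Segment> pool_;        // only published segments live here
};
```

The point the bullets above make is visible in the sketch: because the global object never tracks private segments, a Local can be constructed and destroyed dynamically on any background thread.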
......@@ -2587,6 +2587,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/marking-barrier.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking-worklist-inl.h",
"src/heap/marking-worklist.cc",
"src/heap/marking-worklist.h",
"src/heap/marking.cc",
......
......@@ -79,13 +79,14 @@ class ConcurrentMarkingVisitor final
: public MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState> {
public:
ConcurrentMarkingVisitor(int task_id, MarkingWorklists* marking_worklists,
ConcurrentMarkingVisitor(int task_id,
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, marking_worklists, weak_objects, heap,
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode,
embedder_tracing_enabled, is_forced_gc),
marking_state_(memory_chunk_data),
......@@ -145,7 +146,7 @@ class ConcurrentMarkingVisitor final
bool ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
marking_worklists_->Push(value);
local_marking_worklists_->Push(value);
return true;
}
......@@ -369,11 +370,11 @@ class ConcurrentMarking::Task : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(Task);
};
ConcurrentMarking::ConcurrentMarking(
Heap* heap, MarkingWorklistsHolder* marking_worklists_holder,
WeakObjects* weak_objects)
ConcurrentMarking::ConcurrentMarking(Heap* heap,
MarkingWorklists* marking_worklists,
WeakObjects* weak_objects)
: heap_(heap),
marking_worklists_holder_(marking_worklists_holder),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
......@@ -386,9 +387,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
MarkingWorklists marking_worklists(task_id, marking_worklists_holder_);
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &marking_worklists, weak_objects_, heap_,
task_id, &local_marking_worklists, weak_objects_, heap_,
task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
&task_state->memory_chunk_data);
......@@ -416,7 +417,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
}
bool is_per_context_mode = marking_worklists.IsPerContextMode();
bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
bool done = false;
while (!done) {
size_t current_marked_bytes = 0;
......@@ -424,7 +425,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterrupCheck) {
HeapObject object;
if (!marking_worklists.Pop(&object)) {
if (!local_marking_worklists.Pop(&object)) {
done = true;
break;
}
......@@ -436,19 +437,19 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address addr = object.address();
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
marking_worklists.PushOnHold(object);
local_marking_worklists.PushOnHold(object);
} else {
Map map = object.synchronized_map(isolate);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer.Infer(isolate, map, object, &context)) {
marking_worklists.SwitchToContext(context);
local_marking_worklists.SwitchToContext(context);
}
}
size_t visited_size = visitor.Visit(map, object);
if (is_per_context_mode) {
native_context_stats.IncrementSize(marking_worklists.Context(), map,
object, visited_size);
native_context_stats.IncrementSize(
local_marking_worklists.Context(), map, object, visited_size);
}
current_marked_bytes += visited_size;
}
......@@ -473,7 +474,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
marking_worklists.FlushToGlobal();
local_marking_worklists.Publish();
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
weak_objects_->current_ephemerons.FlushToGlobal(task_id);
......@@ -529,8 +530,6 @@ void ConcurrentMarking::ScheduleTasks() {
total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
}
DCHECK_LE(total_task_count_, kMaxTasks);
// One task is for the main thread.
STATIC_ASSERT(kMaxTasks + 1 <= MarkingWorklist::kMaxNumTasks);
}
// Task id 0 is for the main thread.
for (int i = 1; i <= total_task_count_; i++) {
......@@ -565,7 +564,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
return;
}
}
if (!marking_worklists_holder_->shared()->IsGlobalPoolEmpty() ||
if (!marking_worklists_->shared()->IsEmpty() ||
!weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
!weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
ScheduleTasks();
......
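
As the Run() hunk above shows, a marking task no longer addresses a preallocated per-task slot via task_id; it builds its own MarkingWorklists::Local and publishes leftover work before exiting. A hedged sketch of that pattern (the task body is illustrative, not the actual ConcurrentMarking::Run):

```cpp
// Illustrative task body; the global MarkingWorklists must outlive the task.
void RunMarkingTask(MarkingWorklists* global_worklists) {
  MarkingWorklists::Local local(global_worklists);  // thread-private segments
  HeapObject object;
  while (local.Pop(&object)) {
    // ... visit `object`, pushing newly marked objects via local.Push() ...
  }
  local.Publish();  // return remaining private segments to the global pool
}
```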
......@@ -70,8 +70,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
ConcurrentMarking(Heap* heap,
MarkingWorklistsHolder* marking_worklists_holder,
ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
......@@ -118,7 +117,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
class Task;
void Run(int task_id, TaskState* task_state);
Heap* const heap_;
MarkingWorklistsHolder* const marking_worklists_holder_;
MarkingWorklists* const marking_worklists_;
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
......
......@@ -1259,7 +1259,7 @@ TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
return isolate_->counters()->gc_finalize_reduce_memory();
}
if (incremental_marking()->IsMarking() &&
incremental_marking()->marking_worklists()->IsPerContextMode()) {
incremental_marking()->local_marking_worklists()->IsPerContextMode()) {
return isolate_->counters()->gc_finalize_measure_memory();
}
return isolate_->counters()->gc_finalize();
......@@ -3489,12 +3489,12 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_worklists()->IsEmpty() &&
mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarkingIncrementally(gc_reason);
} else if (incremental_marking()->IsComplete() ||
(incremental_marking()->IsMarking() &&
mark_compact_collector()->marking_worklists()->IsEmpty() &&
mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
......@@ -5277,7 +5277,7 @@ void Heap::SetUp() {
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
concurrent_marking_.reset(new ConcurrentMarking(
this, mark_compact_collector_->marking_worklists_holder(),
this, mark_compact_collector_->marking_worklists(),
mark_compact_collector_->weak_objects()));
} else {
concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
......
......@@ -35,7 +35,7 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
return true;
}
return false;
......
......@@ -432,7 +432,8 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
heap()->minor_mark_compact_collector()->marking_state();
#endif // ENABLE_MINOR_MC
collector_->marking_worklists_holder()->Update(
collector_->local_marking_worklists()->Publish();
collector_->marking_worklists()->Update(
[
#ifdef DEBUG
// this is referred inside DCHECK.
......@@ -633,7 +634,7 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
HeapObject object;
size_t cnt = 0;
empty_worklist = true;
while (marking_worklists()->PopEmbedder(&object)) {
while (local_marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
......@@ -658,7 +659,7 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
}
void IncrementalMarking::Hurry() {
if (!marking_worklists()->IsEmpty()) {
if (!local_marking_worklists()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
......@@ -1042,14 +1043,14 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
marking_worklists()->MergeOnHold();
local_marking_worklists()->MergeOnHold();
}
// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
collector_->marking_worklists_holder()->Print();
collector_->marking_worklists()->Print();
}
#endif
if (FLAG_trace_incremental_marking) {
......@@ -1073,7 +1074,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
StepResult v8_result = marking_worklists()->IsEmpty()
StepResult v8_result = local_marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining;
StepResult embedder_result = StepResult::kNoImmediateWork;
......@@ -1098,7 +1099,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
}
}
if (FLAG_concurrent_marking) {
marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
local_marking_worklists()->ShareWork();
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
}
......
......@@ -209,8 +209,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
}
}
MarkingWorklists* marking_worklists() const {
return collector_->marking_worklists();
MarkingWorklists::Local* local_marking_worklists() const {
return collector_->local_marking_worklists();
}
void Deactivate();
......
......@@ -10,6 +10,8 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/js-collection-inl.h"
......@@ -22,7 +24,7 @@ namespace internal {
void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
}
......@@ -31,7 +33,7 @@ void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
}
......@@ -51,7 +53,7 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
}
......
......@@ -455,7 +455,8 @@ void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
if (heap()->incremental_marking()->IsMarking()) {
marking_worklists_holder()->Clear();
local_marking_worklists()->Publish();
marking_worklists()->Clear();
}
}
......@@ -520,12 +521,12 @@ void MarkCompactCollector::StartMarking() {
contexts.push_back(context->ptr());
}
}
marking_worklists_holder()->CreateContextWorklists(contexts);
marking_worklists_ = std::make_unique<MarkingWorklists>(
kMainThreadTask, marking_worklists_holder());
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), marking_worklists(), weak_objects(), heap_, epoch(),
Heap::GetBytecodeFlushMode(),
marking_state(), local_marking_worklists(), weak_objects(), heap_,
epoch(), Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
// Marking bits are cleared by the sweeper.
......@@ -912,7 +913,7 @@ void MarkCompactCollector::FinishConcurrentMarking(
}
void MarkCompactCollector::VerifyMarking() {
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
......@@ -939,8 +940,8 @@ void MarkCompactCollector::Finish() {
#endif
marking_visitor_.reset();
marking_worklists_.reset();
marking_worklists_holder_.ReleaseContextWorklists();
local_marking_worklists_.reset();
marking_worklists_.ReleaseContextWorklists();
native_context_stats_.Clear();
CHECK(weak_objects_.current_ephemerons.IsEmpty());
......@@ -1663,14 +1664,14 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
work_to_do = work_to_do || !marking_worklists()->IsEmpty() ||
work_to_do = work_to_do || !local_marking_worklists()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
!marking_worklists()->IsEmbedderEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
......@@ -1759,7 +1760,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
marking_worklists()->Push(ephemeron.value);
local_marking_worklists()->Push(ephemeron.value);
}
});
......@@ -1780,8 +1781,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// for work_to_do are not sufficient for determining if another iteration
// is necessary.
work_to_do = !marking_worklists()->IsEmpty() ||
!marking_worklists()->IsEmbedderEmpty() ||
work_to_do = !local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
......@@ -1789,7 +1790,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
}
void MarkCompactCollector::PerformWrapperTracing() {
......@@ -1799,7 +1800,7 @@ void MarkCompactCollector::PerformWrapperTracing() {
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
while (marking_worklists()->PopEmbedder(&object)) {
while (local_marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
......@@ -1814,10 +1815,10 @@ template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
bool is_per_context_mode = marking_worklists()->IsPerContextMode();
bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
while (marking_worklists()->Pop(&object) ||
marking_worklists()->PopOnHold(&object)) {
while (local_marking_worklists()->Pop(&object) ||
local_marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
if (object.IsFreeSpaceOrFiller()) {
......@@ -1844,13 +1845,13 @@ size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
if (is_per_context_mode) {
Address context;
if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
marking_worklists()->SwitchToContext(context);
local_marking_worklists()->SwitchToContext(context);
}
}
size_t visited_size = marking_visitor_->Visit(map, object);
if (is_per_context_mode) {
native_context_stats_.IncrementSize(marking_worklists()->Context(), map,
object, visited_size);
native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
map, object, visited_size);
}
bytes_processed += visited_size;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
......@@ -1871,7 +1872,7 @@ template size_t MarkCompactCollector::ProcessMarkingWorklist<
bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklists()->Push(value);
local_marking_worklists()->Push(value);
return true;
}
......@@ -1883,7 +1884,7 @@ bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
}
void MarkCompactCollector::ProcessEphemeronMarking() {
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
// Incremental marking might leave ephemerons in main task's local
// buffer, flush it into global pool.
......@@ -1891,7 +1892,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
ProcessEphemeronsUntilFixpoint();
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
......@@ -1983,7 +1984,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
// Mark objects reachable through the embedder heap. This phase is
// opportunistic as it may not discover graphs that are only reachable
......@@ -1998,9 +1999,9 @@ void MarkCompactCollector::MarkLiveObjects() {
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
!marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
!local_marking_worklists()->IsEmbedderEmpty());
DCHECK(local_marking_worklists()->IsEmbedderEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
// The objects reachable from the roots are marked, yet unreachable objects
......@@ -2010,7 +2011,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
ProcessEphemeronMarking();
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
// The objects reachable from the roots, weak maps, and embedder heap
......@@ -2042,8 +2043,8 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmbedderEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
{
......
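
Condensing the TearDown()/StartMarking()/Finish() hunks above, the main-thread lifecycle of the new objects looks roughly like this (worklist-related lines only; surrounding logic elided):

```cpp
// Condensed from the hunks above; not a complete class.
void MarkCompactCollector::StartMarking() {
  marking_worklists()->CreateContextWorklists(contexts);
  local_marking_worklists_ =
      std::make_unique<MarkingWorklists::Local>(marking_worklists());
  // ... marking runs against local_marking_worklists() from here on ...
}

void MarkCompactCollector::Finish() {
  local_marking_worklists_.reset();  // destructor DCHECKs local emptiness
  marking_worklists_.ReleaseContextWorklists();
}

void MarkCompactCollector::TearDown() {
  // Aborting marking midway: publish private segments, then clear globals.
  local_marking_worklists()->Publish();
  marking_worklists()->Clear();
}
```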
......@@ -377,13 +377,13 @@ class MainMarkingVisitor final
};
MainMarkingVisitor(MarkingState* marking_state,
MarkingWorklists* marking_worklists,
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, marking_worklists, weak_objects, heap,
kMainThreadTask, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
is_forced_gc),
marking_state_(marking_state),
......@@ -527,10 +527,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool evacuation() const { return evacuation_; }
MarkingWorklistsHolder* marking_worklists_holder() {
return &marking_worklists_holder_;
MarkingWorklists* marking_worklists() { return &marking_worklists_; }
MarkingWorklists::Local* local_marking_worklists() {
return local_marking_worklists_.get();
}
MarkingWorklists* marking_worklists() { return marking_worklists_.get(); }
WeakObjects* weak_objects() { return &weak_objects_; }
......@@ -758,13 +759,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool have_code_to_deoptimize_;
MarkingWorklistsHolder marking_worklists_holder_;
MarkingWorklists marking_worklists_;
WeakObjects weak_objects_;
EphemeronMarking ephemeron_marking_;
std::unique_ptr<MarkingVisitor> marking_visitor_;
std::unique_ptr<MarkingWorklists> marking_worklists_;
std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
NativeContextInferrer native_context_inferrer_;
NativeContextStats native_context_stats_;
......
......@@ -35,7 +35,7 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state_.WhiteToGrey(obj)) {
collector_->marking_worklists()->Push(obj);
collector_->local_marking_worklists()->Push(obj);
return true;
}
return false;
......
......@@ -22,7 +22,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
concrete_visitor()->SynchronizePageAccess(object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
marking_worklists_->Push(object);
local_marking_worklists_->Push(object);
if (V8_UNLIKELY(concrete_visitor()->retaining_path_mode() ==
TraceRetainingPathMode::kEnabled)) {
heap_->AddRetainer(host, object);
......@@ -183,7 +183,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
if (end < size) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
marking_worklists_->Push(object);
local_marking_worklists_->Push(object);
}
}
return end - start;
......@@ -220,7 +220,7 @@ int MarkingVisitorBase<ConcreteVisitor,
if (size && is_embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
// the main thread.
marking_worklists_->PushEmbedder(object);
local_marking_worklists_->PushEmbedder(object);
}
return size;
}
......
......@@ -147,12 +147,13 @@ class MarkingStateBase {
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(int task_id, MarkingWorklists* marking_worklists,
MarkingVisitorBase(int task_id,
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: marking_worklists_(marking_worklists),
: local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
......@@ -231,7 +232,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklists* const marking_worklists_;
MarkingWorklists::Local* const local_marking_worklists_;
WeakObjects* const weak_objects_;
Heap* const heap_;
const int task_id_;
......
[Collapsed diff not shown.]
......@@ -7,6 +7,7 @@
#include <algorithm>
#include <map>
#include "src/heap/marking-worklist-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/instance-type-inl.h"
......@@ -17,27 +18,35 @@
namespace v8 {
namespace internal {
MarkingWorklistsHolder::~MarkingWorklistsHolder() {
MarkingWorklists::~MarkingWorklists() {
DCHECK(shared_.IsEmpty());
DCHECK(on_hold_.IsEmpty());
DCHECK(other_.IsEmpty());
DCHECK(worklists_.empty());
DCHECK(context_worklists_.empty());
}
void MarkingWorklistsHolder::Clear() {
void MarkingWorklists::Clear() {
shared_.Clear();
on_hold_.Clear();
embedder_.Clear();
other_.Clear();
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
// These contexts were cleared above.
continue;
}
cw.worklist->Clear();
}
ReleaseContextWorklists();
}
void MarkingWorklistsHolder::Print() {
void MarkingWorklists::Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("on_hold", &on_hold_);
}
void MarkingWorklistsHolder::CreateContextWorklists(
void MarkingWorklists::CreateContextWorklists(
const std::vector<Address>& contexts) {
DCHECK(worklists_.empty());
DCHECK(context_worklists_.empty());
......@@ -53,17 +62,17 @@ void MarkingWorklistsHolder::CreateContextWorklists(
}
}
void MarkingWorklistsHolder::ReleaseContextWorklists() {
void MarkingWorklists::ReleaseContextWorklists() {
context_worklists_.clear();
worklists_.clear();
}
void MarkingWorklistsHolder::PrintWorklist(const char* worklist_name,
MarkingWorklist* worklist) {
void MarkingWorklists::PrintWorklist(const char* worklist_name,
MarkingWorklist* worklist) {
#ifdef DEBUG
std::map<InstanceType, int> count;
int total_count = 0;
worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
worklist->Iterate([&count, &total_count](HeapObject obj) {
++total_count;
count[obj.map().instance_type()]++;
});
......@@ -85,111 +94,128 @@ void MarkingWorklistsHolder::PrintWorklist(const char* worklist_name,
#endif
}
MarkingWorklists::MarkingWorklists(int task_id, MarkingWorklistsHolder* holder)
: shared_(holder->shared()),
on_hold_(holder->on_hold()),
embedder_(holder->embedder()),
active_(shared_),
active_context_(kSharedContext),
task_id_(task_id),
is_per_context_mode_(false),
context_worklists_(holder->context_worklists()) {
if (!context_worklists_.empty()) {
const Address MarkingWorklists::Local::kSharedContext;
const Address MarkingWorklists::Local::kOtherContext;
MarkingWorklists::Local::Local(MarkingWorklists* global)
: on_hold_(global->on_hold()),
embedder_(global->embedder()),
is_per_context_mode_(false) {
if (global->context_worklists().empty()) {
MarkingWorklist::Local shared(global->shared());
active_ = std::move(shared);
active_context_ = kSharedContext;
active_owner_ = nullptr;
} else {
is_per_context_mode_ = true;
worklist_by_context_.reserve(context_worklists_.size());
for (auto& cw : context_worklists_) {
worklist_by_context_[cw.context] = cw.worklist;
worklist_by_context_.reserve(global->context_worklists().size());
for (auto& cw : global->context_worklists()) {
worklist_by_context_[cw.context] =
std::make_unique<MarkingWorklist::Local>(cw.worklist);
}
active_owner_ = worklist_by_context_[kSharedContext].get();
active_ = std::move(*active_owner_);
active_context_ = kSharedContext;
}
}
MarkingWorklists::Local::~Local() {
DCHECK(active_.IsLocalEmpty());
if (is_per_context_mode_) {
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
DCHECK(cw.second->IsLocalEmpty());
}
}
}
}
void MarkingWorklists::FlushToGlobal() {
shared_->FlushToGlobal(task_id_);
on_hold_->FlushToGlobal(task_id_);
embedder_->FlushToGlobal(task_id_);
void MarkingWorklists::Local::Publish() {
active_.Publish();
on_hold_.Publish();
embedder_.Publish();
if (is_per_context_mode_) {
for (auto& cw : context_worklists_) {
cw.worklist->FlushToGlobal(task_id_);
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
cw.second->Publish();
}
}
}
}
bool MarkingWorklists::IsEmpty() {
bool MarkingWorklists::Local::IsEmpty() {
// This function checks the on_hold_ worklist, so it works only for the main
// thread.
DCHECK_EQ(kMainThreadTask, task_id_);
if (!active_->IsLocalEmpty(task_id_) || !on_hold_->IsLocalEmpty(task_id_) ||
!active_->IsGlobalPoolEmpty() || !on_hold_->IsGlobalPoolEmpty()) {
if (!active_.IsLocalEmpty() || !on_hold_.IsLocalEmpty() ||
!active_.IsGlobalEmpty() || !on_hold_.IsGlobalEmpty()) {
return false;
}
if (!is_per_context_mode_) {
DCHECK_EQ(active_, shared_);
return true;
}
for (auto& cw : context_worklists_) {
if (!cw.worklist->IsLocalEmpty(task_id_) ||
!cw.worklist->IsGlobalPoolEmpty()) {
active_ = cw.worklist;
active_context_ = cw.context;
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_ &&
!(cw.second->IsLocalEmpty() && cw.second->IsGlobalEmpty())) {
SwitchToContext(cw.first, cw.second.get());
return false;
}
}
return true;
}
bool MarkingWorklists::IsEmbedderEmpty() {
return embedder_->IsLocalEmpty(task_id_) && embedder_->IsGlobalPoolEmpty();
bool MarkingWorklists::Local::IsEmbedderEmpty() const {
return embedder_.IsLocalEmpty() && embedder_.IsGlobalEmpty();
}
void MarkingWorklists::ShareWorkIfGlobalPoolIsEmpty() {
if (!shared_->IsLocalEmpty(task_id_) && shared_->IsGlobalPoolEmpty()) {
shared_->FlushToGlobal(task_id_);
void MarkingWorklists::Local::ShareWork() {
if (!active_.IsLocalEmpty() && active_.IsGlobalEmpty()) {
active_.Publish();
}
if (is_per_context_mode_ && shared_ != active_) {
if (!active_->IsLocalEmpty(task_id_) && active_->IsGlobalPoolEmpty()) {
active_->FlushToGlobal(task_id_);
if (is_per_context_mode_ && active_context_ != kSharedContext) {
MarkingWorklist::Local* shared = worklist_by_context_[kSharedContext].get();
if (!shared->IsLocalEmpty() && shared->IsGlobalEmpty()) {
shared->Publish();
}
}
}
void MarkingWorklists::MergeOnHold() {
DCHECK_EQ(kMainThreadTask, task_id_);
shared_->MergeGlobalPool(on_hold_);
void MarkingWorklists::Local::MergeOnHold() {
MarkingWorklist::Local* shared =
active_context_ == kSharedContext
? &active_
: worklist_by_context_[kSharedContext].get();
shared->Merge(&on_hold_);
}
bool MarkingWorklists::PopContext(HeapObject* object) {
bool MarkingWorklists::Local::PopContext(HeapObject* object) {
DCHECK(is_per_context_mode_);
// As an optimization we first check only the local segments to avoid locks.
for (auto& cw : context_worklists_) {
if (!cw.worklist->IsLocalEmpty(task_id_)) {
active_ = cw.worklist;
active_context_ = cw.context;
return active_->Pop(task_id_, object);
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_ && !cw.second->IsLocalEmpty()) {
SwitchToContext(cw.first, cw.second.get());
return active_.Pop(object);
}
}
// All local segments are empty. Check global segments.
for (auto& cw : context_worklists_) {
if (cw.worklist->Pop(task_id_, object)) {
active_ = cw.worklist;
active_context_ = cw.context;
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_ && cw.second->Pop(object)) {
SwitchToContext(cw.first, cw.second.get());
return true;
}
}
// All worklists are empty. Switch to the default shared worklist.
SwitchToShared();
SwitchToContext(kSharedContext);
return false;
}
Address MarkingWorklists::SwitchToContextSlow(Address context) {
Address MarkingWorklists::Local::SwitchToContextSlow(Address context) {
const auto& it = worklist_by_context_.find(context);
if (V8_UNLIKELY(it == worklist_by_context_.end())) {
// This context was created during marking or is not being measured,
// so we don't have a specific worklist for it.
active_context_ = kOtherContext;
active_ = worklist_by_context_[active_context_];
SwitchToContext(kOtherContext, worklist_by_context_[kOtherContext].get());
} else {
active_ = it->second;
active_context_ = context;
SwitchToContext(it->first, it->second.get());
}
return active_context_;
}
......
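
The inline definitions live in the collapsed marking-worklist-inl.h diff; presumably SwitchToContext layers a fast path over the SwitchToContextSlow shown above, along these lines (an assumption, since that file is not shown here):

```cpp
// Assumed shape of the inline fast path; the actual definition in
// src/heap/marking-worklist-inl.h may differ.
Address MarkingWorklists::Local::SwitchToContext(Address context) {
  if (context == active_context_) return context;  // already active: no lookup
  return SwitchToContextSlow(context);  // hash-map lookup, then swap active_
}
```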
[Collapsed diff not shown.]
......@@ -4,13 +4,14 @@
#include <stdlib.h>
#include "src/init/v8.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
......@@ -19,10 +20,11 @@ namespace internal {
namespace heap {
void PublishSegment(MarkingWorklist* worklist, HeapObject object) {
for (size_t i = 0; i <= MarkingWorklist::kSegmentCapacity; i++) {
worklist->Push(0, object);
MarkingWorklist::Local local(worklist);
for (size_t i = 0; i <= MarkingWorklist::kSegmentSize; i++) {
local.Push(object);
}
CHECK(worklist->Pop(0, &object));
CHECK(local.Pop(&object));
}
TEST(ConcurrentMarking) {
......@@ -36,11 +38,11 @@ TEST(ConcurrentMarking) {
collector->EnsureSweepingCompleted();
}
MarkingWorklistsHolder marking_worklists_holder;
MarkingWorklists marking_worklists;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
PublishSegment(marking_worklists_holder.shared(),
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
......@@ -59,16 +61,16 @@ TEST(ConcurrentMarkingReschedule) {
collector->EnsureSweepingCompleted();
}
MarkingWorklistsHolder marking_worklists_holder;
MarkingWorklists marking_worklists;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
PublishSegment(marking_worklists_holder.shared(),
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
PublishSegment(marking_worklists_holder.shared(),
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
concurrent_marking->Stop(
......@@ -87,17 +89,17 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
collector->EnsureSweepingCompleted();
}
MarkingWorklistsHolder marking_worklists_holder;
MarkingWorklists marking_worklists;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(marking_worklists_holder.shared(),
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
for (int i = 0; i < 5000; i++)
PublishSegment(marking_worklists_holder.shared(),
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
concurrent_marking->Stop(
......
......@@ -2365,7 +2365,7 @@ TEST(IdleNotificationFinishMarking) {
StepOrigin::kV8);
} while (!CcTest::heap()
->mark_compact_collector()
->marking_worklists()
->local_marking_worklists()
->IsEmpty());
marking->SetWeakClosureWasOverApproximatedForTesting(true);
......
......@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/marking-worklist.h"
#include <cmath>
#include <limits>
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking-worklist-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
......@@ -17,8 +19,8 @@ namespace internal {
using MarkingWorklistTest = TestWithContext;
TEST_F(MarkingWorklistTest, PushPop) {
MarkingWorklistsHolder holder;
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.Push(pushed_object);
......@@ -28,8 +30,8 @@ TEST_F(MarkingWorklistTest, PushPop) {
}
TEST_F(MarkingWorklistTest, PushPopOnHold) {
MarkingWorklistsHolder holder;
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.PushOnHold(pushed_object);
......@@ -39,8 +41,8 @@ TEST_F(MarkingWorklistTest, PushPopOnHold) {
}
TEST_F(MarkingWorklistTest, PushPopEmbedder) {
MarkingWorklistsHolder holder;
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.PushEmbedder(pushed_object);
......@@ -50,13 +52,13 @@ TEST_F(MarkingWorklistTest, PushPopEmbedder) {
}
TEST_F(MarkingWorklistTest, MergeOnHold) {
MarkingWorklistsHolder holder;
MarkingWorklists main_worklists(kMainThreadTask, &holder);
MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
MarkingWorklists holder;
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worker_worklists.PushOnHold(pushed_object);
worker_worklists.FlushToGlobal();
worker_worklists.Publish();
main_worklists.MergeOnHold();
HeapObject popped_object;
EXPECT_TRUE(main_worklists.Pop(&popped_object));
......@@ -64,13 +66,13 @@ TEST_F(MarkingWorklistTest, MergeOnHold) {
}
TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
MarkingWorklistsHolder holder;
MarkingWorklists main_worklists(kMainThreadTask, &holder);
MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
MarkingWorklists holder;
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
main_worklists.Push(pushed_object);
main_worklists.ShareWorkIfGlobalPoolIsEmpty();
main_worklists.ShareWork();
HeapObject popped_object;
EXPECT_TRUE(worker_worklists.Pop(&popped_object));
EXPECT_EQ(popped_object, pushed_object);
......@@ -78,9 +80,9 @@ TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
const Address context = 0xabcdef;
MarkingWorklistsHolder holder;
MarkingWorklists holder;
holder.CreateContextWorklists({context});
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.SwitchToContext(context);
......@@ -94,9 +96,9 @@ TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
const Address context = 0xabcdef;
MarkingWorklistsHolder holder;
MarkingWorklists holder;
holder.CreateContextWorklists({context});
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.SwitchToContext(context);
......@@ -114,15 +116,15 @@ TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
TEST_F(MarkingWorklistTest, ContextWorklistCrossTask) {
const Address context1 = 0x1abcdef;
const Address context2 = 0x2abcdef;
MarkingWorklistsHolder holder;
MarkingWorklists holder;
holder.CreateContextWorklists({context1, context2});
MarkingWorklists main_worklists(kMainThreadTask, &holder);
MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
main_worklists.SwitchToContext(context1);
main_worklists.Push(pushed_object);
main_worklists.ShareWorkIfGlobalPoolIsEmpty();
main_worklists.ShareWork();
worker_worklists.SwitchToContext(context2);
HeapObject popped_object;
EXPECT_TRUE(worker_worklists.Pop(&popped_object));
......