Commit 6b5bc5e9 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Refactor marking worklists

This unifies marking worklist handling by the main-thread marker and
the concurrent markers. A new class, MarkingWorklistsHolder, owns all
marking worklists: the default worklist, the on-hold worklist, and the
embedder worklist. Each thread creates a local view of the worklists by
instantiating MarkingWorklists.

Additionally, marking visitors now work on MarkingWorklists instead of
accessing each worklist individually.

Besides cleaning the code up, this CL provides a bottleneck for
implementing per-context worklists.
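
In outline, the new ownership model works as sketched below. The sketch
uses only the API introduced by this CL (MarkingWorklistsHolder,
MarkingWorklists); the surrounding driver code is illustrative, not
actual V8 code:

// One holder, owned by MarkCompactCollector, backs all marking threads.
MarkingWorklistsHolder holder;

// Each thread constructs a local view over the same global pools.
MarkingWorklists main_view(kMainThreadTask, &holder);
MarkingWorklists task_view(1 /* concurrent task id */, &holder);

// A concurrent task pushes into its local segment of the shared
// worklist and publishes it when preempted or done.
// task_view.Push(some_object);
task_view.FlushToGlobal();

// The main thread drains the shared worklist and falls back to the
// on-hold worklist (PopOnHold is main-thread only).
HeapObject obj;
while (main_view.Pop(&obj) || main_view.PopOnHold(&obj)) {
  // ... visit obj ...
}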

Bug: chromium:973627
Change-Id: I52ad65c94bc0695287ba7bf4d8a814a9035e2888
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1941947
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65421}
parent 0958dac2
......@@ -2273,6 +2273,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/mark-compact.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking-worklist.cc",
"src/heap/marking-worklist.h",
"src/heap/marking.cc",
"src/heap/marking.h",
"src/heap/memory-measurement.cc",
......
......@@ -79,17 +79,15 @@ class ConcurrentMarkingVisitor final
: public MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState> {
public:
ConcurrentMarkingVisitor(int task_id, MarkingWorklist* marking_worklist,
EmbedderTracingWorklist* embedder_worklist,
ConcurrentMarkingVisitor(int task_id, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, marking_worklist, embedder_worklist,
weak_objects, heap, mark_compact_epoch,
bytecode_flush_mode, embedder_tracing_enabled,
is_forced_gc),
: MarkingVisitorBase(task_id, marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode,
embedder_tracing_enabled, is_forced_gc),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
......@@ -147,7 +145,7 @@ class ConcurrentMarkingVisitor final
bool ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
marking_worklist_->Push(task_id_, value);
marking_worklists_->Push(value);
return true;
}
......@@ -371,15 +369,11 @@ class ConcurrentMarking::Task : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(Task);
};
ConcurrentMarking::ConcurrentMarking(Heap* heap,
MarkingWorklist* marking_worklist,
MarkingWorklist* on_hold,
EmbedderTracingWorklist* embedder_worklist,
WeakObjects* weak_objects)
ConcurrentMarking::ConcurrentMarking(
Heap* heap, MarkingWorklistsHolder* marking_worklists_holder,
WeakObjects* weak_objects)
: heap_(heap),
marking_worklist_(marking_worklist),
on_hold_(on_hold),
embedder_worklist_(embedder_worklist),
marking_worklists_holder_(marking_worklists_holder),
weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
......@@ -392,8 +386,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
MarkingWorklists marking_worklists(task_id, marking_worklists_holder_);
ConcurrentMarkingVisitor visitor(
task_id, marking_worklist_, embedder_worklist_, weak_objects_, heap_,
task_id, &marking_worklists, weak_objects_, heap_,
task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
&task_state->memory_chunk_data);
......@@ -425,7 +420,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterrupCheck) {
HeapObject object;
if (!marking_worklist_->Pop(task_id, &object)) {
if (!marking_worklists.Pop(&object)) {
done = true;
break;
}
......@@ -437,7 +432,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address addr = object.address();
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
on_hold_->Push(task_id, object);
marking_worklists.PushOnHold(object);
} else {
Map map = object.synchronized_map();
current_marked_bytes += visitor.Visit(map, object);
......@@ -463,10 +458,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
marking_worklist_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
embedder_worklist_->FlushToGlobal(task_id);
marking_worklists.FlushToGlobal();
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
weak_objects_->current_ephemerons.FlushToGlobal(task_id);
......@@ -554,7 +546,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
return;
}
}
if (!marking_worklist_->IsGlobalPoolEmpty() ||
if (!marking_worklists_holder_->shared()->IsGlobalPoolEmpty() ||
!weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
!weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
ScheduleTasks();
......
......@@ -12,6 +12,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
......@@ -67,9 +68,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
ConcurrentMarking(Heap* heap, MarkingWorklist* marking_worklist,
MarkingWorklist* on_hold,
EmbedderTracingWorklist* embedder_worklist,
ConcurrentMarking(Heap* heap,
MarkingWorklistsHolder* marking_worklists_holder,
WeakObjects* weak_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
......@@ -112,9 +112,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
class Task;
void Run(int task_id, TaskState* task_state);
Heap* const heap_;
MarkingWorklist* const marking_worklist_;
MarkingWorklist* const on_hold_;
EmbedderTracingWorklist* const embedder_worklist_;
MarkingWorklistsHolder* const marking_worklists_holder_;
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
......
......@@ -3327,11 +3327,11 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_worklist()->IsEmpty() &&
mark_compact_collector()->marking_worklists()->IsEmpty() &&
local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarkingIncrementally(gc_reason);
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_worklist()->IsEmpty() &&
(mark_compact_collector()->marking_worklists()->IsEmpty() &&
local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
......@@ -5030,18 +5030,15 @@ void Heap::SetUp() {
scavenger_collector_.reset(new ScavengerCollector(this));
incremental_marking_.reset(
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
new IncrementalMarking(this, mark_compact_collector_->marking_worklists(),
mark_compact_collector_->weak_objects()));
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
concurrent_marking_.reset(new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->on_hold(),
marking_worklist->embedder(), mark_compact_collector_->weak_objects()));
this, mark_compact_collector_->marking_worklists_holder(),
mark_compact_collector_->weak_objects()));
} else {
concurrent_marking_.reset(
new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
......
......@@ -66,7 +66,7 @@ void IncrementalMarking::RecordWrite(HeapObject obj, TSlot slot,
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
marking_worklists()->Push(obj);
return true;
}
return false;
......
......@@ -44,12 +44,12 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
incremental_marking_->EnsureBlackAllocated(addr, size);
}
IncrementalMarking::IncrementalMarking(
Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
WeakObjects* weak_objects)
IncrementalMarking::IncrementalMarking(Heap* heap,
MarkingWorklists* marking_worklists,
WeakObjects* weak_objects)
: heap_(heap),
collector_(heap->mark_compact_collector()),
marking_worklist_(marking_worklist),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects),
initial_old_generation_size_(0),
bytes_marked_(0),
......@@ -64,7 +64,7 @@ IncrementalMarking::IncrementalMarking(
request_type_(NONE),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
DCHECK_NOT_NULL(marking_worklist_);
DCHECK_NOT_NULL(marking_worklists_);
SetState(STOPPED);
}
......@@ -520,64 +520,68 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
void* minor_marking_state = nullptr;
#endif // ENABLE_MINOR_MC
marking_worklist()->Update([
collector_->marking_worklists_holder()->Update(
[
#ifdef DEBUG
// this is referenced inside a DCHECK.
this,
// this is referenced inside a DCHECK.
this,
#endif
filler_map, minor_marking_state](
HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
MapWord map_word = obj.map_word();
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist anymore,
// e.g. left trimmed objects or objects from the root set (frames).
// If these objects are dead at scavenging time, their marking deque
// entries will not point to forwarding addresses. Hence, we can discard
// them.
return false;
}
HeapObject dest = map_word.ToForwardingAddress();
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
*out = dest;
return true;
} else if (Heap::InToPage(obj)) {
// The object may be on a large page or on a page that was moved in new
// space.
DCHECK(Heap::IsLargeObject(obj) ||
Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
filler_map,
minor_marking_state](HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
MapWord map_word = obj.map_word();
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
// anymore, e.g. left trimmed objects or objects from the root set
// (frames). If these objects are dead at scavenging time, their
// marking deque entries will not point to forwarding addresses.
// Hence, we can discard them.
return false;
}
HeapObject dest = map_word.ToForwardingAddress();
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
obj.IsFreeSpaceOrFiller());
*out = dest;
return true;
} else if (Heap::InToPage(obj)) {
// The object may be on a large page or on a page that was moved in
// new space.
DCHECK(Heap::IsLargeObject(obj) ||
Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsWhite(obj)) {
return false;
}
if (minor_marking_state->IsWhite(obj)) {
return false;
}
#endif // ENABLE_MINOR_MC
// Either a large object or an object marked by the minor mark-compactor.
*out = obj;
return true;
} else {
// The object may be on a page that was moved from new to old space. Only
// applicable during minor MC garbage collections.
if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
// Either a large object or an object marked by the minor
// mark-compactor.
*out = obj;
return true;
} else {
// The object may be on a page that was moved from new to old space.
// Only applicable during minor MC garbage collections.
if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsWhite(obj)) {
if (minor_marking_state->IsWhite(obj)) {
return false;
}
#endif // ENABLE_MINOR_MC
*out = obj;
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
obj.IsFreeSpaceOrFiller());
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
if (obj.map() != filler_map) {
*out = obj;
return true;
}
return false;
}
#endif // ENABLE_MINOR_MC
*out = obj;
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
if (obj.map() != filler_map) {
*out = obj;
return true;
}
return false;
}
});
});
UpdateWeakReferencesAfterScavenge();
}
......@@ -690,7 +694,7 @@ StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
HeapObject object;
size_t cnt = 0;
empty_worklist = true;
while (marking_worklist()->embedder()->Pop(0, &object)) {
while (marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
if (++cnt == kObjectsToProcessBeforeInterrupt) {
cnt = 0;
......@@ -713,7 +717,7 @@ void IncrementalMarking::Hurry() {
// forced e.g. in tests. It should not happen when COMPLETE was set when
// incremental marking finished and a regular GC was triggered after that
// because should_hurry_ will force a full GC.
if (!marking_worklist()->IsEmpty()) {
if (!marking_worklists()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
......@@ -1032,15 +1036,14 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
marking_worklist()->shared()->MergeGlobalPool(
marking_worklist()->on_hold());
marking_worklists()->MergeOnHold();
}
// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
collector_->marking_worklists_holder()->Print();
}
#endif
if (FLAG_trace_incremental_marking) {
......@@ -1063,7 +1066,7 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
bytes_marked_ += bytes_processed;
if (marking_worklist()->IsEmpty()) {
if (marking_worklists()->IsEmpty()) {
result = StepResult::kNoImmediateWork;
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
......@@ -1082,7 +1085,7 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
}
}
if (FLAG_concurrent_marking) {
marking_worklist()->ShareWorkIfGlobalPoolIsEmpty();
marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
......
......@@ -86,8 +86,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
#endif
IncrementalMarking(Heap* heap,
MarkCompactCollector::MarkingWorklist* marking_worklist,
IncrementalMarking(Heap* heap, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects);
MarkingState* marking_state() { return &marking_state_; }
......@@ -228,9 +227,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
}
}
MarkCompactCollector::MarkingWorklist* marking_worklist() const {
return marking_worklist_;
}
MarkingWorklists* marking_worklists() const { return marking_worklists_; }
void Deactivate();
......@@ -305,7 +302,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
Heap* const heap_;
MarkCompactCollector* const collector_;
MarkCompactCollector::MarkingWorklist* const marking_worklist_;
MarkingWorklists* const marking_worklists_;
WeakObjects* weak_objects_;
double start_time_ms_;
......
......@@ -23,7 +23,7 @@ namespace internal {
void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
}
......@@ -32,7 +32,7 @@ void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
}
......@@ -52,7 +52,7 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
}
......
......@@ -429,7 +429,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
marking_worklist_(heap),
marking_worklists_(kMainThreadTask, marking_worklists_holder()),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
old_to_new_slots_ = -1;
}
......@@ -447,7 +447,7 @@ void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
if (heap()->incremental_marking()->IsMarking()) {
marking_worklist()->Clear();
marking_worklists_holder()->Clear();
}
}
......@@ -500,8 +500,7 @@ bool MarkCompactCollector::StartCompaction() {
void MarkCompactCollector::StartMarking() {
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), marking_worklist()->shared(),
marking_worklist()->embedder(), weak_objects(), heap_, epoch(),
marking_state(), marking_worklists(), weak_objects(), heap_, epoch(),
Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
......@@ -857,7 +856,7 @@ void MarkCompactCollector::FinishConcurrentMarking(
}
void MarkCompactCollector::VerifyMarking() {
CHECK(marking_worklist()->IsEmpty());
CHECK(marking_worklists()->IsEmpty());
DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
......@@ -1614,14 +1613,14 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
work_to_do = work_to_do || !marking_worklists()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
!marking_worklist()->IsEmbedderEmpty() ||
!marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
CHECK(marking_worklist()->IsEmpty());
CHECK(marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
......@@ -1710,7 +1709,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
marking_worklist()->Push(ephemeron.value);
marking_worklists()->Push(ephemeron.value);
}
});
......@@ -1731,8 +1730,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// for work_to_do are not sufficient for determining if another iteration
// is necessary.
work_to_do = !marking_worklist()->IsEmpty() ||
!marking_worklist()->IsEmbedderEmpty() ||
work_to_do = !marking_worklists()->IsEmpty() ||
!marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
......@@ -1740,7 +1739,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(marking_worklist()->IsEmpty());
CHECK(marking_worklists()->IsEmpty());
}
void MarkCompactCollector::PerformWrapperTracing() {
......@@ -1750,7 +1749,7 @@ void MarkCompactCollector::PerformWrapperTracing() {
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
while (marking_worklist()->embedder()->Pop(kMainThreadTask, &object)) {
while (marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
......@@ -1765,7 +1764,8 @@ template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
while (!(object = marking_worklist()->Pop()).is_null()) {
while (marking_worklists()->Pop(&object) ||
marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
if (object.IsFreeSpaceOrFiller()) {
......@@ -1807,7 +1807,7 @@ template size_t MarkCompactCollector::ProcessMarkingWorklist<
bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklist()->Push(value);
marking_worklists()->Push(value);
return true;
}
......@@ -1819,7 +1819,7 @@ bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
}
void MarkCompactCollector::ProcessEphemeronMarking() {
DCHECK(marking_worklist()->IsEmpty());
DCHECK(marking_worklists()->IsEmpty());
// Incremental marking might leave ephemerons in main task's local
// buffer, flush it into global pool.
......@@ -1827,7 +1827,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
ProcessEphemeronsUntilFixpoint();
CHECK(marking_worklist()->IsEmpty());
CHECK(marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
......@@ -1919,7 +1919,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
DCHECK(marking_worklist()->IsEmpty());
DCHECK(marking_worklists()->IsEmpty());
// Mark objects reachable through the embedder heap. This phase is
// opportunistic as it may not discover graphs that are only reachable
......@@ -1934,9 +1934,9 @@ void MarkCompactCollector::MarkLiveObjects() {
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
!marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
!marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
}
// The objects reachable from the roots are marked, yet unreachable objects
......@@ -1946,7 +1946,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
ProcessEphemeronMarking();
DCHECK(marking_worklist()->IsEmpty());
DCHECK(marking_worklists()->IsEmpty());
}
// The objects reachable from the roots, weak maps, and embedder heap
......@@ -1978,8 +1978,8 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
}
{
......@@ -4010,31 +4010,6 @@ void MarkCompactCollector::StartSweepSpaces() {
}
}
void MarkCompactCollector::MarkingWorklist::PrintWorklist(
const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
std::map<InstanceType, int> count;
int total_count = 0;
worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
++total_count;
count[obj.map().instance_type()]++;
});
std::vector<std::pair<int, InstanceType>> rank;
rank.reserve(count.size());
for (const auto& i : count) {
rank.emplace_back(i.second, i.first);
}
std::map<InstanceType, std::string> instance_type_name;
#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
#undef INSTANCE_TYPE_NAME
std::sort(rank.begin(), rank.end(),
std::greater<std::pair<int, InstanceType>>());
PrintF("Worklist %s: %d\n", worklist_name, total_count);
for (auto i : rank) {
PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
}
}
#ifdef ENABLE_MINOR_MC
namespace {
......
......@@ -10,6 +10,7 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
......@@ -355,9 +356,6 @@ class MajorNonAtomicMarkingState final
}
};
// The index of the main thread task used by concurrent/parallel GC.
const int kMainThreadTask = 0;
// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>
......@@ -382,16 +380,15 @@ class MainMarkingVisitor final
};
MainMarkingVisitor(MarkingState* marking_state,
MarkingWorklist* marking_worklist,
EmbedderTracingWorklist* embedder_worklist,
MarkingWorklists* marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, marking_worklist, embedder_worklist, weak_objects,
heap, mark_compact_epoch, bytecode_flush_mode,
embedder_tracing_enabled, is_forced_gc),
kMainThreadTask, marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
is_forced_gc),
marking_state_(marking_state),
revisiting_object_(false) {}
......@@ -451,101 +448,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
using MarkingVisitor = MainMarkingVisitor<MarkingState>;
// Wrapper for the shared worklist.
class MarkingWorklist {
public:
using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
using EmbedderTracingWorklist = Worklist<HeapObject, 16>;
// The heap parameter is not used but needed to match the sequential case.
explicit MarkingWorklist(Heap* heap) {}
void Push(HeapObject object) {
bool success = shared_.Push(kMainThreadTask, object);
USE(success);
DCHECK(success);
}
HeapObject Pop() {
HeapObject result;
if (shared_.Pop(kMainThreadTask, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
// The expectation is that this work list is empty almost all the time
// and we can thus avoid the emptiness checks by putting it last.
if (on_hold_.Pop(kMainThreadTask, &result)) return result;
#endif
return HeapObject();
}
void Clear() {
shared_.Clear();
on_hold_.Clear();
embedder_.Clear();
}
bool IsEmpty() {
return shared_.IsLocalEmpty(kMainThreadTask) &&
on_hold_.IsLocalEmpty(kMainThreadTask) &&
shared_.IsGlobalPoolEmpty() && on_hold_.IsGlobalPoolEmpty();
}
bool IsEmbedderEmpty() {
return embedder_.IsLocalEmpty(kMainThreadTask) &&
embedder_.IsGlobalPoolEmpty();
}
int Size() {
return static_cast<int>(shared_.LocalSize(kMainThreadTask) +
on_hold_.LocalSize(kMainThreadTask));
}
// Calls the specified callback on each element of the deques, writing
// the replacement into the out parameter. If the callback returns
// false, the element is removed from the deque.
// The callback must accept a HeapObject and a HeapObject* out parameter.
template <typename Callback>
void Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
embedder_.Update(callback);
}
void ShareWorkIfGlobalPoolIsEmpty() {
if (!shared_.IsLocalEmpty(kMainThreadTask) &&
shared_.IsGlobalPoolEmpty()) {
shared_.FlushToGlobal(kMainThreadTask);
}
}
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
EmbedderTracingWorklist* embedder() { return &embedder_; }
void Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("on_hold", &on_hold_);
}
private:
// Prints the stats about the global pool of the worklist.
void PrintWorklist(const char* worklist_name,
ConcurrentMarkingWorklist* worklist);
// Worklist used for most objects.
ConcurrentMarkingWorklist shared_;
// Concurrent marking uses this worklist to bail out of marking objects
// in new space's linear allocation area. Used to avoid black allocation
// for new space. This allows the compiler to remove write barriers
// for freshly allocated objects.
ConcurrentMarkingWorklist on_hold_;
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
EmbedderTracingWorklist embedder_;
};
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
......@@ -628,7 +530,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool evacuation() const { return evacuation_; }
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
MarkingWorklistsHolder* marking_worklists_holder() {
return &marking_worklists_holder_;
}
MarkingWorklists* marking_worklists() { return &marking_worklists_; }
WeakObjects* weak_objects() { return &weak_objects_; }
......@@ -858,7 +763,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool have_code_to_deoptimize_;
MarkingWorklist marking_worklist_;
MarkingWorklistsHolder marking_worklists_holder_;
MarkingWorklists marking_worklists_;
WeakObjects weak_objects_;
EphemeronMarking ephemeron_marking_;
......
......@@ -22,7 +22,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
concrete_visitor()->SynchronizePageAccess(object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
marking_worklist_->Push(task_id_, object);
marking_worklists_->Push(object);
if (V8_UNLIKELY(concrete_visitor()->retaining_path_mode() ==
TraceRetainingPathMode::kEnabled)) {
heap_->AddRetainer(host, object);
......@@ -183,7 +183,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
if (end < size) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
marking_worklist_->Push(task_id_, object);
marking_worklists_->Push(object);
}
}
return end - start;
......@@ -220,7 +220,7 @@ int MarkingVisitorBase<ConcreteVisitor,
if (size && is_embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
// the main thread.
embedder_worklist_->Push(task_id_, object);
marking_worklists_->PushEmbedder(object);
}
return size;
}
......
......@@ -6,6 +6,7 @@
#define V8_HEAP_MARKING_VISITOR_H_
#include "src/common/globals.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
......@@ -16,12 +17,6 @@
namespace v8 {
namespace internal {
using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
struct Ephemeron {
HeapObject key;
HeapObject value;
......@@ -150,14 +145,12 @@ class MarkingStateBase {
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(int task_id, MarkingWorklist* marking_worklist,
EmbedderTracingWorklist* embedder_worklist,
MarkingVisitorBase(int task_id, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: marking_worklist_(marking_worklist),
embedder_worklist_(embedder_worklist),
: marking_worklists_(marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
......@@ -235,8 +228,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklist* const marking_worklist_;
EmbedderTracingWorklist* const embedder_worklist_;
MarkingWorklists* const marking_worklists_;
WeakObjects* const weak_objects_;
Heap* const heap_;
const int task_id_;
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/marking-worklist.h"
#include <algorithm>
#include <map>
#include "src/objects/heap-object-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/map.h"
#include "src/objects/objects-definitions.h"
namespace v8 {
namespace internal {
void MarkingWorklistsHolder::Clear() {
shared_.Clear();
on_hold_.Clear();
embedder_.Clear();
}
void MarkingWorklistsHolder::Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("on_hold", &on_hold_);
}
void MarkingWorklistsHolder::PrintWorklist(const char* worklist_name,
MarkingWorklist* worklist) {
#ifdef DEBUG
std::map<InstanceType, int> count;
int total_count = 0;
worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
++total_count;
count[obj.map().instance_type()]++;
});
std::vector<std::pair<int, InstanceType>> rank;
rank.reserve(count.size());
for (const auto& i : count) {
rank.emplace_back(i.second, i.first);
}
std::map<InstanceType, std::string> instance_type_name;
#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
#undef INSTANCE_TYPE_NAME
std::sort(rank.begin(), rank.end(),
std::greater<std::pair<int, InstanceType>>());
PrintF("Worklist %s: %d\n", worklist_name, total_count);
for (auto i : rank) {
PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
}
#endif
}
MarkingWorklists::MarkingWorklists(int task_id, MarkingWorklistsHolder* holder)
: shared_(holder->shared()),
on_hold_(holder->on_hold()),
embedder_(holder->embedder()),
task_id_(task_id) {}
void MarkingWorklists::FlushToGlobal() {
shared_->FlushToGlobal(task_id_);
on_hold_->FlushToGlobal(task_id_);
embedder_->FlushToGlobal(task_id_);
}
bool MarkingWorklists::IsEmpty() {
// This function checks the on_hold_ worklist, so it works only for the main
// thread.
DCHECK_EQ(kMainThreadTask, task_id_);
return shared_->IsLocalEmpty(task_id_) && on_hold_->IsLocalEmpty(task_id_) &&
shared_->IsGlobalPoolEmpty() && on_hold_->IsGlobalPoolEmpty();
}
bool MarkingWorklists::IsEmbedderEmpty() {
return embedder_->IsLocalEmpty(task_id_) && embedder_->IsGlobalPoolEmpty();
}
void MarkingWorklists::ShareWorkIfGlobalPoolIsEmpty() {
if (!shared_->IsLocalEmpty(task_id_) && shared_->IsGlobalPoolEmpty()) {
shared_->FlushToGlobal(task_id_);
}
}
void MarkingWorklists::MergeOnHold() {
DCHECK_EQ(kMainThreadTask, task_id_);
shared_->MergeGlobalPool(on_hold_);
}
} // namespace internal
} // namespace v8
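
For reference, the output produced by PrintWorklist has the following
shape, per the PrintF format strings above; the instance types and
counts below are made up for illustration:

Worklist shared: 1042
 [FIXED_ARRAY_TYPE]: 512
 [JS_OBJECT_TYPE]: 330
 [BYTE_ARRAY_TYPE]: 200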
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MARKING_WORKLIST_H_
#define V8_HEAP_MARKING_WORKLIST_H_
#include "src/heap/marking.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
using MarkingWorklist = Worklist<HeapObject, 64>;
using EmbedderTracingWorklist = Worklist<HeapObject, 16>;
// The index of the main thread task used by concurrent/parallel GC.
const int kMainThreadTask = 0;
// A helper class that owns all marking worklists.
class MarkingWorklistsHolder {
public:
// Calls the specified callback on each element of the worklists,
// writing the replacement into the out parameter. If the callback
// returns false, the element is removed from the worklist.
// The callback must accept a HeapObject and a HeapObject* out parameter.
template <typename Callback>
void Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
embedder_.Update(callback);
}
MarkingWorklist* shared() { return &shared_; }
MarkingWorklist* on_hold() { return &on_hold_; }
EmbedderTracingWorklist* embedder() { return &embedder_; }
void Clear();
void Print();
private:
// Prints the stats about the global pool of the worklist.
void PrintWorklist(const char* worklist_name, MarkingWorklist* worklist);
// Worklist used for most objects.
MarkingWorklist shared_;
// Concurrent marking uses this worklist to bail out of marking objects
// in new space's linear allocation area. Used to avoid black allocation
// for new space. This allows the compiler to remove write barriers
// for freshly allocated objects.
MarkingWorklist on_hold_;
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
EmbedderTracingWorklist embedder_;
};
// A thread-local view of the marking worklists.
class V8_EXPORT_PRIVATE MarkingWorklists {
public:
MarkingWorklists(int task_id, MarkingWorklistsHolder* holder);
void Push(HeapObject object) {
bool success = shared_->Push(task_id_, object);
USE(success);
DCHECK(success);
}
bool Pop(HeapObject* object) { return shared_->Pop(task_id_, object); }
void PushOnHold(HeapObject object) {
DCHECK_NE(kMainThreadTask, task_id_);
bool success = on_hold_->Push(task_id_, object);
USE(success);
DCHECK(success);
}
bool PopOnHold(HeapObject* object) {
DCHECK_EQ(kMainThreadTask, task_id_);
return on_hold_->Pop(task_id_, object);
}
void PushEmbedder(HeapObject object) {
bool success = embedder_->Push(task_id_, object);
USE(success);
DCHECK(success);
}
bool PopEmbedder(HeapObject* object) {
return embedder_->Pop(task_id_, object);
}
void FlushToGlobal();
bool IsEmpty();
bool IsEmbedderEmpty();
void MergeOnHold();
void ShareWorkIfGlobalPoolIsEmpty();
private:
MarkingWorklist* shared_;
MarkingWorklist* on_hold_;
EmbedderTracingWorklist* embedder_;
int task_id_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARKING_WORKLIST_H_
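
The Update hook on MarkingWorklistsHolder is what IncrementalMarking
uses to fix up worklist entries after a scavenge. Condensed to its
contract, a callback looks roughly like the sketch below, distilled
from the lambda in incremental-marking.cc above; the minor-MC and
filler-object cases are omitted:

holder->Update([](HeapObject obj, HeapObject* out) -> bool {
  if (Heap::InFromPage(obj)) {
    MapWord map_word = obj.map_word();
    // Dead objects have no forwarding address; drop their entries.
    if (!map_word.IsForwardingAddress()) return false;
    // The object moved; rewrite the entry to point at the new copy.
    *out = map_word.ToForwardingAddress();
    return true;
  }
  // The object did not move; keep the entry as-is.
  *out = obj;
  return true;
});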
......@@ -36,12 +36,12 @@ TEST(ConcurrentMarking) {
collector->EnsureSweepingCompleted();
}
MarkingWorklist shared, on_hold;
EmbedderTracingWorklist embedder_objects;
MarkingWorklistsHolder marking_worklists_holder;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &on_hold, &embedder_objects, &weak_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
PublishSegment(marking_worklists_holder.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
......@@ -59,16 +59,17 @@ TEST(ConcurrentMarkingReschedule) {
collector->EnsureSweepingCompleted();
}
MarkingWorklist shared, on_hold;
EmbedderTracingWorklist embedder_objects;
MarkingWorklistsHolder marking_worklists_holder;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &on_hold, &embedder_objects, &weak_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
PublishSegment(marking_worklists_holder.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
PublishSegment(marking_worklists_holder.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
concurrent_marking->Stop(
ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
......@@ -86,17 +87,18 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
collector->EnsureSweepingCompleted();
}
MarkingWorklist shared, on_hold;
EmbedderTracingWorklist embedder_objects;
MarkingWorklistsHolder marking_worklists_holder;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &on_hold, &embedder_objects, &weak_objects);
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
PublishSegment(marking_worklists_holder.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
for (int i = 0; i < 5000; i++)
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
PublishSegment(marking_worklists_holder.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
concurrent_marking->Stop(
ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
......
......@@ -2271,8 +2271,10 @@ TEST(IdleNotificationFinishMarking) {
do {
marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
} while (
!CcTest::heap()->mark_compact_collector()->marking_worklist()->IsEmpty());
} while (!CcTest::heap()
->mark_compact_collector()
->marking_worklists()
->IsEmpty());
marking->SetWeakClosureWasOverApproximatedForTesting(true);
......