Commit 28133adc authored by Ulan Degenbaev, committed by Commit Bot

[heap] Split marking worklist into global worklist and local worklists

This is the first step in refactoring Worklist to allow an arbitrary
number of local worklists with private segments:
- Introduce MarkingWorklistImpl<> which will eventually replace
  (and will be renamed to) Worklist.
- MarkingWorklistImpl<> owns the global pool of segments but does not
  keep track of private segments.
- MarkingWorklistImpl<>::Local owns private segments and can be
  constructed dynamically on background threads.
- Rename the existing MarkingWorklistsHolder to MarkingWorklists.
- Rename the existing MarkingWorklists to MarkingWorklists::Local.
- Rename the existing marking_worklists_holder to marking_worklists.
- Rename the existing marking_worklists to local_marking_worklists.

Design doc: https://bit.ly/2XMtjLi
Bug: v8:10315

Change-Id: I9da34883ad34f4572fccd40c51e51eaf50c617bc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2343330
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69330}
parent 45928320
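
For orientation, a condensed sketch of the structure described above (simplified from the full definitions in src/heap/marking-worklist.h further down; method bodies elided):

// Global side: a lock-protected pool of segments. Private, per-thread
// segments are no longer tracked here.
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl {
 public:
  class Segment;  // fixed-size buffer of up to SegmentSize entries
  class Local;    // thread-local view owning private push/pop segments
  void Push(Segment* segment);  // publish a segment to the global pool
  bool Pop(Segment** segment);  // take a segment from the global pool
  bool IsEmpty();
};

// After the renames: MarkingWorklists (formerly MarkingWorklistsHolder)
// owns the global worklists (shared, on_hold, embedder, per-context);
// MarkingWorklists::Local (formerly MarkingWorklists) bundles the
// thread-local views and can be constructed dynamically on any thread,
// including background marker threads.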
......@@ -2587,6 +2587,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/marking-barrier.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
"src/heap/marking-worklist-inl.h",
"src/heap/marking-worklist.cc",
"src/heap/marking-worklist.h",
"src/heap/marking.cc",
......
......@@ -79,13 +79,14 @@ class ConcurrentMarkingVisitor final
: public MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState> {
public:
ConcurrentMarkingVisitor(int task_id, MarkingWorklists* marking_worklists,
ConcurrentMarkingVisitor(int task_id,
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, marking_worklists, weak_objects, heap,
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode,
embedder_tracing_enabled, is_forced_gc),
marking_state_(memory_chunk_data),
......@@ -145,7 +146,7 @@ class ConcurrentMarkingVisitor final
bool ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
marking_worklists_->Push(value);
local_marking_worklists_->Push(value);
return true;
}
......@@ -369,11 +370,11 @@ class ConcurrentMarking::Task : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(Task);
};
ConcurrentMarking::ConcurrentMarking(
Heap* heap, MarkingWorklistsHolder* marking_worklists_holder,
WeakObjects* weak_objects)
ConcurrentMarking::ConcurrentMarking(Heap* heap,
MarkingWorklists* marking_worklists,
WeakObjects* weak_objects)
: heap_(heap),
marking_worklists_holder_(marking_worklists_holder),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
......@@ -386,9 +387,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterruptCheck = 1000;
MarkingWorklists marking_worklists(task_id, marking_worklists_holder_);
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &marking_worklists, weak_objects_, heap_,
task_id, &local_marking_worklists, weak_objects_, heap_,
task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
&task_state->memory_chunk_data);
......@@ -416,7 +417,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
}
bool is_per_context_mode = marking_worklists.IsPerContextMode();
bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
bool done = false;
while (!done) {
size_t current_marked_bytes = 0;
......@@ -424,7 +425,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterruptCheck) {
HeapObject object;
if (!marking_worklists.Pop(&object)) {
if (!local_marking_worklists.Pop(&object)) {
done = true;
break;
}
......@@ -436,19 +437,19 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address addr = object.address();
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
marking_worklists.PushOnHold(object);
local_marking_worklists.PushOnHold(object);
} else {
Map map = object.synchronized_map(isolate);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer.Infer(isolate, map, object, &context)) {
marking_worklists.SwitchToContext(context);
local_marking_worklists.SwitchToContext(context);
}
}
size_t visited_size = visitor.Visit(map, object);
if (is_per_context_mode) {
native_context_stats.IncrementSize(marking_worklists.Context(), map,
object, visited_size);
native_context_stats.IncrementSize(
local_marking_worklists.Context(), map, object, visited_size);
}
current_marked_bytes += visited_size;
}
......@@ -473,7 +474,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
marking_worklists.FlushToGlobal();
local_marking_worklists.Publish();
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
weak_objects_->current_ephemerons.FlushToGlobal(task_id);
......@@ -529,8 +530,6 @@ void ConcurrentMarking::ScheduleTasks() {
total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
}
DCHECK_LE(total_task_count_, kMaxTasks);
// One task is for the main thread.
STATIC_ASSERT(kMaxTasks + 1 <= MarkingWorklist::kMaxNumTasks);
}
// Task id 0 is for the main thread.
for (int i = 1; i <= total_task_count_; i++) {
......@@ -565,7 +564,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
return;
}
}
if (!marking_worklists_holder_->shared()->IsGlobalPoolEmpty() ||
if (!marking_worklists_->shared()->IsEmpty() ||
!weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
!weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
ScheduleTasks();
......
......@@ -70,8 +70,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
ConcurrentMarking(Heap* heap,
MarkingWorklistsHolder* marking_worklists_holder,
ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
......@@ -118,7 +117,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
class Task;
void Run(int task_id, TaskState* task_state);
Heap* const heap_;
MarkingWorklistsHolder* const marking_worklists_holder_;
MarkingWorklists* const marking_worklists_;
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
......
......@@ -1259,7 +1259,7 @@ TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
return isolate_->counters()->gc_finalize_reduce_memory();
}
if (incremental_marking()->IsMarking() &&
incremental_marking()->marking_worklists()->IsPerContextMode()) {
incremental_marking()->local_marking_worklists()->IsPerContextMode()) {
return isolate_->counters()->gc_finalize_measure_memory();
}
return isolate_->counters()->gc_finalize();
......@@ -3489,12 +3489,12 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_worklists()->IsEmpty() &&
mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarkingIncrementally(gc_reason);
} else if (incremental_marking()->IsComplete() ||
(incremental_marking()->IsMarking() &&
mark_compact_collector()->marking_worklists()->IsEmpty() &&
mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
......@@ -5277,7 +5277,7 @@ void Heap::SetUp() {
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
concurrent_marking_.reset(new ConcurrentMarking(
this, mark_compact_collector_->marking_worklists_holder(),
this, mark_compact_collector_->marking_worklists(),
mark_compact_collector_->weak_objects()));
} else {
concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
......
......@@ -35,7 +35,7 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
return true;
}
return false;
......
......@@ -432,7 +432,8 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
heap()->minor_mark_compact_collector()->marking_state();
#endif // ENABLE_MINOR_MC
collector_->marking_worklists_holder()->Update(
collector_->local_marking_worklists()->Publish();
collector_->marking_worklists()->Update(
[
#ifdef DEBUG
// this is referred to inside the DCHECK.
......@@ -633,7 +634,7 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
HeapObject object;
size_t cnt = 0;
empty_worklist = true;
while (marking_worklists()->PopEmbedder(&object)) {
while (local_marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
......@@ -658,7 +659,7 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
}
void IncrementalMarking::Hurry() {
if (!marking_worklists()->IsEmpty()) {
if (!local_marking_worklists()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
......@@ -1042,14 +1043,14 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
marking_worklists()->MergeOnHold();
local_marking_worklists()->MergeOnHold();
}
// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
collector_->marking_worklists_holder()->Print();
collector_->marking_worklists()->Print();
}
#endif
if (FLAG_trace_incremental_marking) {
......@@ -1073,7 +1074,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
StepResult v8_result = marking_worklists()->IsEmpty()
StepResult v8_result = local_marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining;
StepResult embedder_result = StepResult::kNoImmediateWork;
......@@ -1098,7 +1099,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
}
}
if (FLAG_concurrent_marking) {
marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
local_marking_worklists()->ShareWork();
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
}
......
......@@ -209,8 +209,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
}
}
MarkingWorklists* marking_worklists() const {
return collector_->marking_worklists();
MarkingWorklists::Local* local_marking_worklists() const {
return collector_->local_marking_worklists();
}
void Deactivate();
......
......@@ -10,6 +10,8 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/js-collection-inl.h"
......@@ -22,7 +24,7 @@ namespace internal {
void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
}
......@@ -31,7 +33,7 @@ void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
}
......@@ -51,7 +53,7 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklists()->Push(obj);
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
}
......
......@@ -455,7 +455,8 @@ void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
if (heap()->incremental_marking()->IsMarking()) {
marking_worklists_holder()->Clear();
local_marking_worklists()->Publish();
marking_worklists()->Clear();
}
}
......@@ -520,12 +521,12 @@ void MarkCompactCollector::StartMarking() {
contexts.push_back(context->ptr());
}
}
marking_worklists_holder()->CreateContextWorklists(contexts);
marking_worklists_ = std::make_unique<MarkingWorklists>(
kMainThreadTask, marking_worklists_holder());
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), marking_worklists(), weak_objects(), heap_, epoch(),
Heap::GetBytecodeFlushMode(),
marking_state(), local_marking_worklists(), weak_objects(), heap_,
epoch(), Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
// Marking bits are cleared by the sweeper.
......@@ -912,7 +913,7 @@ void MarkCompactCollector::FinishConcurrentMarking(
}
void MarkCompactCollector::VerifyMarking() {
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
......@@ -939,8 +940,8 @@ void MarkCompactCollector::Finish() {
#endif
marking_visitor_.reset();
marking_worklists_.reset();
marking_worklists_holder_.ReleaseContextWorklists();
local_marking_worklists_.reset();
marking_worklists_.ReleaseContextWorklists();
native_context_stats_.Clear();
CHECK(weak_objects_.current_ephemerons.IsEmpty());
......@@ -1663,14 +1664,14 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
work_to_do = work_to_do || !marking_worklists()->IsEmpty() ||
work_to_do = work_to_do || !local_marking_worklists()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
!marking_worklists()->IsEmbedderEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
......@@ -1759,7 +1760,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
marking_worklists()->Push(ephemeron.value);
local_marking_worklists()->Push(ephemeron.value);
}
});
......@@ -1780,8 +1781,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// for work_to_do are not sufficient for determining if another iteration
// is necessary.
work_to_do = !marking_worklists()->IsEmpty() ||
!marking_worklists()->IsEmbedderEmpty() ||
work_to_do = !local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
......@@ -1789,7 +1790,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
}
void MarkCompactCollector::PerformWrapperTracing() {
......@@ -1799,7 +1800,7 @@ void MarkCompactCollector::PerformWrapperTracing() {
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
while (marking_worklists()->PopEmbedder(&object)) {
while (local_marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
......@@ -1814,10 +1815,10 @@ template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
bool is_per_context_mode = marking_worklists()->IsPerContextMode();
bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
while (marking_worklists()->Pop(&object) ||
marking_worklists()->PopOnHold(&object)) {
while (local_marking_worklists()->Pop(&object) ||
local_marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
if (object.IsFreeSpaceOrFiller()) {
......@@ -1844,13 +1845,13 @@ size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
if (is_per_context_mode) {
Address context;
if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
marking_worklists()->SwitchToContext(context);
local_marking_worklists()->SwitchToContext(context);
}
}
size_t visited_size = marking_visitor_->Visit(map, object);
if (is_per_context_mode) {
native_context_stats_.IncrementSize(marking_worklists()->Context(), map,
object, visited_size);
native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
map, object, visited_size);
}
bytes_processed += visited_size;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
......@@ -1871,7 +1872,7 @@ template size_t MarkCompactCollector::ProcessMarkingWorklist<
bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklists()->Push(value);
local_marking_worklists()->Push(value);
return true;
}
......@@ -1883,7 +1884,7 @@ bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
}
void MarkCompactCollector::ProcessEphemeronMarking() {
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
// Incremental marking might leave ephemerons in the main task's local
// buffer; flush them into the global pool.
......@@ -1891,7 +1892,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
ProcessEphemeronsUntilFixpoint();
CHECK(marking_worklists()->IsEmpty());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
......@@ -1983,7 +1984,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
// Mark objects reachable through the embedder heap. This phase is
// opportunistic as it may not discover graphs that are only reachable
......@@ -1998,9 +1999,9 @@ void MarkCompactCollector::MarkLiveObjects() {
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
!marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
!local_marking_worklists()->IsEmbedderEmpty());
DCHECK(local_marking_worklists()->IsEmbedderEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
// The objects reachable from the roots are marked, yet unreachable objects
......@@ -2010,7 +2011,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
ProcessEphemeronMarking();
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
// The objects reachable from the roots, weak maps, and embedder heap
......@@ -2042,8 +2043,8 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
DCHECK(local_marking_worklists()->IsEmbedderEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
{
......
......@@ -377,13 +377,13 @@ class MainMarkingVisitor final
};
MainMarkingVisitor(MarkingState* marking_state,
MarkingWorklists* marking_worklists,
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool embedder_tracing_enabled, bool is_forced_gc)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, marking_worklists, weak_objects, heap,
kMainThreadTask, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
is_forced_gc),
marking_state_(marking_state),
......@@ -527,10 +527,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool evacuation() const { return evacuation_; }
MarkingWorklistsHolder* marking_worklists_holder() {
return &marking_worklists_holder_;
MarkingWorklists* marking_worklists() { return &marking_worklists_; }
MarkingWorklists::Local* local_marking_worklists() {
return local_marking_worklists_.get();
}
MarkingWorklists* marking_worklists() { return marking_worklists_.get(); }
WeakObjects* weak_objects() { return &weak_objects_; }
......@@ -758,13 +759,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool have_code_to_deoptimize_;
MarkingWorklistsHolder marking_worklists_holder_;
MarkingWorklists marking_worklists_;
WeakObjects weak_objects_;
EphemeronMarking ephemeron_marking_;
std::unique_ptr<MarkingVisitor> marking_visitor_;
std::unique_ptr<MarkingWorklists> marking_worklists_;
std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
NativeContextInferrer native_context_inferrer_;
NativeContextStats native_context_stats_;
......
......@@ -35,7 +35,7 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state_.WhiteToGrey(obj)) {
collector_->marking_worklists()->Push(obj);
collector_->local_marking_worklists()->Push(obj);
return true;
}
return false;
......
......@@ -22,7 +22,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
concrete_visitor()->SynchronizePageAccess(object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
marking_worklists_->Push(object);
local_marking_worklists_->Push(object);
if (V8_UNLIKELY(concrete_visitor()->retaining_path_mode() ==
TraceRetainingPathMode::kEnabled)) {
heap_->AddRetainer(host, object);
......@@ -183,7 +183,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
if (end < size) {
// The object can be pushed back onto the marking worklist only after
// the progress bar has been updated.
marking_worklists_->Push(object);
local_marking_worklists_->Push(object);
}
}
return end - start;
......@@ -220,7 +220,7 @@ int MarkingVisitorBase<ConcreteVisitor,
if (size && is_embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
// the main thread.
marking_worklists_->PushEmbedder(object);
local_marking_worklists_->PushEmbedder(object);
}
return size;
}
......
......@@ -147,12 +147,13 @@ class MarkingStateBase {
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(int task_id, MarkingWorklists* marking_worklists,
MarkingVisitorBase(int task_id,
MarkingWorklists::Local* local_marking_worklists,
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: marking_worklists_(marking_worklists),
: local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
......@@ -231,7 +232,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklists* const marking_worklists_;
MarkingWorklists::Local* const local_marking_worklists_;
WeakObjects* const weak_objects_;
Heap* const heap_;
const int task_id_;
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MARKING_WORKLIST_INL_H_
#define V8_HEAP_MARKING_WORKLIST_INL_H_
#include <unordered_map>
#include <vector>
#include "src/heap/marking-worklist.h"
namespace v8 {
namespace internal {
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Push(Segment* segment) {
base::MutexGuard guard(&lock_);
segment->set_next(top_);
set_top(segment);
size_.fetch_add(1, std::memory_order_relaxed);
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Pop(Segment** segment) {
base::MutexGuard guard(&lock_);
if (top_ != nullptr) {
DCHECK_LT(0U, size_);
size_.fetch_sub(1, std::memory_order_relaxed);
*segment = top_;
set_top(top_->next());
return true;
}
return false;
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::IsEmpty() {
return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
}
template <typename EntryType, int SegmentSize>
size_t MarkingWorklistImpl<EntryType, SegmentSize>::Size() {
// It is safe to read |size_| without a lock since this variable is
// atomic, keeping in mind that threads may not immediately see the new
// value when it is updated.
return size_.load(std::memory_order_relaxed);
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Clear() {
base::MutexGuard guard(&lock_);
size_.store(0, std::memory_order_relaxed);
Segment* current = top_;
while (current != nullptr) {
Segment* tmp = current;
current = current->next();
delete tmp;
}
set_top(nullptr);
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Update(Callback callback) {
base::MutexGuard guard(&lock_);
Segment* prev = nullptr;
Segment* current = top_;
size_t num_deleted = 0;
while (current != nullptr) {
current->Update(callback);
if (current->IsEmpty()) {
DCHECK_LT(0U, size_);
++num_deleted;
if (prev == nullptr) {
top_ = current->next();
} else {
prev->set_next(current->next());
}
Segment* tmp = current;
current = current->next();
delete tmp;
} else {
prev = current;
current = current->next();
}
}
size_.fetch_sub(num_deleted, std::memory_order_relaxed);
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Iterate(Callback callback) {
base::MutexGuard guard(&lock_);
for (Segment* current = top_; current != nullptr; current = current->next()) {
current->Iterate(callback);
}
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Merge(
MarkingWorklistImpl<EntryType, SegmentSize>* other) {
Segment* top = nullptr;
size_t other_size = 0;
{
base::MutexGuard guard(&other->lock_);
if (!other->top_) return;
top = other->top_;
other_size = other->size_.load(std::memory_order_relaxed);
other->size_.store(0, std::memory_order_relaxed);
other->set_top(nullptr);
}
// It's safe to iterate through these segments because the top was
// extracted from |other|.
Segment* end = top;
while (end->next()) end = end->next();
{
base::MutexGuard guard(&lock_);
size_.fetch_add(other_size, std::memory_order_relaxed);
end->set_next(top_);
set_top(top);
}
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Push(
EntryType entry) {
if (IsFull()) return false;
entries_[index_++] = entry;
return true;
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Pop(
EntryType* entry) {
if (IsEmpty()) return false;
*entry = entries_[--index_];
return true;
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Update(
Callback callback) {
size_t new_index = 0;
for (size_t i = 0; i < index_; i++) {
if (callback(entries_[i], &entries_[new_index])) {
new_index++;
}
}
index_ = new_index;
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Iterate(
Callback callback) const {
for (size_t i = 0; i < index_; i++) {
callback(entries_[i]);
}
}
template <typename EntryType, int SegmentSize>
MarkingWorklistImpl<EntryType, SegmentSize>::Local::Local(
MarkingWorklistImpl<EntryType, SegmentSize>* worklist)
: worklist_(worklist),
push_segment_(NewSegment()),
pop_segment_(NewSegment()) {}
template <typename EntryType, int SegmentSize>
MarkingWorklistImpl<EntryType, SegmentSize>::Local::~Local() {
CHECK_IMPLIES(push_segment_, push_segment_->IsEmpty());
CHECK_IMPLIES(pop_segment_, pop_segment_->IsEmpty());
delete push_segment_;
delete pop_segment_;
}
template <typename EntryType, int SegmentSize>
MarkingWorklistImpl<EntryType, SegmentSize>::Local::Local(
MarkingWorklistImpl<EntryType, SegmentSize>::Local&& other) V8_NOEXCEPT {
worklist_ = other.worklist_;
push_segment_ = other.push_segment_;
pop_segment_ = other.pop_segment_;
other.worklist_ = nullptr;
other.push_segment_ = nullptr;
other.pop_segment_ = nullptr;
}
template <typename EntryType, int SegmentSize>
typename MarkingWorklistImpl<EntryType, SegmentSize>::Local&
MarkingWorklistImpl<EntryType, SegmentSize>::Local::operator=(
MarkingWorklistImpl<EntryType, SegmentSize>::Local&& other) V8_NOEXCEPT {
if (this != &other) {
DCHECK_NULL(worklist_);
DCHECK_NULL(push_segment_);
DCHECK_NULL(pop_segment_);
worklist_ = other.worklist_;
push_segment_ = other.push_segment_;
pop_segment_ = other.pop_segment_;
other.worklist_ = nullptr;
other.push_segment_ = nullptr;
other.pop_segment_ = nullptr;
}
return *this;
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::Push(EntryType entry) {
if (V8_UNLIKELY(!push_segment_->Push(entry))) {
PublishPushSegment();
bool success = push_segment_->Push(entry);
USE(success);
DCHECK(success);
}
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::Pop(EntryType* entry) {
if (!pop_segment_->Pop(entry)) {
if (!push_segment_->IsEmpty()) {
std::swap(push_segment_, pop_segment_);
} else if (!StealPopSegment()) {
return false;
}
bool success = pop_segment_->Pop(entry);
USE(success);
DCHECK(success);
}
return true;
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::IsLocalEmpty() const {
return push_segment_->IsEmpty() && pop_segment_->IsEmpty();
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::IsGlobalEmpty() const {
return worklist_->IsEmpty();
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::Publish() {
if (!push_segment_->IsEmpty()) {
PublishPushSegment();
}
if (!pop_segment_->IsEmpty()) {
PublishPopSegment();
}
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::Merge(
MarkingWorklistImpl<EntryType, SegmentSize>::Local* other) {
other->Publish();
worklist_->Merge(other->worklist_);
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::PublishPushSegment() {
worklist_->Push(push_segment_);
push_segment_ = NewSegment();
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::PublishPopSegment() {
worklist_->Push(pop_segment_);
pop_segment_ = NewSegment();
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::StealPopSegment() {
if (worklist_->IsEmpty()) return false;
Segment* new_segment = nullptr;
if (worklist_->Pop(&new_segment)) {
delete pop_segment_;
pop_segment_ = new_segment;
return true;
}
return false;
}
template <typename Callback>
void MarkingWorklists::Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
embedder_.Update(callback);
other_.Update(callback);
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
// These contexts were updated above.
continue;
}
cw.worklist->Update(callback);
}
}
void MarkingWorklists::Local::Push(HeapObject object) { active_.Push(object); }
bool MarkingWorklists::Local::Pop(HeapObject* object) {
if (active_.Pop(object)) return true;
if (!is_per_context_mode_) return false;
// The active worklist is empty. Find any other non-empty worklist and
// switch the active worklist to it.
return PopContext(object);
}
void MarkingWorklists::Local::PushOnHold(HeapObject object) {
on_hold_.Push(object);
}
bool MarkingWorklists::Local::PopOnHold(HeapObject* object) {
return on_hold_.Pop(object);
}
void MarkingWorklists::Local::PushEmbedder(HeapObject object) {
embedder_.Push(object);
}
bool MarkingWorklists::Local::PopEmbedder(HeapObject* object) {
return embedder_.Pop(object);
}
Address MarkingWorklists::Local::SwitchToContext(Address context) {
if (context == active_context_) return context;
return SwitchToContextSlow(context);
}
Address MarkingWorklists::Local::SwitchToShared() {
return SwitchToContext(kSharedContext);
}
void MarkingWorklists::Local::SwitchToContext(
Address context, MarkingWorklist::Local* worklist) {
// Save the current worklist.
*active_owner_ = std::move(active_);
// Switch to the new worklist.
active_owner_ = worklist;
active_ = std::move(*worklist);
active_context_ = context;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARKING_WORKLIST_INL_H_
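
For reference, a minimal usage sketch of the Local API defined above (the function and its arguments are illustrative placeholders, not part of this patch):

void SketchLocalUsage(MarkingWorklist* global, HeapObject object) {
  MarkingWorklist::Local local(global);  // allocates private push/pop segments
  local.Push(object);  // fills the private push segment; a segment that
                       // becomes full is published to the global pool
  HeapObject popped;
  while (local.Pop(&popped)) {
    // Pop() drains the private segments first, then steals whole
    // segments from the global pool (StealPopSegment).
  }
  local.Publish();  // hand any remaining non-empty segments back
}

This is the pattern ConcurrentMarking::Run follows above: each background task constructs a MarkingWorklists::Local on demand, drains it, and calls Publish() before returning.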
......@@ -7,6 +7,7 @@
#include <algorithm>
#include <map>
#include "src/heap/marking-worklist-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/instance-type-inl.h"
......@@ -17,27 +18,35 @@
namespace v8 {
namespace internal {
MarkingWorklistsHolder::~MarkingWorklistsHolder() {
MarkingWorklists::~MarkingWorklists() {
DCHECK(shared_.IsEmpty());
DCHECK(on_hold_.IsEmpty());
DCHECK(other_.IsEmpty());
DCHECK(worklists_.empty());
DCHECK(context_worklists_.empty());
}
void MarkingWorklistsHolder::Clear() {
void MarkingWorklists::Clear() {
shared_.Clear();
on_hold_.Clear();
embedder_.Clear();
other_.Clear();
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
// These contexts were cleared above.
continue;
}
cw.worklist->Clear();
}
ReleaseContextWorklists();
}
void MarkingWorklistsHolder::Print() {
void MarkingWorklists::Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("on_hold", &on_hold_);
}
void MarkingWorklistsHolder::CreateContextWorklists(
void MarkingWorklists::CreateContextWorklists(
const std::vector<Address>& contexts) {
DCHECK(worklists_.empty());
DCHECK(context_worklists_.empty());
......@@ -53,17 +62,17 @@ void MarkingWorklistsHolder::CreateContextWorklists(
}
}
void MarkingWorklistsHolder::ReleaseContextWorklists() {
void MarkingWorklists::ReleaseContextWorklists() {
context_worklists_.clear();
worklists_.clear();
}
void MarkingWorklistsHolder::PrintWorklist(const char* worklist_name,
MarkingWorklist* worklist) {
void MarkingWorklists::PrintWorklist(const char* worklist_name,
MarkingWorklist* worklist) {
#ifdef DEBUG
std::map<InstanceType, int> count;
int total_count = 0;
worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
worklist->Iterate([&count, &total_count](HeapObject obj) {
++total_count;
count[obj.map().instance_type()]++;
});
......@@ -85,111 +94,128 @@ void MarkingWorklistsHolder::PrintWorklist(const char* worklist_name,
#endif
}
MarkingWorklists::MarkingWorklists(int task_id, MarkingWorklistsHolder* holder)
: shared_(holder->shared()),
on_hold_(holder->on_hold()),
embedder_(holder->embedder()),
active_(shared_),
active_context_(kSharedContext),
task_id_(task_id),
is_per_context_mode_(false),
context_worklists_(holder->context_worklists()) {
if (!context_worklists_.empty()) {
const Address MarkingWorklists::Local::kSharedContext;
const Address MarkingWorklists::Local::kOtherContext;
MarkingWorklists::Local::Local(MarkingWorklists* global)
: on_hold_(global->on_hold()),
embedder_(global->embedder()),
is_per_context_mode_(false) {
if (global->context_worklists().empty()) {
MarkingWorklist::Local shared(global->shared());
active_ = std::move(shared);
active_context_ = kSharedContext;
active_owner_ = nullptr;
} else {
is_per_context_mode_ = true;
worklist_by_context_.reserve(context_worklists_.size());
for (auto& cw : context_worklists_) {
worklist_by_context_[cw.context] = cw.worklist;
worklist_by_context_.reserve(global->context_worklists().size());
for (auto& cw : global->context_worklists()) {
worklist_by_context_[cw.context] =
std::make_unique<MarkingWorklist::Local>(cw.worklist);
}
active_owner_ = worklist_by_context_[kSharedContext].get();
active_ = std::move(*active_owner_);
active_context_ = kSharedContext;
}
}
MarkingWorklists::Local::~Local() {
DCHECK(active_.IsLocalEmpty());
if (is_per_context_mode_) {
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
DCHECK(cw.second->IsLocalEmpty());
}
}
}
}
void MarkingWorklists::FlushToGlobal() {
shared_->FlushToGlobal(task_id_);
on_hold_->FlushToGlobal(task_id_);
embedder_->FlushToGlobal(task_id_);
void MarkingWorklists::Local::Publish() {
active_.Publish();
on_hold_.Publish();
embedder_.Publish();
if (is_per_context_mode_) {
for (auto& cw : context_worklists_) {
cw.worklist->FlushToGlobal(task_id_);
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
cw.second->Publish();
}
}
}
}
bool MarkingWorklists::IsEmpty() {
bool MarkingWorklists::Local::IsEmpty() {
// This function checks the on_hold_ worklist, so it works only for the main
// thread.
DCHECK_EQ(kMainThreadTask, task_id_);
if (!active_->IsLocalEmpty(task_id_) || !on_hold_->IsLocalEmpty(task_id_) ||
!active_->IsGlobalPoolEmpty() || !on_hold_->IsGlobalPoolEmpty()) {
if (!active_.IsLocalEmpty() || !on_hold_.IsLocalEmpty() ||
!active_.IsGlobalEmpty() || !on_hold_.IsGlobalEmpty()) {
return false;
}
if (!is_per_context_mode_) {
DCHECK_EQ(active_, shared_);
return true;
}
for (auto& cw : context_worklists_) {
if (!cw.worklist->IsLocalEmpty(task_id_) ||
!cw.worklist->IsGlobalPoolEmpty()) {
active_ = cw.worklist;
active_context_ = cw.context;
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_ &&
!(cw.second->IsLocalEmpty() && cw.second->IsGlobalEmpty())) {
SwitchToContext(cw.first, cw.second.get());
return false;
}
}
return true;
}
bool MarkingWorklists::IsEmbedderEmpty() {
return embedder_->IsLocalEmpty(task_id_) && embedder_->IsGlobalPoolEmpty();
bool MarkingWorklists::Local::IsEmbedderEmpty() const {
return embedder_.IsLocalEmpty() && embedder_.IsGlobalEmpty();
}
void MarkingWorklists::ShareWorkIfGlobalPoolIsEmpty() {
if (!shared_->IsLocalEmpty(task_id_) && shared_->IsGlobalPoolEmpty()) {
shared_->FlushToGlobal(task_id_);
void MarkingWorklists::Local::ShareWork() {
if (!active_.IsLocalEmpty() && active_.IsGlobalEmpty()) {
active_.Publish();
}
if (is_per_context_mode_ && shared_ != active_) {
if (!active_->IsLocalEmpty(task_id_) && active_->IsGlobalPoolEmpty()) {
active_->FlushToGlobal(task_id_);
if (is_per_context_mode_ && active_context_ != kSharedContext) {
MarkingWorklist::Local* shared = worklist_by_context_[kSharedContext].get();
if (!shared->IsLocalEmpty() && shared->IsGlobalEmpty()) {
shared->Publish();
}
}
}
void MarkingWorklists::MergeOnHold() {
DCHECK_EQ(kMainThreadTask, task_id_);
shared_->MergeGlobalPool(on_hold_);
void MarkingWorklists::Local::MergeOnHold() {
MarkingWorklist::Local* shared =
active_context_ == kSharedContext
? &active_
: worklist_by_context_[kSharedContext].get();
shared->Merge(&on_hold_);
}
bool MarkingWorklists::PopContext(HeapObject* object) {
bool MarkingWorklists::Local::PopContext(HeapObject* object) {
DCHECK(is_per_context_mode_);
// As an optimization we first check only the local segments to avoid locks.
for (auto& cw : context_worklists_) {
if (!cw.worklist->IsLocalEmpty(task_id_)) {
active_ = cw.worklist;
active_context_ = cw.context;
return active_->Pop(task_id_, object);
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_ && !cw.second->IsLocalEmpty()) {
SwitchToContext(cw.first, cw.second.get());
return active_.Pop(object);
}
}
// All local segments are empty. Check global segments.
for (auto& cw : context_worklists_) {
if (cw.worklist->Pop(task_id_, object)) {
active_ = cw.worklist;
active_context_ = cw.context;
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_ && cw.second->Pop(object)) {
SwitchToContext(cw.first, cw.second.get());
return true;
}
}
// All worklists are empty. Switch to the default shared worklist.
SwitchToShared();
SwitchToContext(kSharedContext);
return false;
}
Address MarkingWorklists::SwitchToContextSlow(Address context) {
Address MarkingWorklists::Local::SwitchToContextSlow(Address context) {
const auto& it = worklist_by_context_.find(context);
if (V8_UNLIKELY(it == worklist_by_context_.end())) {
// This context was created during marking or is not being measured,
// so we don't have a specific worklist for it.
active_context_ = kOtherContext;
active_ = worklist_by_context_[active_context_];
SwitchToContext(kOtherContext, worklist_by_context_[kOtherContext].get());
} else {
active_ = it->second;
active_context_ = context;
SwitchToContext(it->first, it->second.get());
}
return active_context_;
}
......
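
Putting the pieces together, a minimal sketch of the per-context marking flow implemented above (the function, contexts, and object are illustrative placeholders, not V8 API):

void SketchPerContextMarking(Address context1, Address context2,
                             HeapObject object) {
  MarkingWorklists global;
  global.CreateContextWorklists({context1, context2});
  {
    MarkingWorklists::Local local(&global);  // starts on kSharedContext
    local.SwitchToContext(context1);         // pushes now go to context1
    local.Push(object);
    HeapObject popped;
    while (local.Pop(&popped)) {
      // When the active worklist drains, PopContext() transparently
      // switches to another non-empty context; local.Context() reports
      // which one, so visited bytes can be attributed per native context.
    }
    local.Publish();
  }  // Destroy the Local before releasing the context worklists.
  global.ReleaseContextWorklists();
}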
......@@ -9,18 +9,129 @@
#include <vector>
#include "src/heap/marking.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
using MarkingWorklist = Worklist<HeapObject, 64>;
using EmbedderTracingWorklist = Worklist<HeapObject, 16>;
// The index of the main thread task used by concurrent/parallel GC.
const int kMainThreadTask = 0;
// A global marking worklist that is similar to the existing Worklist
// but does not reserve space for or keep track of the local segments.
// Eventually this will replace Worklist after all its current uses
// are migrated.
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl {
public:
static const int kSegmentSize = SegmentSize;
class Segment;
class Local;
MarkingWorklistImpl() = default;
~MarkingWorklistImpl() { CHECK(IsEmpty()); }
void Push(Segment* segment);
bool Pop(Segment** segment);
// Returns true if the list of segments is empty.
bool IsEmpty();
// Returns the number of segments in the list.
size_t Size();
// Moves the segments of the given marking worklist into this
// marking worklist.
void Merge(MarkingWorklistImpl<EntryType, SegmentSize>* other);
// These functions are not thread-safe. They should be called only
// if all local marking worklists that use the current worklist have
// been published and are empty.
void Clear();
template <typename Callback>
void Update(Callback callback);
template <typename Callback>
void Iterate(Callback callback);
private:
void set_top(Segment* segment) {
base::AsAtomicPointer::Relaxed_Store(&top_, segment);
}
base::Mutex lock_;
Segment* top_ = nullptr;
std::atomic<size_t> size_{0};
};
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl<EntryType, SegmentSize>::Segment {
public:
static const size_t kSize = SegmentSize;
Segment() = default;
bool Push(EntryType entry);
bool Pop(EntryType* entry);
size_t Size() const { return index_; }
bool IsEmpty() const { return index_ == 0; }
bool IsFull() const { return index_ == kSize; }
void Clear() { index_ = 0; }
template <typename Callback>
void Update(Callback callback);
template <typename Callback>
void Iterate(Callback callback) const;
Segment* next() const { return next_; }
void set_next(Segment* segment) { next_ = segment; }
private:
Segment* next_ = nullptr;
size_t index_ = 0;
EntryType entries_[kSize];
};
// A thread-local view of the marking worklist.
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl<EntryType, SegmentSize>::Local {
public:
Local() = default;
explicit Local(MarkingWorklistImpl<EntryType, SegmentSize>* worklist);
~Local();
Local(Local&&) V8_NOEXCEPT;
Local& operator=(Local&&) V8_NOEXCEPT;
// Disable copying since having multiple copies of the same
// local marking worklist is unsafe.
Local(const Local&) = delete;
Local& operator=(const Local& other) = delete;
void Push(EntryType entry);
bool Pop(EntryType* entry);
bool IsLocalEmpty() const;
bool IsGlobalEmpty() const;
void Publish();
void Merge(MarkingWorklistImpl<EntryType, SegmentSize>::Local* other);
size_t PushSegmentSize() const { return push_segment_->Size(); }
private:
void PublishPushSegment();
void PublishPopSegment();
bool StealPopSegment();
Segment* NewSegment() const {
// Bottleneck for filtering in crash dumps.
return new Segment();
}
MarkingWorklistImpl<EntryType, SegmentSize>* worklist_ = nullptr;
Segment* push_segment_ = nullptr;
Segment* pop_segment_ = nullptr;
};
using MarkingWorklist = MarkingWorklistImpl<HeapObject, 64>;
using EmbedderTracingWorklist = MarkingWorklistImpl<HeapObject, 16>;
// We piggyback on marking to compute object sizes per native context, which
// is needed for the new memory measurement API. The algorithm works as follows:
// 1) At the start of marking we create a marking worklist for each context.
......@@ -59,9 +170,10 @@ struct ContextWorklistPair {
MarkingWorklist* worklist;
};
// A helper class that owns all marking worklists.
class V8_EXPORT_PRIVATE MarkingWorklistsHolder {
// A helper class that owns all global marking worklists.
class V8_EXPORT_PRIVATE MarkingWorklists {
public:
class Local;
// Fake addresses of special contexts used for per-context accounting.
// - kSharedContext is for objects that are not attributed to any context.
// - kOtherContext is for objects that are attributed to contexts that are
......@@ -69,25 +181,15 @@ class V8_EXPORT_PRIVATE MarkingWorklistsHolder {
static const Address kSharedContext = 0;
static const Address kOtherContext = 8;
~MarkingWorklistsHolder();
MarkingWorklists() = default;
~MarkingWorklists();
// Calls the specified callback on each element of the worklists and
// replaces the element with the object that the callback writes into
// its output slot. If the callback returns false, the element is
// removed from the worklist. The callback must have the signature
// bool(HeapObject object, HeapObject* out).
template <typename Callback>
void Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
embedder_.Update(callback);
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext) {
// The shared context was updated above.
continue;
}
cw.worklist->Update(callback);
}
}
void Update(Callback callback);
MarkingWorklist* shared() { return &shared_; }
MarkingWorklist* on_hold() { return &on_hold_; }
......@@ -95,7 +197,7 @@ class V8_EXPORT_PRIVATE MarkingWorklistsHolder {
// A list of (context, worklist) pairs that was set up at the start of
// marking by CreateContextWorklists.
const std::vector<ContextWorklistPair>& context_worklists() {
const std::vector<ContextWorklistPair>& context_worklists() const {
return context_worklists_;
}
// This should be invoked at the start of marking with the list of contexts
......@@ -136,80 +238,60 @@ class V8_EXPORT_PRIVATE MarkingWorklistsHolder {
MarkingWorklist other_;
};
// A thread-local view of the marking worklists.
class V8_EXPORT_PRIVATE MarkingWorklists {
// A thread-local view of the marking worklists. It owns all local marking
// worklists and keeps track of the currently active local marking worklist
// for per-context marking. In order to avoid additional indirections for
// pushing and popping entries, the active_ worklist is not a pointer to
// Local but an actual instance of Local with the following invariants:
// - active_owner_ == worklist_by_context_[active_context_].get()
// - *active_owner_ is empty (all fields are null) because its content has
// been moved to active_.
class V8_EXPORT_PRIVATE MarkingWorklists::Local {
public:
static const Address kSharedContext = MarkingWorklistsHolder::kSharedContext;
static const Address kOtherContext = MarkingWorklistsHolder::kOtherContext;
static const Address kSharedContext = MarkingWorklists::kSharedContext;
static const Address kOtherContext = MarkingWorklists::kOtherContext;
MarkingWorklists(int task_id, MarkingWorklistsHolder* holder);
explicit Local(MarkingWorklists* global);
~Local();
void Push(HeapObject object) {
bool success = active_->Push(task_id_, object);
USE(success);
DCHECK(success);
}
inline void Push(HeapObject object);
inline bool Pop(HeapObject* object);
bool Pop(HeapObject* object) {
if (active_->Pop(task_id_, object)) return true;
if (!is_per_context_mode_) return false;
// The active worklist is empty. Find any other non-empty worklist and
// switch the active worklist to it.
return PopContext(object);
}
inline void PushOnHold(HeapObject object);
inline bool PopOnHold(HeapObject* object);
void PushOnHold(HeapObject object) {
bool success = on_hold_->Push(task_id_, object);
USE(success);
DCHECK(success);
}
bool PopOnHold(HeapObject* object) { return on_hold_->Pop(task_id_, object); }
inline void PushEmbedder(HeapObject object);
inline bool PopEmbedder(HeapObject* object);
void PushEmbedder(HeapObject object) {
bool success = embedder_->Push(task_id_, object);
USE(success);
DCHECK(success);
}
bool PopEmbedder(HeapObject* object) {
return embedder_->Pop(task_id_, object);
}
void FlushToGlobal();
void Publish();
bool IsEmpty();
bool IsEmbedderEmpty();
bool IsEmbedderEmpty() const;
// Publishes the local active marking worklist if its global worklist is
// empty. In the per-context marking mode it also publishes the shared
// worklist.
void ShareWork();
// Merges the on-hold worklist to the shared worklist.
void MergeOnHold();
void ShareWorkIfGlobalPoolIsEmpty();
// Returns the context of the active worklist.
Address Context() { return active_context_; }
// Switches the active worklist to that of the given context.
Address SwitchToContext(Address context) {
if (context == active_context_) return context;
return SwitchToContextSlow(context);
}
// Switches the active worklist to the shared worklist.
void SwitchToShared() {
active_context_ = kSharedContext;
active_ = shared_;
}
bool IsPerContextMode() { return is_per_context_mode_; }
Address Context() const { return active_context_; }
inline Address SwitchToContext(Address context);
inline Address SwitchToShared();
bool IsPerContextMode() const { return is_per_context_mode_; }
private:
bool PopContext(HeapObject* object);
Address SwitchToContextSlow(Address context);
MarkingWorklist* shared_;
MarkingWorklist* on_hold_;
EmbedderTracingWorklist* embedder_;
MarkingWorklist* active_;
inline void SwitchToContext(Address context,
MarkingWorklist::Local* worklist);
MarkingWorklist::Local on_hold_;
EmbedderTracingWorklist::Local embedder_;
MarkingWorklist::Local active_;
Address active_context_;
int task_id_;
MarkingWorklist::Local* active_owner_;
bool is_per_context_mode_;
// Per-context worklists. For simplicity we treat the shared worklist as
// the worklist of dummy kSharedContext.
std::vector<ContextWorklistPair> context_worklists_;
std::unordered_map<Address, MarkingWorklist*> worklist_by_context_;
std::unordered_map<Address, std::unique_ptr<MarkingWorklist::Local>>
worklist_by_context_;
};
} // namespace internal
......
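
As an illustration of the Update() contract relied on by UpdateMarkingWorklistAfterScavenge above, a hedged sketch; IsDead and MaybeForwarded are hypothetical helpers standing in for the real liveness and forwarding checks:

void SketchWorklistUpdate(MarkingWorklists* global) {
  global->Update([](HeapObject old_object, HeapObject* slot) {
    if (IsDead(old_object)) return false;  // false: drop the entry
    *slot = MaybeForwarded(old_object);    // write the (possibly moved) object
    return true;                           // true: keep the rewritten entry
  });
}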
......@@ -4,13 +4,14 @@
#include <stdlib.h>
#include "src/init/v8.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
......@@ -19,10 +20,11 @@ namespace internal {
namespace heap {
void PublishSegment(MarkingWorklist* worklist, HeapObject object) {
for (size_t i = 0; i <= MarkingWorklist::kSegmentCapacity; i++) {
worklist->Push(0, object);
MarkingWorklist::Local local(worklist);
for (size_t i = 0; i <= MarkingWorklist::kSegmentSize; i++) {
local.Push(object);
}
CHECK(worklist->Pop(0, &object));
CHECK(local.Pop(&object));
}
TEST(ConcurrentMarking) {
......@@ -36,11 +38,11 @@ TEST(ConcurrentMarking) {
collector->EnsureSweepingCompleted();
}
MarkingWorklistsHolder marking_worklists_holder;
MarkingWorklists marking_worklists;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
PublishSegment(marking_worklists_holder.shared(),
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
......@@ -59,16 +61,16 @@ TEST(ConcurrentMarkingReschedule) {
collector->EnsureSweepingCompleted();
}
MarkingWorklistsHolder marking_worklists_holder;
MarkingWorklists marking_worklists;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
PublishSegment(marking_worklists_holder.shared(),
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
PublishSegment(marking_worklists_holder.shared(),
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
concurrent_marking->Stop(
......@@ -87,17 +89,17 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
collector->EnsureSweepingCompleted();
}
MarkingWorklistsHolder marking_worklists_holder;
MarkingWorklists marking_worklists;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &marking_worklists_holder, &weak_objects);
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(marking_worklists_holder.shared(),
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
for (int i = 0; i < 5000; i++)
PublishSegment(marking_worklists_holder.shared(),
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
concurrent_marking->Stop(
......
......@@ -2365,7 +2365,7 @@ TEST(IdleNotificationFinishMarking) {
StepOrigin::kV8);
} while (!CcTest::heap()
->mark_compact_collector()
->marking_worklists()
->local_marking_worklists()
->IsEmpty());
marking->SetWeakClosureWasOverApproximatedForTesting(true);
......
......@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/marking-worklist.h"
#include <cmath>
#include <limits>
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking-worklist-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
......@@ -17,8 +19,8 @@ namespace internal {
using MarkingWorklistTest = TestWithContext;
TEST_F(MarkingWorklistTest, PushPop) {
MarkingWorklistsHolder holder;
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.Push(pushed_object);
......@@ -28,8 +30,8 @@ TEST_F(MarkingWorklistTest, PushPop) {
}
TEST_F(MarkingWorklistTest, PushPopOnHold) {
MarkingWorklistsHolder holder;
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.PushOnHold(pushed_object);
......@@ -39,8 +41,8 @@ TEST_F(MarkingWorklistTest, PushPopOnHold) {
}
TEST_F(MarkingWorklistTest, PushPopEmbedder) {
MarkingWorklistsHolder holder;
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.PushEmbedder(pushed_object);
......@@ -50,13 +52,13 @@ TEST_F(MarkingWorklistTest, PushPopEmbedder) {
}
TEST_F(MarkingWorklistTest, MergeOnHold) {
MarkingWorklistsHolder holder;
MarkingWorklists main_worklists(kMainThreadTask, &holder);
MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
MarkingWorklists holder;
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worker_worklists.PushOnHold(pushed_object);
worker_worklists.FlushToGlobal();
worker_worklists.Publish();
main_worklists.MergeOnHold();
HeapObject popped_object;
EXPECT_TRUE(main_worklists.Pop(&popped_object));
......@@ -64,13 +66,13 @@ TEST_F(MarkingWorklistTest, MergeOnHold) {
}
TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
MarkingWorklistsHolder holder;
MarkingWorklists main_worklists(kMainThreadTask, &holder);
MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
MarkingWorklists holder;
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
main_worklists.Push(pushed_object);
main_worklists.ShareWorkIfGlobalPoolIsEmpty();
main_worklists.ShareWork();
HeapObject popped_object;
EXPECT_TRUE(worker_worklists.Pop(&popped_object));
EXPECT_EQ(popped_object, pushed_object);
......@@ -78,9 +80,9 @@ TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
const Address context = 0xabcdef;
MarkingWorklistsHolder holder;
MarkingWorklists holder;
holder.CreateContextWorklists({context});
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.SwitchToContext(context);
......@@ -94,9 +96,9 @@ TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
const Address context = 0xabcdef;
MarkingWorklistsHolder holder;
MarkingWorklists holder;
holder.CreateContextWorklists({context});
MarkingWorklists worklists(kMainThreadTask, &holder);
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
worklists.SwitchToContext(context);
......@@ -114,15 +116,15 @@ TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
TEST_F(MarkingWorklistTest, ContextWorklistCrossTask) {
const Address context1 = 0x1abcdef;
const Address context2 = 0x2abcdef;
MarkingWorklistsHolder holder;
MarkingWorklists holder;
holder.CreateContextWorklists({context1, context2});
MarkingWorklists main_worklists(kMainThreadTask, &holder);
MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
ReadOnlyRoots(i_isolate()->heap()).undefined_value();
main_worklists.SwitchToContext(context1);
main_worklists.Push(pushed_object);
main_worklists.ShareWorkIfGlobalPoolIsEmpty();
main_worklists.ShareWork();
worker_worklists.SwitchToContext(context2);
HeapObject popped_object;
EXPECT_TRUE(worker_worklists.Pop(&popped_object));
......