Commit fe566be0 authored by Michael Lippautz, committed by Commit Bot

[heap] Concurrently process wrapper objects

Concurrently process objects and only read embedder fields on the main
thread.

Also prepares the concurrent marking infrastructure to plug this
processing into different types.

Bug: chromium:885125, chromium:843903
Change-Id: I23b7f778c16cff118dec93e11e2bbd02aaf11a78
Reviewed-on: https://chromium-review.googlesource.com/1231175
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56043}
parent 46573e51
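
The core idea of the change, before the diff: concurrent marking tasks are now allowed to visit JSApiObject wrappers, but instead of dereferencing their embedder fields they only push the objects onto a dedicated worklist; the main thread later drains that worklist and remains the only thread that reads the embedder fields. Below is a minimal, self-contained sketch of that handover. The names (WrapperObject, EmbedderWorklist, ConcurrentMarkerTask) are hypothetical stand-ins, not the real V8 types.

// Sketch only: concurrent threads push wrapper candidates, the main thread
// is the only place that reads embedder fields.
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

struct WrapperObject {
  // In the real heap these fields point into Blink and must only be
  // dereferenced on the main thread.
  void* embedder_field = nullptr;
};

class EmbedderWorklist {
 public:
  void Push(WrapperObject* object) {
    std::lock_guard<std::mutex> guard(mutex_);
    objects_.push_back(object);
  }
  bool Pop(WrapperObject** object) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (objects_.empty()) return false;
    *object = objects_.back();
    objects_.pop_back();
    return true;
  }

 private:
  std::mutex mutex_;
  std::vector<WrapperObject*> objects_;
};

// Runs on a background thread: visits objects and queues the API wrappers
// without touching their embedder fields.
void ConcurrentMarkerTask(std::vector<WrapperObject>* heap,
                          EmbedderWorklist* worklist) {
  for (WrapperObject& object : *heap) {
    worklist->Push(&object);
  }
}

int main() {
  std::vector<WrapperObject> heap(1000);
  EmbedderWorklist worklist;
  std::thread marker(ConcurrentMarkerTask, &heap, &worklist);
  marker.join();  // In V8 the main thread can also drain while marking runs.

  // Main thread: the only place where embedder fields are inspected.
  WrapperObject* object;
  int drained = 0;
  while (worklist.Pop(&object)) {
    void* field = object->embedder_field;  // Safe: main thread only.
    (void)field;
    ++drained;
  }
  std::cout << "Drained " << drained << " wrapper objects\n";
}
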
@@ -74,15 +74,19 @@ class ConcurrentMarkingVisitor final
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
ConcurrentMarking::MarkingWorklist* bailout,
LiveBytesMap* live_bytes,
WeakObjects* weak_objects, int task_id)
explicit ConcurrentMarkingVisitor(
ConcurrentMarking::MarkingWorklist* shared,
ConcurrentMarking::MarkingWorklist* bailout, LiveBytesMap* live_bytes,
WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
bool embedder_tracing_enabled)
: shared_(shared, task_id),
bailout_(bailout, task_id),
weak_objects_(weak_objects),
embedder_objects_(embedder_objects, task_id),
marking_state_(live_bytes),
task_id_(task_id) {}
task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled) {}
template <typename T>
static V8_INLINE T* Cast(HeapObject* object) {
@@ -197,11 +201,7 @@ class ConcurrentMarkingVisitor final
}
int VisitJSApiObject(Map* map, JSObject* object) {
if (marking_state_.IsGrey(object)) {
// The main thread will do wrapper tracing in Blink.
bailout_.Push(object);
}
return 0;
return VisitEmbedderTracingSubclass(map, object);
}
// ===========================================================================
@@ -420,6 +420,18 @@ class ConcurrentMarkingVisitor final
return size;
}
template <typename T>
int VisitEmbedderTracingSubclass(Map* map, T* object) {
DCHECK(object->IsApiWrapper());
int size = VisitJSObjectSubclass(map, object);
if (size && embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
// the main thread.
embedder_objects_.Push(object);
}
return size;
}
template <typename T>
int VisitLeftTrimmableArray(Map* map, T* object) {
// The synchronized_length() function checks that the length is a Smi.
@@ -447,9 +459,11 @@ class ConcurrentMarkingVisitor final
ConcurrentMarking::MarkingWorklist::View shared_;
ConcurrentMarking::MarkingWorklist::View bailout_;
WeakObjects* weak_objects_;
ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
ConcurrentMarkingState marking_state_;
int task_id_;
SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_;
};
// Strings can change maps due to conversion to thin string or external strings.
@@ -511,12 +525,14 @@ class ConcurrentMarking::Task : public CancelableTask {
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout,
MarkingWorklist* on_hold,
WeakObjects* weak_objects)
WeakObjects* weak_objects,
EmbedderTracingWorklist* embedder_objects)
: heap_(heap),
shared_(shared),
bailout_(bailout),
on_hold_(on_hold),
weak_objects_(weak_objects) {
weak_objects_(weak_objects),
embedder_objects_(embedder_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
@@ -528,8 +544,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
weak_objects_, task_id);
ConcurrentMarkingVisitor visitor(
shared_, bailout_, &task_state->live_bytes, weak_objects_,
embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse());
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -596,6 +613,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
shared_->FlushToGlobal(task_id);
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
embedder_objects_->FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
......
@@ -58,10 +58,12 @@ class ConcurrentMarking {
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
using EmbedderTracingWorklist = Worklist<HeapObject*, 16 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout, MarkingWorklist* on_hold,
WeakObjects* weak_objects);
WeakObjects* weak_objects,
EmbedderTracingWorklist* embedder_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
// heap should not be moved while these are active (can be stopped safely via
@@ -108,6 +110,7 @@ class ConcurrentMarking {
MarkingWorklist* const bailout_;
MarkingWorklist* const on_hold_;
WeakObjects* const weak_objects_;
EmbedderTracingWorklist* const embedder_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};
......
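
The new EmbedderTracingWorklist above is a Worklist<HeapObject*, 16>: a segmented worklist where each task pushes into a small task-local segment and FlushToGlobal() publishes leftover entries to a shared global pool at the end of a task (as seen in ConcurrentMarking::Run above). A rough sketch of that shape follows; SegmentedWorklist and its internals are illustrative only, not the real V8 implementation.

// Sketch of a segmented, per-task worklist with an explicit FlushToGlobal().
#include <array>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <vector>

template <typename T, int kSegmentSize, int kTasks>
class SegmentedWorklist {
 public:
  // Pushes into the task-local segment; publishes it once it is full.
  void Push(int task_id, T value) {
    std::vector<T>& segment = local_[task_id];
    segment.push_back(value);
    if (segment.size() == static_cast<std::size_t>(kSegmentSize)) {
      FlushToGlobal(task_id);
    }
  }

  // Publishes whatever the task still holds locally so that other tasks
  // (e.g. the main thread) can pop it.
  void FlushToGlobal(int task_id) {
    std::vector<T>& segment = local_[task_id];
    if (segment.empty()) return;
    std::lock_guard<std::mutex> guard(mutex_);
    global_.push_back(std::move(segment));
    segment.clear();
  }

  // Pops from the task-local segment first, refilling from the global pool.
  bool Pop(int task_id, T* value) {
    std::vector<T>& segment = local_[task_id];
    if (segment.empty()) {
      std::lock_guard<std::mutex> guard(mutex_);
      if (global_.empty()) return false;
      segment = std::move(global_.back());
      global_.pop_back();
    }
    *value = segment.back();
    segment.pop_back();
    return true;
  }

 private:
  std::array<std::vector<T>, kTasks> local_;
  std::mutex mutex_;
  std::vector<std::vector<T>> global_;
};

int main() {
  SegmentedWorklist<int, 16, 2> worklist;
  for (int i = 0; i < 40; ++i) worklist.Push(/*task_id=*/1, i);
  worklist.FlushToGlobal(1);  // Like the flush at the end of a marking task.
  int value = 0, popped = 0;
  while (worklist.Pop(/*task_id=*/0, &value)) ++popped;
  std::cout << "Task 0 popped " << popped << " entries\n";  // Prints 40.
}
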
@@ -4720,10 +4720,11 @@ void Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
marking_worklist->embedder());
} else {
concurrent_marking_ =
new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
nullptr, nullptr);
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
......
@@ -793,6 +793,32 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
return bytes_processed;
}
void IncrementalMarking::EmbedderStep(double duration_ms) {
constexpr int kObjectsToProcessBeforeInterrupt = 100;
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
const double deadline =
heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
HeapObject* object;
int cnt = 0;
while (marking_worklist()->embedder()->Pop(0, &object)) {
heap_->TracePossibleWrapper(JSObject::cast(object));
if (++cnt == kObjectsToProcessBeforeInterrupt) {
cnt = 0;
if (heap_->MonotonicallyIncreasingTimeInMs() > deadline) {
break;
}
}
}
heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
if (!heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
heap_->local_embedder_heap_tracer()->Trace(deadline);
}
}
void IncrementalMarking::Hurry() {
// A scavenge may have pushed new objects on the marking deque (due to black
@@ -922,14 +948,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
heap_->local_embedder_heap_tracer()->InUse();
do {
if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
const double wrapper_deadline =
heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
if (!heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
heap_->local_embedder_heap_tracer()->Trace(wrapper_deadline);
}
EmbedderStep(kStepSizeInMs);
} else {
Step(step_size_in_bytes, completion_action, step_origin);
}
......
@@ -193,6 +193,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t Step(size_t bytes_to_process, CompletionAction action,
StepOrigin step_origin,
WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
void EmbedderStep(double duration);
inline void RestartIfNotMarking();
......
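
EmbedderStep() above drains the embedder worklist under a time budget: it processes objects in batches of 100 and re-checks the clock only at batch boundaries so the deadline checks stay cheap, then hands the collected wrappers to the embedder tracer. Below is a standalone sketch of just that deadline-bounded drain loop; NowMs and ProcessWrapper are hypothetical stand-ins for the V8 helpers.

// Sketch of a deadline-bounded drain that checks the clock once per batch.
#include <chrono>
#include <deque>
#include <iostream>

double NowMs() {
  using namespace std::chrono;
  return duration<double, std::milli>(steady_clock::now().time_since_epoch())
      .count();
}

// Stand-in for heap_->TracePossibleWrapper(JSObject::cast(object)).
void ProcessWrapper(int object) {
  volatile int sink = object * 2;
  (void)sink;
}

void EmbedderStep(std::deque<int>* worklist, double duration_ms) {
  constexpr int kObjectsToProcessBeforeInterrupt = 100;
  const double deadline = NowMs() + duration_ms;
  int count = 0;
  while (!worklist->empty()) {
    int object = worklist->front();
    worklist->pop_front();
    ProcessWrapper(object);
    // Reading the clock is comparatively expensive, so only do it once per
    // batch of 100 objects.
    if (++count == kObjectsToProcessBeforeInterrupt) {
      count = 0;
      if (NowMs() > deadline) break;  // Leave the rest for the next step.
    }
  }
}

int main() {
  std::deque<int> worklist;
  for (int i = 0; i < 1000000; ++i) worklist.push_back(i);
  EmbedderStep(&worklist, 0.5 /* ms */);
  std::cout << worklist.size() << " objects left for the next step\n";
}
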
@@ -49,7 +49,6 @@ template <FixedArrayVisitationMode fixed_array_mode,
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitJSApiObject(Map* map, JSObject* object) {
if (heap_->local_embedder_heap_tracer()->InUse()) {
DCHECK(object->IsJSObject());
heap_->TracePossibleWrapper(object);
}
int size = JSObject::BodyDescriptor::SizeOf(map, object);
......
@@ -1598,6 +1598,10 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
HeapObject* object;
while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
heap_->TracePossibleWrapper(JSObject::cast(object));
}
heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
@@ -1753,10 +1757,14 @@ void MarkCompactCollector::MarkLiveObjects() {
// through ephemerons.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone()) {
do {
// PerformWrapperTracing() also empties the work items collected by
// concurrent markers. As a result this call needs to happen at least
// once.
PerformWrapperTracing();
ProcessMarkingWorklist();
}
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone());
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1803,6 +1811,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
}
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
......
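
The loop in MarkLiveObjects() above changes from while to do/while because wrapper work items are now also produced by the concurrent markers, so PerformWrapperTracing() must run at least once even when the embedder starts out reporting that remote tracing is done; the pause then alternates wrapper tracing and V8 marking until neither side has work left. A small illustrative model of that fixed-point loop follows; Embedder and the worklists are hypothetical stand-ins, not V8 API.

// Sketch of the fixed-point loop between V8 marking and embedder tracing.
#include <deque>
#include <iostream>

// Hypothetical embedder: consumes wrappers and may discover V8 objects.
struct Embedder {
  std::deque<int> wrappers;
  bool RemoteTracingDone() const { return wrappers.empty(); }
  void Trace(std::deque<int>* v8_worklist) {
    while (!wrappers.empty()) {
      int wrapper = wrappers.front();
      wrappers.pop_front();
      if (wrapper % 3 == 0) v8_worklist->push_back(wrapper + 1);
    }
  }
};

int main() {
  std::deque<int> v8_worklist = {1, 2, 3, 4, 5, 6};
  // Wrapper candidates already collected by concurrent markers; this is why
  // the body must execute at least once.
  std::deque<int> embedder_worklist = {9, 12};
  Embedder embedder;

  do {
    // "PerformWrapperTracing": hand wrappers over and let the embedder trace.
    while (!embedder_worklist.empty()) {
      embedder.wrappers.push_back(embedder_worklist.front());
      embedder_worklist.pop_front();
    }
    embedder.Trace(&v8_worklist);
    // "ProcessMarkingWorklist": V8 marking may discover further wrappers.
    while (!v8_worklist.empty()) {
      int object = v8_worklist.front();
      v8_worklist.pop_front();
      if (object % 2 == 0) embedder_worklist.push_back(object * 10);
    }
  } while (!embedder.RemoteTracingDone() || !embedder_worklist.empty());

  std::cout << "Wrapper tracing and V8 marking reached a fixed point\n";
}
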
@@ -464,11 +464,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#else
using MarkingState = MajorNonAtomicMarkingState;
#endif // V8_CONCURRENT_MARKING
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
// Wrapper for the shared and bailout worklists.
class MarkingWorklist {
public:
using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;
using EmbedderTracingWorklist = Worklist<HeapObject*, 16>;
// The heap parameter is not used but needed to match the sequential case.
explicit MarkingWorklist(Heap* heap) {}
@@ -511,6 +514,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bailout_.Clear();
shared_.Clear();
on_hold_.Clear();
embedder_.Clear();
}
bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
@@ -523,6 +527,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
on_hold_.IsGlobalPoolEmpty();
}
bool IsEmbedderEmpty() {
return embedder_.IsLocalEmpty(kMainThread) &&
embedder_.IsGlobalPoolEmpty();
}
int Size() {
return static_cast<int>(bailout_.LocalSize(kMainThread) +
shared_.LocalSize(kMainThread) +
@@ -538,11 +547,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bailout_.Update(callback);
shared_.Update(callback);
on_hold_.Update(callback);
embedder_.Update(callback);
}
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* bailout() { return &bailout_; }
ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
EmbedderTracingWorklist* embedder() { return &embedder_; }
void Print() {
PrintWorklist("shared", &shared_);
@@ -568,6 +579,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// for new space. This allows the compiler to remove write barriers
// for freshly allocated objects.
ConcurrentMarkingWorklist on_hold_;
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
EmbedderTracingWorklist embedder_;
};
class RootMarkingVisitor;
......
@@ -39,9 +39,10 @@ TEST(ConcurrentMarking) {
}
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -61,9 +62,10 @@ TEST(ConcurrentMarkingReschedule) {
}
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -87,9 +89,10 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
}
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
......