Commit 68a8bdd8 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Remove bailout marking worklist.

The concurrent marker can now process all objects.
The patch also eagerly visits objects that undergo layout
changes: previously such objects were pushed onto the bailout
worklist, which no longer exists.
To preserve the incremental step accounting, the patch introduces
a new GC tracer scope called MC_INCREMENTAL_LAYOUT_CHANGE.
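
In essence, the change amounts to the following (a minimal sketch, not V8
source; the tri-color state and the revisit callback are simplified stand-ins
for the real marking state and visitor):

// Sketch of the new layout-change protocol; names mirror the diff below,
// everything else is illustrative.
#include <cassert>
#include <functional>

enum class Color { kWhite, kGrey, kBlack };
struct HeapObject { Color color = Color::kWhite; };

// Old behavior: color the object black and push it onto the bailout
// worklist, to be (re)visited later in a stop-the-world pause.
// New behavior: color the object black and visit it on the spot, so no
// bailout worklist is needed.
void MarkBlackAndVisitObjectDueToLayoutChange(
    HeapObject* obj, const std::function<void(HeapObject*)>& revisit) {
  if (obj->color == Color::kWhite) obj->color = Color::kGrey;  // WhiteToGrey
  if (obj->color == Color::kGrey) {                            // GreyToBlack
    obj->color = Color::kBlack;
    revisit(obj);  // RevisitObject: eagerly mark the object's fields.
  }
}

int main() {
  HeapObject o;
  bool visited = false;
  MarkBlackAndVisitObjectDueToLayoutChange(
      &o, [&](HeapObject*) { visited = true; });
  assert(o.color == Color::kBlack && visited);
}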

Bug: v8:8486
Change-Id: Ic1c2f0d4e2ac0602fc945f3258af9624247bd65f
Reviewed-on: https://chromium-review.googlesource.com/c/1386486
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58712}
parent e8316b64
@@ -352,14 +352,15 @@
 #define INCREMENTAL_SCOPES(F) \
   /* MC_INCREMENTAL is the top-level incremental marking scope. */ \
   F(MC_INCREMENTAL) \
-  F(MC_INCREMENTAL_START) \
-  F(MC_INCREMENTAL_SWEEPING) \
   F(MC_INCREMENTAL_EMBEDDER_PROLOGUE) \
   F(MC_INCREMENTAL_EMBEDDER_TRACING) \
+  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
+  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE) \
   F(MC_INCREMENTAL_FINALIZE) \
   F(MC_INCREMENTAL_FINALIZE_BODY) \
-  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
-  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
+  F(MC_INCREMENTAL_LAYOUT_CHANGE) \
+  F(MC_INCREMENTAL_START) \
+  F(MC_INCREMENTAL_SWEEPING)

 #define TOP_MC_SCOPES(F) \
   F(MC_CLEAR) \
......
@@ -78,12 +78,10 @@ class ConcurrentMarkingVisitor final
   explicit ConcurrentMarkingVisitor(
       ConcurrentMarking::MarkingWorklist* shared,
-      ConcurrentMarking::MarkingWorklist* bailout,
       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
       bool embedder_tracing_enabled, unsigned mark_compact_epoch)
       : shared_(shared, task_id),
-        bailout_(bailout, task_id),
         weak_objects_(weak_objects),
         embedder_objects_(embedder_objects, task_id),
         marking_state_(memory_chunk_data),
@@ -654,7 +652,6 @@ class ConcurrentMarkingVisitor final
   }

   ConcurrentMarking::MarkingWorklist::View shared_;
-  ConcurrentMarking::MarkingWorklist::View bailout_;
   WeakObjects* weak_objects_;
   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
   ConcurrentMarkingState marking_state_;
@@ -722,13 +719,11 @@ class ConcurrentMarking::Task : public CancelableTask {
 };

 ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                                     MarkingWorklist* bailout,
                                      MarkingWorklist* on_hold,
                                      WeakObjects* weak_objects,
                                      EmbedderTracingWorklist* embedder_objects)
     : heap_(heap),
       shared_(shared),
-      bailout_(bailout),
       on_hold_(on_hold),
       weak_objects_(weak_objects),
       embedder_objects_(embedder_objects) {
@@ -743,10 +738,10 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
-  ConcurrentMarkingVisitor visitor(
-      shared_, bailout_, &task_state->memory_chunk_data, weak_objects_,
-      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse(),
-      task_state->mark_compact_epoch);
+  ConcurrentMarkingVisitor visitor(shared_, &task_state->memory_chunk_data,
+                                   weak_objects_, embedder_objects_, task_id,
+                                   heap_->local_embedder_heap_tracer()->InUse(),
+                                   task_state->mark_compact_epoch);
   double time_ms;
   size_t marked_bytes = 0;
   if (FLAG_trace_concurrent_marking) {
@@ -812,7 +807,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   }
   shared_->FlushToGlobal(task_id);
-  bailout_->FlushToGlobal(task_id);
   on_hold_->FlushToGlobal(task_id);
   embedder_objects_->FlushToGlobal(task_id);
......
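Context for the FlushToGlobal calls above: shared_, on_hold_, and
embedder_objects_ are segmented work-stealing worklists; each task pushes and
pops through a private view and publishes filled segments to a global pool.
The sketch below is a hedged miniature of that shape, not V8's actual
src/heap/worklist.h (segment handling, capacity, and locking are simplified):

#include <array>
#include <cassert>
#include <mutex>
#include <vector>

template <typename T, size_t kSegmentSize>
class Worklist {
 public:
  bool Push(int task_id, T value) {
    std::vector<T>& segment = local_[task_id];
    segment.push_back(value);
    // A full private segment is published so other tasks can steal it.
    if (segment.size() >= kSegmentSize) FlushToGlobal(task_id);
    return true;
  }
  bool Pop(int task_id, T* out) {
    std::vector<T>& segment = local_[task_id];
    if (segment.empty() && !Steal(&segment)) return false;
    *out = segment.back();
    segment.pop_back();
    return true;
  }
  // What the tasks above do on exit: hand any leftover private work to the
  // global pool so the main thread (or another task) can finish it.
  void FlushToGlobal(int task_id) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (!local_[task_id].empty()) {
      global_.push_back(std::move(local_[task_id]));
      local_[task_id].clear();
    }
  }

 private:
  bool Steal(std::vector<T>* segment) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (global_.empty()) return false;
    *segment = std::move(global_.back());
    global_.pop_back();
    return true;
  }
  static const int kMaxTasks = 8;
  std::array<std::vector<T>, kMaxTasks> local_;
  std::vector<std::vector<T>> global_;
  std::mutex mutex_;
};

int main() {
  Worklist<int, 4> worklist;
  worklist.Push(0, 42);       // task 0 works privately...
  worklist.FlushToGlobal(0);  // ...then publishes on shutdown
  int value = 0;
  assert(worklist.Pop(1, &value) && value == 42);  // task 1 steals the work
}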
@@ -67,8 +67,7 @@ class ConcurrentMarking {
   using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;

   ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                    MarkingWorklist* bailout, MarkingWorklist* on_hold,
-                    WeakObjects* weak_objects,
+                    MarkingWorklist* on_hold, WeakObjects* weak_objects,
                     EmbedderTracingWorklist* embedder_objects);

   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
@@ -113,7 +112,6 @@ class ConcurrentMarking {
   void Run(int task_id, TaskState* task_state);

   Heap* const heap_;
   MarkingWorklist* const shared_;
-  MarkingWorklist* const bailout_;
   MarkingWorklist* const on_hold_;
   WeakObjects* const weak_objects_;
   EmbedderTracingWorklist* const embedder_objects_;
......
@@ -710,6 +710,8 @@ void GCTracer::PrintNVP() const {
           "incremental.finalize.body=%.1f "
           "incremental.finalize.external.prologue=%.1f "
           "incremental.finalize.external.epilogue=%.1f "
+          "incremental.layout_change=%.1f "
+          "incremental.start=%.1f "
           "incremental.sweeping=%.1f "
           "incremental.embedder_prologue=%.1f "
           "incremental.embedder_tracing=%.1f "
@@ -804,6 +806,8 @@ void GCTracer::PrintNVP() const {
           current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE],
+          current_.scopes[Scope::MC_INCREMENTAL_START],
           current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
           current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
@@ -1129,6 +1133,8 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
   base::MutexGuard guard(&background_counter_mutex_);

   const double overall_duration =
+      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
@@ -1149,6 +1155,8 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
           .total_duration_ms;

   const double marking_duration =
+      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       incremental_marking_duration_ +
......
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE GCTracer {
     NUMBER_OF_SCOPES,

     FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
-    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
+    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
     FIRST_SCOPE = MC_INCREMENTAL,
     NUMBER_OF_INCREMENTAL_SCOPES =
         LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
......
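Aside: LAST_INCREMENTAL_SCOPE can simply move to MC_INCREMENTAL_SWEEPING
because the incremental scopes form a contiguous, sorted run in the enum and
all range bookkeeping derives from the FIRST/LAST pair, so the new
MC_INCREMENTAL_LAYOUT_CHANGE scope is covered automatically. A compilable toy
version of that pattern (abridged scope list, illustrative only):

#include <cassert>

enum Scope {
  MC_INCREMENTAL,                // top-level incremental scope
  MC_INCREMENTAL_EMBEDDER_PROLOGUE,
  MC_INCREMENTAL_LAYOUT_CHANGE,  // the newly added scope
  MC_INCREMENTAL_SWEEPING,       // sorts last => new LAST_INCREMENTAL_SCOPE
  MC_CLEAR,                      // first non-incremental scope
  NUMBER_OF_SCOPES,
  FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
  LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
  NUMBER_OF_INCREMENTAL_SCOPES =
      LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
};

int main() {
  // Any loop over [FIRST, LAST] picks up MC_INCREMENTAL_LAYOUT_CHANGE
  // automatically, which is how the RecordGCSumCounters sums stay correct.
  assert(NUMBER_OF_INCREMENTAL_SCOPES == 4);
  for (int s = FIRST_INCREMENTAL_SCOPE; s <= LAST_INCREMENTAL_SCOPE; ++s) {
    // e.g. accumulate current_.scopes[s] here.
  }
}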
@@ -2967,7 +2967,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
 void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
                                     const DisallowHeapAllocation&) {
   if (incremental_marking()->IsMarking()) {
-    incremental_marking()->MarkBlackAndPush(object);
+    incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
@@ -4382,12 +4382,11 @@ void Heap::SetUp() {
     MarkCompactCollector::MarkingWorklist* marking_worklist =
         mark_compact_collector_->marking_worklist();
     concurrent_marking_ = new ConcurrentMarking(
-        this, marking_worklist->shared(), marking_worklist->bailout(),
-        marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
-        marking_worklist->embedder());
+        this, marking_worklist->shared(), marking_worklist->on_hold(),
+        mark_compact_collector_->weak_objects(), marking_worklist->embedder());
   } else {
-    concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
-                                                nullptr, nullptr);
+    concurrent_marking_ =
+        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
   }

   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
......
@@ -133,18 +133,13 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
   return false;
 }

-void IncrementalMarking::MarkBlackAndPush(HeapObject obj) {
-  // Marking left-trimmable fixed array black is unsafe because left-trimming
-  // re-pushes only grey arrays onto the marking worklist.
-  DCHECK(!obj->IsFixedArray() && !obj->IsFixedDoubleArray());
-  // Color the object black and push it into the bailout deque.
+void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
+    HeapObject obj) {
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
   marking_state()->WhiteToGrey(obj);
   if (marking_state()->GreyToBlack(obj)) {
-    if (FLAG_concurrent_marking) {
-      marking_worklist()->PushBailout(obj);
-    } else {
-      marking_worklist()->Push(obj);
-    }
+    RevisitObject(obj);
   }
 }
@@ -155,61 +150,26 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
             MemoryChunk::FromAddress(to->address()));
   DCHECK_NE(from, to);

-  MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
   MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

   if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
     // Nothing to do if the object is in black area.
     return;
   }
-
-  bool marked_black_due_to_left_trimming = false;
-  if (FLAG_concurrent_marking) {
-    // We need to mark the array black before overwriting its map and length
-    // so that the concurrent marker does not observe inconsistent state.
-    Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
-    if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
-      // The concurrent marker will not mark the array. We need to push the
-      // new array start in marking deque to ensure that it will be marked.
-      marked_black_due_to_left_trimming = true;
-    }
-    DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
-  }
-
-  if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
-      !marked_black_due_to_left_trimming) {
-    // The array was black before left trimming or was marked black by the
-    // concurrent marker. Simply transfer the color.
-    if (from->address() + kTaggedSize == to->address()) {
-      // The old and the new markbits overlap. The |to| object has the
-      // grey color. To make it black, we need to set the second bit.
-      DCHECK(new_mark_bit.Get<kAtomicity>());
-      new_mark_bit.Next().Set<kAtomicity>();
-    } else {
-      bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
-      DCHECK(success);
-      USE(success);
-    }
-  } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
-             marked_black_due_to_left_trimming) {
-    // The array was already grey or was marked black by this function.
-    // Mark the new array grey and push it to marking deque.
-    if (from->address() + kTaggedSize == to->address()) {
-      // The old and the new markbits overlap. The |to| object is either white
-      // or grey. Set the first bit to make sure that it is grey.
-      new_mark_bit.Set<kAtomicity>();
-      DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
-    } else {
-      bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
-      DCHECK(success);
-      USE(success);
-    }
-    // Subsequent left-trimming will re-push only grey arrays.
-    // Ensure that this array is grey.
-    DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
-    marking_worklist()->PushBailout(to);
-    RestartIfNotMarking();
-  }
+  MarkBlackAndVisitObjectDueToLayoutChange(from);
+  DCHECK(marking_state()->IsBlack(from));
+  // Mark the new address as black.
+  if (from->address() + kTaggedSize == to->address()) {
+    // The old and the new markbits overlap. The |to| object has the
+    // grey color. To make it black, we need to set the second bit.
+    DCHECK(new_mark_bit.Get<kAtomicity>());
+    new_mark_bit.Next().Set<kAtomicity>();
+  } else {
+    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
+    DCHECK(success);
+    USE(success);
+  }
+  DCHECK(marking_state()->IsBlack(to));
 }

 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
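The overlapping-markbit branch above deserves a worked example. As the
comments imply, an object's color occupies two consecutive bits starting at
the bit for its first word (white = 00, grey = 10, black = 11), so when
to == from + kTaggedSize the |to| pair begins at the second bit of the
now-black |from| pair and already reads grey. A self-contained illustration
(std::bitset stands in for the real Bitmap/MarkBit classes):

#include <bitset>
#include <cassert>

int main() {
  std::bitset<8> bits;         // one bit per kTaggedSize-aligned word
  const size_t from = 0;       // word index of the old object start
  const size_t to = from + 1;  // array left-trimmed by one word

  // MarkBlackAndVisitObjectDueToLayoutChange(from): |from| becomes black.
  bits.set(from);      // WhiteToGrey: first bit of the pair
  bits.set(from + 1);  // GreyToBlack: second bit of the pair

  // |to|'s pair overlaps |from|'s: its first bit is bit from+1, already
  // set, so |to| currently reads grey (DCHECK(new_mark_bit.Get())).
  assert(bits.test(to) && !bits.test(to + 1));

  // new_mark_bit.Next().Set(): setting the second bit turns |to| black.
  bits.set(to + 1);
  assert(bits.test(to) && bits.test(to + 1));
}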
@@ -797,17 +757,11 @@ void IncrementalMarking::VisitDescriptors(HeapObject host,
   visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
 }

-template <WorklistToProcess worklist_to_process>
 intptr_t IncrementalMarking::ProcessMarkingWorklist(
     intptr_t bytes_to_process, ForceCompletionAction completion) {
   intptr_t bytes_processed = 0;
   while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
-    HeapObject obj;
-    if (worklist_to_process == WorklistToProcess::kBailout) {
-      obj = marking_worklist()->PopBailout();
-    } else {
-      obj = marking_worklist()->Pop();
-    }
+    HeapObject obj = marking_worklist()->Pop();
     if (obj.is_null()) break;
     // Left trimming may result in white, grey, or black filler objects on the
     // marking deque. Ignore these objects.
@@ -1069,11 +1023,6 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
   bytes_to_process = Min(bytes_to_process, step_size);
   size_t bytes_processed = 0;
   if (FLAG_concurrent_marking) {
-    bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
-                           StepOrigin::kV8, WorklistToProcess::kBailout);
-    bytes_to_process = (bytes_processed >= bytes_to_process)
-                           ? 0
-                           : bytes_to_process - bytes_processed;
     size_t current_bytes_marked_concurrently =
         heap()->concurrent_marking()->TotalMarkedBytes();
     // The concurrent_marking()->TotalMarkedBytes() is not monothonic for a
@@ -1092,14 +1041,14 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
     bytes_processed += bytes_to_process;
     bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
   }
-  bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8,
-                          WorklistToProcess::kAll);
+  bytes_processed +=
+      Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
   bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
 }

 size_t IncrementalMarking::Step(size_t bytes_to_process,
-                                CompletionAction action, StepOrigin step_origin,
-                                WorklistToProcess worklist_to_process) {
+                                CompletionAction action,
+                                StepOrigin step_origin) {
   double start = heap_->MonotonicallyIncreasingTimeInMs();

   if (state_ == SWEEPING) {
@@ -1126,13 +1075,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
     }
 #endif

-    if (worklist_to_process == WorklistToProcess::kBailout) {
-      bytes_processed =
-          ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
-    } else {
-      bytes_processed =
-          ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
-    }
+    bytes_processed = ProcessMarkingWorklist(bytes_to_process);

     if (step_origin == StepOrigin::kTask) {
       bytes_marked_ahead_of_schedule_ += bytes_processed;
......
@@ -20,7 +20,6 @@ class Object;
 class PagedSpace;

 enum class StepOrigin { kV8, kTask };
-enum class WorklistToProcess { kAll, kBailout };

 class V8_EXPORT_PRIVATE IncrementalMarking {
  public:
@@ -175,8 +174,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void FinalizeSweeping();

   size_t Step(size_t bytes_to_process, CompletionAction action,
-              StepOrigin step_origin,
-              WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
+              StepOrigin step_origin);
   void StepOnAllocation(size_t bytes_to_process, double max_step_size);

   bool ShouldDoEmbedderStep();
@@ -216,7 +214,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // This function is used to color the object black before it undergoes an
   // unsafe layout change. This is a part of synchronization protocol with
   // the concurrent marker.
-  void MarkBlackAndPush(HeapObject obj);
+  void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);

   bool IsCompacting() { return IsMarking() && is_compacting_; }
@@ -279,7 +277,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
   void DeactivateIncrementalWriteBarrier();

-  template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
   V8_INLINE intptr_t ProcessMarkingWorklist(
       intptr_t bytes_to_process,
       ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
......
@@ -1691,7 +1691,6 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
     MarkObject(object, map);
     visitor.Visit(map, object);
   }
-  DCHECK(marking_worklist()->IsBailoutEmpty());
 }

 bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
......
@@ -468,7 +468,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   using NonAtomicMarkingState = MajorNonAtomicMarkingState;

-  // Wrapper for the shared and bailout worklists.
+  // Wrapper for the shared worklist.
   class MarkingWorklist {
    public:
     using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
@@ -483,17 +483,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       DCHECK(success);
     }

-    void PushBailout(HeapObject object) {
-      bool success = bailout_.Push(kMainThread, object);
-      USE(success);
-      DCHECK(success);
-    }
-
     HeapObject Pop() {
       HeapObject result;
-#ifdef V8_CONCURRENT_MARKING
-      if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
       if (shared_.Pop(kMainThread, &result)) return result;
 #ifdef V8_CONCURRENT_MARKING
       // The expectation is that this work list is empty almost all the time
@@ -503,29 +494,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       return HeapObject();
     }

-    HeapObject PopBailout() {
-#ifdef V8_CONCURRENT_MARKING
-      HeapObject result;
-      if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
-      return HeapObject();
-    }
-
     void Clear() {
-      bailout_.Clear();
       shared_.Clear();
       on_hold_.Clear();
       embedder_.Clear();
     }

-    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
-
     bool IsEmpty() {
-      return bailout_.IsLocalEmpty(kMainThread) &&
-             shared_.IsLocalEmpty(kMainThread) &&
+      return shared_.IsLocalEmpty(kMainThread) &&
              on_hold_.IsLocalEmpty(kMainThread) &&
-             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
-             on_hold_.IsGlobalPoolEmpty();
+             shared_.IsGlobalPoolEmpty() && on_hold_.IsGlobalPoolEmpty();
     }

     bool IsEmbedderEmpty() {
@@ -534,8 +512,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     }

     int Size() {
-      return static_cast<int>(bailout_.LocalSize(kMainThread) +
-                              shared_.LocalSize(kMainThread) +
+      return static_cast<int>(shared_.LocalSize(kMainThread) +
                               on_hold_.LocalSize(kMainThread));
     }
@@ -545,20 +522,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // The callback must accept HeapObject and return HeapObject.
     template <typename Callback>
     void Update(Callback callback) {
-      bailout_.Update(callback);
       shared_.Update(callback);
       on_hold_.Update(callback);
       embedder_.Update(callback);
     }

     ConcurrentMarkingWorklist* shared() { return &shared_; }
-    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
     ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
     EmbedderTracingWorklist* embedder() { return &embedder_; }

     void Print() {
       PrintWorklist("shared", &shared_);
-      PrintWorklist("bailout", &bailout_);
       PrintWorklist("on_hold", &on_hold_);
     }
@@ -570,11 +544,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // Worklist used for most objects.
     ConcurrentMarkingWorklist shared_;

-    // Concurrent marking uses this worklist to bail out of concurrently
-    // marking certain object types. These objects are handled later in a STW
-    // pause after concurrent marking has finished.
-    ConcurrentMarkingWorklist bailout_;
-
     // Concurrent marking uses this worklist to bail out of marking objects
     // in new space's linear allocation area. Used to avoid black allocation
     // for new space. This allow the compiler to remove write barriers
......
@@ -38,11 +38,11 @@ TEST(ConcurrentMarking)
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -61,11 +61,11 @@ TEST(ConcurrentMarkingReschedule) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -88,11 +88,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   for (int i = 0; i < 5000; i++)
     PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
......