Commit 13e07389 authored by Ulan Degenbaev, committed by Commit Bot

Revert "[heap] Remove bailout marking worklist."

This reverts commit 68a8bdd8.

Reason for revert: memory regression: crbug.com/921239

Original change's description:
> [heap] Remove bailout marking worklist.
> 
> The concurrent marker can now process all objects.
> This patch also eagerly visits the objects that undergo layout
> changes. This is because previously such objects were pushed
> onto the bailout worklist, which is gone now.
> To preserve the incremental step accounting, the patch introduces
> a new GC tracer scope called MC_INCREMENTAL_LAYOUT_CHANGE.
> 
> Bug: v8:8486
> Change-Id: Ic1c2f0d4e2ac0602fc945f3258af9624247bd65f
> Reviewed-on: https://chromium-review.googlesource.com/c/1386486
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#58712}

TBR=ulan@chromium.org,mlippautz@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:8486, chromium:921239
Change-Id: I1f851b948f4ce403316e469999f0b16e8dfdb62d
Reviewed-on: https://chromium-review.googlesource.com/c/1408990
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58787}
parent 6d3d8f7b
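For background: the bailout worklist this revert restores splits marking work between threads. Concurrent marker tasks drain a shared worklist and defer objects they must not touch (for example, objects whose layout may change under them) onto a bailout worklist that only the main thread drains. A minimal illustrative sketch of that split, using standard containers rather than V8's actual Worklist type (all names below are hypothetical):

```cpp
#include <deque>
#include <iostream>

// Hypothetical object descriptor; the real V8 code works on HeapObject.
struct Object {
  const char* name;
  bool safe_for_concurrent_marking;
};

int main() {
  std::deque<Object*> shared;   // drained by concurrent marker tasks
  std::deque<Object*> bailout;  // drained only on the main thread

  Object a{"plain object", true};
  Object b{"object undergoing layout change", false};
  for (Object* o : {&a, &b}) {
    // A marker pushes objects it cannot visit safely onto the bailout
    // worklist; the main thread revisits them during an incremental step.
    (o->safe_for_concurrent_marking ? shared : bailout).push_back(o);
  }
  std::cout << "shared: " << shared.size() << ", bailout: " << bailout.size()
            << "\n";  // shared: 1, bailout: 1
}
```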
@@ -352,15 +352,14 @@
 #define INCREMENTAL_SCOPES(F) \
   /* MC_INCREMENTAL is the top-level incremental marking scope. */ \
   F(MC_INCREMENTAL) \
+  F(MC_INCREMENTAL_START) \
+  F(MC_INCREMENTAL_SWEEPING) \
   F(MC_INCREMENTAL_EMBEDDER_PROLOGUE) \
   F(MC_INCREMENTAL_EMBEDDER_TRACING) \
-  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
-  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE) \
   F(MC_INCREMENTAL_FINALIZE) \
   F(MC_INCREMENTAL_FINALIZE_BODY) \
-  F(MC_INCREMENTAL_LAYOUT_CHANGE) \
-  F(MC_INCREMENTAL_START) \
-  F(MC_INCREMENTAL_SWEEPING)
+  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
+  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
 
 #define TOP_MC_SCOPES(F) \
   F(MC_CLEAR) \
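The INCREMENTAL_SCOPES(F) list above is an X-macro: GCTracer::Scope passes an enumerator-defining macro as F, so the order of F(...) rows fixes the enumerator order, and range markers such as LAST_INCREMENTAL_SCOPE (see the gc-tracer.h hunk below) must name whichever scope the macro lists last. A simplified, self-contained sketch of the pattern, with the scope list abbreviated:

```cpp
#include <cstdio>

// Abbreviated stand-in for the real INCREMENTAL_SCOPES list in gc-tracer.h.
#define INCREMENTAL_SCOPES(F) \
  F(MC_INCREMENTAL)           \
  F(MC_INCREMENTAL_START)     \
  F(MC_INCREMENTAL_SWEEPING)

enum Scope {
#define DEFINE_SCOPE(scope) scope,
  INCREMENTAL_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
      NUMBER_OF_SCOPES,
  FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
  // Must name whatever the macro lists last -- the constraint behind the
  // LAST_INCREMENTAL_SCOPE edit in this commit.
  LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
  NUMBER_OF_INCREMENTAL_SCOPES =
      LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
};

int main() {
  std::printf("%d incremental scopes\n", NUMBER_OF_INCREMENTAL_SCOPES);  // 3
}
```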
@@ -78,11 +78,13 @@ class ConcurrentMarkingVisitor final
   explicit ConcurrentMarkingVisitor(
       ConcurrentMarking::MarkingWorklist* shared,
+      ConcurrentMarking::MarkingWorklist* bailout,
       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
       bool embedder_tracing_enabled, unsigned mark_compact_epoch,
       bool is_forced_gc)
       : shared_(shared, task_id),
+        bailout_(bailout, task_id),
         weak_objects_(weak_objects),
         embedder_objects_(embedder_objects, task_id),
         marking_state_(memory_chunk_data),
@@ -656,6 +658,7 @@ class ConcurrentMarkingVisitor final
   }
 
   ConcurrentMarking::MarkingWorklist::View shared_;
+  ConcurrentMarking::MarkingWorklist::View bailout_;
   WeakObjects* weak_objects_;
   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
   ConcurrentMarkingState marking_state_;
@@ -724,11 +727,13 @@ class ConcurrentMarking::Task : public CancelableTask {
 };
 
 ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
+                                     MarkingWorklist* bailout,
                                      MarkingWorklist* on_hold,
                                      WeakObjects* weak_objects,
                                      EmbedderTracingWorklist* embedder_objects)
     : heap_(heap),
       shared_(shared),
+      bailout_(bailout),
       on_hold_(on_hold),
       weak_objects_(weak_objects),
       embedder_objects_(embedder_objects) {
@@ -744,8 +749,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
   ConcurrentMarkingVisitor visitor(
-      shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
-      task_id, heap_->local_embedder_heap_tracer()->InUse(),
+      shared_, bailout_, &task_state->memory_chunk_data, weak_objects_,
+      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse(),
       task_state->mark_compact_epoch, task_state->is_forced_gc);
   double time_ms;
   size_t marked_bytes = 0;
@@ -812,6 +817,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   }
   shared_->FlushToGlobal(task_id);
+  bailout_->FlushToGlobal(task_id);
   on_hold_->FlushToGlobal(task_id);
   embedder_objects_->FlushToGlobal(task_id);
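ConcurrentMarking::MarkingWorklist is V8's segmented, task-local work list: each marker task pushes and pops through a view bound to its task_id, and FlushToGlobal(task_id), as re-added above for bailout_, publishes a task's leftover local work to a global pool where other threads can find it. A greatly simplified sketch of that interface, mutex-based here unlike the real lock-free, segment-trading implementation:

```cpp
#include <array>
#include <mutex>
#include <vector>

// Hypothetical stand-in for v8::internal::Worklist: per-task local lists
// plus a mutex-guarded global pool (the real one trades whole segments).
template <typename T, int kMaxTasks = 8>
class TinyWorklist {
 public:
  void Push(int task_id, const T& value) { local_[task_id].push_back(value); }

  bool Pop(int task_id, T* out) {
    auto& local = local_[task_id];
    if (local.empty() && !StealFromGlobal(&local)) return false;
    *out = local.back();
    local.pop_back();
    return true;
  }

  // Publish everything a task still holds, so the main thread (or another
  // task) can continue the work -- the role FlushToGlobal plays above.
  void FlushToGlobal(int task_id) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto& local = local_[task_id];
    global_.insert(global_.end(), local.begin(), local.end());
    local.clear();
  }

 private:
  bool StealFromGlobal(std::vector<T>* local) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (global_.empty()) return false;
    local->push_back(global_.back());
    global_.pop_back();
    return true;
  }

  std::array<std::vector<T>, kMaxTasks> local_;
  std::vector<T> global_;
  std::mutex mutex_;
};

int main() {
  TinyWorklist<int> worklist;
  worklist.Push(/*task_id=*/1, 42);
  worklist.FlushToGlobal(1);  // task 1 is done; publish leftovers
  int value;
  return worklist.Pop(/*task_id=*/0, &value) ? 0 : 1;  // task 0 steals 42
}
```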
@@ -67,7 +67,8 @@ class ConcurrentMarking {
   using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
 
   ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                    MarkingWorklist* on_hold, WeakObjects* weak_objects,
+                    MarkingWorklist* bailout, MarkingWorklist* on_hold,
+                    WeakObjects* weak_objects,
                     EmbedderTracingWorklist* embedder_objects);
 
   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
@@ -113,6 +114,7 @@ class ConcurrentMarking {
   void Run(int task_id, TaskState* task_state);
 
   Heap* const heap_;
   MarkingWorklist* const shared_;
+  MarkingWorklist* const bailout_;
   MarkingWorklist* const on_hold_;
   WeakObjects* const weak_objects_;
   EmbedderTracingWorklist* const embedder_objects_;
@@ -710,8 +710,6 @@ void GCTracer::PrintNVP() const {
         "incremental.finalize.body=%.1f "
         "incremental.finalize.external.prologue=%.1f "
         "incremental.finalize.external.epilogue=%.1f "
-        "incremental.layout_change=%.1f "
-        "incremental.start=%.1f "
         "incremental.sweeping=%.1f "
         "incremental.embedder_prologue=%.1f "
         "incremental.embedder_tracing=%.1f "
@@ -806,8 +804,6 @@ void GCTracer::PrintNVP() const {
         current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
         current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
         current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
-        current_.scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE],
-        current_.scopes[Scope::MC_INCREMENTAL_START],
         current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
         current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
         current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
@@ -1133,8 +1129,6 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
   base::MutexGuard guard(&background_counter_mutex_);
   const double overall_duration =
-      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
-          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
@@ -1155,8 +1149,6 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
           .total_duration_ms;
   const double marking_duration =
-      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
-          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       incremental_marking_duration_ +
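The PrintNVP and RecordGCSumCounters hunks above drop MC_INCREMENTAL_LAYOUT_CHANGE from the reported scope totals. Each GCTracer scope is a per-scope time accumulator fed by an RAII timer (the TRACE_GC macro seen later in incremental-marking.cc), roughly like this simplified sketch with stand-in names:

```cpp
#include <chrono>
#include <cstdio>

enum Scope { MC_INCREMENTAL, MC_INCREMENTAL_START, NUMBER_OF_SCOPES };
static double scope_ms[NUMBER_OF_SCOPES];  // per-scope accumulated time

// Entering a scope starts a timer; leaving it adds the elapsed time to the
// scope's accumulator, which NVP-style reporting later prints and
// RecordGCSumCounters-style code sums.
class ScopedTimer {
 public:
  explicit ScopedTimer(Scope scope)
      : scope_(scope), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start_;
    scope_ms[scope_] += elapsed.count();
  }

 private:
  Scope scope_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  { ScopedTimer timer(MC_INCREMENTAL_START); /* ...marking step body... */ }
  std::printf("incremental.start=%.1f\n", scope_ms[MC_INCREMENTAL_START]);
}
```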
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE GCTracer {
     NUMBER_OF_SCOPES,
 
     FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
-    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
+    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
     FIRST_SCOPE = MC_INCREMENTAL,
     NUMBER_OF_INCREMENTAL_SCOPES =
         LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
@@ -2974,7 +2974,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
 void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
                                     const DisallowHeapAllocation&) {
   if (incremental_marking()->IsMarking()) {
-    incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
+    incremental_marking()->MarkBlackAndPush(object);
     if (incremental_marking()->IsCompacting() &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
@@ -4389,11 +4389,12 @@ void Heap::SetUp() {
     MarkCompactCollector::MarkingWorklist* marking_worklist =
         mark_compact_collector_->marking_worklist();
     concurrent_marking_ = new ConcurrentMarking(
-        this, marking_worklist->shared(), marking_worklist->on_hold(),
-        mark_compact_collector_->weak_objects(), marking_worklist->embedder());
+        this, marking_worklist->shared(), marking_worklist->bailout(),
+        marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
+        marking_worklist->embedder());
   } else {
-    concurrent_marking_ =
-        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
+    concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
+                                                nullptr, nullptr);
   }
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
@@ -133,13 +133,18 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
   return false;
 }
 
-void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
-    HeapObject obj) {
-  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
-  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
+void IncrementalMarking::MarkBlackAndPush(HeapObject obj) {
+  // Marking left-trimmable fixed array black is unsafe because left-trimming
+  // re-pushes only grey arrays onto the marking worklist.
+  DCHECK(!obj->IsFixedArray() && !obj->IsFixedDoubleArray());
+  // Color the object black and push it into the bailout deque.
   marking_state()->WhiteToGrey(obj);
   if (marking_state()->GreyToBlack(obj)) {
-    RevisitObject(obj);
+    if (FLAG_concurrent_marking) {
+      marking_worklist()->PushBailout(obj);
+    } else {
+      marking_worklist()->Push(obj);
+    }
   }
 }
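The restored MarkBlackAndPush implements the synchronization protocol described in incremental-marking.h: before the mutator performs an unsafe in-place layout change, the object is colored black so a racing concurrent marker will not visit it mid-change, and it is queued on the bailout worklist so the main thread revisits it once the new layout is in place. A toy model of the color transitions, with atomics standing in for V8's mark bits:

```cpp
#include <atomic>
#include <cassert>
#include <deque>

enum class Color { kWhite, kGrey, kBlack };

struct Object {
  std::atomic<Color> color{Color::kWhite};
};

std::deque<Object*> bailout_worklist;  // drained on the main thread only

// Each transition is a CAS, so the main thread and a racing concurrent
// marker agree on who performed it (compare WhiteToGrey/GreyToBlack above).
bool TryTransition(Object* o, Color from, Color to) {
  return o->color.compare_exchange_strong(from, to);
}

void MarkBlackAndPush(Object* o) {
  TryTransition(o, Color::kWhite, Color::kGrey);  // ensure at least grey
  if (TryTransition(o, Color::kGrey, Color::kBlack)) {
    // We blackened it, so no marker will visit its fields; queue it for the
    // main thread to revisit after the layout change completes.
    bailout_worklist.push_back(o);
  }
}

int main() {
  Object o;
  MarkBlackAndPush(&o);
  assert(o.color == Color::kBlack && bailout_worklist.size() == 1);
}
```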
@@ -150,26 +155,61 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
             MemoryChunk::FromAddress(to->address()));
   DCHECK_NE(from, to);
 
+  MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
   MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
 
   if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
     // Nothing to do if the object is in black area.
     return;
   }
-  MarkBlackAndVisitObjectDueToLayoutChange(from);
-  DCHECK(marking_state()->IsBlack(from));
-  // Mark the new address as black.
-  if (from->address() + kTaggedSize == to->address()) {
-    // The old and the new markbits overlap. The |to| object has the
-    // grey color. To make it black, we need to set the second bit.
-    DCHECK(new_mark_bit.Get<kAtomicity>());
-    new_mark_bit.Next().Set<kAtomicity>();
-  } else {
-    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
-    DCHECK(success);
-    USE(success);
+
+  bool marked_black_due_to_left_trimming = false;
+  if (FLAG_concurrent_marking) {
+    // We need to mark the array black before overwriting its map and length
+    // so that the concurrent marker does not observe inconsistent state.
+    Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
+    if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
+      // The concurrent marker will not mark the array. We need to push the
+      // new array start in marking deque to ensure that it will be marked.
+      marked_black_due_to_left_trimming = true;
+    }
+    DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
+  }
+
+  if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
+      !marked_black_due_to_left_trimming) {
+    // The array was black before left trimming or was marked black by the
+    // concurrent marker. Simply transfer the color.
+    if (from->address() + kTaggedSize == to->address()) {
+      // The old and the new markbits overlap. The |to| object has the
+      // grey color. To make it black, we need to set the second bit.
+      DCHECK(new_mark_bit.Get<kAtomicity>());
+      new_mark_bit.Next().Set<kAtomicity>();
+    } else {
+      bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
+      DCHECK(success);
+      USE(success);
+    }
+  } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
+             marked_black_due_to_left_trimming) {
+    // The array was already grey or was marked black by this function.
+    // Mark the new array grey and push it to marking deque.
+    if (from->address() + kTaggedSize == to->address()) {
+      // The old and the new markbits overlap. The |to| object is either white
+      // or grey. Set the first bit to make sure that it is grey.
+      new_mark_bit.Set<kAtomicity>();
+      DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
+    } else {
+      bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
+      DCHECK(success);
+      USE(success);
+    }
+    // Subsequent left-trimming will re-push only grey arrays.
+    // Ensure that this array is grey.
+    DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
+    marking_worklist()->PushBailout(to);
+    RestartIfNotMarking();
   }
-  DCHECK(marking_state()->IsBlack(to));
 }
 
 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
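One subtlety worth spelling out from the hunk above: V8's marking bitmap uses two consecutive bits per object start (grey sets the first bit, black sets both). When an array is left-trimmed by exactly one tagged word (`from->address() + kTaggedSize == to->address()`), the bit ranges overlap: the old object's second bit is the new object's first bit, so a black |from| already makes |to| read as grey, and only one more bit (`new_mark_bit.Next()`) is needed to blacken it. A toy bitmap model of that case (bit indices and encoding are simplified stand-ins):

```cpp
#include <bitset>
#include <cassert>

int main() {
  std::bitset<16> bitmap;
  const int from = 4;       // first mark bit of the old object start
  const int to = from + 1;  // trimmed by one word: mark bit ranges overlap

  // The old object is black: both of its bits are set.
  bitmap.set(from);
  bitmap.set(from + 1);

  // Seen from |to|, the shared bit (from + 1) already reads as grey...
  assert(bitmap.test(to));
  // ...so transferring "black" takes just one more bit, which is what
  // new_mark_bit.Next().Set() does in the hunk above.
  bitmap.set(to + 1);
  assert(bitmap.test(to) && bitmap.test(to + 1));  // |to| is now black
}
```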
@@ -757,11 +797,17 @@ void IncrementalMarking::VisitDescriptors(HeapObject host,
   visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
 }
 
+template <WorklistToProcess worklist_to_process>
 intptr_t IncrementalMarking::ProcessMarkingWorklist(
     intptr_t bytes_to_process, ForceCompletionAction completion) {
   intptr_t bytes_processed = 0;
   while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
-    HeapObject obj = marking_worklist()->Pop();
+    HeapObject obj;
+    if (worklist_to_process == WorklistToProcess::kBailout) {
+      obj = marking_worklist()->PopBailout();
+    } else {
+      obj = marking_worklist()->Pop();
+    }
     if (obj.is_null()) break;
     // Left trimming may result in white, grey, or black filler objects on the
     // marking deque. Ignore these objects.
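The template parameter restored on ProcessMarkingWorklist turns the worklist choice into a compile-time constant: Step (below) translates the runtime enum once, and each instantiation's inner pop loop then reads from one fixed list. A condensed sketch of that dispatch pattern, with simplified types and only one list per instantiation:

```cpp
#include <cstdio>
#include <deque>

enum class WorklistToProcess { kAll, kBailout };

std::deque<int> shared_worklist, bailout_worklist;

// kWhich is a compile-time constant, so each instantiation's loop always
// pops from one fixed list; the enum-to-template translation happens once.
template <WorklistToProcess kWhich>
int ProcessMarkingWorklist(int budget) {
  std::deque<int>* source = (kWhich == WorklistToProcess::kBailout)
                                ? &bailout_worklist
                                : &shared_worklist;
  int processed = 0;
  while (processed < budget && !source->empty()) {
    source->pop_front();  // a real marker would visit the object here
    ++processed;
  }
  return processed;
}

int Step(int budget, WorklistToProcess which) {
  return which == WorklistToProcess::kBailout
             ? ProcessMarkingWorklist<WorklistToProcess::kBailout>(budget)
             : ProcessMarkingWorklist<WorklistToProcess::kAll>(budget);
}

int main() {
  bailout_worklist.assign(3, 0);
  std::printf("%d\n", Step(10, WorklistToProcess::kBailout));  // prints 3
}
```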
@@ -1023,6 +1069,11 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
   bytes_to_process = Min(bytes_to_process, step_size);
   size_t bytes_processed = 0;
   if (FLAG_concurrent_marking) {
+    bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+                           StepOrigin::kV8, WorklistToProcess::kBailout);
+    bytes_to_process = (bytes_processed >= bytes_to_process)
+                           ? 0
+                           : bytes_to_process - bytes_processed;
     size_t current_bytes_marked_concurrently =
         heap()->concurrent_marking()->TotalMarkedBytes();
     // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
@@ -1041,14 +1092,14 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
     bytes_processed += bytes_to_process;
     bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
   }
-  bytes_processed +=
-      Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
+  bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8,
+                          WorklistToProcess::kAll);
   bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
 }
 
 size_t IncrementalMarking::Step(size_t bytes_to_process,
-                                CompletionAction action,
-                                StepOrigin step_origin) {
+                                CompletionAction action, StepOrigin step_origin,
+                                WorklistToProcess worklist_to_process) {
   double start = heap_->MonotonicallyIncreasingTimeInMs();
 
   if (state_ == SWEEPING) {
@@ -1075,7 +1126,13 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
     }
 #endif
 
-    ProcessMarkingWorklist(bytes_to_process);
+    if (worklist_to_process == WorklistToProcess::kBailout) {
+      bytes_processed =
+          ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
+    } else {
+      bytes_processed =
+          ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
+    }
 
     if (step_origin == StepOrigin::kTask) {
       bytes_marked_ahead_of_schedule_ += bytes_processed;
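In StepOnAllocation above, the concurrent-marking path now spends the step budget in two sub-steps: first a kBailout step, then a kAll step with whatever budget remains; the subtraction clamps at zero because Step may process more bytes than it was asked for. A tiny worked model of that accounting (the helper name is hypothetical):

```cpp
#include <cstdio>

// Hypothetical helper: returns the budget left for the kAll sub-step after
// the kBailout sub-step processed `bailout_processed` bytes.
size_t RemainingBudget(size_t bytes_to_process, size_t bailout_processed) {
  // Clamp at zero, exactly like the
  // `(bytes_processed >= bytes_to_process) ? 0 : ...` expression above.
  return bailout_processed >= bytes_to_process
             ? 0
             : bytes_to_process - bailout_processed;
}

int main() {
  std::printf("%zu\n", RemainingBudget(100, 30));   // 70 left for kAll
  std::printf("%zu\n", RemainingBudget(100, 250));  // 0: bailout ate it all
}
```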
@@ -20,6 +20,7 @@ class Object;
 class PagedSpace;
 
 enum class StepOrigin { kV8, kTask };
+enum class WorklistToProcess { kAll, kBailout };
 
 class V8_EXPORT_PRIVATE IncrementalMarking {
  public:
@@ -174,7 +175,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void FinalizeSweeping();
 
   size_t Step(size_t bytes_to_process, CompletionAction action,
-              StepOrigin step_origin);
+              StepOrigin step_origin,
+              WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
   void StepOnAllocation(size_t bytes_to_process, double max_step_size);
 
   bool ShouldDoEmbedderStep();
@@ -214,7 +216,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // This function is used to color the object black before it undergoes an
   // unsafe layout change. This is a part of synchronization protocol with
   // the concurrent marker.
-  void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
+  void MarkBlackAndPush(HeapObject obj);
 
   bool IsCompacting() { return IsMarking() && is_compacting_; }
@@ -277,6 +279,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
   void DeactivateIncrementalWriteBarrier();
 
+  template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
   V8_INLINE intptr_t ProcessMarkingWorklist(
       intptr_t bytes_to_process,
       ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
@@ -1691,6 +1691,7 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
     MarkObject(object, map);
     visitor.Visit(map, object);
   }
+  DCHECK(marking_worklist()->IsBailoutEmpty());
 }
 
 bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
@@ -468,7 +468,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   using NonAtomicMarkingState = MajorNonAtomicMarkingState;
 
-  // Wrapper for the shared worklist.
+  // Wrapper for the shared and bailout worklists.
   class MarkingWorklist {
    public:
     using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
@@ -483,8 +483,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       DCHECK(success);
     }
 
+    void PushBailout(HeapObject object) {
+      bool success = bailout_.Push(kMainThread, object);
+      USE(success);
+      DCHECK(success);
+    }
+
     HeapObject Pop() {
       HeapObject result;
+#ifdef V8_CONCURRENT_MARKING
+      if (bailout_.Pop(kMainThread, &result)) return result;
+#endif
       if (shared_.Pop(kMainThread, &result)) return result;
 #ifdef V8_CONCURRENT_MARKING
       // The expectation is that this work list is empty almost all the time
@@ -494,16 +503,29 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       return HeapObject();
     }
 
+    HeapObject PopBailout() {
+#ifdef V8_CONCURRENT_MARKING
+      HeapObject result;
+      if (bailout_.Pop(kMainThread, &result)) return result;
+#endif
+      return HeapObject();
+    }
+
     void Clear() {
+      bailout_.Clear();
      shared_.Clear();
      on_hold_.Clear();
      embedder_.Clear();
     }
 
+    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
+
     bool IsEmpty() {
-      return shared_.IsLocalEmpty(kMainThread) &&
+      return bailout_.IsLocalEmpty(kMainThread) &&
+             shared_.IsLocalEmpty(kMainThread) &&
              on_hold_.IsLocalEmpty(kMainThread) &&
-             shared_.IsGlobalPoolEmpty() && on_hold_.IsGlobalPoolEmpty();
+             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
+             on_hold_.IsGlobalPoolEmpty();
     }
 
     bool IsEmbedderEmpty() {
@@ -512,7 +534,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     }
 
     int Size() {
-      return static_cast<int>(shared_.LocalSize(kMainThread) +
+      return static_cast<int>(bailout_.LocalSize(kMainThread) +
+                              shared_.LocalSize(kMainThread) +
                               on_hold_.LocalSize(kMainThread));
     }
 
@@ -522,17 +545,20 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // The callback must accept HeapObject and return HeapObject.
     template <typename Callback>
     void Update(Callback callback) {
+      bailout_.Update(callback);
       shared_.Update(callback);
       on_hold_.Update(callback);
       embedder_.Update(callback);
     }
 
     ConcurrentMarkingWorklist* shared() { return &shared_; }
+    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
     ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
     EmbedderTracingWorklist* embedder() { return &embedder_; }
 
     void Print() {
       PrintWorklist("shared", &shared_);
+      PrintWorklist("bailout", &bailout_);
       PrintWorklist("on_hold", &on_hold_);
     }
 
@@ -544,6 +570,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // Worklist used for most objects.
     ConcurrentMarkingWorklist shared_;
 
+    // Concurrent marking uses this worklist to bail out of concurrently
+    // marking certain object types. These objects are handled later in a STW
+    // pause after concurrent marking has finished.
+    ConcurrentMarkingWorklist bailout_;
+
     // Concurrent marking uses this worklist to bail out of marking objects
     // in new space's linear allocation area. Used to avoid black allocation
    // for new space. This allows the compiler to remove write barriers
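Putting the wrapper together: on the main thread, Pop drains the bailout list first (its objects can only be finished there), then shared, and treats on_hold as a rare fallback; the DCHECK added in mark-compact.cc relies on bailout being empty by the time the marking loop finishes. A compact, single-threaded model of that priority order (simplified relative to the real per-thread views):

```cpp
#include <cassert>
#include <deque>
#include <optional>

template <typename T>
class MainThreadWorklist {
 public:
  std::deque<T> bailout, shared, on_hold;

  // Mirrors the priority in MarkingWorklist::Pop: bailout, then shared,
  // then on_hold as the rarely-used last resort.
  std::optional<T> Pop() {
    for (std::deque<T>* list : {&bailout, &shared, &on_hold}) {
      if (!list->empty()) {
        T value = list->front();
        list->pop_front();
        return value;
      }
    }
    return std::nullopt;
  }

  bool IsEmpty() const {
    return bailout.empty() && shared.empty() && on_hold.empty();
  }
};

int main() {
  MainThreadWorklist<int> worklist;
  worklist.shared.push_back(2);
  worklist.bailout.push_back(1);
  assert(*worklist.Pop() == 1);  // bailout wins even though shared has work
  assert(*worklist.Pop() == 2);
  assert(worklist.IsEmpty());
}
```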
@@ -38,11 +38,11 @@ TEST(ConcurrentMarking) {
     collector->EnsureSweepingCompleted();
   }
 
-  ConcurrentMarking::MarkingWorklist shared, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -61,11 +61,11 @@ TEST(ConcurrentMarkingReschedule) {
     collector->EnsureSweepingCompleted();
   }
 
-  ConcurrentMarking::MarkingWorklist shared, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -88,11 +88,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
     collector->EnsureSweepingCompleted();
   }
 
-  ConcurrentMarking::MarkingWorklist shared, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
   for (int i = 0; i < 5000; i++)
     PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();