Commit 8b3fbe1d authored by Ulan Degenbaev, committed by Commit Bot

Reland "[heap] Remove bailout marking worklist."

This reverts commit 13e07389.

Original change's description:
> [heap] Remove bailout marking worklist.
>
> The concurrent marker can now process all objects.
> This patch also eagerly visits the objects that undergo layout
> changes. This is because previously such objects were pushed
> onto the bailout worklist, which is gone now.
> To preserve the incremental step accounting, the patch introduces
> a new GC tracer scope called MC_INCREMENTAL_LAYOUT_CHANGE.
>
> Bug: v8:8486
> Change-Id: Ic1c2f0d4e2ac0602fc945f3258af9624247bd65f
> Reviewed-on: https://chromium-review.googlesource.com/c/1386486
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#58712}

Change-Id: I85c99837819f6971c248198bd51ad40eebdb4fac
Reviewed-on: https://chromium-review.googlesource.com/c/1417595
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58913}
parent e38faab1
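Before the diff, a minimal sketch of the protocol this change adopts. The types below are hypothetical stand-ins, not V8's classes: when an object is about to undergo an unsafe layout change during incremental marking, the main thread colors it black and eagerly traces its current referents, instead of deferring the object to a bailout worklist. This way the concurrent marker can never observe the object in a half-mutated state, and nothing reachable from the pre-change layout is lost.

#include <cstdio>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };

struct Object {
  Color color = Color::kWhite;
  std::vector<Object*> fields;  // outgoing references
};

// Stand-in for MarkBlackAndVisitObjectDueToLayoutChange: mark black, then
// eagerly visit the object's referents right away.
void MarkBlackAndVisit(Object* obj, std::vector<Object*>* worklist) {
  if (obj->color == Color::kWhite) obj->color = Color::kGrey;  // WhiteToGrey
  if (obj->color != Color::kGrey) return;                      // already black
  obj->color = Color::kBlack;                                  // GreyToBlack
  for (Object* field : obj->fields) {  // eager visit ("RevisitObject")
    if (field->color == Color::kWhite) {
      field->color = Color::kGrey;
      worklist->push_back(field);
    }
  }
}

int main() {
  Object a, b;
  a.fields.push_back(&b);
  std::vector<Object*> worklist;
  MarkBlackAndVisit(&a, &worklist);  // a's layout may now change safely
  std::printf("a is black: %d, queued referents: %zu\n",
              static_cast<int>(a.color == Color::kBlack), worklist.size());
}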
@@ -352,14 +352,15 @@
 #define INCREMENTAL_SCOPES(F)                                      \
   /* MC_INCREMENTAL is the top-level incremental marking scope. */ \
   F(MC_INCREMENTAL)                                                \
-  F(MC_INCREMENTAL_START)                                          \
-  F(MC_INCREMENTAL_SWEEPING)                                       \
   F(MC_INCREMENTAL_EMBEDDER_PROLOGUE)                              \
   F(MC_INCREMENTAL_EMBEDDER_TRACING)                               \
+  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE)                              \
+  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)                              \
   F(MC_INCREMENTAL_FINALIZE)                                       \
   F(MC_INCREMENTAL_FINALIZE_BODY)                                  \
-  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE)                              \
-  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
+  F(MC_INCREMENTAL_LAYOUT_CHANGE)                                  \
+  F(MC_INCREMENTAL_START)                                          \
+  F(MC_INCREMENTAL_SWEEPING)

 #define TOP_MC_SCOPES(F) \
   F(MC_CLEAR)            \
......
@@ -78,13 +78,11 @@ class ConcurrentMarkingVisitor final
   explicit ConcurrentMarkingVisitor(
       ConcurrentMarking::MarkingWorklist* shared,
-      ConcurrentMarking::MarkingWorklist* bailout,
       MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
       ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
       bool embedder_tracing_enabled, unsigned mark_compact_epoch,
       bool is_forced_gc)
       : shared_(shared, task_id),
-        bailout_(bailout, task_id),
         weak_objects_(weak_objects),
         embedder_objects_(embedder_objects, task_id),
         marking_state_(memory_chunk_data),
@@ -658,7 +656,6 @@ class ConcurrentMarkingVisitor final
   }

   ConcurrentMarking::MarkingWorklist::View shared_;
-  ConcurrentMarking::MarkingWorklist::View bailout_;
   WeakObjects* weak_objects_;
   ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
   ConcurrentMarkingState marking_state_;
@@ -727,13 +724,11 @@ class ConcurrentMarking::Task : public CancelableTask {
 };

 ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                                     MarkingWorklist* bailout,
                                      MarkingWorklist* on_hold,
                                      WeakObjects* weak_objects,
                                      EmbedderTracingWorklist* embedder_objects)
     : heap_(heap),
       shared_(shared),
-      bailout_(bailout),
       on_hold_(on_hold),
       weak_objects_(weak_objects),
       embedder_objects_(embedder_objects) {
@@ -749,8 +744,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
   ConcurrentMarkingVisitor visitor(
-      shared_, bailout_, &task_state->memory_chunk_data, weak_objects_,
-      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse(),
+      shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
+      task_id, heap_->local_embedder_heap_tracer()->InUse(),
       task_state->mark_compact_epoch, task_state->is_forced_gc);
   double time_ms;
   size_t marked_bytes = 0;
@@ -817,7 +812,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   }

   shared_->FlushToGlobal(task_id);
-  bailout_->FlushToGlobal(task_id);
   on_hold_->FlushToGlobal(task_id);
   embedder_objects_->FlushToGlobal(task_id);
......
@@ -67,8 +67,7 @@ class ConcurrentMarking {
   using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;

   ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                    MarkingWorklist* bailout, MarkingWorklist* on_hold,
-                    WeakObjects* weak_objects,
+                    MarkingWorklist* on_hold, WeakObjects* weak_objects,
                     EmbedderTracingWorklist* embedder_objects);

   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
@@ -114,7 +113,6 @@ class ConcurrentMarking {
   void Run(int task_id, TaskState* task_state);
   Heap* const heap_;
   MarkingWorklist* const shared_;
-  MarkingWorklist* const bailout_;
   MarkingWorklist* const on_hold_;
   WeakObjects* const weak_objects_;
   EmbedderTracingWorklist* const embedder_objects_;
......
@@ -710,6 +710,8 @@ void GCTracer::PrintNVP() const {
           "incremental.finalize.body=%.1f "
           "incremental.finalize.external.prologue=%.1f "
           "incremental.finalize.external.epilogue=%.1f "
+          "incremental.layout_change=%.1f "
+          "incremental.start=%.1f "
           "incremental.sweeping=%.1f "
           "incremental.embedder_prologue=%.1f "
           "incremental.embedder_tracing=%.1f "
@@ -804,6 +806,8 @@ void GCTracer::PrintNVP() const {
           current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE],
+          current_.scopes[Scope::MC_INCREMENTAL_START],
           current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
           current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
           current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
@@ -1129,6 +1133,8 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
   base::MutexGuard guard(&background_counter_mutex_);
   const double overall_duration =
+      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
@@ -1149,6 +1155,8 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
           .total_duration_ms;
   const double marking_duration =
+      current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+          .duration +
       current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
           .duration +
       incremental_marking_duration_ +
......
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE GCTracer {
     NUMBER_OF_SCOPES,

     FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
-    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
+    LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
     FIRST_SCOPE = MC_INCREMENTAL,
     NUMBER_OF_INCREMENTAL_SCOPES =
         LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
......
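The bound change above follows directly from macro ordering: the scope enum is generated from the INCREMENTAL_SCOPES list, so once that list is alphabetized, the last incremental entry is MC_INCREMENTAL_SWEEPING. A hedged sketch with an abbreviated, illustrative scope list (not the full V8 set):

#include <cstdio>

#define INCREMENTAL_SCOPES(F)     \
  F(MC_INCREMENTAL)               \
  F(MC_INCREMENTAL_LAYOUT_CHANGE) \
  F(MC_INCREMENTAL_START)         \
  F(MC_INCREMENTAL_SWEEPING)

enum Scope {
#define DEFINE_SCOPE(scope) scope,
  INCREMENTAL_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
  NUMBER_OF_SCOPES,
  // The range is defined by enum order, so whichever incremental scope the
  // macro emits last must be named here.
  FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
  LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
  NUMBER_OF_INCREMENTAL_SCOPES =
      LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
};

int main() {
  std::printf("%d incremental scopes\n", NUMBER_OF_INCREMENTAL_SCOPES);  // 4
}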
@@ -2977,7 +2977,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
 void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
                                     const DisallowHeapAllocation&) {
   if (incremental_marking()->IsMarking()) {
-    incremental_marking()->MarkBlackAndPush(object);
+    incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
@@ -4397,12 +4397,11 @@ void Heap::SetUp() {
     MarkCompactCollector::MarkingWorklist* marking_worklist =
         mark_compact_collector_->marking_worklist();
     concurrent_marking_ = new ConcurrentMarking(
-        this, marking_worklist->shared(), marking_worklist->bailout(),
-        marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
-        marking_worklist->embedder());
+        this, marking_worklist->shared(), marking_worklist->on_hold(),
+        mark_compact_collector_->weak_objects(), marking_worklist->embedder());
   } else {
-    concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
-                                                nullptr, nullptr);
+    concurrent_marking_ =
+        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
   }
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
......
@@ -133,18 +133,13 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
   return false;
 }

-void IncrementalMarking::MarkBlackAndPush(HeapObject obj) {
-  // Marking left-trimmable fixed array black is unsafe because left-trimming
-  // re-pushes only grey arrays onto the marking worklist.
-  DCHECK(!obj->IsFixedArray() && !obj->IsFixedDoubleArray());
-  // Color the object black and push it into the bailout deque.
+void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
+    HeapObject obj) {
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
   marking_state()->WhiteToGrey(obj);
   if (marking_state()->GreyToBlack(obj)) {
-    if (FLAG_concurrent_marking) {
-      marking_worklist()->PushBailout(obj);
-    } else {
-      marking_worklist()->Push(obj);
-    }
+    RevisitObject(obj);
   }
 }
@@ -154,31 +149,15 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
   DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
   DCHECK_NE(from, to);

-  MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
   MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

   if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
     // Nothing to do if the object is in black area.
     return;
   }
-
-  bool marked_black_due_to_left_trimming = false;
-  if (FLAG_concurrent_marking) {
-    // We need to mark the array black before overwriting its map and length
-    // so that the concurrent marker does not observe inconsistent state.
-    Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
-    if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
-      // The concurrent marker will not mark the array. We need to push the
-      // new array start in marking deque to ensure that it will be marked.
-      marked_black_due_to_left_trimming = true;
-    }
-    DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
-  }
-
-  if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
-      !marked_black_due_to_left_trimming) {
-    // The array was black before left trimming or was marked black by the
-    // concurrent marker. Simply transfer the color.
+  MarkBlackAndVisitObjectDueToLayoutChange(from);
+  DCHECK(marking_state()->IsBlack(from));
+  // Mark the new address as black.
   if (from->address() + kTaggedSize == to->address()) {
     // The old and the new markbits overlap. The |to| object has the
     // grey color. To make it black, we need to set the second bit.
@@ -189,26 +168,7 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
     DCHECK(success);
     USE(success);
   }
-  } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
-             marked_black_due_to_left_trimming) {
-    // The array was already grey or was marked black by this function.
-    // Mark the new array grey and push it to marking deque.
-    if (from->address() + kTaggedSize == to->address()) {
-      // The old and the new markbits overlap. The |to| object is either white
-      // or grey. Set the first bit to make sure that it is grey.
-      new_mark_bit.Set<kAtomicity>();
-      DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
-    } else {
-      bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
-      DCHECK(success);
-      USE(success);
-    }
-    // Subsequent left-trimming will re-push only grey arrays.
-    // Ensure that this array is grey.
-    DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
-    marking_worklist()->PushBailout(to);
-    RestartIfNotMarking();
-  }
+  DCHECK(marking_state()->IsBlack(to));
 }

 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
@@ -794,17 +754,11 @@ void IncrementalMarking::VisitDescriptors(HeapObject host,
   visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
 }

-template <WorklistToProcess worklist_to_process>
 intptr_t IncrementalMarking::ProcessMarkingWorklist(
     intptr_t bytes_to_process, ForceCompletionAction completion) {
   intptr_t bytes_processed = 0;
   while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
-    HeapObject obj;
-    if (worklist_to_process == WorklistToProcess::kBailout) {
-      obj = marking_worklist()->PopBailout();
-    } else {
-      obj = marking_worklist()->Pop();
-    }
+    HeapObject obj = marking_worklist()->Pop();
     if (obj.is_null()) break;
     // Left trimming may result in white, grey, or black filler objects on the
     // marking deque. Ignore these objects.
@@ -1066,11 +1020,6 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
   bytes_to_process = Min(bytes_to_process, step_size);
   size_t bytes_processed = 0;
   if (FLAG_concurrent_marking) {
-    bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
-                           StepOrigin::kV8, WorklistToProcess::kBailout);
-    bytes_to_process = (bytes_processed >= bytes_to_process)
-                           ? 0
-                           : bytes_to_process - bytes_processed;
     size_t current_bytes_marked_concurrently =
         heap()->concurrent_marking()->TotalMarkedBytes();
     // The concurrent_marking()->TotalMarkedBytes() is not monothonic for a
@@ -1089,14 +1038,14 @@ void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
     bytes_processed += bytes_to_process;
     bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
   }
-  bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8,
-                          WorklistToProcess::kAll);
+  bytes_processed +=
+      Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
   bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
 }

 size_t IncrementalMarking::Step(size_t bytes_to_process,
-                                CompletionAction action, StepOrigin step_origin,
-                                WorklistToProcess worklist_to_process) {
+                                CompletionAction action,
+                                StepOrigin step_origin) {
   double start = heap_->MonotonicallyIncreasingTimeInMs();

   if (state_ == SWEEPING) {
@@ -1123,13 +1072,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
     }
 #endif
-    if (worklist_to_process == WorklistToProcess::kBailout) {
-      bytes_processed =
-          ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
-    } else {
-      bytes_processed =
-          ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
-    }
+    bytes_processed = ProcessMarkingWorklist(bytes_to_process);

     if (step_origin == StepOrigin::kTask) {
       bytes_marked_ahead_of_schedule_ += bytes_processed;
......
@@ -20,7 +20,6 @@ class Object;
 class PagedSpace;

 enum class StepOrigin { kV8, kTask };
-enum class WorklistToProcess { kAll, kBailout };

 class V8_EXPORT_PRIVATE IncrementalMarking {
  public:
@@ -175,8 +174,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void FinalizeSweeping();

   size_t Step(size_t bytes_to_process, CompletionAction action,
-              StepOrigin step_origin,
-              WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
+              StepOrigin step_origin);
   void StepOnAllocation(size_t bytes_to_process, double max_step_size);

   bool ShouldDoEmbedderStep();
@@ -216,7 +214,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // This function is used to color the object black before it undergoes an
   // unsafe layout change. This is a part of synchronization protocol with
   // the concurrent marker.
-  void MarkBlackAndPush(HeapObject obj);
+  void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);

   bool IsCompacting() { return IsMarking() && is_compacting_; }
@@ -279,7 +277,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
   void DeactivateIncrementalWriteBarrier();

-  template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
   V8_INLINE intptr_t ProcessMarkingWorklist(
       intptr_t bytes_to_process,
       ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
......
@@ -1690,7 +1690,6 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
     MarkObject(object, map);
     visitor.Visit(map, object);
   }
-  DCHECK(marking_worklist()->IsBailoutEmpty());
 }

 bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
......
@@ -468,7 +468,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   using NonAtomicMarkingState = MajorNonAtomicMarkingState;

-  // Wrapper for the shared and bailout worklists.
+  // Wrapper for the shared worklist.
   class MarkingWorklist {
    public:
     using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
@@ -483,17 +483,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       DCHECK(success);
     }

-    void PushBailout(HeapObject object) {
-      bool success = bailout_.Push(kMainThread, object);
-      USE(success);
-      DCHECK(success);
-    }
-
     HeapObject Pop() {
       HeapObject result;
-#ifdef V8_CONCURRENT_MARKING
-      if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
       if (shared_.Pop(kMainThread, &result)) return result;
 #ifdef V8_CONCURRENT_MARKING
       // The expectation is that this work list is empty almost all the time
@@ -503,29 +494,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       return HeapObject();
     }

-    HeapObject PopBailout() {
-#ifdef V8_CONCURRENT_MARKING
-      HeapObject result;
-      if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
-      return HeapObject();
-    }
-
     void Clear() {
-      bailout_.Clear();
       shared_.Clear();
       on_hold_.Clear();
       embedder_.Clear();
     }

-    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
-
     bool IsEmpty() {
-      return bailout_.IsLocalEmpty(kMainThread) &&
-             shared_.IsLocalEmpty(kMainThread) &&
+      return shared_.IsLocalEmpty(kMainThread) &&
              on_hold_.IsLocalEmpty(kMainThread) &&
-             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
-             on_hold_.IsGlobalPoolEmpty();
+             shared_.IsGlobalPoolEmpty() && on_hold_.IsGlobalPoolEmpty();
     }

     bool IsEmbedderEmpty() {
@@ -534,8 +512,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     }

     int Size() {
-      return static_cast<int>(bailout_.LocalSize(kMainThread) +
-                              shared_.LocalSize(kMainThread) +
+      return static_cast<int>(shared_.LocalSize(kMainThread) +
                               on_hold_.LocalSize(kMainThread));
     }

@@ -545,20 +522,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // The callback must accept HeapObject and return HeapObject.
     template <typename Callback>
     void Update(Callback callback) {
-      bailout_.Update(callback);
       shared_.Update(callback);
       on_hold_.Update(callback);
       embedder_.Update(callback);
     }

     ConcurrentMarkingWorklist* shared() { return &shared_; }
-    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
     ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
     EmbedderTracingWorklist* embedder() { return &embedder_; }

     void Print() {
       PrintWorklist("shared", &shared_);
-      PrintWorklist("bailout", &bailout_);
       PrintWorklist("on_hold", &on_hold_);
     }

@@ -570,11 +544,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     // Worklist used for most objects.
     ConcurrentMarkingWorklist shared_;

-    // Concurrent marking uses this worklist to bail out of concurrently
-    // marking certain object types. These objects are handled later in a STW
-    // pause after concurrent marking has finished.
-    ConcurrentMarkingWorklist bailout_;
-
     // Concurrent marking uses this worklist to bail out of marking objects
     // in new space's linear allocation area. Used to avoid black allocation
     // for new space. This allow the compiler to remove write barriers
......
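With the bailout worklist gone, the main thread's pop order reduces to: drain shared first, then fall back to on_hold. A minimal sketch of that priority, using hypothetical deque-backed worklists rather than V8's segmented Worklist API:

#include <cstdio>
#include <deque>

struct HeapObject {
  int id = 0;
  bool is_null() const { return id == 0; }
};

struct MarkingWorklist {
  std::deque<HeapObject> shared_;   // most objects
  std::deque<HeapObject> on_hold_;  // deferred new-space objects; expected to
                                    // be empty almost all the time
  HeapObject Pop() {
    if (!shared_.empty()) {
      HeapObject result = shared_.front();
      shared_.pop_front();
      return result;
    }
    if (!on_hold_.empty()) {
      HeapObject result = on_hold_.front();
      on_hold_.pop_front();
      return result;
    }
    return HeapObject();  // null object signals "nothing left to mark"
  }
};

int main() {
  MarkingWorklist worklist;
  worklist.on_hold_.push_back({2});
  worklist.shared_.push_back({1});
  std::printf("popped %d first\n", worklist.Pop().id);  // shared wins: 1
}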
@@ -38,11 +38,11 @@ TEST(ConcurrentMarking) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -61,11 +61,11 @@ TEST(ConcurrentMarkingReschedule) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -88,11 +88,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
     collector->EnsureSweepingCompleted();
   }

-  ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+  ConcurrentMarking::MarkingWorklist shared, on_hold;
   ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
   for (int i = 0; i < 5000; i++)
     PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
......