Commit adf0fc8c authored by Michael Achenbach, committed by Commit Bot

Revert "[heap] Tune incremental marking step size."

This reverts commit 90405027.

Reason for revert: Flaky msan:
https://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/builds/18432

Original change's description:
> [heap] Tune incremental marking step size.
> 
> The main thread now can reduce marking step size if concurrent marking
> tasks are making progress and the bailout worklist is empty.
> 
> Bug: chromium:694255
> Change-Id: I2f58530f184c03667ab3a170a1f6309929645c7c
> Reviewed-on: https://chromium-review.googlesource.com/735859
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#49671}

TBR=ulan@chromium.org,mlippautz@chromium.org

Change-Id: Ic10ee9bae51b2b4b78d87c83c67b1307d0c36012
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:694255
Reviewed-on: https://chromium-review.googlesource.com/794190
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49680}
parent aea6250b
...@@ -57,7 +57,6 @@ IncrementalMarking::IncrementalMarking( ...@@ -57,7 +57,6 @@ IncrementalMarking::IncrementalMarking(
marking_worklist_(marking_worklist), marking_worklist_(marking_worklist),
initial_old_generation_size_(0), initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0), bytes_marked_ahead_of_schedule_(0),
bytes_marked_concurrently_(0),
unscanned_bytes_of_large_object_(0), unscanned_bytes_of_large_object_(0),
is_compacting_(false), is_compacting_(false),
should_hurry_(false), should_hurry_(false),
...@@ -349,7 +348,6 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) { ...@@ -349,7 +348,6 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter(); old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
bytes_allocated_ = 0; bytes_allocated_ = 0;
bytes_marked_ahead_of_schedule_ = 0; bytes_marked_ahead_of_schedule_ = 0;
bytes_marked_concurrently_ = 0;
should_hurry_ = false; should_hurry_ = false;
was_activated_ = true; was_activated_ = true;
...@@ -690,17 +688,11 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) { ...@@ -690,17 +688,11 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) {
visitor.Visit(map, obj); visitor.Visit(map, obj);
} }
template <WorklistToProcess worklist_to_process>
intptr_t IncrementalMarking::ProcessMarkingWorklist( intptr_t IncrementalMarking::ProcessMarkingWorklist(
intptr_t bytes_to_process, ForceCompletionAction completion) { intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0; intptr_t bytes_processed = 0;
while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) { while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
HeapObject* obj; HeapObject* obj = marking_worklist()->Pop();
if (worklist_to_process == WorklistToProcess::kBailout) {
obj = marking_worklist()->PopBailout();
} else {
obj = marking_worklist()->Pop();
}
if (obj == nullptr) break; if (obj == nullptr) break;
// Left trimming may result in white, grey, or black filler objects on the // Left trimming may result in white, grey, or black filler objects on the
// marking deque. Ignore these objects. // marking deque. Ignore these objects.
...@@ -928,50 +920,42 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() { ...@@ -928,50 +920,42 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress(); StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) { if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
// The first step after Scavenge will see many allocated bytes. // The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly. // Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize( size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
kMaxStepSizeInMs, kMaxStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond()); heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, max_step_size); bytes_to_process = Min(bytes_to_process, max_step_size);
size_t bytes_processed = 0;
if (FLAG_concurrent_marking) { if (FLAG_concurrent_marking && marking_worklist()->IsBailoutEmpty()) {
bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD, // The number of background tasks + the main thread.
StepOrigin::kV8, WorklistToProcess::kBailout); size_t tasks = heap()->concurrent_marking()->TaskCount() + 1;
bytes_to_process = (bytes_processed >= bytes_to_process) bytes_to_process = Max(IncrementalMarking::kMinStepSizeInBytes,
? 0 bytes_to_process / tasks);
: bytes_to_process - bytes_processed;
size_t current_bytes_marked_concurrently =
heap()->concurrent_marking()->TotalMarkedBytes();
// The concurrent_marking()->TotalMarkedBytes() is not monothonic for a
// short period of time when a concurrent marking task is finishing.
if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
bytes_marked_ahead_of_schedule_ +=
current_bytes_marked_concurrently - bytes_marked_concurrently_;
bytes_marked_concurrently_ = current_bytes_marked_concurrently;
}
} }
size_t bytes_processed = 0;
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) { if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
// Steps performed in tasks and concurrently have put us ahead of // Steps performed in tasks have put us ahead of schedule.
// schedule. We skip processing of marking dequeue here and thus shift // We skip processing of marking dequeue here and thus
// marking time from inside V8 to standalone tasks. // shift marking time from inside V8 to standalone tasks.
bytes_marked_ahead_of_schedule_ -= bytes_to_process; bytes_marked_ahead_of_schedule_ -= bytes_to_process;
bytes_processed += bytes_to_process; bytes_processed = bytes_to_process;
bytes_to_process = IncrementalMarking::kMinStepSizeInBytes; } else {
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
bytes_processed =
Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
} }
bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
StepOrigin::kV8, WorklistToProcess::kAll);
bytes_allocated_ -= Min(bytes_allocated_, bytes_processed); bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
} }
} }
size_t IncrementalMarking::Step(size_t bytes_to_process, size_t IncrementalMarking::Step(size_t bytes_to_process,
CompletionAction action, StepOrigin step_origin, CompletionAction action,
WorklistToProcess worklist_to_process) { StepOrigin step_origin) {
double start = heap_->MonotonicallyIncreasingTimeInMs(); double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) { if (state_ == SWEEPING) {
...@@ -993,15 +977,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process, ...@@ -993,15 +977,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
FLAG_trace_gc_verbose) { FLAG_trace_gc_verbose) {
marking_worklist()->Print(); marking_worklist()->Print();
} }
bytes_processed = ProcessMarkingWorklist(bytes_to_process);
if (worklist_to_process == WorklistToProcess::kBailout) {
bytes_processed =
ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
} else {
bytes_processed =
ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
}
if (step_origin == StepOrigin::kTask) { if (step_origin == StepOrigin::kTask) {
bytes_marked_ahead_of_schedule_ += bytes_processed; bytes_marked_ahead_of_schedule_ += bytes_processed;
} }
......
...@@ -20,7 +20,6 @@ class Object; ...@@ -20,7 +20,6 @@ class Object;
class PagedSpace; class PagedSpace;
enum class StepOrigin { kV8, kTask }; enum class StepOrigin { kV8, kTask };
enum class WorklistToProcess { kAll, kBailout };
class V8_EXPORT_PRIVATE IncrementalMarking { class V8_EXPORT_PRIVATE IncrementalMarking {
public: public:
...@@ -188,8 +187,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking { ...@@ -188,8 +187,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeSweeping(); void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action, size_t Step(size_t bytes_to_process, CompletionAction action,
StepOrigin step_origin, StepOrigin step_origin);
WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
inline void RestartIfNotMarking(); inline void RestartIfNotMarking();
...@@ -297,7 +295,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking { ...@@ -297,7 +295,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space); void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
void DeactivateIncrementalWriteBarrier(); void DeactivateIncrementalWriteBarrier();
template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
V8_INLINE intptr_t ProcessMarkingWorklist( V8_INLINE intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process, intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION); ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
...@@ -329,10 +326,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking { ...@@ -329,10 +326,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t old_generation_allocation_counter_; size_t old_generation_allocation_counter_;
size_t bytes_allocated_; size_t bytes_allocated_;
size_t bytes_marked_ahead_of_schedule_; size_t bytes_marked_ahead_of_schedule_;
// A sample of concurrent_marking()->TotalMarkedBytes() at the last
// incremental marking step. It is used for updating
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
size_t bytes_marked_concurrently_;
size_t unscanned_bytes_of_large_object_; size_t unscanned_bytes_of_large_object_;
// Must use SetState() above to update state_ // Must use SetState() above to update state_
......
...@@ -541,14 +541,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -541,14 +541,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
return nullptr; return nullptr;
} }
HeapObject* PopBailout() {
HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
if (bailout_.Pop(kMainThread, &result)) return result;
#endif
return nullptr;
}
void Clear() { void Clear() {
bailout_.Clear(); bailout_.Clear();
shared_.Clear(); shared_.Clear();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment