Commit eca8a5eb authored by ulan's avatar ulan Committed by Commit bot

[heap] Refactor incremental marking step.

This patch
- extracts the logic of keeping track of allocated bytes
  from the actual incremental marking step.
- replaces OldSpaceStep with a check for incremental marking start.
- removes the force_marking parameter of AdvanceIncrementalMarking.

BUG=chromium:616434
LOG=NO

Review-Url: https://codereview.chromium.org/2304123003
Cr-Commit-Position: refs/heads/master@{#39213}
parent 1ea41a46
......@@ -936,10 +936,8 @@ void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
const double deadline = MonotonicallyIncreasingTimeInMs() +
pressure * kMaxStepSizeOnExternalLimit;
incremental_marking()->AdvanceIncrementalMarking(
deadline,
IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
IncrementalMarking::FORCE_COMPLETION));
deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
}
}
......@@ -1056,9 +1054,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
if (!ShouldAbortIncrementalMarking()) {
StartIncrementalMarkingIfNeeded(kNoGCFlags, kNoGCCallbackFlags,
"GC epilogue");
}
return next_gc_likely_to_collect_more;
......@@ -1094,6 +1092,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
incremental_marking()->Start(reason);
}
// Starts incremental marking only when it is currently stopped AND the
// marking heuristics say it should be activated even without an idle
// notification; otherwise this is a no-op. Flags and reason are forwarded
// unchanged to StartIncrementalMarking().
void Heap::StartIncrementalMarkingIfNeeded(
    int gc_flags, const GCCallbackFlags gc_callback_flags, const char* reason) {
  auto* const marking = incremental_marking();
  // Guard clauses: never restart a cycle that is already running, and
  // respect the activation heuristic.
  if (!marking->IsStopped()) return;
  if (!marking->ShouldActivateEvenWithoutIdleNotification()) return;
  StartIncrementalMarking(gc_flags, gc_callback_flags, reason);
}
void Heap::StartIdleIncrementalMarking() {
gc_idle_time_handler_->ResetNoProgressCounter();
......
......@@ -1154,6 +1154,11 @@ class Heap {
GCCallbackFlags::kNoGCCallbackFlags,
const char* reason = nullptr);
void StartIncrementalMarkingIfNeeded(int gc_flags = kNoGCFlags,
const GCCallbackFlags gc_callback_flags =
GCCallbackFlags::kNoGCCallbackFlags,
const char* reason = nullptr);
void FinalizeIncrementalMarkingIfComplete(const char* comment);
bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
......
......@@ -81,7 +81,8 @@ IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
}
const double remaining_idle_time_in_ms =
incremental_marking->AdvanceIncrementalMarking(
deadline_in_ms, IncrementalMarking::IdleStepActions());
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
if (remaining_idle_time_in_ms > 0.0) {
heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
}
......@@ -119,10 +120,8 @@ void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
deadline, i::IncrementalMarking::StepActions(
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::IncrementalMarking::FORCE_MARKING,
i::IncrementalMarking::FORCE_COMPLETION));
deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
heap->FinalizeIncrementalMarkingIfComplete(
"Incremental marking task: finalize incremental marking");
}
......
......@@ -19,12 +19,6 @@
namespace v8 {
namespace internal {
// Returns the StepActions bundle used for marking steps driven by idle-time
// notifications: no GC is triggered via the stack guard, marking is forced,
// and completion is NOT forced (finalization may be postponed).
IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
observer_(*this, kAllocatedThreshold),
......@@ -1057,38 +1051,25 @@ void IncrementalMarking::Epilogue() {
}
double IncrementalMarking::AdvanceIncrementalMarking(
double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
double deadline_in_ms, CompletionAction completion_action,
ForceCompletionAction force_completion) {
DCHECK(!IsStopped());
double remaining_time_in_ms = 0.0;
intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
kStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
double remaining_time_in_ms = 0.0;
do {
Step(step_size_in_bytes, step_actions.completion_action,
step_actions.force_marking, step_actions.force_completion);
Step(step_size_in_bytes, completion_action, force_completion);
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (!heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
remaining_time_in_ms >=
2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
!IsComplete() &&
} while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
!heap()->mark_compact_collector()->marking_deque()->IsEmpty());
return remaining_time_in_ms;
}
// Reacts to `allocated` bytes of old-space allocation: if marking is stopped
// and the activation heuristic allows it, start a new incremental marking
// cycle; otherwise perform a marking step whose size is scaled from the
// allocated bytes (kFastMarking / kInitialMarkingSpeed scaling).
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
"old space step");
} else {
// Scale the step size by the configured speed ratio; the step may trigger
// a GC via the stack guard on completion.
Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
}
}
void IncrementalMarking::SpeedUp() {
bool speed_up = false;
......@@ -1178,39 +1159,17 @@ void IncrementalMarking::FinalizeSweeping() {
}
}
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action,
ForceMarkingAction marking,
ForceCompletionAction completion) {
DCHECK(allocated_bytes >= 0);
void IncrementalMarking::NotifyAllocatedBytes(intptr_t allocated_bytes) {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
(state_ != SWEEPING && state_ != MARKING)) {
return 0;
return;
}
allocated_ += allocated_bytes;
if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
write_barriers_invoked_since_last_step_ <
if (allocated_ >= kAllocatedThreshold ||
write_barriers_invoked_since_last_step_ >=
kWriteBarriersInvokedThreshold) {
return 0;
}
// If an idle notification happened recently, we delay marking steps.
if (marking == DO_NOT_FORCE_MARKING &&
heap_->RecentIdleNotificationHappened()) {
return 0;
}
intptr_t bytes_processed = 0;
{
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double start = heap_->MonotonicallyIncreasingTimeInMs();
// The marking speed is driven either by the allocation rate or by the rate
// at which we are having to check the color of objects in the write
// barrier.
......@@ -1222,73 +1181,83 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
intptr_t bytes_to_process =
marking_speed_ *
Max(allocated_, write_barriers_invoked_since_last_step_);
allocated_ = 0;
write_barriers_invoked_since_last_step_ = 0;
Step(bytes_to_process, GC_VIA_STACK_GUARD, FORCE_COMPLETION);
}
}
bytes_scanned_ += bytes_to_process;
void IncrementalMarking::Step(intptr_t bytes_to_process,
CompletionAction action,
ForceCompletionAction completion) {
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
FinalizeSweeping();
}
bytes_scanned_ += bytes_to_process;
if (state_ == MARKING) {
const bool incremental_wrapper_tracing =
FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
const bool process_wrappers =
incremental_wrapper_tracing &&
(heap_->mark_compact_collector()
->RequiresImmediateWrapperProcessing() ||
heap_->mark_compact_collector()->marking_deque()->IsEmpty());
bool wrapper_work_left = incremental_wrapper_tracing;
if (!process_wrappers) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
} else {
const double kWrapperTracngStepMs = 1.0;
const double wrapper_deadline =
heap_->MonotonicallyIncreasingTimeInMs() + kWrapperTracngStepMs;
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
heap_->mark_compact_collector()
->RegisterWrappersWithEmbedderHeapTracer();
wrapper_work_left =
heap_->mark_compact_collector()
->embedder_heap_tracer()
->AdvanceTracing(wrapper_deadline,
EmbedderHeapTracer::AdvanceTracingActions(
EmbedderHeapTracer::ForceCompletionAction::
DO_NOT_FORCE_COMPLETION));
}
allocated_ = 0;
write_barriers_invoked_since_last_step_ = 0;
if (state_ == SWEEPING) {
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
FinalizeSweeping();
}
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
!wrapper_work_left) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
if (!finalize_marking_completed_) {
FinalizeMarking(action);
} else {
MarkingComplete(action);
}
intptr_t bytes_processed = 0;
if (state_ == MARKING) {
const bool incremental_wrapper_tracing =
FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
const bool process_wrappers =
incremental_wrapper_tracing &&
(heap_->mark_compact_collector()
->RequiresImmediateWrapperProcessing() ||
heap_->mark_compact_collector()->marking_deque()->IsEmpty());
bool wrapper_work_left = incremental_wrapper_tracing;
if (!process_wrappers) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
} else {
const double wrapper_deadline =
heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
heap_->mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
wrapper_work_left =
heap_->mark_compact_collector()
->embedder_heap_tracer()
->AdvanceTracing(wrapper_deadline,
EmbedderHeapTracer::AdvanceTracingActions(
EmbedderHeapTracer::ForceCompletionAction::
DO_NOT_FORCE_COMPLETION));
}
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
!wrapper_work_left) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
if (!finalize_marking_completed_) {
FinalizeMarking(action);
} else {
IncrementIdleMarkingDelayCounter();
MarkingComplete(action);
}
} else {
IncrementIdleMarkingDelayCounter();
}
}
}
steps_count_++;
steps_count_++;
// Speed up marking if we are marking too slow or if we are almost done
// with marking.
SpeedUp();
// Speed up marking if we are marking too slow or if we are almost done
// with marking.
SpeedUp();
double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
// Note that we report zero bytes here when sweeping was in progress or
// when we just started incremental marking. In these cases we did not
// process the marking deque.
heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
}
return bytes_processed;
double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
// Note that we report zero bytes here when sweeping was in progress or
// when we just started incremental marking. In these cases we did not
// process the marking deque.
heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
}
......
......@@ -26,27 +26,10 @@ class IncrementalMarking {
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
// Bundles the three policy knobs of a marking step so callers of
// Step()/AdvanceIncrementalMarking() can pass them as one value.
struct StepActions {
StepActions(CompletionAction complete_action_,
ForceMarkingAction force_marking_,
ForceCompletionAction force_completion_)
: completion_action(complete_action_),
force_marking(force_marking_),
force_completion(force_completion_) {}
// What to do when marking completes (GC via stack guard or not).
CompletionAction completion_action;
// Whether to mark even when thresholds/idle heuristics would skip the step.
ForceMarkingAction force_marking;
// Whether completion/finalization may be postponed.
ForceCompletionAction force_completion;
};
static StepActions IdleStepActions();
explicit IncrementalMarking(Heap* heap);
static void Initialize();
......@@ -113,7 +96,8 @@ class IncrementalMarking {
// returns the remaining time that cannot be used for incremental marking
// anymore because a single step would exceed the deadline.
double AdvanceIncrementalMarking(double deadline_in_ms,
StepActions step_actions);
CompletionAction completion_action,
ForceCompletionAction force_completion);
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
......@@ -134,17 +118,18 @@ class IncrementalMarking {
static const intptr_t kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;
static const intptr_t kStepSizeInMs = 1;
// This is the upper bound for how many times we allow finalization of
// incremental marking to be postponed.
static const size_t kMaxIdleMarkingDelayCounter = 3;
void FinalizeSweeping();
void OldSpaceStep(intptr_t allocated);
void NotifyAllocatedBytes(intptr_t allocated_bytes);
intptr_t Step(intptr_t allocated, CompletionAction action,
ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
ForceCompletionAction completion = FORCE_COMPLETION);
void Step(intptr_t bytes_to_process, CompletionAction action,
ForceCompletionAction completion);
inline void RestartIfNotMarking();
......@@ -245,8 +230,7 @@ class IncrementalMarking {
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override {
incremental_marking_.Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
incremental_marking_.NotifyAllocatedBytes(bytes_allocated);
}
private:
......
......@@ -84,10 +84,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
kIncrementalMarkingDelayMs;
heap()->incremental_marking()->AdvanceIncrementalMarking(
deadline, i::IncrementalMarking::StepActions(
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::IncrementalMarking::FORCE_MARKING,
i::IncrementalMarking::FORCE_COMPLETION));
deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
heap()->FinalizeIncrementalMarkingIfComplete(
"Memory reducer: finalize incremental marking");
}
......
......@@ -2553,14 +2553,13 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Don't free list allocate if there is linear space available.
DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
owner_->EmptyAllocationInfo();
owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
old_linear_size);
owner_->heap()->StartIncrementalMarkingIfNeeded(
Heap::kNoGCFlags, kNoGCCallbackFlags, "old space step");
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
......@@ -2996,7 +2995,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
}
heap()->incremental_marking()->OldSpaceStep(object_size);
heap()->StartIncrementalMarkingIfNeeded(Heap::kNoGCFlags, kNoGCCallbackFlags,
"old space step");
AllocationStep(object->address(), object_size);
if (heap()->incremental_marking()->black_allocation()) {
......
......@@ -141,20 +141,24 @@ void SimulateFullSpace(v8::internal::NewSpace* space,
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsSweeping()) {
marking->FinalizeSweeping();
}
CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
if (marking->IsStopped()) {
heap->StartIncrementalMarking();
}
CHECK(marking->IsMarking());
CHECK(marking->IsMarking() || marking->IsComplete());
if (!force_completion) return;
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::IncrementalMarking::FORCE_COMPLETION);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......
......@@ -784,7 +784,6 @@ TEST(PromoteGreyOrBlackObjectsOnScavenge) {
while (
Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*marked)))) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}
......@@ -2681,7 +2680,8 @@ TEST(InstanceOfStubWriteBarrier) {
!marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
}
CHECK(marking->IsMarking());
......@@ -2844,14 +2844,12 @@ TEST(IdleNotificationFinishMarking) {
// marking delay counter.
// Perform a huge incremental marking step but don't complete marking.
intptr_t bytes_processed = 0;
do {
bytes_processed =
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
} while (bytes_processed);
} while (
!CcTest::heap()->mark_compact_collector()->marking_deque()->IsEmpty());
// The next invocations of incremental marking are not going to complete
// marking
......@@ -2859,7 +2857,6 @@ TEST(IdleNotificationFinishMarking) {
for (size_t i = 0; i < IncrementalMarking::kMaxIdleMarkingDelayCounter - 2;
i++) {
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
}
......@@ -4613,7 +4610,8 @@ TEST(LargeObjectSlotRecording) {
// Start incremental marking to active write barrier.
heap::SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
10000000, IncrementalMarking::IdleStepActions());
10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
// Create references from the large object to the object on the evacuation
// candidate.
......@@ -4674,7 +4672,8 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
CcTest::heap()->StartIncrementalMarking();
}
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
CHECK(marking->IsComplete() ||
marking->IsReadyToOverApproximateWeakClosure());
}
......@@ -5496,7 +5495,8 @@ TEST(WeakCellsWithIncrementalMarking) {
if (marking->IsStopped()) {
heap->StartIncrementalMarking();
}
marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
heap->CollectGarbage(NEW_SPACE);
CHECK(weak_cell->value()->IsFixedArray());
weak_cells[i] = inner_scope.CloseAndEscape(weak_cell);
......@@ -5815,7 +5815,8 @@ TEST(Regress3631) {
while (!Marking::IsBlack(
ObjectMarking::MarkBitFrom(HeapObject::cast(weak_map->table()))) &&
!marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
}
// Stash the backing store in a handle.
Handle<Object> save(weak_map->table(), isolate);
......@@ -6654,7 +6655,8 @@ TEST(Regress598319) {
// Now we search for a state where we are in incremental marking and have
// only partially marked the large object.
while (!marking->IsComplete()) {
marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
CHECK_NE(page->progress_bar(), arr.get()->Size());
{
......@@ -6671,7 +6673,8 @@ TEST(Regress598319) {
// Finish marking with bigger steps to speed up test.
while (!marking->IsComplete()) {
marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......@@ -6724,7 +6727,8 @@ TEST(Regress615489) {
isolate->factory()->NewFixedArray(500, TENURED)->Size();
}
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......@@ -6783,7 +6787,8 @@ TEST(Regress631969) {
// Finish incremental marking.
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment