Commit a5440d11 authored by machenbach, committed by Commit bot

Revert of [heap] New heuristics for starting of incremental marking. (patchset #9 id:160001 of https://codereview.chromium.org/2364923002/ )

Reason for revert:
OOMs in nosnap debug:
https://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20nosnap%20-%20debug/builds/9572

Original issue's description:
> [heap] New heuristics for starting of incremental marking.
>
> The motivation for this patch is to move more marking work to tasks.
> This is done by postponing the start of incremental marking until
> a marking task is running.
>
> This patch introduces a soft and a hard limit for incremental marking.
> When the soft limit is reached, the marking task is scheduled.
> If the hard limit is reached before the task is running, then
> incremental marking is started without waiting for the task.
>
> BUG=chromium:616434
> LOG=NO
>
> Committed: https://crrev.com/55683ddd2a32e0dfb8df66271fbf53e3618cce9d
> Cr-Commit-Position: refs/heads/master@{#39831}
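
The soft/hard limit mechanism described above corresponds to the
Heap::StartIncrementalMarkingIfAllocationLimitIsReached hunk being reverted in
the diff below. As a rough, self-contained C++ sketch of the dispatch pattern
only (the stubbed helpers are illustrative placeholders, not V8 APIs):

// Sketch of the soft/hard limit dispatch from the reverted patch.
// All helpers below are stand-in stubs, not real V8 interfaces.
#include <cstdio>

enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

// Stub: in the patch this compares old-generation usage against the limits.
IncrementalMarkingLimit IncrementalMarkingLimitReached() {
  return IncrementalMarkingLimit::kSoftLimit;  // placeholder result
}

// Stub: in the patch this posts an IncrementalMarkingJob task.
void ScheduleMarkingTask() { std::puts("marking task scheduled"); }

// Stub: in the patch this calls Heap::StartIncrementalMarking directly.
void StartIncrementalMarkingNow() { std::puts("marking started"); }

void OnAllocationLimitCheck() {
  switch (IncrementalMarkingLimitReached()) {
    case IncrementalMarkingLimit::kSoftLimit:
      // Close to the limit: schedule a task that will start marking soon.
      ScheduleMarkingTask();
      break;
    case IncrementalMarkingLimit::kHardLimit:
      // Limit hit before the task ran: start marking immediately.
      StartIncrementalMarkingNow();
      break;
    case IncrementalMarkingLimit::kNoLimit:
      break;  // marking disabled or too early to start
  }
}

int main() { OnAllocationLimitCheck(); }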

TBR=hpayer@chromium.org,ulan@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:616434

Review-Url: https://codereview.chromium.org/2375983002
Cr-Commit-Position: refs/heads/master@{#39833}
parent d8d964ba
@@ -169,6 +169,18 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
return old_space_->allocation_limit_address();
}
bool Heap::HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
intptr_t adjusted_allocation_limit = limit - new_space_->Capacity();
if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
if (HighMemoryPressure()) return true;
return false;
}
void Heap::UpdateNewSpaceAllocationCounter() {
new_space_allocation_counter_ = NewSpaceAllocationCounter();
}
@@ -480,6 +492,13 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped() && !ShouldOptimizeForMemoryUsage()) {
return false;
}
return OldGenerationSpaceAvailable() < 0;
}
template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
if (promotion_mode == PROMOTE_MARKED) {
@@ -266,6 +266,13 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
// Is enough data promoted to justify a global GC?
if (OldGenerationAllocationLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
*reason = "promotion limit reached";
return MARK_COMPACTOR;
}
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
@@ -960,7 +967,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
!ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() && FLAG_incremental_marking &&
OldGenerationSpaceAvailable() <= 0) {
OldGenerationAllocationLimitReached()) {
if (!incremental_marking()->IsComplete() &&
!mark_compact_collector()->marking_deque_.IsEmpty() &&
!FLAG_gc_global) {
@@ -1072,15 +1079,10 @@ void Heap::StartIncrementalMarking(int gc_flags,
void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags, const GCCallbackFlags gc_callback_flags) {
if (incremental_marking()->IsStopped()) {
IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
} else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
StartIncrementalMarking(gc_flags,
GarbageCollectionReason::kAllocationLimit,
gc_callback_flags);
}
if (incremental_marking()->IsStopped() &&
incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
StartIncrementalMarking(gc_flags, GarbageCollectionReason::kAllocationLimit,
gc_callback_flags);
}
}
@@ -5327,6 +5329,7 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
}
}
void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
double gc_speed,
double mutator_speed) {
@@ -5345,53 +5348,6 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
}
}
// This predicate is called when an old generation space cannot allocate from
// the free list and is about to add a new page. Returning false will cause a
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or incremental marking is not in progress and we cannot start it.
bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
// We reached the old generation allocation limit.
if (ShouldOptimizeForMemoryUsage()) return false;
if (incremental_marking()->IsStopped() &&
IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
// We cannot start incremental marking.
return false;
}
return true;
}
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
// kNoLimit means that either incremental marking is disabled or it is too
// early to start incremental marking.
// kSoftLimit means that incremental marking should be started soon.
// kHardLimit means that incremental marking should be started immediately.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (!incremental_marking()->CanBeActivated() ||
PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
HighMemoryPressure()) {
// If there is high memory pressure or stress testing is enabled, then
// start marking immediately.
return IncrementalMarkingLimit::kHardLimit;
}
intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
// We are close to the allocation limit.
// Choose between the hard and the soft limits.
if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
}
void Heap::EnableInlineAllocation() {
if (!inline_allocation_disabled_) return;
@@ -709,11 +709,22 @@ class Heap {
// should not happen during deserialization.
void NotifyDeserializationComplete();
intptr_t old_generation_allocation_limit() const {
return old_generation_allocation_limit_;
}
bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
bool CanExpandOldGeneration(int size) {
if (force_oom_) return false;
return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
}
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -836,6 +847,8 @@ class Heap {
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit);
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
// An object should be promoted if the object has survived a
@@ -849,6 +862,8 @@ class Heap {
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
inline bool OldGenerationAllocationLimitReached();
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
@@ -1831,22 +1846,6 @@ class Heap {
intptr_t MinimumAllocationLimitGrowingStep();
intptr_t old_generation_allocation_limit() const {
return old_generation_allocation_limit_;
}
bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
bool CanExpandOldGeneration(int size) {
if (force_oom_) return false;
return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
}
bool ShouldExpandOldGenerationOnAllocationFailure();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
// ===========================================================================
// Idle notification. ========================================================
// ===========================================================================
@@ -2323,15 +2322,12 @@ class Heap {
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class IteratePromotedObjectsVisitor;
friend class LargeObjectSpace;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpace;
friend class Scavenger;
friend class StoreBuffer;
friend class TestMemoryAllocatorScope;
@@ -45,14 +45,6 @@ void IncrementalMarkingJob::Task::RunInternal() {
Heap* heap = isolate()->heap();
job_->NotifyTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
heap->StartIncrementalMarking(Heap::kNoGCFlags,
GarbageCollectionReason::kIdleTask,
kNoGCCallbackFlags);
}
}
if (!incremental_marking->IsStopped()) {
Step(heap);
if (!incremental_marking->IsStopped()) {
@@ -400,6 +400,22 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
}
bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
#else
// TODO(gc) consider setting this to some low level so that some
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
// Don't switch on for very small heaps.
return CanBeActivated() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
heap_->HeapIsFullEnoughToStartIncrementalMarking(
heap_->old_generation_allocation_limit());
}
bool IncrementalMarking::WasActivated() { return was_activated_; }
@@ -72,6 +72,8 @@ class IncrementalMarking {
bool CanBeActivated();
bool ShouldActivateEvenWithoutIdleNotification();
bool WasActivated();
void Start(GarbageCollectionReason gc_reason);
@@ -115,12 +117,6 @@ class IncrementalMarking {
// incremental marking to be postponed.
static const int kMaxIdleMarkingDelayCounter = 3;
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
#else
static const intptr_t kActivationThreshold = 0;
#endif
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
@@ -2889,7 +2889,19 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
}
}
if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
if (!heap()->always_allocate() &&
heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free lists.
HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
return object;
}
// Try to expand the space and allocate in the new next page.
if (Expand()) {
DCHECK((CountTotalPages() > 1) ||
(size_in_bytes <= free_list_.Available()));
return free_list_.Allocate(size_in_bytes);
@@ -30,7 +30,6 @@
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress587004) \
V(Regress538257) \
V(Regress589413) \
V(WriteBarriersInCopyJSObject)
@@ -5583,7 +5583,8 @@ static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, NULL);
}
HEAP_TEST(Regress538257) {
UNINITIALIZED_TEST(Regress538257) {
i::FLAG_manual_evacuation_candidates_selection = true;
v8::Isolate::CreateParams create_params;
// Set heap limits.