Commit c12e8d88 authored by jkummerow, committed by Commit bot

Revert of Fix logic for doing incremental marking steps on tenured allocation. (patchset #4 id:60001 of https://codereview.chromium.org/1040233003/)

Reason for revert:
Suspected of triggering memory corruption issues, e.g. crbug.com/478401.

Original issue's description:
> Fix logic for doing incremental marking steps on tenured allocation.
>
> R=hpayer@chromium.org
> BUG=
>
> Committed: https://crrev.com/9716468ae63500adb74f5188c47de847e195d71b
> Cr-Commit-Position: refs/heads/master@{#27883}

TBR=hpayer@chromium.org,erikcorry@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1099633002

Cr-Commit-Position: refs/heads/master@{#27944}
parent 9987221c
@@ -818,7 +818,7 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
   if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
     Start();
   } else {
-    Step(allocated * kOldSpaceAllocationMarkingFactor, GC_VIA_STACK_GUARD);
+    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
   }
 }
@@ -894,7 +894,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      !CanDoSteps()) {
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
     return 0;
   }
@@ -50,10 +50,7 @@ class IncrementalMarking {
   INLINE(bool IsMarking()) { return state() >= MARKING; }
 
-  inline bool CanDoSteps() {
-    return FLAG_incremental_marking_steps &&
-           (state() == MARKING || state() == SWEEPING);
-  }
-
   inline bool IsMarkingIncomplete() { return state() == MARKING; }
 
   inline bool IsComplete() { return state() == COMPLETE; }
@@ -105,8 +102,6 @@ class IncrementalMarking {
   // But if we are promoting a lot of data we need to mark faster to keep up
   // with the data that is entering the old space through promotion.
   static const intptr_t kFastMarking = 3;
-  static const intptr_t kOldSpaceAllocationMarkingFactor =
-      kFastMarking / kInitialMarkingSpeed;
   // After this many steps we increase the marking/allocating factor.
   static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
   // This is how much we increase the marking/allocating factor by.
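
Note on the incremental-marking hunks above: the reverted CL routed the "may I do a step?" question through a CanDoSteps() helper and gave the old-space scaling factor a name, while the restored code checks the flag and state inline and spells the factor out as kFastMarking / kInitialMarkingSpeed. The following standalone sketch (not V8 source; the State enum and the kInitialMarkingSpeed value are illustrative placeholders, only kFastMarking = 3 comes from the diff) models that gate and budget calculation:

    // Standalone sketch, not V8 source: simplified model of the step gate and
    // old-space marking budget touched by the hunks above.
    #include <stdint.h>
    #include <stdio.h>

    enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

    static const intptr_t kFastMarking = 3;          // as declared in the header hunk
    static const intptr_t kInitialMarkingSpeed = 1;  // assumed placeholder value
    static bool FLAG_incremental_marking_steps = true;

    // Helper introduced by the reverted CL: a step is only allowed while the
    // collector is sweeping or marking and the steps flag is on.
    static bool CanDoSteps(State state) {
      return FLAG_incremental_marking_steps &&
             (state == MARKING || state == SWEEPING);
    }

    // Marking budget for an old-space allocation. When kInitialMarkingSpeed
    // divides kFastMarking evenly, the precomputed factor used before the
    // revert and this inline expression give the same result.
    static intptr_t OldSpaceMarkingBudget(intptr_t allocated) {
      return allocated * kFastMarking / kInitialMarkingSpeed;
    }

    int main() {
      const intptr_t allocated = 64 * 1024;  // one hypothetical old-space allocation
      if (CanDoSteps(MARKING)) {
        printf("budget for %ld allocated bytes: %ld bytes of marking\n",
               static_cast<long>(allocated),
               static_cast<long>(OldSpaceMarkingBudget(allocated)));
      }
      return 0;
    }

Since kOldSpaceAllocationMarkingFactor was defined as kFastMarking / kInitialMarkingSpeed, the two budget expressions only differ in integer-rounding order; the behavioral part of the revert is in the spaces.cc hunks below.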
@@ -2200,7 +2200,6 @@ void FreeList::Reset() {
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
-  unreported_allocation_ = 0;
 }
@@ -2348,15 +2347,6 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 }
 
-void PagedSpace::SetTopAndLimit(Address top, Address limit) {
-  DCHECK(top == limit ||
-         Page::FromAddress(top) == Page::FromAddress(limit - 1));
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.set_top(top);
-  allocation_info_.set_limit(limit);
-}
-
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2374,6 +2364,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
 
+  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+                                                      old_linear_size);
+
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
@@ -2396,27 +2389,21 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
 
-  // An old-space step will mark more data per byte allocated, because old space
-  // allocation is more serious. We don't want the pause to be bigger, so we
-  // do marking after a smaller amount of allocation.
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
-                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);
-  unreported_allocation_ += new_node_size;
 
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
   } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->CanDoSteps()) {
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking_steps) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
@@ -2424,27 +2411,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + size_in_bytes + linear_size);
-    unreported_allocation_ = 0;
-  } else {
-    if (unreported_allocation_ > kThreshold) {
-      // This may start the incremental marker, or do a little work if it's
-      // already started.
-      owner_->heap()->incremental_marking()->OldSpaceStep(
-          Min(kThreshold, unreported_allocation_));
-      unreported_allocation_ = 0;
-    }
-    if (bytes_left > 0) {
-      // Normally we give the rest of the node to the allocator as its new
-      // linear allocation area.
-      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                             new_node->address() + new_node_size);
-    } else {
-      // TODO(gc) Try not freeing linear allocation region when bytes_left
-      // are zero.
-      owner_->SetTopAndLimit(NULL, NULL);
-    }
+    owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
+                                                        linear_size);
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + new_node_size);
+  } else {
+    // TODO(gc) Try not freeing linear allocation region when bytes_left
+    // are zero.
+    owner_->SetTopAndLimit(NULL, NULL);
   }
 
   return new_node;
@@ -2931,16 +2906,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   }
 
-  // We would like to tell the incremental marker to do a lot of work, since
-  // we just made a large allocation in old space, but that might cause a huge
-  // pause. Underreporting here may cause the marker to speed up because it
-  // will perceive that it is not keeping up with allocation. Although this
-  // causes some big incremental marking steps they are not as big as this one
-  // might have been. In testing, a very large pause was divided up into about
-  // 12 parts.
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
-                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
-  heap()->incremental_marking()->OldSpaceStep(kThreshold);
+  heap()->incremental_marking()->OldSpaceStep(object_size);
 
   return object;
 }
@@ -1583,7 +1583,6 @@ class FreeList {
   PagedSpace* owner_;
   Heap* heap_;
-  int unreported_allocation_;
 
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1781,7 +1780,13 @@ class PagedSpace : public Space {
   void ResetFreeList() { free_list_.Reset(); }
 
   // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit);
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.set_top(top);
+    allocation_info_.set_limit(limit);
+  }
 
   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo() {
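
Note on the spaces hunks: the substantive change being reverted is when old-space allocations are reported to the incremental marker. The restored code calls OldSpaceStep() directly for each free-list and large-object allocation, while the reverted CL accumulated allocations in unreported_allocation_ and issued a step capped at a threshold once the counter crossed it. A minimal standalone sketch of the two pacing schemes (not V8 source; the Marker type, helper names, and kThreshold value are illustrative):

    // Standalone sketch, not V8 source: contrasts per-allocation reporting
    // (restored by this revert) with the accumulate-and-cap scheme it removes.
    #include <stdint.h>
    #include <stdio.h>
    #include <algorithm>

    static const intptr_t kThreshold = 64 * 1024;  // assumed placeholder value

    // Stand-in for the incremental marker: it just records how much old-space
    // allocation has been reported to it via OldSpaceStep().
    struct Marker {
      intptr_t reported = 0;
      void OldSpaceStep(intptr_t allocated) { reported += allocated; }
    };

    // Restored behavior: every allocation is reported to the marker immediately.
    static void ReportDirect(Marker* marker, intptr_t size) {
      marker->OldSpaceStep(size);
    }

    // Reverted-away behavior: allocations accumulate in an "unreported" counter
    // and a step capped at kThreshold is issued once the counter crosses it.
    static void ReportBuffered(Marker* marker, intptr_t* unreported, intptr_t size) {
      *unreported += size;
      if (*unreported > kThreshold) {
        marker->OldSpaceStep(std::min(kThreshold, *unreported));
        *unreported = 0;
      }
    }

    int main() {
      Marker direct, buffered;
      intptr_t unreported = 0;
      for (int i = 0; i < 100; i++) {  // 100 allocations of 4 KB each
        ReportDirect(&direct, 4 * 1024);
        ReportBuffered(&buffered, &unreported, 4 * 1024);
      }
      printf("direct: %ld reported; buffered: %ld reported, %ld pending\n",
             static_cast<long>(direct.reported),
             static_cast<long>(buffered.reported),
             static_cast<long>(unreported));
      return 0;
    }

The buffered scheme underreports by design (the counter is cleared even when it exceeds the cap), which is what the comment deleted from LargeObjectSpace::AllocateRaw described; the revert returns to reporting the full allocation up front.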