Commit 0790a8c0 authored by machenbach, committed by Commit bot

Revert of Reland: Fix logic for incremental marking steps on tenured allocation (patchset #4 id:60001 of https://codereview.chromium.org/1077153004/)

Reason for revert:
[Sheriff] Speculative revert, see:
https://code.google.com/p/chromium/issues/detail?id=506875

Original issue's description:
> Reland: Fix logic for incremental marking steps on tenured allocation
>
> BUG=
>
> Committed: https://crrev.com/5000650bde2ec0bc90d959b529c97aea20385043
> Cr-Commit-Position: refs/heads/master@{#29442}

TBR=hpayer@chromium.org,erikcorry@chromium.org
BUG=chromium:506875
LOG=n
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1212063005

Cr-Commit-Position: refs/heads/master@{#29494}
parent 069a47f6
@@ -825,16 +825,10 @@ void IncrementalMarking::Epilogue() {
 void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
-  // If we are stressing the GC, then always return the bump allocation area to
-  // the free list here, which will cause a crash if the top and limit are not
-  // up to date.
-  if (FLAG_gc_interval != -1) {
-    heap()->old_space()->ReturnLinearAllocationAreaToFreeList();
-  }
   if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
     Start(Heap::kNoGCFlags);
   } else {
-    Step(allocated * kOldSpaceAllocationMarkingFactor, GC_VIA_STACK_GUARD);
+    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
   }
 }
@@ -916,7 +910,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      !CanDoSteps()) {
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
     return 0;
   }
......
@@ -50,10 +50,7 @@ class IncrementalMarking {
   INLINE(bool IsMarking()) { return state() >= MARKING; }
-  inline bool CanDoSteps() {
-    return FLAG_incremental_marking_steps &&
-           (state() == MARKING || state() == SWEEPING);
-  }
+  inline bool IsMarkingIncomplete() { return state() == MARKING; }
   inline bool IsComplete() { return state() == COMPLETE; }
@@ -103,8 +100,6 @@ class IncrementalMarking {
   // But if we are promoting a lot of data we need to mark faster to keep up
   // with the data that is entering the old space through promotion.
   static const intptr_t kFastMarking = 3;
-  static const intptr_t kOldSpaceAllocationMarkingFactor =
-      kFastMarking / kInitialMarkingSpeed;
   // After this many steps we increase the marking/allocating factor.
   static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
   // This is how much we increase the marking/allocating factor by.
......
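A side note on the step-size arithmetic in the two hunks above: the reverted patch precomputed the ratio as a class constant (kOldSpaceAllocationMarkingFactor = kFastMarking / kInitialMarkingSpeed), while the restored call multiplies before dividing (allocated * kFastMarking / kInitialMarkingSpeed). With integer constants the two forms only agree when kInitialMarkingSpeed divides kFastMarking evenly, because the constant truncates the ratio before the multiplication. A minimal sketch, using kFastMarking = 3 from the header above and an assumed placeholder value of 1 for kInitialMarkingSpeed (that constant is not shown in this diff):

#include <cstdint>
#include <cstdio>

// Constants mirroring the header above. kInitialMarkingSpeed is an assumed
// placeholder for this sketch only; it is not part of the diff.
static const intptr_t kFastMarking = 3;
static const intptr_t kInitialMarkingSpeed = 1;
// The constant the revert removes: the ratio is truncated once, up front.
static const intptr_t kOldSpaceAllocationMarkingFactor =
    kFastMarking / kInitialMarkingSpeed;

int main() {
  const intptr_t allocated = 1000;
  // Restored form: multiply by kFastMarking first, then divide.
  const intptr_t restored = allocated * kFastMarking / kInitialMarkingSpeed;
  // Reverted form: multiply by the pre-truncated factor.
  const intptr_t reverted = allocated * kOldSpaceAllocationMarkingFactor;
  // With kInitialMarkingSpeed == 1 both print 3000. If kInitialMarkingSpeed
  // were 2, the restored form would give 1500 and the reverted form 1000,
  // because 3 / 2 truncates to 1 before the multiplication.
  std::printf("restored=%lld reverted=%lld\n",
              static_cast<long long>(restored),
              static_cast<long long>(reverted));
  return 0;
}

With the placeholder value both forms give the same result; the sketch only illustrates why the two expressions are not interchangeable in general.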
@@ -2213,7 +2213,6 @@ void FreeList::Reset() {
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
-  unreported_allocation_ = 0;
 }
@@ -2361,22 +2360,6 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 }
-void PagedSpace::SetTopAndLimit(Address top, Address limit) {
-  DCHECK(top == limit ||
-         Page::FromAddress(top) == Page::FromAddress(limit - 1));
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.set_top(top);
-  allocation_info_.set_limit(limit);
-}
-
-
-void PagedSpace::ReturnLinearAllocationAreaToFreeList() {
-  int old_linear_size = static_cast<int>(limit() - top());
-  Free(top(), old_linear_size);
-  SetTopAndLimit(NULL, NULL);
-}
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2394,6 +2377,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
+  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+                                                      old_linear_size);
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
@@ -2416,27 +2402,21 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-  // An old-space step will mark more data per byte allocated, because old space
-  // allocation is more serious. We don't want the pause to be bigger, so we
-  // do marking after a smaller amount of allocation.
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
-                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);
-  unreported_allocation_ += new_node_size;
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
   } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->CanDoSteps()) {
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking_steps) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
@@ -2444,32 +2424,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + size_in_bytes + linear_size);
-    // It is important that we are done updating top and limit before we call
-    // this, because it might add the free space between top and limit to the
-    // free list, and that would be very bad if top and new_node were still
-    // pointing to the same place.
-    owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
-                                                        linear_size);
-    unreported_allocation_ = 0;
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + new_node_size);
   } else {
-    if (bytes_left > 0) {
-      // Normally we give the rest of the node to the allocator as its new
-      // linear allocation area.
-      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                             new_node->address() + new_node_size);
-    } else {
-      // TODO(gc) Try not freeing linear allocation region when bytes_left
-      // are zero.
-      owner_->SetTopAndLimit(NULL, NULL);
-    }
-    if (unreported_allocation_ > kThreshold) {
-      // This may start the incremental marker, or do a little work if it's
-      // already started. It is important that we are finished updating top
-      // and limit before we call this (see above).
-      owner_->heap()->incremental_marking()->OldSpaceStep(
-          Min(kThreshold, unreported_allocation_));
-      unreported_allocation_ = 0;
-    }
+    // TODO(gc) Try not freeing linear allocation region when bytes_left
+    // are zero.
+    owner_->SetTopAndLimit(NULL, NULL);
   }
   return new_node;
@@ -2956,16 +2919,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   }
-  // We would like to tell the incremental marker to do a lot of work, since
-  // we just made a large allocation in old space, but that might cause a huge
-  // pause. Underreporting here may cause the marker to speed up because it
-  // will perceive that it is not keeping up with allocation. Although this
-  // causes some big incremental marking steps they are not as big as this one
-  // might have been. In testing, a very large pause was divided up into about
-  // 12 parts.
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
-                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
-  heap()->incremental_marking()->OldSpaceStep(kThreshold);
+  heap()->incremental_marking()->OldSpaceStep(object_size);
   return object;
 }
......
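The FreeList::Allocate and LargeObjectSpace::AllocateRaw hunks above remove the batched reporting of tenured allocation that the original patch introduced: allocations were accumulated in unreported_allocation_ and handed to OldSpaceStep only once a threshold was crossed, capped per call so that no single marking step grows too large. A minimal, self-contained sketch of that batching idea, with simplified names and a stand-in step function instead of the real V8 API:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stand-in for IncrementalMarking::OldSpaceStep; here it only logs how much
// allocation is being reported to the incremental marker.
static void OldSpaceStep(intptr_t bytes) {
  std::printf("marking step for %lld bytes\n", static_cast<long long>(bytes));
}

// Batches allocation reporting: small allocations accumulate, and a single
// report never exceeds the threshold, so no individual step becomes too big.
class AllocationReporter {
 public:
  explicit AllocationReporter(intptr_t threshold)
      : threshold_(threshold), unreported_(0) {}

  void AccountAllocation(intptr_t bytes) {
    unreported_ += bytes;
    if (unreported_ > threshold_) {
      // Report at most threshold_ bytes per step, mirroring the
      // Min(kThreshold, unreported_allocation_) call removed above.
      OldSpaceStep(std::min(threshold_, unreported_));
      unreported_ = 0;
    }
  }

 private:
  intptr_t threshold_;
  intptr_t unreported_;
};

int main() {
  AllocationReporter reporter(64 * 1024);
  reporter.AccountAllocation(10 * 1024);   // accumulated, no step yet
  reporter.AccountAllocation(20 * 1024);   // still below the threshold
  reporter.AccountAllocation(200 * 1024);  // crosses it: one capped step
  return 0;
}

The sketch isolates only the counting; in the hunk above the same bookkeeping is interleaved with the linear-allocation-area handling inside FreeList::Allocate.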
@@ -1589,7 +1589,6 @@ class FreeList {
   PagedSpace* owner_;
   Heap* heap_;
-  int unreported_allocation_;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1786,8 +1785,13 @@ class PagedSpace : public Space {
   void ResetFreeList() { free_list_.Reset(); }
   // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit);
-  void ReturnLinearAllocationAreaToFreeList();
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.set_top(top);
+    allocation_info_.set_limit(limit);
+  }
   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo() {
......
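Several hunks in this commit revolve around the paged space's linear allocation area: the [top, limit) range that SetTopAndLimit manages and that the reverted ReturnLinearAllocationAreaToFreeList handed back to the free list. As a rough mental model only (simplified types, no real free list, not the V8 classes), bump allocation over such an area looks like this:

#include <cstddef>
#include <cstdint>

// Toy bump allocator over a [top, limit) byte range, loosely modelled on a
// paged space's linear allocation area. Address is a plain byte pointer here.
using Address = uint8_t*;

class LinearArea {
 public:
  void SetTopAndLimit(Address top, Address limit) {
    top_ = top;
    limit_ = limit;
  }

  // Bump-pointer allocation: succeeds only while the area has room left.
  Address Allocate(size_t size_in_bytes) {
    if (top_ == nullptr ||
        static_cast<size_t>(limit_ - top_) < size_in_bytes) {
      return nullptr;  // caller would refill from the free list (not modelled)
    }
    Address result = top_;
    top_ += size_in_bytes;
    return result;
  }

  // Counterpart of the removed ReturnLinearAllocationAreaToFreeList: the
  // unused [top, limit) tail would go back to the free list, and the area is
  // emptied so a stale top/limit pair cannot be used for further bumps.
  void ReturnToFreeList() {
    // Free(top_, limit_ - top_);  // omitted in this sketch
    SetTopAndLimit(nullptr, nullptr);
  }

 private:
  Address top_ = nullptr;
  Address limit_ = nullptr;
};

int main() {
  static uint8_t backing[256];
  LinearArea area;
  area.SetTopAndLimit(backing, backing + sizeof(backing));
  Address obj = area.Allocate(32);  // bump allocation from the linear area
  area.ReturnToFreeList();          // give the rest back; the area is now empty
  return obj != nullptr ? 0 : 1;
}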
@@ -2029,10 +2029,6 @@ TEST(TestAlignedOverAllocation) {
   HeapObject* filler2;
   if (double_misalignment) {
     start = AlignOldSpace(kDoubleAligned, 0);
-    // If we run out of linear allocation area then we might get null here. In
-    // that case we are unlucky and the test is not going to work, but it's not
-    // a test failure, this is a reasonable thing to happen. Just abandon.
-    if (start == NULL) return;
     obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
     // The object is aligned, and a filler object is created after.
     CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
@@ -2041,7 +2037,6 @@ TEST(TestAlignedOverAllocation) {
           filler1->Size() == kPointerSize);
     // Try the opposite alignment case.
     start = AlignOldSpace(kDoubleAligned, kPointerSize);
-    if (start == NULL) return;
     obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
     CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
     filler1 = HeapObject::FromAddress(start);
@@ -2053,7 +2048,6 @@ TEST(TestAlignedOverAllocation) {
   // Similarly for kDoubleUnaligned.
   start = AlignOldSpace(kDoubleUnaligned, 0);
-  if (start == NULL) return;
   obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
   // The object is aligned, and a filler object is created after.
   CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
@@ -2062,7 +2056,6 @@ TEST(TestAlignedOverAllocation) {
         filler1->Size() == kPointerSize);
   // Try the opposite alignment case.
   start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
-  if (start == NULL) return;
   obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
   CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
   filler1 = HeapObject::FromAddress(start);
@@ -2073,7 +2066,6 @@ TEST(TestAlignedOverAllocation) {
   // Now test SIMD alignment. There are 2 or 4 possible alignments, depending
   // on platform.
   start = AlignOldSpace(kSimd128Unaligned, 0);
-  if (start == NULL) return;
   obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
   CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
   // There is a filler object after the object.
@@ -2081,7 +2073,6 @@ TEST(TestAlignedOverAllocation) {
   CHECK(obj != filler1 && filler1->IsFiller() &&
         filler1->Size() == kSimd128Size - kPointerSize);
   start = AlignOldSpace(kSimd128Unaligned, kPointerSize);
-  if (start == NULL) return;
   obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
   CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
   // There is a filler object before the object.
@@ -2092,7 +2083,6 @@ TEST(TestAlignedOverAllocation) {
   if (double_misalignment) {
     // Test the 2 other alignments possible on 32 bit platforms.
     start = AlignOldSpace(kSimd128Unaligned, 2 * kPointerSize);
-    if (start == NULL) return;
    obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
     CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
     // There are filler objects before and after the object.
@@ -2103,7 +2093,6 @@ TEST(TestAlignedOverAllocation) {
     CHECK(obj != filler2 && filler2->IsFiller() &&
           filler2->Size() == kPointerSize);
     start = AlignOldSpace(kSimd128Unaligned, 3 * kPointerSize);
-    if (start == NULL) return;
     obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
     CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
     // There are filler objects before and after the object.
@@ -5401,10 +5390,9 @@ TEST(Regress388880) {
   Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED, false);
   o->set_properties(*factory->empty_fixed_array());
-  // Ensure that the object allocated where we need it. If not, then abandon
-  // the test, since this isn't actually something we can reasonably require.
+  // Ensure that the object allocated where we need it.
   Page* page = Page::FromAddress(o->address());
-  if (desired_offset != page->Offset(o->address())) return;
+  CHECK_EQ(desired_offset, page->Offset(o->address()));
   // Now we have an object right at the end of the page.
......