Commit 9716468a authored by erikcorry, committed by Commit bot

Fix logic for doing incremental marking steps on tenured allocation.

R=hpayer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1040233003

Cr-Commit-Position: refs/heads/master@{#27883}
parent 0bc1a152
@@ -815,7 +815,7 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
   if (IsStopped() && ShouldActivate()) {
     Start();
   } else {
-    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+    Step(allocated * kOldSpaceAllocationMarkingFactor, GC_VIA_STACK_GUARD);
   }
 }
@@ -891,8 +891,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      !FLAG_incremental_marking_steps ||
-      (state_ != SWEEPING && state_ != MARKING)) {
+      !CanDoSteps()) {
     return 0;
   }
......
@@ -50,7 +50,10 @@ class IncrementalMarking {
   INLINE(bool IsMarking()) { return state() >= MARKING; }
   inline bool IsMarkingIncomplete() { return state() == MARKING; }
+  inline bool CanDoSteps() {
+    return FLAG_incremental_marking_steps &&
+           (state() == MARKING || state() == SWEEPING);
+  }
   inline bool IsComplete() { return state() == COMPLETE; }
@@ -102,6 +105,8 @@ class IncrementalMarking {
   // But if we are promoting a lot of data we need to mark faster to keep up
   // with the data that is entering the old space through promotion.
   static const intptr_t kFastMarking = 3;
+  static const intptr_t kOldSpaceAllocationMarkingFactor =
+      kFastMarking / kInitialMarkingSpeed;
   // After this many steps we increase the marking/allocating factor.
   static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
   // This is how much we increase the marking/allocating factor by.
......
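For orientation, here is a standalone sketch of what the new factor works out to. Only kFastMarking and kOldSpaceAllocationMarkingFactor come from the hunk above; the values used for kInitialMarkingSpeed and kAllocatedThreshold are assumptions based on their usual defaults in incremental-marking.h, not part of this diff.

// Sketch only: kInitialMarkingSpeed and kAllocatedThreshold values are assumed.
#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kInitialMarkingSpeed = 1;     // assumed default
  const intptr_t kFastMarking = 3;             // from the hunk above
  const intptr_t kOldSpaceAllocationMarkingFactor =
      kFastMarking / kInitialMarkingSpeed;     // == 3 with these values
  const intptr_t kAllocatedThreshold = 65536;  // assumed default

  // OldSpaceStep(allocated) now scales tenured allocation by this factor,
  // replacing the inline "* kFastMarking / kInitialMarkingSpeed" arithmetic.
  const intptr_t allocated = 16 * 1024;
  std::printf("marking work for %ld tenured bytes: %ld bytes\n",
              static_cast<long>(allocated),
              static_cast<long>(allocated * kOldSpaceAllocationMarkingFactor));

  // FreeList::Allocate and LargeObjectSpace::AllocateRaw size their
  // reporting threshold with the same factor.
  std::printf("old-space reporting threshold: %ld bytes\n",
              static_cast<long>(kAllocatedThreshold *
                                kOldSpaceAllocationMarkingFactor));
  return 0;
}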
@@ -2202,6 +2202,7 @@ void FreeList::Reset() {
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
+  unreported_allocation_ = 0;
 }
@@ -2349,6 +2350,15 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 }
+void PagedSpace::SetTopAndLimit(Address top, Address limit) {
+  DCHECK(top == limit ||
+         Page::FromAddress(top) == Page::FromAddress(limit - 1));
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(top);
+  allocation_info_.set_limit(limit);
+}
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2366,9 +2376,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
-  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
-                                                      old_linear_size);
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
@@ -2391,21 +2398,27 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+  // An old-space step will mark more data per byte allocated, because old space
+  // allocation is more serious. We don't want the pause to be bigger, so we
+  // do marking after a smaller amount of allocation.
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);
+  unreported_allocation_ += new_node_size;
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
   } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-             FLAG_incremental_marking_steps) {
+             owner_->heap()->incremental_marking()->CanDoSteps()) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
@@ -2413,15 +2426,27 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + size_in_bytes + linear_size);
-  } else if (bytes_left > 0) {
-    // Normally we give the rest of the node to the allocator as its new
-    // linear allocation area.
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + new_node_size);
+    owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
+                                                        linear_size);
+    unreported_allocation_ = 0;
   } else {
-    // TODO(gc) Try not freeing linear allocation region when bytes_left
-    // are zero.
-    owner_->SetTopAndLimit(NULL, NULL);
+    if (unreported_allocation_ > kThreshold) {
+      // This may start the incremental marker, or do a little work if it's
+      // already started.
+      owner_->heap()->incremental_marking()->OldSpaceStep(
+          Min(kThreshold, unreported_allocation_));
+      unreported_allocation_ = 0;
+    }
+    if (bytes_left > 0) {
+      // Normally we give the rest of the node to the allocator as its new
+      // linear allocation area.
+      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                             new_node->address() + new_node_size);
+    } else {
+      // TODO(gc) Try not freeing linear allocation region when bytes_left
+      // are zero.
+      owner_->SetTopAndLimit(NULL, NULL);
+    }
   }
   return new_node;
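To make the control flow above easier to follow, here is a minimal standalone model of the new reporting scheme in FreeList::Allocate. The function names are stand-ins rather than V8 API, and the threshold value is an assumed example; the branch that hands out a capped linear area reports its step immediately and resets the counter, which is not modeled here.

// Minimal model, not V8 code: tenured allocations accumulate in
// unreported_allocation and marking work is requested in steps of at
// most kThreshold bytes (the value below is an assumed example).
#include <algorithm>
#include <cstdio>

namespace {

const int kThreshold = 65536 * 3;  // assumed kAllocatedThreshold * factor
int unreported_allocation = 0;

// Stand-in for IncrementalMarking::OldSpaceStep().
void OldSpaceStep(int bytes) {
  std::printf("incremental marking step for %d bytes\n", bytes);
}

// Stand-in for the bookkeeping added to FreeList::Allocate(): every
// free-list allocation is counted, but the marker is only poked once the
// running total passes the threshold, and the reported amount is capped.
void OnTenuredAllocation(int new_node_size) {
  unreported_allocation += new_node_size;
  if (unreported_allocation > kThreshold) {
    OldSpaceStep(std::min(kThreshold, unreported_allocation));
    unreported_allocation = 0;
  }
}

}  // namespace

int main() {
  // Ten 40 KB tenured allocations trigger a capped step each time roughly
  // 192 KB of unreported allocation has built up.
  for (int i = 0; i < 10; i++) OnTenuredAllocation(40 * 1024);
  return 0;
}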
@@ -2908,7 +2933,16 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   }
-  heap()->incremental_marking()->OldSpaceStep(object_size);
+  // We would like to tell the incremental marker to do a lot of work, since
+  // we just made a large allocation in old space, but that might cause a huge
+  // pause. Underreporting here may cause the marker to speed up because it
+  // will perceive that it is not keeping up with allocation. Although this
+  // causes some big incremental marking steps they are not as big as this one
+  // might have been. In testing, a very large pause was divided up into about
+  // 12 parts.
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+  heap()->incremental_marking()->OldSpaceStep(kThreshold);
   return object;
 }
......
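A rough numeric illustration of the underreporting comment above follows. The constants are assumed defaults, the 2 MB object is hypothetical, and the printed ratio is only meant to show the order of magnitude, not the "about 12 parts" figure observed in testing.

// Rough arithmetic sketch with assumed constants; nothing here is taken
// from the diff itself except the shape of the calculation.
#include <cstdio>

int main() {
  const int kAllocatedThreshold = 65536;           // assumed default
  const int kOldSpaceAllocationMarkingFactor = 3;  // assumed kFastMarking / kInitialMarkingSpeed
  const int kCap = kAllocatedThreshold * kOldSpaceAllocationMarkingFactor;

  const int object_size = 2 * 1024 * 1024;  // hypothetical large object

  // Before the change the whole object size was reported in one go.
  std::printf("reported before: %d bytes\n", object_size);

  // Now only a capped amount is reported per large-object allocation; the
  // marker notices it is falling behind and speeds up on later steps, so
  // the same work is spread over several smaller pauses.
  std::printf("reported after:  %d bytes (object is %dx larger)\n",
              kCap, object_size / kCap);
  return 0;
}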
@@ -1574,6 +1574,7 @@ class FreeList {
   PagedSpace* owner_;
   Heap* heap_;
+  int unreported_allocation_;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1771,13 +1772,7 @@ class PagedSpace : public Space {
   void ResetFreeList() { free_list_.Reset(); }
   // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit) {
-    DCHECK(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.set_top(top);
-    allocation_info_.set_limit(limit);
-  }
+  void SetTopAndLimit(Address top, Address limit);
   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo() {
......