Commit 93e93983 authored by mlippautz, committed by Commit bot

[heap] Remove unswept bytes counter

This change removes the unswept free bytes counter.

The new approach
- directly decrements allocated memory and capacity before sweeping (using live
  bytes from the marker), and
- adds back capacity when refilling a free list.

This is further preparation for moving free lists around while keeping the
counters in a sane state.

The previous approach let us nail down exactly how much memory was still to be
swept. However, the counter had no real users: we only used it to decrement
allocated memory (which still accounted for dead objects). If we ever need to
track unswept free bytes in a space during compaction, we can introduce a
separate concurrent counter for that purpose.
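For illustration, here is a toy model of the counter flow (a hedged sketch,
not V8 code: the SpaceStats class below is invented and only loosely mirrors
AllocationStats, though the method names follow the calls visible in the
patch; it compiles standalone):

  #include <cassert>
  #include <cstdint>
  #include <iostream>

  // Toy model: capacity_ = usable bytes owned by the space,
  // size_ = bytes currently considered allocated.
  class SpaceStats {
   public:
    void ExpandSpace(int64_t bytes) { capacity_ += bytes; }  // new page
    void AllocateBytes(int64_t bytes) {
      size_ += bytes;
      assert(size_ <= capacity_);
    }
    // Before sweeping: the page's dead bytes (area size minus live bytes
    // from the marker) leave both counters at once.
    void ShrinkSpace(int64_t bytes) {
      capacity_ -= bytes;
      size_ -= bytes;
      assert(size_ >= 0 && capacity_ >= size_);
    }
    // During free list refill: reclaimed bytes come back as capacity
    // only; they are free memory, not allocated memory.
    void IncreaseCapacity(int64_t bytes) { capacity_ += bytes; }
    int64_t size() const { return size_; }
    int64_t capacity() const { return capacity_; }

   private:
    int64_t capacity_ = 0;
    int64_t size_ = 0;
  };

  int main() {
    SpaceStats stats;
    const int64_t area = 512 * 1024;  // one page, fully allocated pre-GC
    stats.ExpandSpace(area);
    stats.AllocateBytes(area);
    const int64_t live = 120 * 1024;      // live bytes found by the marker
    stats.ShrinkSpace(area - live);       // decrement before sweeping
    assert(stats.size() == live);
    stats.IncreaseCapacity(area - live);  // add back while refilling
    assert(stats.capacity() == area);
    std::cout << stats.size() << "/" << stats.capacity() << "\n";
  }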

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1380723002

Cr-Commit-Position: refs/heads/master@{#31175}
parent bae875cc
@@ -569,9 +569,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   RefillFreeList(heap()->paged_space(OLD_SPACE));
   RefillFreeList(heap()->paged_space(CODE_SPACE));
   RefillFreeList(heap()->paged_space(MAP_SPACE));
-  heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
-  heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
-  heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !evacuation()) {
@@ -606,9 +603,8 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
     return;
   }

-  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
-  space->AddToAccountingStats(freed_bytes);
-  space->DecrementUnsweptFreeBytes(freed_bytes);
+  intptr_t added = space->free_list()->Concatenate(free_list);
+  space->accounting_stats_.IncreaseCapacity(added);
 }
@@ -3864,6 +3860,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
     p->ResetLiveBytes();
+    CHECK(p->WasSwept());
     space->ReleasePage(p);
   }
   evacuation_candidates_.Rewind(0);
@@ -4355,9 +4352,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                reinterpret_cast<intptr_t>(p));
       }
-      // Adjust unswept free bytes because releasing a page expects said
-      // counter to be accurate for unswept pages.
-      space->IncreaseUnsweptFreeBytes(p);
       space->ReleasePage(p);
       continue;
     }
@@ -4391,7 +4385,8 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
                  reinterpret_cast<intptr_t>(p));
         }
         p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
-        space->IncreaseUnsweptFreeBytes(p);
+        int to_sweep = p->area_size() - p->LiveBytes();
+        space->accounting_stats_.ShrinkSpace(to_sweep);
       }
       space->set_end_of_unswept_pages(p);
       break;
@@ -924,7 +924,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
   }
   chunk->IncrementLiveBytes(by);
 }
@@ -954,7 +954,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
       free_list_(this),
-      unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -992,13 +991,13 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   // Move over the free list. Concatenate makes sure that the source free list
   // gets properly reset after moving over all nodes.
-  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+  intptr_t added = free_list_.Concatenate(other->free_list());

   // Moved memory is not recorded as allocated memory, but rather increases and
   // decreases capacity of the corresponding spaces. Used size and waste size
   // are maintained by the receiving space upon allocating and freeing blocks.
-  other->accounting_stats_.DecreaseCapacity(freed_bytes);
-  accounting_stats_.IncreaseCapacity(freed_bytes);
+  other->accounting_stats_.DecreaseCapacity(added);
+  accounting_stats_.IncreaseCapacity(added);
 }
@@ -1142,8 +1141,6 @@ void PagedSpace::ReleasePage(Page* page) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
-  } else {
-    DecreaseUnsweptFreeBytes(page);
   }

   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
@@ -2251,7 +2248,8 @@ FreeList::FreeList(PagedSpace* owner)
 intptr_t FreeList::Concatenate(FreeList* other) {
-  intptr_t free_bytes = 0;
+  intptr_t usable_bytes = 0;
+  intptr_t wasted_bytes = 0;

   // This is safe (not going to deadlock) since Concatenate operations
   // are never performed on the same free lists at the same time in
@@ -2260,17 +2258,18 @@ intptr_t FreeList::Concatenate(FreeList* other) {
   if (!owner()->is_local()) mutex_.Lock();
   if (!other->owner()->is_local()) other->mutex()->Lock();

-  wasted_bytes_ += other->wasted_bytes_;
+  wasted_bytes = other->wasted_bytes_;
+  wasted_bytes_ += wasted_bytes;
   other->wasted_bytes_ = 0;

-  free_bytes += small_list_.Concatenate(other->small_list());
-  free_bytes += medium_list_.Concatenate(other->medium_list());
-  free_bytes += large_list_.Concatenate(other->large_list());
-  free_bytes += huge_list_.Concatenate(other->huge_list());
+  usable_bytes += small_list_.Concatenate(other->small_list());
+  usable_bytes += medium_list_.Concatenate(other->medium_list());
+  usable_bytes += large_list_.Concatenate(other->large_list());
+  usable_bytes += huge_list_.Concatenate(other->huge_list());

   if (!other->owner()->is_local()) other->mutex()->Unlock();
   if (!owner()->is_local()) mutex_.Unlock();
-  return free_bytes;
+  return usable_bytes + wasted_bytes;
 }
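The changed return value matters because RefillFreeList feeds it straight
into IncreaseCapacity: capacity was shrunk before sweeping by all dead bytes,
including the ones that come back as waste, so omitting wasted_bytes would
leak capacity on every GC cycle. A minimal sketch of that contract (hedged:
TinyFreeList is an invented stand-in that tracks byte totals only, not real
free list nodes):

  #include <cstdint>
  #include <iostream>
  #include <mutex>

  class TinyFreeList {
   public:
    void Free(int64_t bytes, bool usable) {
      std::lock_guard<std::mutex> guard(mutex_);
      (usable ? usable_bytes_ : wasted_bytes_) += bytes;
    }
    // Concatenates {other} into {this} and returns the added bytes,
    // including waste -- the same contract the patch documents.
    int64_t Concatenate(TinyFreeList* other) {
      // Lock both lists. std::scoped_lock acquires them deadlock-free;
      // the real code instead relies on Concatenate never running on the
      // same pair of lists concurrently, per the comment in the patch.
      std::scoped_lock guard(mutex_, other->mutex_);
      const int64_t usable = other->usable_bytes_;
      const int64_t wasted = other->wasted_bytes_;
      usable_bytes_ += usable;
      wasted_bytes_ += wasted;
      other->usable_bytes_ = other->wasted_bytes_ = 0;
      return usable + wasted;
    }

   private:
    std::mutex mutex_;
    int64_t usable_bytes_ = 0;
    int64_t wasted_bytes_ = 0;
  };

  int main() {
    TinyFreeList space_list, sweeper_list;
    sweeper_list.Free(4096, true);  // reusable block
    sweeper_list.Free(16, false);   // too small to link in: waste
    // Capacity must grow by 4112, not 4096.
    std::cout << space_list.Concatenate(&sweeper_list) << "\n";
  }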
@@ -2555,20 +2554,13 @@ void PagedSpace::PrepareForMarkCompact() {
   // on the first allocation after the sweep.
   EmptyAllocationInfo();

-  // This counter will be increased for pages which will be swept by the
-  // sweeper threads.
-  unswept_free_bytes_ = 0;
-
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }

 intptr_t PagedSpace::SizeOfObjects() {
-  DCHECK(!FLAG_concurrent_sweeping ||
-         heap()->mark_compact_collector()->sweeping_in_progress() ||
-         (unswept_free_bytes_ == 0));
-  const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
+  const intptr_t size = Size() - (limit() - top());
   DCHECK_GE(size, 0);
   USE(size);
   return size;
@@ -1530,7 +1530,7 @@ class AllocationStats BASE_EMBEDDED {
   // Free allocated bytes, making them available (size -> available).
   void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
-    DCHECK(size_ >= 0);
+    DCHECK_GE(size_, 0);
   }

   // Merge {other} into {this}.
@@ -1545,6 +1545,7 @@ class AllocationStats BASE_EMBEDDED {
   void DecreaseCapacity(intptr_t size_in_bytes) {
     capacity_ -= size_in_bytes;
     DCHECK_GE(capacity_, 0);
+    DCHECK_GE(capacity_, size_);
   }

   void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
@@ -1659,6 +1660,10 @@ class FreeList {
  public:
   explicit FreeList(PagedSpace* owner);

+  // The method concatenates {other} into {this} and returns the added bytes,
+  // including waste.
+  //
+  // Can be used concurrently.
   intptr_t Concatenate(FreeList* other);

   // Clear the free list.
@@ -1977,22 +1982,6 @@ class PagedSpace : public Space {
            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
   }

-  void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
-
-  void IncreaseUnsweptFreeBytes(Page* p) {
-    DCHECK(ShouldBeSweptBySweeperThreads(p));
-    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
-  }
-
-  void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
-
-  void DecreaseUnsweptFreeBytes(Page* p) {
-    DCHECK(ShouldBeSweptBySweeperThreads(p));
-    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
-  }
-
-  void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
-
   // This function tries to steal size_in_bytes memory from the sweeper threads
   // free-lists. If it does not succeed stealing enough memory, it will wait
   // for the sweeper threads to finish sweeping.
@@ -2075,10 +2064,6 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;

-  // The number of free bytes which could be reclaimed by advancing the
-  // concurrent sweeper threads.
-  intptr_t unswept_free_bytes_;
-
   // The sweeper threads iterate over the list of pointer and data space pages
   // and sweep these pages concurrently. They will stop sweeping after the
   // end_of_unswept_pages_ page.