Commit dfc6b4dd authored by Ulan Degenbaev, committed by Commit Bot

[heap] New live byte tracking.

This patch changes how space size and capacity are updated in GC:
- space capacity changes only when a page is added to or removed from the space.
- space size is reset to zero before sweeping and incremented by
  page->live_bytes_count_ for each to-be-swept page.
- space size is refined after sweeping using the accurate
  page->allocated_bytes counter produced by the sweeper.

Invariants:
1. space.capacity = sum [page.size | for page in space].
2. After marking, before sweeping:
   a) space.size = sum [page.live_bytes_count | for page in space].
3. After sweeping, before marking ends:
   a) space.size = sum [page.allocated_bytes | for page in space].
   b) page.allocated_bytes >= (sum [object.size | for object in page] +
         page.linear_allocation_area).
   c) page.area_size = (page.allocated_bytes + page.wasted_memory +
          sum [free_list_entry.size | for free_list_entry in page]).

Invariant 3.b becomes an equality if the mutator is not doing array trimming
or object slack tracking during sweeping.
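
For illustration only, here is a minimal, self-contained sketch (plain C++,
not V8 code) of what the post-sweeping invariants amount to. The PageModel
and SpaceModel types, their field names, and the free_list_bytes field are
hypothetical simplifications of the counters introduced by this patch
(allocated_bytes_, wasted_memory_, the per-space capacity/size):

  // Model of invariants 1, 3.a and 3.c from the commit message.
  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct PageModel {
    size_t area_size;        // usable area of the page
    size_t allocated_bytes;  // objects + linear allocation area (after sweeping)
    size_t wasted_memory;    // freed memory that was not added to the free list
    size_t free_list_bytes;  // sum of free-list entry sizes on this page
  };

  struct SpaceModel {
    std::vector<PageModel> pages;
    size_t capacity = 0;  // changes only when pages are added/removed
    size_t size = 0;      // refined after sweeping from page allocated_bytes
  };

  // Checks the "after sweeping" invariants for one space.
  void VerifyAfterSweeping(const SpaceModel& space) {
    size_t capacity_sum = 0;
    size_t allocated_sum = 0;
    for (const PageModel& p : space.pages) {
      capacity_sum += p.area_size;
      allocated_sum += p.allocated_bytes;
      // 3.c: every byte is either allocated, wasted, or on the free list.
      assert(p.area_size ==
             p.allocated_bytes + p.wasted_memory + p.free_list_bytes);
    }
    assert(space.capacity == capacity_sum);   // invariant 1 (simplified)
    assert(space.size == allocated_sum);      // invariant 3.a
  }

  int main() {
    SpaceModel space;
    // One toy page: 300 KB allocated, 4 KB wasted, 208 KB on the free list.
    space.pages.push_back({512 * 1024, 300 * 1024, 4 * 1024, 208 * 1024});
    space.capacity = 512 * 1024;
    space.size = 300 * 1024;
    VerifyAfterSweeping(space);  // all invariants hold for this toy space
  }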

Bug: chromium:694255
Change-Id: Ic8d16a8171187a113fee2df8bf3c2a4c5e77bc08
Reviewed-on: https://chromium-review.googlesource.com/618889
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47409}
parent 5c741cbd
......@@ -5257,6 +5257,23 @@ void Heap::VerifyRememberedSetFor(HeapObject* object) {
}
#endif
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->VerifyCountersAfterSweeping();
}
}
void Heap::VerifyCountersBeforeConcurrentSweeping() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
#endif
void Heap::ZapFromSpace() {
if (!new_space_->IsFromSpaceCommitted()) return;
......
......@@ -1496,6 +1496,9 @@ class Heap {
#endif
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
void Print();
......
......@@ -1044,6 +1044,9 @@ void IncrementalMarking::FinalizeSweeping() {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
#ifdef DEBUG
heap_->VerifyCountersAfterSweeping();
#endif
StartMarking();
}
}
......
......@@ -863,7 +863,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
int candidate_count = 0;
......@@ -1044,6 +1044,10 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
#ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
if (!heap()->delay_sweeper_tasks_for_testing_) {
sweeper().StartSweeperTasks();
}
......@@ -3703,10 +3707,15 @@ int MarkCompactCollector::Sweeper::RawSweep(
skip_list->Clear();
}
intptr_t live_bytes = 0;
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
int curr_region = -1;
// Set the allocated_bytes counter to area_size. The free operations below
// will decrease the counter to actual live bytes.
p->ResetAllocatedBytes();
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject* const object = object_and_size.first;
......@@ -3738,6 +3747,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
live_bytes += size;
if (rebuild_skip_list) {
int new_region_start = SkipList::RegionNumber(free_end);
int new_region_end =
......@@ -3788,9 +3798,18 @@ int MarkCompactCollector::Sweeper::RawSweep(
}
}
// Clear the mark bits of that page and reset live bytes count.
marking_state_->ClearLiveness(p);
marking_state_->bitmap(p)->Clear();
if (free_list_mode == IGNORE_FREE_LIST) {
marking_state_->SetLiveBytes(p, 0);
// We did not free memory, so have to adjust allocated bytes here.
intptr_t freed_bytes = p->area_size() - live_bytes;
p->DecreaseAllocatedBytes(freed_bytes);
} else {
// Keep the old live bytes counter of the page until RefillFreeList, where
// the space size is refined.
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
......@@ -4539,9 +4558,10 @@ void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
size_t to_sweep = page->area_size() - marking_state_->live_bytes(page);
if (space != NEW_SPACE)
heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
if (space != NEW_SPACE) {
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
}
Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
......@@ -4582,6 +4602,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
Heap::ShouldZapGarbage()
? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
: FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
continue;
}
......
......@@ -168,7 +168,8 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
added += category->available();
category->Relink();
});
DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
DCHECK_EQ(page->AvailableInFreeList(),
page->AvailableInFreeListFromAllocatedBytes());
return added;
}
......
......@@ -184,7 +184,7 @@ class FreeListCategory {
// category is currently unlinked.
void Relink();
bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
void Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
......@@ -677,8 +677,10 @@ class MemoryChunk {
base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
// PagedSpace free-list statistics.
base::AtomicNumber<intptr_t> available_in_free_list_;
// Byte allocated on the page, which includes all objects on the page
// and the linear allocation area.
base::AtomicNumber<intptr_t> allocated_bytes_;
// Freed memory that was not added to the free list.
base::AtomicNumber<intptr_t> wasted_memory_;
// next_chunk_ holds a pointer of type MemoryChunk
......@@ -704,6 +706,7 @@ class MemoryChunk {
friend class MemoryChunkValidator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
};
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
......@@ -801,9 +804,9 @@ class Page : public MemoryChunk {
size_t AvailableInFreeList();
size_t LiveBytesFromFreeList() {
DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
return area_size() - wasted_memory() - available_in_free_list();
size_t AvailableInFreeListFromAllocatedBytes() {
DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
return area_size() - wasted_memory() - allocated_bytes();
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
......@@ -814,17 +817,19 @@ class Page : public MemoryChunk {
size_t wasted_memory() { return wasted_memory_.Value(); }
void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
size_t available_in_free_list() { return available_in_free_list_.Value(); }
void add_available_in_free_list(size_t available) {
DCHECK_LE(available, area_size());
available_in_free_list_.Increment(available);
size_t allocated_bytes() { return allocated_bytes_.Value(); }
void IncreaseAllocatedBytes(size_t bytes) {
DCHECK_LE(bytes, area_size());
allocated_bytes_.Increment(bytes);
}
void remove_available_in_free_list(size_t available) {
DCHECK_LE(available, area_size());
DCHECK_GE(available_in_free_list(), available);
available_in_free_list_.Decrement(available);
void DecreaseAllocatedBytes(size_t bytes) {
DCHECK_LE(bytes, area_size());
DCHECK_GE(allocated_bytes(), bytes);
allocated_bytes_.Decrement(bytes);
}
void ResetAllocatedBytes();
size_t ShrinkToHighWaterMark();
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
......@@ -1611,47 +1616,39 @@ class AllocationStats BASE_EMBEDDED {
void Clear() {
capacity_ = 0;
max_capacity_ = 0;
size_ = 0;
ClearSize();
}
void ClearSize() { size_ = capacity_; }
void ClearSize() {
size_ = 0;
#ifdef DEBUG
allocated_on_page_.clear();
#endif
}
// Accessors for the allocation statistics.
size_t Capacity() { return capacity_; }
size_t MaxCapacity() { return max_capacity_; }
size_t Size() { return size_; }
#ifdef DEBUG
size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
#endif
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
// putting them on the free list and removing them from size_.
void ExpandSpace(size_t bytes) {
DCHECK_GE(size_ + bytes, size_);
DCHECK_GE(capacity_ + bytes, capacity_);
capacity_ += bytes;
size_ += bytes;
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
}
// Shrink the space by removing available bytes. Since shrinking is done
// during sweeping, bytes have been marked as being in use (part of the size)
// and are hereby freed.
void ShrinkSpace(size_t bytes) {
DCHECK_GE(capacity_, bytes);
DCHECK_GE(size_, bytes);
capacity_ -= bytes;
size_ -= bytes;
}
void AllocateBytes(size_t bytes) {
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
DCHECK_GE(size_ + bytes, size_);
size_ += bytes;
#ifdef DEBUG
allocated_on_page_[page] += bytes;
#endif
}
void DeallocateBytes(size_t bytes) {
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
DCHECK_GE(size_, bytes);
size_ -= bytes;
#ifdef DEBUG
DCHECK_GE(allocated_on_page_[page], bytes);
allocated_on_page_[page] -= bytes;
#endif
}
void DecreaseCapacity(size_t bytes) {
......@@ -1663,16 +1660,8 @@ class AllocationStats BASE_EMBEDDED {
void IncreaseCapacity(size_t bytes) {
DCHECK_GE(capacity_ + bytes, capacity_);
capacity_ += bytes;
}
// Merge |other| into |this|.
void Merge(const AllocationStats& other) {
DCHECK_GE(capacity_ + other.capacity_, capacity_);
DCHECK_GE(size_ + other.size_, size_);
capacity_ += other.capacity_;
size_ += other.size_;
if (other.max_capacity_ > max_capacity_) {
max_capacity_ = other.max_capacity_;
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
}
......@@ -1686,6 +1675,10 @@ class AllocationStats BASE_EMBEDDED {
// |size_|: The number of allocated bytes.
size_t size_;
#ifdef DEBUG
std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
#endif
};
// A free list maintaining free blocks of memory. The free list is organized in
......@@ -2062,7 +2055,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// no attempt to add area to free list is made.
size_t Free(Address start, size_t size_in_bytes) {
size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
accounting_stats_.DeallocateBytes(size_in_bytes);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
......@@ -2093,15 +2087,26 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
void MarkAllocationInfoBlack();
void UnmarkAllocationInfo();
void AccountAllocatedBytes(size_t bytes) {
accounting_stats_.AllocateBytes(bytes);
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
}
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.IncreaseAllocatedBytes(bytes, page);
}
void DecreaseCapacity(size_t bytes) {
accounting_stats_.DecreaseCapacity(bytes);
}
void IncreaseCapacity(size_t bytes) {
accounting_stats_.IncreaseCapacity(bytes);
}
void IncreaseCapacity(size_t bytes);
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page);
void AccountAddedPage(Page* page);
void AccountRemovedPage(Page* page);
void RefineAllocatedBytesAfterSweeping(Page* page);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
......@@ -2116,6 +2121,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
#endif
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
// Print meta info and objects in this space.
void Print() override;
......@@ -2162,6 +2169,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// using the high water mark.
void ShrinkImmortalImmovablePages();
size_t ShrinkPageToHighWaterMark(Page* page);
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
// Remove a page if it has at least |size_in_bytes| bytes available that can
......
......@@ -176,7 +176,6 @@ void SimulateFullSpace(v8::internal::PagedSpace* space) {
}
space->EmptyAllocationInfo();
space->ResetFreeList();
space->ClearStats();
}
void AbandonCurrentlyFreeMemory(PagedSpace* space) {
......
......@@ -609,13 +609,14 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
Page* page = Page::FromAddress(array->address());
// Reset space so high water mark is consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
PagedSpace* old_space = CcTest::heap()->old_space();
old_space->ResetFreeList();
old_space->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK(filler->IsFreeSpace());
size_t shrunk = page->ShrinkToHighWaterMark();
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
size_t should_have_shrunk =
RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
base::OS::CommitPageSize());
......@@ -636,10 +637,11 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
PagedSpace* old_space = CcTest::heap()->old_space();
old_space->ResetFreeList();
old_space->EmptyAllocationInfo();
const size_t shrunk = page->ShrinkToHighWaterMark();
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
CHECK_EQ(0u, shrunk);
}
......@@ -658,14 +660,15 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
PagedSpace* old_space = CcTest::heap()->old_space();
old_space->ResetFreeList();
old_space->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
const size_t shrunk = page->ShrinkToHighWaterMark();
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
CHECK_EQ(0u, shrunk);
}
......@@ -684,14 +687,15 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
PagedSpace* old_space = CcTest::heap()->old_space();
old_space->ResetFreeList();
old_space->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
const size_t shrunk = page->ShrinkToHighWaterMark();
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
CHECK_EQ(0u, shrunk);
}
......
......@@ -53,6 +53,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
heap()->old_space()->UnlinkFreeListCategories(page);
heap()->old_space()->AccountRemovedPage(page);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = static_cast<void*>(page->address());
......@@ -72,6 +73,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
heap()->old_space()->UnlinkFreeListCategories(page);
heap()->old_space()->AccountRemovedPage(page);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = static_cast<void*>(page->address());
......