Commit 7a6f47fe authored by mlippautz, committed by Commit bot

[heap] Properly account for wasted bytes.

- Wasted bytes are now accounted where they accrue, i.e., in the corresponding
  free list. The amount of waste is transferred when free lists are
  concatenated.
- During concatenation, free lists are no longer locked at the FreeListCategory
  level but in the FreeList itself, simplifying the synchronization between the
  contained nodes and the wasted bytes (which are effectively dropped nodes).

This is pre-work for properly moving memory to compaction spaces, which requires
correct accounting of wasted memory.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1379833002

Cr-Commit-Position: refs/heads/master@{#31040}
parent 6b629ef7
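
Editorial note: in outline, the patch keeps a wasted-bytes counter on each
FreeList and merges that counter whenever free lists are concatenated. A
minimal sketch of the idea follows; the name FreeListSketch, the single
available_ counter standing in for the four category lists, the omitted
locking, and the kSmallListMin value are all assumptions for illustration,
not V8's API.

#include <cstdint>

// Waste is recorded on the free list where it accrues, and concatenation
// moves the counter along with the free bytes.
class FreeListSketch {
 public:
  // Returns the number of wasted bytes, mirroring FreeList::Free below.
  int Free(int size_in_bytes) {
    if (size_in_bytes <= kSmallListMin) {
      wasted_bytes_ += size_in_bytes;  // Accounted where it accrues.
      return size_in_bytes;            // The whole block was wasted.
    }
    available_ += size_in_bytes;  // Block goes onto a free list.
    return 0;
  }

  // Mirrors FreeList::Concatenate: waste moves together with free bytes.
  int64_t Concatenate(FreeListSketch* other) {
    wasted_bytes_ += other->wasted_bytes_;
    other->wasted_bytes_ = 0;
    int64_t free_bytes = other->available_;
    available_ += free_bytes;
    other->available_ = 0;
    return free_bytes;
  }

  int64_t wasted_bytes() const { return wasted_bytes_; }

 private:
  static constexpr int kSmallListMin = 24;  // Hypothetical threshold.
  int64_t wasted_bytes_ = 0;
  int64_t available_ = 0;
};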
@@ -3553,7 +3553,6 @@ static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
     DCHECK(free_list == NULL);
     return space->Free(start, size);
   } else {
-    // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
     return size - free_list->Free(start, size);
   }
 }
......
@@ -2066,12 +2066,6 @@ size_t NewSpace::CommittedPhysicalMemory() {
 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
-    // This is safe (not going to deadlock) since Concatenate operations
-    // are never performed on the same free lists at the same time in
-    // reverse order. Furthermore, we only lock if the PagedSpace containing
-    // the free list is know to be globally available, i.e., not local.
-    if (!this->owner()->owner()->is_local()) mutex()->Lock();
-    if (!category->owner()->owner()->is_local()) category->mutex()->Lock();
     DCHECK(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
@@ -2083,8 +2077,6 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
     base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
     category->Reset();
-    if (!category->owner()->owner()->is_local()) category->mutex()->Unlock();
-    if (!this->owner()->owner()->is_local()) mutex()->Unlock();
   }
   return free_bytes;
 }
@@ -2195,6 +2187,7 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
 FreeList::FreeList(PagedSpace* owner)
     : owner_(owner),
       heap_(owner->heap()),
+      wasted_bytes_(0),
       small_list_(this),
       medium_list_(this),
       large_list_(this),
@@ -2203,12 +2196,26 @@ FreeList::FreeList(PagedSpace* owner)
 }
 
-intptr_t FreeList::Concatenate(FreeList* free_list) {
+intptr_t FreeList::Concatenate(FreeList* other) {
   intptr_t free_bytes = 0;
-  free_bytes += small_list_.Concatenate(free_list->small_list());
-  free_bytes += medium_list_.Concatenate(free_list->medium_list());
-  free_bytes += large_list_.Concatenate(free_list->large_list());
-  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  // This is safe (not going to deadlock) since Concatenate operations
+  // are never performed on the same free lists at the same time in
+  // reverse order. Furthermore, we only lock if the PagedSpace containing
+  // the free list is known to be globally available, i.e., not local.
+  if (!owner()->is_local()) mutex_.Lock();
+  if (!other->owner()->is_local()) other->mutex()->Lock();
+  wasted_bytes_ += other->wasted_bytes_;
+  other->wasted_bytes_ = 0;
+  free_bytes += small_list_.Concatenate(other->small_list());
+  free_bytes += medium_list_.Concatenate(other->medium_list());
+  free_bytes += large_list_.Concatenate(other->large_list());
+  free_bytes += huge_list_.Concatenate(other->huge_list());
+  if (!other->owner()->is_local()) other->mutex()->Unlock();
+  if (!owner()->is_local()) mutex_.Unlock();
   return free_bytes;
 }
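
The locking comment above is the crux of the change: concatenations only ever
run in one direction (merging a thread-local compaction-space free list into a
shared one), and local lists are not locked at all. A hedged illustration of
that discipline with standard C++ mutexes; Space, ConcatenateInto, and the
field names are hypothetical, not the V8 types.

#include <mutex>

struct Space {
  bool is_local;     // Thread-private compaction space?
  std::mutex mutex;  // Guards this space's free list.
};

// Mirrors the conditional locking above. Deadlock would need two threads to
// hold the same two mutexes in opposite order; since merges never run in
// reverse directions concurrently (and local spaces are not locked at all),
// no such cycle can form.
void ConcatenateInto(Space* dst, Space* src) {
  if (!dst->is_local) dst->mutex.lock();
  if (!src->is_local) src->mutex.lock();
  // ... transfer free-list nodes and the wasted-bytes counter ...
  if (!src->is_local) src->mutex.unlock();
  if (!dst->is_local) dst->mutex.unlock();
}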
@@ -2218,6 +2225,7 @@ void FreeList::Reset() {
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
+  ResetStats();
 }
@@ -2231,6 +2239,7 @@ int FreeList::Free(Address start, int size_in_bytes) {
   // Early return to drop too-small blocks on the floor.
   if (size_in_bytes <= kSmallListMin) {
     page->add_non_available_small_blocks(size_in_bytes);
+    wasted_bytes_ += size_in_bytes;
     return size_in_bytes;
   }
......
@@ -1420,19 +1420,11 @@ class AllocationInfo {
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
 //
 // The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
 class AllocationStats BASE_EMBEDDED {
  public:
   AllocationStats() { Clear(); }
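
A concrete reading of "balanced", sketched with simplified fields (the real
class also tracks max_capacity_, and waste now lives on the FreeList): every
mutation either changes capacity and size together or moves bytes between
size and available, so available == capacity - size holds at all times.
StatsSketch and its members are illustrative assumptions, not the V8 class.

#include <cstdint>

struct StatsSketch {
  int64_t capacity = 0;  // Object-area bytes in the space.
  int64_t size = 0;      // Allocated bytes.

  int64_t available() const { return capacity - size; }

  // Grow: capacity and size move together, so available is unchanged; the
  // new bytes start out "allocated" and are typically freed right away.
  void ExpandSpace(int64_t bytes) {
    capacity += bytes;
    size += bytes;
  }
  void AllocateBytes(int64_t bytes) { size += bytes; }    // available -> size
  void DeallocateBytes(int64_t bytes) { size -= bytes; }  // size -> available
};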
@@ -1442,26 +1434,20 @@ class AllocationStats BASE_EMBEDDED {
     capacity_ = 0;
     max_capacity_ = 0;
     size_ = 0;
-    waste_ = 0;
   }
 
-  void ClearSizeWaste() {
-    size_ = capacity_;
-    waste_ = 0;
-  }
+  void ClearSize() { size_ = capacity_; }
 
-  // Reset the allocation statistics (i.e., available = capacity with no
-  // wasted or allocated bytes).
+  // Reset the allocation statistics (i.e., available = capacity with no wasted
+  // or allocated bytes).
   void Reset() {
     size_ = 0;
-    waste_ = 0;
   }
 
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
   intptr_t MaxCapacity() { return max_capacity_; }
   intptr_t Size() { return size_; }
-  intptr_t Waste() { return waste_; }
 
   // Grow the space by adding available bytes. They are initially marked as
   // being in use (part of the size), but will normally be immediately freed,
@@ -1496,17 +1482,10 @@ class AllocationStats BASE_EMBEDDED {
     DCHECK(size_ >= 0);
   }
 
-  // Waste free bytes (available -> waste).
-  void WasteBytes(int size_in_bytes) {
-    DCHECK(size_in_bytes >= 0);
-    waste_ += size_in_bytes;
-  }
-
   // Merge {other} into {this}.
   void Merge(const AllocationStats& other) {
     capacity_ += other.capacity_;
     size_ += other.size_;
-    waste_ += other.waste_;
     if (other.max_capacity_ > max_capacity_) {
       max_capacity_ = other.max_capacity_;
     }
@@ -1520,10 +1499,15 @@
   void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
 
  private:
+  // |capacity_|: The number of object-area bytes (i.e., not including page
+  // bookkeeping structures) currently in the space.
   intptr_t capacity_;
+
+  // |max_capacity_|: The maximum capacity ever observed.
   intptr_t max_capacity_;
+
+  // |size_|: The number of allocated bytes.
   intptr_t size_;
-  intptr_t waste_;
 };
@@ -1566,8 +1550,6 @@ class FreeListCategory {
   int available() const { return available_; }
   void set_available(int available) { available_ = available; }
 
-  base::Mutex* mutex() { return &mutex_; }
-
   bool IsEmpty() { return top() == 0; }
 
 #ifdef DEBUG
@@ -1581,8 +1563,6 @@
   // top_ points to the top FreeSpace* in the free list category.
   base::AtomicWord top_;
   FreeSpace* end_;
-  base::Mutex mutex_;
-
   // Total available bytes in all blocks of this free list category.
   int available_;
@@ -1617,11 +1597,13 @@ class FreeList {
  public:
   explicit FreeList(PagedSpace* owner);
 
-  intptr_t Concatenate(FreeList* free_list);
+  intptr_t Concatenate(FreeList* other);
 
   // Clear the free list.
   void Reset();
 
+  void ResetStats() { wasted_bytes_ = 0; }
+
   // Return the number of bytes available on the free list.
   intptr_t available() {
     return small_list_.available() + medium_list_.available() +
@@ -1652,9 +1634,8 @@
   }
 
   // Allocate a block of size 'size_in_bytes' from the free list. The block
-  // is unitialized. A failure is returned if no block is available. The
-  // number of bytes lost to fragmentation is returned in the output parameter
-  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+  // is uninitialized. A failure is returned if no block is available.
+  // The size should be a non-zero multiple of the word size.
   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
 
   bool IsEmpty() {
@@ -1680,6 +1661,8 @@
   FreeListCategory* huge_list() { return &huge_list_; }
 
   PagedSpace* owner() { return owner_; }
+  intptr_t wasted_bytes() { return wasted_bytes_; }
+  base::Mutex* mutex() { return &mutex_; }
 
  private:
   // The size range of blocks, in bytes.
@@ -1698,6 +1681,8 @@
   PagedSpace* owner_;
   Heap* heap_;
+  base::Mutex mutex_;
+  intptr_t wasted_bytes_;
   FreeListCategory small_list_;
   FreeListCategory medium_list_;
   FreeListCategory large_list_;
@@ -1808,7 +1793,8 @@ class PagedSpace : public Space {
   // discovered during the sweeping they are subtracted from the size and added
   // to the available and wasted totals.
   void ClearStats() {
-    accounting_stats_.ClearSizeWaste();
+    accounting_stats_.ClearSize();
+    free_list_.ResetStats();
     ResetFreeListStatistics();
   }
@@ -1834,9 +1820,8 @@
   intptr_t SizeOfObjects() override;
 
   // Wasted bytes in this space. These are just the bytes that were thrown away
-  // due to being too small to use for allocation. They do not include the
-  // free bytes that were not found at all due to lazy sweeping.
-  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+  // due to being too small to use for allocation.
+  virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
 
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top(); }
@@ -1875,7 +1860,6 @@
   int Free(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes);
-    accounting_stats_.WasteBytes(wasted);
     return size_in_bytes - wasted;
   }
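
A worked example of the net effect, assuming a hypothetical kSmallListMin of
24 bytes (the constant's actual value is not shown in this diff):

// Hypothetical walk-through, not part of the patch:
//   space->Free(start, 16);   // free_list_.Free returns 16 (all waste),
//                             // size_ -= 16, result: 16 - 16 == 0 reusable.
//   space->Free(start, 128);  // free_list_.Free returns 0 (block reusable),
//                             // size_ -= 128, result: 128 - 0 == 128 reusable.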
......