Revert "On-the-fly bookkeeping of PagedSpace memory kept in free-lists."

This reverts r14197 because of test failures across the board.

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/13958004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14201 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent cba3446d
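
For context, the change being reverted replaced on-demand accounting of free-list memory (walking each free list and summing the blocks that belong to a given page, as the restored CountFreeListItemsInList below does) with counters stored on the Page itself and adjusted on every Free() and every node handed out by FindNodeFor(). The standalone sketch below illustrates the two approaches side by side; FreeBlock, CountFreeOnPage and PageStats are simplified stand-ins for illustration only, not V8's actual Page/FreeList types.

// Simplified, hypothetical types -- not V8's Page/FreeList classes.
#include <cstdint>
#include <cstdio>

struct FreeBlock {
  int page_id;      // page the free block lives on
  intptr_t size;    // block size in bytes
  FreeBlock* next;
};

// On-demand counting (the scheme this revert restores): walk the whole
// list and sum the blocks that belong to the requested page.
intptr_t CountFreeOnPage(FreeBlock* top, int page_id) {
  intptr_t sum = 0;
  for (FreeBlock* n = top; n != nullptr; n = n->next) {
    if (n->page_id == page_id) sum += n->size;
  }
  return sum;
}

// On-the-fly bookkeeping (the reverted scheme): keep a per-page counter
// and adjust it whenever a block is freed or picked for allocation.
struct PageStats {
  intptr_t available = 0;
  void AddFreed(intptr_t bytes)     { available += bytes; }
  void RemovePicked(intptr_t bytes) { available -= bytes; }
};

int main() {
  FreeBlock c = {2, 64, nullptr};
  FreeBlock b = {1, 128, &c};
  FreeBlock a = {1, 32, &b};
  std::printf("page 1, counted: %ld\n",
              static_cast<long>(CountFreeOnPage(&a, 1)));   // 160

  PageStats page1;
  page1.AddFreed(32);
  page1.AddFreed(128);
  page1.RemovePicked(32);
  std::printf("page 1, tracked: %ld\n",
              static_cast<long>(page1.available));          // 128
  return 0;
}

The trade-off is where the cost lands: counting on demand makes each fragmentation query proportional to the free-list length, while per-page counters make every free and allocation slightly more expensive but keep queries constant-time.
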
@@ -672,8 +672,8 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
return 0;
}
PagedSpace::SizeStats sizes;
space->ObtainFreeListStatistics(p, &sizes);
FreeList::SizeStats sizes;
space->CountFreeListItems(p, &sizes);
intptr_t ratio;
intptr_t ratio_threshold;
@@ -812,8 +812,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (!p->WasSwept()) {
free_bytes = (p->area_size() - p->LiveBytes());
} else {
PagedSpace::SizeStats sizes;
space->ObtainFreeListStatistics(p, &sizes);
FreeList::SizeStats sizes;
space->CountFreeListItems(p, &sizes);
free_bytes = sizes.Total();
}
@@ -697,15 +697,6 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
}
void Page::ResetFreeListStatistics() {
non_available_small_blocks_ = 0;
available_in_small_free_list_ = 0;
available_in_medium_free_list_ = 0;
available_in_large_free_list_ = 0;
available_in_huge_free_list_ = 0;
}
Page* MemoryAllocator::AllocatePage(intptr_t size,
PagedSpace* owner,
Executability executable) {
@@ -1066,23 +1057,6 @@ int PagedSpace::CountTotalPages() {
}
void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
sizes->huge_size_ = page->available_in_huge_free_list();
sizes->small_size_ = page->available_in_small_free_list();
sizes->medium_size_ = page->available_in_medium_free_list();
sizes->large_size_ = page->available_in_large_free_list();
}
void PagedSpace::ResetFreeListStatistics() {
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
page->ResetFreeListStatistics();
}
}
void PagedSpace::ReleasePage(Page* page, bool unlink) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
@@ -2082,6 +2056,20 @@ void FreeListCategory::Reset() {
}
intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) {
int sum = 0;
FreeListNode* n = top_;
while (n != NULL) {
if (Page::FromAddress(n->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
sum += free_space->Size();
}
n = n->next();
}
return sum;
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
FreeListNode** n = &top_;
@@ -2182,28 +2170,20 @@ int FreeList::Free(Address start, int size_in_bytes) {
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(heap_, size_in_bytes);
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
if (size_in_bytes < kSmallListMin) {
page->add_non_available_small_blocks(size_in_bytes);
return size_in_bytes;
}
if (size_in_bytes < kSmallListMin) return size_in_bytes;
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
if (size_in_bytes <= kSmallListMax) {
small_list_.Free(node, size_in_bytes);
page->add_available_in_small_free_list(size_in_bytes);
} else if (size_in_bytes <= kMediumListMax) {
medium_list_.Free(node, size_in_bytes);
page->add_available_in_medium_free_list(size_in_bytes);
} else if (size_in_bytes <= kLargeListMax) {
large_list_.Free(node, size_in_bytes);
page->add_available_in_large_free_list(size_in_bytes);
} else {
huge_list_.Free(node, size_in_bytes);
page->add_available_in_huge_free_list(size_in_bytes);
}
ASSERT(IsVeryLong() || available() == SumFreeLists());
@@ -2213,33 +2193,20 @@ int FreeList::Free(Address start, int size_in_bytes) {
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* node = NULL;
Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
node = small_list_.PickNodeFromList(node_size);
if (node != NULL) {
page = Page::FromAddress(node->address());
page->add_available_in_small_free_list(-(*node_size));
return node;
}
if (node != NULL) return node;
}
if (size_in_bytes <= kMediumAllocationMax) {
node = medium_list_.PickNodeFromList(node_size);
if (node != NULL) {
page = Page::FromAddress(node->address());
page->add_available_in_medium_free_list(-(*node_size));
return node;
}
if (node != NULL) return node;
}
if (size_in_bytes <= kLargeAllocationMax) {
node = large_list_.PickNodeFromList(node_size);
if (node != NULL) {
page = Page::FromAddress(node->address());
page->add_available_in_large_free_list(-(*node_size));
return node;
}
if (node != NULL) return node;
}
int huge_list_available = huge_list_.available();
@@ -2249,10 +2216,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
huge_list_available -= size;
page = Page::FromAddress(node->address());
page->add_available_in_huge_free_list(-size);
huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
cur_node = cur_node->next();
}
@@ -2271,8 +2235,6 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
*cur = node->next();
*node_size = size;
huge_list_available -= size;
page = Page::FromAddress(node->address());
page->add_available_in_huge_free_list(-size);
break;
}
}
@@ -2360,17 +2322,27 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
}
void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p);
if (sizes->huge_size_ < p->area_size()) {
sizes->small_size_ = small_list_.CountFreeListItemsInList(p);
sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p);
sizes->large_size_ = large_list_.CountFreeListItemsInList(p);
} else {
sizes->small_size_ = 0;
sizes->medium_size_ = 0;
sizes->large_size_ = 0;
}
}
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
p->set_available_in_huge_free_list(0);
if (sum < p->area_size()) {
sum += small_list_.EvictFreeListItemsInList(p) +
medium_list_.EvictFreeListItemsInList(p) +
large_list_.EvictFreeListItemsInList(p);
p->set_available_in_small_free_list(0);
p->set_available_in_medium_free_list(0);
p->set_available_in_large_free_list(0);
}
return sum;
@@ -547,8 +547,7 @@ class MemoryChunk {
kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize +
5 * kPointerSize;
kIntSize + kIntSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -702,13 +701,6 @@ class MemoryChunk {
intptr_t parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
intptr_t available_in_medium_free_list_;
intptr_t available_in_large_free_list_;
intptr_t available_in_huge_free_list_;
intptr_t non_available_small_blocks_;
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
@@ -805,21 +797,6 @@ class Page : public MemoryChunk {
void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
void ResetFreeListStatistics();
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
type name() { return name##_; } \
void set_##name(type name) { name##_ = name; } \
void add_##name(type name) { name##_ += name; }
FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
#undef FRAGMENTATION_STATS_ACCESSORS
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1455,6 +1432,8 @@ class FreeListCategory {
FreeListNode* PickNodeFromList(int *node_size);
intptr_t CountFreeListItemsInList(Page* p);
intptr_t EvictFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
@@ -1549,6 +1528,19 @@ class FreeList BASE_EMBEDDED {
// Used after booting the VM.
void RepairLists(Heap* heap);
struct SizeStats {
intptr_t Total() {
return small_size_ + medium_size_ + large_size_ + huge_size_;
}
intptr_t small_size_;
intptr_t medium_size_;
intptr_t large_size_;
intptr_t huge_size_;
};
void CountFreeListItems(Page* p, SizeStats* sizes);
intptr_t EvictFreeListItems(Page* p);
FreeListCategory* small_list() { return &small_list_; }
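
As a usage note, the restored FreeList::SizeStats is what FreeListFragmentation in the first hunk consumes: the caller fills it through CountFreeListItems and compares Total() with the page's usable area to estimate how fragmented the page is. The sketch below shows that calculation in isolation; the FragmentationPercent helper, the example sizes and the 50% cut-off are assumptions for illustration, not values taken from this change.

#include <cstdint>
#include <cstdio>

// Mirrors the SizeStats struct declared above.
struct SizeStats {
  intptr_t Total() {
    return small_size_ + medium_size_ + large_size_ + huge_size_;
  }
  intptr_t small_size_;
  intptr_t medium_size_;
  intptr_t large_size_;
  intptr_t huge_size_;
};

// Hypothetical helper: fraction of the page's usable area that currently
// sits in free lists, expressed in percent.
static int FragmentationPercent(SizeStats sizes, intptr_t area_size) {
  if (area_size <= 0) return 0;
  return static_cast<int>(sizes.Total() * 100 / area_size);
}

int main() {
  SizeStats sizes = {0, 0, 0, 0};    // as CountFreeListItems would fill it
  sizes.small_size_ = 4 * 1024;
  sizes.huge_size_  = 600 * 1024;
  intptr_t area_size = 1000 * 1024;  // assumed usable page area

  int percent = FragmentationPercent(sizes, area_size);
  std::printf("free-list memory on page: %d%%\n", percent);  // 60%
  // Example policy: prefer highly fragmented pages as evacuation candidates.
  std::printf("evacuation candidate: %s\n", percent > 50 ? "yes" : "no");
  return 0;
}
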
@@ -1633,20 +1625,6 @@ class PagedSpace : public Space {
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
struct SizeStats {
intptr_t Total() {
return small_size_ + medium_size_ + large_size_ + huge_size_;
}
intptr_t small_size_;
intptr_t medium_size_;
intptr_t large_size_;
intptr_t huge_size_;
};
void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
void ResetFreeListStatistics();
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -1654,7 +1632,6 @@ class PagedSpace : public Space {
// to the available and wasted totals.
void ClearStats() {
accounting_stats_.ClearSizeWaste();
ResetFreeListStatistics();
}
// Increases the number of available bytes of that space.
@@ -1808,6 +1785,10 @@ class PagedSpace : public Space {
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
free_list_.CountFreeListItems(p, sizes);
}
void EvictEvacuationCandidatesFromFreeLists();
bool CanExpand();