Commit e819dd28 authored by hpayer@chromium.org

Shrinking of PagedSpace is done only by sweeper.

BUG=

Review URL: https://codereview.chromium.org/12313131

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13758 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ddeaa4a3
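Note on the change: the hunks below delete Heap::Shrink() and PagedSpace::ReleaseAllUnusedPages(), leaving the release of unused pages to the sweeper. As a rough illustration of what that means (not part of this commit; the function name and the exact release condition are assumptions, and only interfaces that appear in the removed code are used):

// Illustrative sketch only, not code from this commit. Assumed shape of the
// sweeper-side release: a page whose marking found no live bytes is handed
// back to its space directly.
static void ReleaseUnusedPagesWhileSweeping(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* page = it.next();
    if (page->LiveBytes() == 0) {
      space->ReleasePage(page);
    }
  }
}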
@@ -591,7 +591,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   mark_compact_collector()->SetFlags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
-  Shrink();
   incremental_marking()->UncommitMarkingDeque();
 }
@@ -789,11 +788,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
   if (new_space_.CommitFromSpaceIfNeeded()) return;
   // Committing memory to from space failed.
-  // Try shrinking and try again.
-  Shrink();
-  if (new_space_.CommitFromSpaceIfNeeded()) return;
-  // Committing memory to from space failed again.
   // Memory is exhausted and we will die.
   V8::FatalProcessOutOfMemory("Committing semi space failed.");
 }
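For reference, Heap::EnsureFromSpaceIsCommitted() as it reads after this hunk, reconstructed from the context lines above (blank lines approximate): a failed commit of the from-space now goes straight to the fatal out-of-memory path instead of shrinking and retrying.

void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}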
@@ -822,7 +816,6 @@ void Heap::ClearJSFunctionResultCaches() {
 }
 void Heap::ClearNormalizedMapCaches() {
   if (isolate_->bootstrapper()->IsActive() &&
       !incremental_marking()->IsMarking()) {
@@ -6414,17 +6407,6 @@ void Heap::TearDown() {
 }
-void Heap::Shrink() {
-  // Try to shrink all paged spaces.
-  PagedSpaces spaces(this);
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->ReleaseAllUnusedPages();
-  }
-}
 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
   ASSERT(callback != NULL);
   GCPrologueCallbackPair pair(callback, gc_type);
@@ -1384,9 +1384,6 @@ class Heap {
   // Given an address occupied by a live code object, return that object.
   Object* FindCodeObject(Address a);
-  // Invoke Shrink on shrinkable spaces.
-  void Shrink();
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
   inline HeapState gc_state() { return gc_state_; }
@@ -1081,36 +1081,6 @@ void PagedSpace::ReleasePage(Page* page) {
 }
-void PagedSpace::ReleaseAllUnusedPages() {
-  PageIterator it(this);
-  while (it.has_next()) {
-    Page* page = it.next();
-    if (!page->WasSwept()) {
-      if (page->LiveBytes() == 0) ReleasePage(page);
-    } else {
-      HeapObject* obj = HeapObject::FromAddress(page->area_start());
-      if (obj->IsFreeSpace() &&
-          FreeSpace::cast(obj)->size() == AreaSize()) {
-        // Sometimes we allocate memory from free list but don't
-        // immediately initialize it (e.g. see PagedSpace::ReserveSpace
-        // called from Heap::ReserveSpace that can cause GC before
-        // reserved space is actually initialized).
-        // Thus we can't simply assume that obj represents a valid
-        // node still owned by a free list
-        // Instead we should verify that the page is fully covered
-        // by free list items.
-        FreeList::SizeStats sizes;
-        free_list_.CountFreeListItems(page, &sizes);
-        if (sizes.Total() == AreaSize()) {
-          ReleasePage(page);
-        }
-      }
-    }
-  }
-  heap()->FreeQueuedChunks();
-}
 #ifdef DEBUG
 void PagedSpace::Print() { }
 #endif
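The subtle part of the removed ReleaseAllUnusedPages() above is the swept-page branch: a page cannot be released just because its first object looks like a page-sized FreeSpace, since memory taken from the free list is not always initialized immediately (see the comment referring to PagedSpace::ReserveSpace), so the page has to be proven fully covered by free-list items. A hypothetical helper distilling that check (the name is illustrative; the calls are the ones used in the removed code):

// Hypothetical helper distilled from the removed code above: true only when
// free-list items account for the page's entire usable area.
static bool PageFullyCoveredByFreeList(PagedSpace* space,
                                       FreeList* free_list,
                                       Page* page) {
  FreeList::SizeStats sizes;
  free_list->CountFreeListItems(page, &sizes);
  return sizes.Total() == space->AreaSize();
}

Once page release happens during sweeping, the sweeper observes each page's contents directly, so this indirect verification is presumably no longer needed.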
@@ -1698,9 +1698,6 @@ class PagedSpace : public Space {
   // Releases an unused page and shrinks the space.
   void ReleasePage(Page* page);
-  // Releases all of the unused pages.
-  void ReleaseAllUnusedPages();
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }