Reapply r1900, r1897, r1895 with a fix.

When a paged space shrinks by an even multiple of the chunk size,
ensure that the cached last page in the space is updated.

Review URL: http://codereview.chromium.org/113267

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1944 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ebbaeb36
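Editorial illustration (not part of the commit): a minimal standalone sketch of the invariant this change restores. The Page and PagedSpace types below are simplified stand-ins invented for this example, not the classes from the diff; the point is only that every operation that grows or shrinks the page list must keep the cached last page in sync, otherwise an iterator that stops at the cached last page walks off the live list.

    // Hypothetical model of a paged space with a cached last page.
    #include <cassert>
    #include <cstdio>

    struct Page {
      Page* next = nullptr;
    };

    struct PagedSpace {
      Page* first_page = nullptr;
      Page* last_page = nullptr;  // Cached last page; must track expand/shrink.

      // Append a freshly allocated page and update the cache.
      void Expand() {
        Page* p = new Page;
        if (first_page == nullptr) {
          first_page = p;
        } else {
          last_page->next = p;
        }
        last_page = p;
      }

      // Drop every page after 'keep' and update the cache.  Forgetting the
      // last_page update here is the kind of bug the commit message describes.
      void ShrinkAfter(Page* keep) {
        Page* p = keep->next;
        keep->next = nullptr;
        last_page = keep;
        while (p != nullptr) {
          Page* next = p->next;
          delete p;
          p = next;
        }
      }
    };

    int main() {
      PagedSpace space;
      for (int i = 0; i < 4; ++i) space.Expand();
      space.ShrinkAfter(space.first_page->next);  // Keep only the first two pages.
      // An iterator that stops at the cached last page now terminates correctly.
      int count = 0;
      for (Page* p = space.first_page; ; p = p->next) {
        ++count;
        if (p == space.last_page) break;
      }
      assert(count == 2);
      std::printf("pages after shrink: %d\n", count);
      return 0;
    }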
@@ -538,7 +538,7 @@ class ScavengeVisitor: public ObjectVisitor {
 // Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_top = NULL;
+static Address promoted_rear = NULL;
 
 #ifdef DEBUG
@@ -554,24 +554,34 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
     }
   }
 };
+
+
+static void VerifyNonPointerSpacePointers() {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v;
+  HeapObjectIterator code_it(Heap::code_space());
+  while (code_it.has_next()) {
+    HeapObject* object = code_it.next();
+    if (object->IsCode()) {
+      Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      object->Iterate(&v);
+      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+    } else {
+      // If we find non-code objects in code space (e.g., free list
+      // nodes) we want to verify them as well.
+      object->Iterate(&v);
+    }
+  }
+
+  HeapObjectIterator data_it(Heap::old_data_space());
+  while (data_it.has_next()) data_it.next()->Iterate(&v);
+}
 #endif
 
 
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    VerifyNonPointerSpacePointersVisitor v;
-    HeapObjectIterator it(code_space_);
-    while (it.has_next()) {
-      HeapObject* object = it.next();
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromAddressToObject();
-      }
-      object->Iterate(&v);
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-      }
-    }
-  }
+  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
@@ -596,72 +606,70 @@ void Heap::Scavenge() {
   new_space_.Flip();
   new_space_.ResetAllocationInfo();
 
-  // We need to sweep newly copied objects which can be in either the to space
-  // or the old space.  For to space objects, we use a mark.  Newly copied
-  // objects lie between the mark and the allocation top.  For objects
-  // promoted to old space, we write their addresses downward from the top of
-  // the new space.  Sweeping newly promoted objects requires an allocation
-  // pointer and a mark.  Note that the allocation pointer 'top' actually
-  // moves downward from the high address in the to space.
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation.  For to-space
+  // objects, we treat the bottom of the to space as a queue.  Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
   //
-  // There is guaranteed to be enough room at the top of the to space for the
-  // addresses of promoted objects: every object promoted frees up its size in
-  // bytes from the top of the new space, and objects are at least one pointer
-  // in size.  Using the new space to record promoted addresses makes the
-  // scavenge collector agnostic to the allocation strategy (eg, linear or
-  // free-list) used in old space.
-  Address new_mark = new_space_.ToSpaceLow();
-  Address promoted_mark = new_space_.ToSpaceHigh();
-  promoted_top = new_space_.ToSpaceHigh();
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects.  The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses of promoted objects: every object promoted
+  // frees up its size in bytes from the top of the new space, and
+  // objects are at least one pointer in size.
+  Address new_space_front = new_space_.ToSpaceLow();
+  Address promoted_front = new_space_.ToSpaceHigh();
+  promoted_rear = new_space_.ToSpaceHigh();
 
   ScavengeVisitor scavenge_visitor;
   // Copy roots.
   IterateRoots(&scavenge_visitor);
 
-  // Copy objects reachable from the old generation.  By definition, there
-  // are no intergenerational pointers in code or data spaces.
+  // Copy objects reachable from weak pointers.
+  GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+
+  // Copy objects reachable from the old generation.  By definition,
+  // there are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);
 
-  bool has_processed_weak_pointers = false;
-
-  while (true) {
-    ASSERT(new_mark <= new_space_.top());
-    ASSERT(promoted_mark >= promoted_top);
-
-    // Copy objects reachable from newly copied objects.
-    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
-      // Sweep newly copied objects in the to space.  The allocation pointer
-      // can change during sweeping.
-      Address previous_top = new_space_.top();
-      SemiSpaceIterator new_it(new_space(), new_mark);
-      while (new_it.has_next()) {
-        new_it.next()->Iterate(&scavenge_visitor);
-      }
-      new_mark = previous_top;
-
-      // Sweep newly copied objects in the old space.  The promotion 'top'
-      // pointer could change during sweeping.
-      previous_top = promoted_top;
-      for (Address current = promoted_mark - kPointerSize;
-           current >= previous_top;
-           current -= kPointerSize) {
-        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
-        object->Iterate(&scavenge_visitor);
-        UpdateRSet(object);
-      }
-      promoted_mark = previous_top;
-    }
-
-    if (has_processed_weak_pointers) break;  // We are done.
-    // Copy objects reachable from weak pointers.
-    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-    has_processed_weak_pointers = true;
-  }
+  do {
+    ASSERT(new_space_front <= new_space_.top());
+    ASSERT(promoted_front >= promoted_rear);
+
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects.  Process them until the
+    // queue is empty.
+    while (new_space_front < new_space_.top()) {
+      HeapObject* object = HeapObject::FromAddress(new_space_front);
+      object->Iterate(&scavenge_visitor);
+      new_space_front += object->Size();
+    }
+
+    // The addresses promoted_front and promoted_rear define a queue
+    // of unprocessed addresses of promoted objects.  Process them
+    // until the queue is empty.
+    while (promoted_front > promoted_rear) {
+      promoted_front -= kPointerSize;
+      HeapObject* object =
+          HeapObject::cast(Memory::Object_at(promoted_front));
+      object->Iterate(&scavenge_visitor);
+      UpdateRSet(object);
+    }
+
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front < new_space_.top());
 
   // Set age mark.
-  new_space_.set_age_mark(new_mark);
+  new_space_.set_age_mark(new_space_.top());
 
   LOG(ResourceEvent("scavenge", "end"));
@@ -882,8 +890,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
   if (target_space == Heap::old_pointer_space_) {
     // Record the object's address at the top of the to space, to allow
     // it to be swept by the scavenger.
-    promoted_top -= kPointerSize;
-    Memory::Object_at(promoted_top) = *p;
+    promoted_rear -= kPointerSize;
+    Memory::Object_at(promoted_rear) = *p;
   } else {
 #ifdef DEBUG
     // Objects promoted to the data space should not have pointers to
......
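Editorial illustration (not part of the commit): the control flow of the rewritten Heap::Scavenge() above can be summarized with a small hypothetical model. Two queues of unswept work are drained in turn (newly copied objects at one front, addresses of promoted objects at another), and draining either queue may grow the other, so the collector loops until both are empty. The vectors and integer "objects" below are invented stand-ins, not the real heap types.

    // Hypothetical two-queue drain, mirroring only the loop structure.
    #include <cstdio>
    #include <vector>

    int main() {
      // Work items: each value is "how many follow-up items this object creates".
      std::vector<int> copied = {2, 1};   // Seeded by the roots.
      std::vector<int> promoted;
      std::size_t copied_front = 0;
      std::size_t promoted_front = 0;
      int processed = 0;

      do {
        // Drain newly copied objects; they may promote new objects.
        while (copied_front < copied.size()) {
          int fanout = copied[copied_front++];
          ++processed;
          for (int i = 0; i < fanout; ++i) promoted.push_back(fanout - 1);
        }
        // Drain promoted objects; they may copy new objects into "new space".
        while (promoted_front < promoted.size()) {
          int fanout = promoted[promoted_front++];
          ++processed;
          for (int i = 0; i < fanout; ++i) copied.push_back(0);
        }
        // Another spin only if draining the promoted queue added copied objects.
      } while (copied_front < copied.size());

      std::printf("processed %d objects\n", processed);
      return 0;
    }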
@@ -64,15 +64,16 @@ HeapObject* HeapObjectIterator::next() {
 // PageIterator
 
 bool PageIterator::has_next() {
-  return cur_page_ != stop_page_;
+  return prev_page_ != stop_page_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  Page* result = cur_page_;
-  cur_page_ = cur_page_->next_page();
-  return result;
+  prev_page_ = (prev_page_ == NULL)
+               ? space_->first_page_
+               : prev_page_->next_page();
+  return prev_page_;
 }
......
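Editorial illustration (not part of the commit): the new PageIterator contract is easiest to see in isolation. stop_page_ is now the last page the iterator returns (inclusive) rather than a one-past-the-end sentinel, and prev_page_ remembers the most recently returned page (NULL before the first call). The Page type and iterator below are invented stand-ins for this sketch, not the code from the diff.

    // Hypothetical inclusive-stop page iterator.
    #include <cassert>

    struct Page {
      Page* next_page;
    };

    class PageIterator {
     public:
      PageIterator(Page* first, Page* stop)
          : first_(first), prev_(nullptr), stop_(stop) {}

      bool has_next() const { return prev_ != stop_; }

      Page* next() {
        assert(has_next());
        prev_ = (prev_ == nullptr) ? first_ : prev_->next_page;
        return prev_;
      }

     private:
      Page* first_;
      Page* prev_;  // Previous page returned, or nullptr before the first call.
      Page* stop_;  // Last page to return (inclusive).
    };

    int main() {
      Page pages[3];
      pages[0].next_page = &pages[1];
      pages[1].next_page = &pages[2];
      pages[2].next_page = nullptr;

      int count = 0;
      for (PageIterator it(&pages[0], &pages[1]); it.has_next();) {
        it.next();
        ++count;
      }
      assert(count == 2);  // Stops after the inclusive stop page.
      return 0;
    }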
@@ -111,17 +111,26 @@ void HeapObjectIterator::Verify() {
 // -----------------------------------------------------------------------------
 // PageIterator
 
-PageIterator::PageIterator(PagedSpace* space, Mode mode) {
-  cur_page_ = space->first_page_;
+PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
+  prev_page_ = NULL;
   switch (mode) {
     case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage()->next_page();
+      stop_page_ = space->AllocationTopPage();
       break;
     case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage()->next_page();
+      stop_page_ = space->MCRelocationTopPage();
       break;
     case ALL_PAGES:
-      stop_page_ = Page::FromAddress(NULL);
+#ifdef DEBUG
+      // Verify that the cached last page in the space is actually the
+      // last page.
+      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
+        if (!p->next_page()->is_valid()) {
+          ASSERT(space->last_page_ == p);
+        }
+      }
+#endif
+      stop_page_ = space->last_page_;
       break;
     default:
       UNREACHABLE();
@@ -496,8 +505,11 @@ bool PagedSpace::Setup(Address start, size_t size) {
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
+  // Sequentially initialize remembered sets in the newly allocated
+  // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     p->ClearRSet();
+    last_page_ = p;
   }
 
   // Use first_page_ for allocation.
@@ -676,9 +688,11 @@ bool PagedSpace::Expand(Page* last_page) {
   MemoryAllocator::SetNextPage(last_page, p);
 
-  // Clear remembered set of new pages.
+  // Sequentially clear remembered set of new pages and cache the
+  // new last page in the space.
   while (p->is_valid()) {
     p->ClearRSet();
+    last_page_ = p;
     p = p->next_page();
   }
@@ -723,10 +737,13 @@ void PagedSpace::Shrink() {
   Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
   MemoryAllocator::SetNextPage(last_page_to_keep, p);
 
-  // Since pages are only freed in whole chunks, we may have kept more than
-  // pages_to_keep.
+  // Since pages are only freed in whole chunks, we may have kept more
+  // than pages_to_keep.  Count the extra pages and cache the new last
+  // page in the space.
+  last_page_ = last_page_to_keep;
   while (p->is_valid()) {
     pages_to_keep++;
+    last_page_ = p;
     p = p->next_page();
   }
......
@@ -511,11 +511,22 @@ class ObjectIterator : public Malloced {
 //
 // A HeapObjectIterator iterates objects from a given address to the
 // top of a space.  The given address must be below the current
-// allocation pointer (space top).  If the space top changes during
-// iteration (because of allocating new objects), the iterator does
-// not iterate new objects.  The caller function must create a new
-// iterator starting from the old top in order to visit these new
-// objects.  Heap::Scavenage() is such an example.
+// allocation pointer (space top).  There are some caveats.
+//
+// (1) If the space top changes upward during iteration (because of
+//     allocating new objects), the iterator does not iterate objects
+//     above the original space top.  The caller must create a new
+//     iterator starting from the old top in order to visit these new
+//     objects.
+//
+// (2) If new objects are allocated below the original allocation top
+//     (e.g., free-list allocation in paged spaces), the new objects
+//     may or may not be iterated depending on their position with
+//     respect to the current point of iteration.
+//
+// (3) The space top should not change downward during iteration,
+//     otherwise the iterator will return not-necessarily-valid
+//     objects.
 
 class HeapObjectIterator: public ObjectIterator {
  public:
@@ -559,17 +570,35 @@ class HeapObjectIterator: public ObjectIterator {
 // -----------------------------------------------------------------------------
-// A PageIterator iterates pages in a space.
+// A PageIterator iterates the pages in a paged space.
 //
 // The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages that are in use by the allocator;
-//   PAGES_USED_BY_GC iterates pages that hold relocated objects during a
-//     mark-compact collection;
+//   PAGES_IN_USE iterates pages containing allocated objects.
+//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+//     mark-compact collection.
 //   ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+//     returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+//     in pages returned by the iterator.  Allocation may cause the
+//     allocation pointer or MC allocation pointer in the last page to
+//     change between constructing the iterator and iterating the last
+//     page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
+  enum Mode {
+    PAGES_IN_USE,
+    PAGES_USED_BY_MC,
+    ALL_PAGES
+  };
 
   PageIterator(PagedSpace* space, Mode mode);
@@ -577,8 +606,9 @@ class PageIterator BASE_EMBEDDED {
   inline Page* next();
 
  private:
-  Page* cur_page_;  // next page to return
-  Page* stop_page_;  // page where to stop
+  PagedSpace* space_;
+  Page* prev_page_;  // Previous page returned.
+  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
 };
@@ -809,6 +839,10 @@ class PagedSpace : public Space {
   // The first page in this space.
   Page* first_page_;
 
+  // The last page in this space.  Initially set in Setup, updated in
+  // Expand and Shrink.
+  Page* last_page_;
+
   // Normal allocation information.
   AllocationInfo allocation_info_;
......
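Editorial illustration (not part of the commit): caveat (1) on HeapObjectIterator above describes a pattern the collector relies on, namely iterating up to the top recorded when a pass began and then starting a second pass from that old top to pick up objects allocated in the meantime. The plain vector below stands in for a space; it is purely hypothetical, not V8 code.

    // Hypothetical two-pass iteration when allocation moves the top upward.
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> space = {1, 2, 3};  // Stand-in for objects below the top.
      std::size_t old_top = space.size();

      // First pass: iterate the objects that existed when the pass began.
      for (std::size_t i = 0; i < old_top; ++i) {
        // Visiting an object may allocate new objects above the original top.
        if (space[i] == 2) space.push_back(42);
      }

      // Second pass: a new "iterator" starting at the old top visits the rest.
      for (std::size_t i = old_top; i < space.size(); ++i) {
        std::printf("new object: %d\n", space[i]);
      }
      return 0;
    }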