Commit f9790085 authored by kasperl@chromium.org

Revert r1971. Doesn't compile.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1972 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 3c73ec8a
@@ -191,27 +191,6 @@ void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
}

void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
  ASSERT(InFromSpace(object));

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer. A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    *p = first_word.ToForwardingAddress();
    return;
  }

  // Call the slow part of scavenge object.
  return ScavengeObjectSlow(p, object);
}

Object* Heap::GetKeyedLookupCache() {
  if (keyed_lookup_cache()->IsUndefined()) {
    Object* obj = LookupCache::Allocate(4);
......
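Aside: the ScavengeObject fast path shown above hinges on reusing an object's map word as a forwarding pointer once the object has been evacuated. The standalone sketch below only illustrates that idea; it is not V8's actual MapWord encoding, and the tag-bit scheme and names (ForwardableHeader, FakeObject, kForwardingTag, TryUpdateSlot) are invented for the example.

// Standalone illustration of the forwarding-word idea used by
// Heap::ScavengeObject. Assumption: the low bit of the header word
// distinguishes a forwarding address (bit set) from an ordinary map
// pointer (bit clear). This is not V8's real MapWord layout.
#include <cassert>
#include <cstdint>

struct FakeObject;

struct ForwardableHeader {
  uintptr_t value;   // either a map pointer or a tagged FakeObject*

  static const uintptr_t kForwardingTag = 1;

  bool IsForwardingAddress() const { return (value & kForwardingTag) != 0; }

  FakeObject* ToForwardingAddress() const {
    assert(IsForwardingAddress());
    return reinterpret_cast<FakeObject*>(value & ~kForwardingTag);
  }

  static ForwardableHeader FromForwardingAddress(FakeObject* target) {
    ForwardableHeader header;
    header.value = reinterpret_cast<uintptr_t>(target) | kForwardingTag;
    return header;
  }
};

struct FakeObject {
  ForwardableHeader header;
};

// Mirrors the fast path of ScavengeObject: if the header already holds
// a forwarding address, update the slot in place and report success.
bool TryUpdateSlot(FakeObject** slot) {
  FakeObject* object = *slot;
  if (object->header.IsForwardingAddress()) {
    *slot = object->header.ToForwardingAddress();
    return true;   // the object was already copied
  }
  return false;    // a real collector would fall through to the slow path
}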
@@ -537,39 +537,8 @@ class ScavengeVisitor: public ObjectVisitor {
};

// A queue of pointers and maps of to-be-promoted objects during a
// scavenge collection.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* object, Map* map) {
    *(--rear_) = object;
    *(--rear_) = map;
    // Assert no overflow into live objects.
    ASSERT(rear_ >= Heap::new_space()->top());
  }

  void remove(HeapObject** object, Map** map) {
    *object = *(--front_);
    *map = Map::cast(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  HeapObject** front_;
  HeapObject** rear_;
};

// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;

static Address promoted_rear = NULL;
#ifdef DEBUG
@@ -655,7 +624,8 @@ void Heap::Scavenge() {
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());
  Address promoted_front = new_space_.ToSpaceHigh();
  promoted_rear = new_space_.ToSpaceHigh();

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
@@ -672,6 +642,7 @@
  do {
    ASSERT(new_space_front <= new_space_.top());
    ASSERT(promoted_front >= promoted_rear);

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
@@ -682,26 +653,15 @@ void Heap::Scavenge() {
      new_space_front += object->Size();
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* source;
      Map* map;
      promotion_queue.remove(&source, &map);

      // Copy the from-space object to its new location (given by the
      // forwarding address) and fix its map.
      HeapObject* target = source->map_word().ToForwardingAddress();
      CopyBlock(reinterpret_cast<Object**>(target->address()),
                reinterpret_cast<Object**>(source->address()),
                source->SizeFromMap(map));
      target->set_map(map);

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
      // Update NewSpace stats if necessary.
      RecordCopiedObject(target);
#endif

      // Visit the newly copied object for pointers to new space.
      target->Iterate(&scavenge_visitor);
      UpdateRSet(target);

    // The addresses promoted_front and promoted_rear define a queue
    // of unprocessed addresses of promoted objects. Process them
    // until the queue is empty.
    while (promoted_front > promoted_rear) {
      promoted_front -= kPointerSize;
      HeapObject* object =
          HeapObject::cast(Memory::Object_at(promoted_front));
      object->Iterate(&scavenge_visitor);
      UpdateRSet(object);
    }

    // Take another spin if there are now unswept objects in new space
@@ -858,8 +818,8 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
  // Set the forwarding address.
  source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  RecordCopiedObject(target);
#endif
@@ -867,6 +827,28 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
}

// Inlined function.
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
  ASSERT(InFromSpace(object));

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer. A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    *p = first_word.ToForwardingAddress();
    return;
  }

  // Call the slow part of scavenge object.
  return ScavengeObjectSlow(p, object);
}

static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
  STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
  ASSERT(object->map() == map);
@@ -897,11 +879,6 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  }

  int object_size = object->SizeFromMap(first_word.ToMap());

  // We rely on live objects in new space to be at least two pointers,
  // so we can store the from-space address and map pointer of promoted
  // objects in the to space.
  ASSERT(object_size >= 2 * kPointerSize);

  // If the object should be promoted, we try to copy it to old space.
  if (ShouldBePromoted(object->address(), object_size)) {
    OldSpace* target_space = Heap::TargetSpace(object);
@@ -909,29 +886,16 @@
           target_space == Heap::old_data_space_);
    Object* result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      *p = MigrateObject(object, HeapObject::cast(result), object_size);
      if (target_space == Heap::old_pointer_space_) {
        // Save the from-space object pointer and its map pointer at the
        // top of the to space to be swept and copied later. Write the
        // forwarding address over the map word of the from-space
        // object.
        promotion_queue.insert(object, first_word.ToMap());
        object->set_map_word(MapWord::FromForwardingAddress(target));

        // Give the space allocated for the result a proper map by
        // treating it as a free list node (not linked into the free
        // list).
        FreeListNode* node = FreeListNode::FromAddress(target->address());
        node->set_size(object_size);

        *p = target;
        // Record the object's address at the top of the to space, to allow
        // it to be swept by the scavenger.
        promoted_rear -= kPointerSize;
        Memory::Object_at(promoted_rear) = *p;
      } else {
        // Objects promoted to the data space can be copied immediately
        // and not revisited---we will never sweep that space for
        // pointers and the copied objects do not contain pointers to
        // new space objects.
        *p = MigrateObject(object, target, object_size);
#ifdef DEBUG
        // Objects promoted to the data space should not have pointers to
        // new space.
        VerifyNonPointerSpacePointersVisitor v;
        (*p)->Iterate(&v);
#endif
......
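Aside: in the heap.cc hunks above, the restored Scavenge code tracks promoted objects with two bare addresses, promoted_front and promoted_rear, which both start at the top of to space and move downward, while the PromotionQueue class removed by this revert wraps the same downward-growing layout (together with each object's map pointer). The sketch below models only the address bookkeeping; the class name DownwardQueue, the plain C array, and main() are invented for the example, whereas the real code writes directly into the unused top of to space.

// Toy model of the promoted_front / promoted_rear bookkeeping: entries
// are recorded below a falling rear pointer and drained by a front
// pointer that walks down from the top until it meets the rear.
#include <cassert>
#include <cstddef>

class DownwardQueue {
 public:
  explicit DownwardQueue(void** high_watermark)
      : front_(high_watermark), rear_(high_watermark) {}

  bool is_empty() const { return front_ <= rear_; }

  // Record a newly promoted entry: the rear moves down one slot.
  void Record(void* entry) { *(--rear_) = entry; }

  // Return the oldest unprocessed entry: the front moves down one slot.
  void* TakeOldest() {
    assert(front_ > rear_);
    return *(--front_);
  }

 private:
  // The front is always at or above the rear in memory.
  void** front_;
  void** rear_;
};

int main() {
  void* backing[8];
  DownwardQueue queue(backing + 8);  // both pointers start at the top

  int a = 1, b = 2;
  queue.Record(&a);
  queue.Record(&b);

  // Entries come back in insertion order, as in the scavenge drain loop.
  assert(queue.TakeOldest() == &a);
  assert(queue.TakeOldest() == &b);
  assert(queue.is_empty());
  return 0;
}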