Commit 6941a4f3 authored by whesse@chromium.org

Collects unused maps that are only kept alive by map transitions.

If a map has descendants in the map transition tree that are alive,
it is kept.  A map is collected only if neither it nor any of its
descendants has live objects.  This happens in mark-sweep and
mark-compact garbage collections.
Review URL: http://codereview.chromium.org/8099

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@601 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a510287b
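For orientation before the diff itself, here is a minimal standalone sketch of the rule the commit message describes: a map survives if it, or any map reachable from it through transitions, still describes a live object; otherwise its transition is dropped and the map can be reclaimed. This is not V8 code — `MapNode`, `SweepTransitionTree`, and the recursive traversal are invented for illustration (the actual change works without recursion, by temporarily storing back pointers in the prototype field, as the patch below shows).

```cpp
// Toy model (not V8): collect maps whose whole transition subtree is dead.
#include <cstdio>
#include <memory>
#include <vector>

struct MapNode {
  bool has_live_objects = false;                       // set by marking
  std::vector<std::unique_ptr<MapNode>> transitions;   // transition targets
};

// Returns true if this map or any transition descendant has live objects.
// Transition edges to fully dead subtrees are erased, so those maps
// become unreachable and can be reclaimed.
bool SweepTransitionTree(MapNode* map) {
  bool subtree_live = map->has_live_objects;
  auto& kids = map->transitions;
  for (size_t i = 0; i < kids.size();) {
    if (SweepTransitionTree(kids[i].get())) {
      subtree_live = true;
      ++i;
    } else {
      kids.erase(kids.begin() + static_cast<std::ptrdiff_t>(i));  // dead transition
    }
  }
  return subtree_live;
}

int main() {
  MapNode root;
  root.has_live_objects = true;
  root.transitions.push_back(std::make_unique<MapNode>());  // dead child
  root.transitions.push_back(std::make_unique<MapNode>());  // live child
  root.transitions[1]->has_live_objects = true;
  SweepTransitionTree(&root);
  std::printf("transitions kept: %zu\n", root.transitions.size());  // prints 1
  return 0;
}
```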
@@ -134,6 +134,8 @@ DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
            "print one trace line following each garbage collection")
DEFINE_bool(collect_maps, true,
            "garbage collect maps from which no objects can be reached")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
...
@@ -78,6 +78,8 @@ void MarkCompactCollector::CollectGarbage(GCTracer* tracer) {
  MarkLiveObjects();
  if (FLAG_collect_maps) ClearNonLiveTransitions();
  SweepLargeObjectSpace();
  if (compacting_collection_) {
...@@ -135,6 +137,7 @@ void MarkCompactCollector::Prepare() { ...@@ -135,6 +137,7 @@ void MarkCompactCollector::Prepare() {
} }
if (FLAG_never_compact) compacting_collection_ = false; if (FLAG_never_compact) compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG #ifdef DEBUG
if (compacting_collection_) { if (compacting_collection_) {
@@ -322,9 +325,13 @@ class MarkingVisitor : public ObjectVisitor {
  // Visit an unmarked object.
  void VisitUnmarkedObject(HeapObject* obj) {
    ASSERT(Heap::Contains(obj));
#ifdef DEBUG
-    ASSERT(Heap::Contains(obj));
    MarkCompactCollector::UpdateLiveObjectCount(obj);
    ASSERT(!obj->IsMarked());
    // ASSERT(!obj->IsMap());  // Some maps are processed here.
    // Their map transitions will be followed.  If we do a test
    // here to treat maps separately, will there be a performance impact?
#endif
    Map* map = obj->map();
    obj->SetMark();
@@ -425,14 +432,99 @@ void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  if (object->IsJSGlobalObject()) Counters::global_objects.Increment();
  tracer_->increment_marked_count();
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      Map::cast(object)->ClearCodeCache();
    }
    object->SetMark();
    if (FLAG_collect_maps) {
      MarkMapContents(reinterpret_cast<Map*>(object));
    } else {
      marking_stack.Push(object);
    }
  } else {
    object->SetMark();
    marking_stack.Push(object);
  }
}

void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  MarkingVisitor visitor;  // Has no state or contents.
  visitor.VisitPointers(&HeapObject::RawField(map, Map::kPrototypeOffset),
                        &HeapObject::RawField(map, Map::kSize));
}

void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::empty_descriptor_array());
  tracer_->increment_marked_count();
#ifdef DEBUG
  UpdateLiveObjectCount(descriptors);
#endif
  descriptors->SetMark();

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  tracer_->increment_marked_count();
#ifdef DEBUG
  UpdateLiveObjectCount(contents);
#endif
  contents->SetMark();
  // Contents contains (value, details) pairs.  If the details say
  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // or NULL_DESCRIPTOR, we don't mark the value as live.  Only for
  // type MAP_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        tracer_->increment_marked_count();
#ifdef DEBUG
        UpdateLiveObjectCount(object);
#endif
        object->SetMark();
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}

void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  while (iterator.has_next()) {
    Object* next_object = iterator.next();
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= LAST_JS_OBJECT_TYPE) {
        map->CreateBackPointers();
      }
    }
  }
}
@@ -675,6 +767,13 @@ void MarkCompactCollector::MarkLiveObjects() {
}

static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
@@ -697,13 +796,6 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
}
-static int CountMarkedCallback(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
-}

void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
  Heap::new_space()->Verify();
  Heap::old_pointer_space()->Verify();
@@ -755,6 +847,63 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
  Heap::lo_space()->FreeUnmarkedObjects();
}
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}

void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions.  At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once.  This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  while (map_iterator.has_next()) {
    Map* map = reinterpret_cast<Map*>(map_iterator.next());
    if (!map->IsMarked() && map->IsByteArray()) continue;
    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > LAST_JS_OBJECT_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;
    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());
      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      HeapObject::RawField(current, Map::kPrototypeOffset) = real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
...
@@ -155,6 +155,17 @@ class MarkCompactCollector : public AllStatic {
    if (!obj->IsMarked()) MarkUnmarkedObject(obj);
  }

  // Creates back pointers for all map transitions, stores them in
  // the prototype field.  The original prototype pointers are restored
  // in ClearNonLiveTransitions().  All JSObject maps
  // connected by map transitions have the same prototype object, which
  // is why we can use this field temporarily for back pointers.
  static void CreateBackPointers();

  // Mark a Map and its DescriptorArray together, skipping transitions.
  static void MarkMapContents(Map* map);
  static void MarkDescriptorArray(DescriptorArray* descriptors);

  // Mark the heap roots and all objects reachable from them.
  static void ProcessRoots(RootMarkingVisitor* visitor);
@@ -194,6 +205,13 @@
  // compacting or not, because the large object space is never compacted.
  static void SweepLargeObjectSpace();

  // Test whether a (possibly marked) object is a Map.
  static inline bool SafeIsMap(HeapObject* object);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  static void ClearNonLiveTransitions();

  // --------------------------------------------------------------------------
  // Phase 2: functions related to computing and encoding forwarding pointers
  // before: live objects' map pointers are marked as '00'
...
@@ -552,8 +552,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
    (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)

-Object* HeapObject::GetHeapObjectField(HeapObject* obj, int index) {
-  return READ_FIELD(obj, HeapObject::kHeaderSize + kPointerSize * index);
-}
Object*& HeapObject::RawField(HeapObject* obj, int byte_offset) {
  return READ_FIELD(obj, byte_offset);
}
...
@@ -333,7 +333,7 @@ Object* Object::GetProperty(Object* receiver,
    // Check if we're allowed to read from the current object. Note
    // that even though we may not actually end up loading the named
    // property from the current object, we still check that we have
-    // access to the it.
    // access to it.
    JSObject* checked = JSObject::cast(current);
    if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
      return checked->GetPropertyWithFailedAccessCheck(receiver,
@@ -4050,6 +4050,67 @@ void String::PrintOn(FILE* file) {
}

void Map::CreateBackPointers() {
  DescriptorArray* descriptors = instance_descriptors();
  for (DescriptorReader r(descriptors); !r.eos(); r.advance()) {
    if (r.type() == MAP_TRANSITION) {
      // Get target.
      Map* target = Map::cast(r.GetValue());
#ifdef DEBUG
      // Verify target.
      Object* source_prototype = prototype();
      Object* target_prototype = target->prototype();
      ASSERT(source_prototype->IsJSObject() ||
             source_prototype->IsMap() ||
             source_prototype->IsNull());
      ASSERT(target_prototype->IsJSObject() ||
             target_prototype->IsNull());
      ASSERT(source_prototype->IsMap() ||
             source_prototype == target_prototype);
#endif
      // Point target back to source.  set_prototype() will not let us set
      // the prototype to a map, as we do here.
      RawField(target, kPrototypeOffset) = this;
    }
  }
}
void Map::ClearNonLiveTransitions(Object* real_prototype) {
  // Live DescriptorArray objects will be marked, so we must use
  // low-level accessors to get and modify their data.
  DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
      RawField(this, Map::kInstanceDescriptorsOffset));
  if (d == Heap::empty_descriptor_array()) return;
  Smi* NullDescriptorDetails =
      PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
  FixedArray* contents = reinterpret_cast<FixedArray*>(
      d->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->length() >= 2);
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) is a map transition,
    // check if the target is live.  If not, null the descriptor.
    // Also drop the back pointer for that map transition, so that this
    // map is not reached again by following a back pointer from a
    // non-live object.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() == MAP_TRANSITION) {
      Map* target = reinterpret_cast<Map*>(contents->get(i));
      ASSERT(target->IsHeapObject());
      if (!target->IsMarked()) {
        ASSERT(target->IsMap());
        contents->set(i + 1, NullDescriptorDetails, SKIP_WRITE_BARRIER);
        contents->set(i, Heap::null_value(), SKIP_WRITE_BARRIER);
        ASSERT(target->prototype() == this ||
               target->prototype() == real_prototype);
        // Getter prototype() is read-only, set_prototype() has side effects.
        RawField(target, Map::kPrototypeOffset) = real_prototype;
      }
    }
  }
}
void Map::MapIterateBody(ObjectVisitor* v) {
  // Assumes all Object* members are contiguously allocated!
  IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
@@ -6728,4 +6789,6 @@ int BreakPointInfo::GetBreakPointCount() {
}

const int FunctionTemplateInfo::kSize;
const int ObjectTemplateInfo::kSize;

} }  // namespace v8::internal
@@ -1041,7 +1041,11 @@ class HeapObject: public Object {
  // object is overflowed (ie, partially restore the map pointer).
  inline void ClearOverflow();

-  static inline Object* GetHeapObjectField(HeapObject* obj, int index);
  // Returns the field at offset in obj, as a read/write Object* reference.
  // Does no checking, and is safe to use during GC, while maps are invalid.
  // Does not update remembered sets, so should only be assigned to
  // during marking GC.
  static inline Object*& RawField(HeapObject* obj, int offset);

  // Casting.
  static inline HeapObject* cast(Object* obj);
@@ -2421,6 +2425,17 @@ class Map: public HeapObject {
  // Removes a code object from the code cache at the given index.
  void RemoveFromCodeCache(int index);

  // For every transition in this map, makes the transition's
  // target's prototype pointer point back to this map.
  // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
  void CreateBackPointers();

  // Set all map transitions from this map to dead maps to null.
  // Also, restore the original prototype on the targets of these
  // transitions, so that we do not process this map again while
  // following back pointers.
  void ClearNonLiveTransitions(Object* real_prototype);

  // Dispatched behavior.
  void MapIterateBody(ObjectVisitor* v);
#ifdef DEBUG
@@ -2805,7 +2820,7 @@ class GlobalObject: public JSObject {
  // [builtins]: the object holding the runtime routines written in JS.
  DECL_ACCESSORS(builtins, JSBuiltinsObject)

-  // [global context]: the global context corresponding to this global objet.
  // [global context]: the global context corresponding to this global object.
  DECL_ACCESSORS(global_context, Context)

  // [global receiver]: the global receiver object of the context
...
@@ -99,7 +99,10 @@ class Descriptor BASE_EMBEDDED {
  friend class DescriptorArray;
};

// A pointer from a map to the new map that is created by adding
// a named property.  These are key to the speed and functioning of V8.
// The two maps should always have the same prototype, since
// MapSpace::CreateBackPointers depends on this.
class MapTransitionDescriptor: public Descriptor {
 public:
  MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
...
@@ -320,9 +320,16 @@ static Object* Runtime_IsTemplate(Arguments args) {
static Object* Runtime_GetTemplateField(Arguments args) {
  ASSERT(args.length() == 2);
  CONVERT_CHECKED(HeapObject, templ, args[0]);
-  RUNTIME_ASSERT(templ->IsStruct());
  CONVERT_CHECKED(Smi, field, args[1]);
-  return HeapObject::GetHeapObjectField(templ, field->value());
  int index = field->value();
  int offset = index * kPointerSize + HeapObject::kHeaderSize;
  InstanceType type = templ->map()->instance_type();
  RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
                 type == OBJECT_TEMPLATE_INFO_TYPE);
  RUNTIME_ASSERT(offset > 0);
  RUNTIME_ASSERT(offset < ((type == FUNCTION_TEMPLATE_INFO_TYPE) ?
                           FunctionTemplateInfo::kSize :
                           ObjectTemplateInfo::kSize));
  return HeapObject::RawField(templ, offset);
}
...