Commit 28cee257 authored by whessev8

Remove unused maps during marking garbage collections.

Review URL: http://codereview.chromium.org/8831

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@637 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5e03ca85
@@ -134,6 +134,8 @@ DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
"print one trace line following each garbage collection")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
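
For context on the hunk above: a flag defined with DEFINE_bool becomes reachable in code as FLAG_<name>, which is how the GC phases below consult FLAG_collect_maps. A minimal standalone sketch of the same pattern (the macro name and wiring here are illustrative stand-ins, not V8's actual definitions, which also register the name and comment for command-line parsing):

#include <cstdio>

// Illustrative reimplementation of the DEFINE_bool pattern.
#define DEFINE_BOOL_SKETCH(name, default_value, comment) \
  static bool FLAG_##name = default_value;

DEFINE_BOOL_SKETCH(collect_maps, true,
                   "garbage collect maps from which no objects can be reached")

int main() {
  // GC code tests the flag exactly like the hunks below do.
  if (FLAG_collect_maps) {
    std::printf("map collection enabled\n");
  }
  return 0;
}
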
@@ -78,6 +78,8 @@ void MarkCompactCollector::CollectGarbage(GCTracer* tracer) {
MarkLiveObjects();
if (FLAG_collect_maps) ClearNonLiveTransitions();
SweepLargeObjectSpace();
if (compacting_collection_) {
@@ -135,6 +137,7 @@ void MarkCompactCollector::Prepare() {
}
if (FLAG_never_compact) compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG
if (compacting_collection_) {
@@ -322,9 +325,10 @@ class MarkingVisitor : public ObjectVisitor {
// Visit an unmarked object.
void VisitUnmarkedObject(HeapObject* obj) {
-  ASSERT(Heap::Contains(obj));
#ifdef DEBUG
ASSERT(Heap::Contains(obj));
MarkCompactCollector::UpdateLiveObjectCount(obj);
ASSERT(!obj->IsMarked());
#endif
Map* map = obj->map();
obj->SetMark();
@@ -425,14 +429,104 @@ void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
ASSERT(!object->IsMarked());
if (object->IsJSGlobalObject()) Counters::global_objects.Increment();
-  if (FLAG_cleanup_caches_in_maps_at_gc && object->IsMap()) {
-    Map::cast(object)->ClearCodeCache();
-  }
tracer_->increment_marked_count();
ASSERT(Heap::Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
if (FLAG_cleanup_caches_in_maps_at_gc) {
map->ClearCodeCache();
}
map->SetMark();
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
map->instance_type() <= JS_FUNCTION_TYPE) {
MarkMapContents(map);
} else {
marking_stack.Push(map);
}
} else {
object->SetMark();
marking_stack.Push(object);
}
}
void MarkCompactCollector::MarkMapContents(Map* map) {
MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
*HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
// Mark the Object* fields of the Map.
// Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it.
MarkingVisitor visitor; // Has no state or contents.
visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
HeapObject::RawField(map, Map::kSize));
}
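
MarkMapContents relies on HeapObject::RawField (introduced by this change; its definition appears in a later hunk) to treat object fields as raw Object** slots addressed by byte offset. A self-contained sketch of that addressing scheme, with made-up offsets standing in for Map::kPrototypeOffset and Map::kSize:

#include <cstdint>
#include <cstdio>

struct Object {};  // stand-in for V8's tagged Object

// Hypothetical object layout: one header word, then pointer fields.
const int kHeaderSize = sizeof(void*);
const int kPrototypeOffset = kHeaderSize;           // first pointer field
const int kSize = kHeaderSize + 3 * sizeof(void*);  // three fields total

// The core of RawField: reinterpret 'byte_offset' into the object as a
// read/write slot holding an Object*.
Object** RawField(void* obj, int byte_offset) {
  return reinterpret_cast<Object**>(
      reinterpret_cast<char*>(obj) + byte_offset);
}

int main() {
  // A fake three-field object, allocated as raw bytes.
  void* obj = new char[kSize]();
  Object proto;
  *RawField(obj, kPrototypeOffset) = &proto;  // raw store, no write barrier

  // Visit every pointer field in [kPrototypeOffset, kSize), as the
  // MarkingVisitor call in MarkMapContents does.
  for (Object** slot = RawField(obj, kPrototypeOffset);
       slot < RawField(obj, kSize); ++slot) {
    std::printf("slot %p holds %p\n", (void*)slot, (void*)*slot);
  }
  delete[] static_cast<char*>(obj);
  return 0;
}
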
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray *descriptors) {
if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
ASSERT(descriptors != Heap::empty_descriptor_array());
-  object->SetMark();
tracer_->increment_marked_count();
-  ASSERT(Heap::Contains(object));
-  marking_stack.Push(object);
#ifdef DEBUG
UpdateLiveObjectCount(descriptors);
#endif
descriptors->SetMark();
FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
ASSERT(!contents->IsMarked());
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
tracer_->increment_marked_count();
#ifdef DEBUG
UpdateLiveObjectCount(contents);
#endif
contents->SetMark();
// Contents contains (value, details) pairs. If the details say
// that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
// or NULL_DESCRIPTOR, we don't mark the value as live. Only for
// type MAP_TRANSITION is the value an Object* (a Map*).
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
if (object->IsHeapObject() && !object->IsMarked()) {
tracer_->increment_marked_count();
#ifdef DEBUG
UpdateLiveObjectCount(object);
#endif
object->SetMark();
marking_stack.Push(object);
}
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
marking_stack.Push(descriptors);
}
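
The (value, details) pairing that the loop above decodes can be modeled with an ordinary flat array. In the sketch below the details word is a plain enum rather than V8's Smi-encoded PropertyDetails, and the enum ordering (phantom types sorting last, mirroring FIRST_PHANTOM_PROPERTY_TYPE) is an assumption for illustration; the marking rule is the one the comment states:

#include <cstdint>
#include <cstdio>

enum PropertyType {
  NORMAL = 0,
  FIELD = 1,
  FIRST_PHANTOM_PROPERTY_TYPE = 2,
  MAP_TRANSITION = 2,
  CONSTANT_TRANSITION = 3,
  NULL_DESCRIPTOR = 4,
};

int main() {
  int live_a = 1, live_b = 2;
  // Flattened (value, details) pairs: even slot = value pointer,
  // odd slot = details, stored as a plain integer tag the way V8
  // stores PropertyDetails as a Smi.
  uintptr_t contents[] = {
      reinterpret_cast<uintptr_t>(&live_a), NORMAL,
      reinterpret_cast<uintptr_t>(&live_b), MAP_TRANSITION,
  };
  const int length = sizeof(contents) / sizeof(contents[0]);

  for (int i = 0; i < length; i += 2) {
    PropertyType type = static_cast<PropertyType>(contents[i + 1]);
    if (type < FIRST_PHANTOM_PROPERTY_TYPE) {
      std::printf("pair %d: mark value %p\n", i / 2, (void*)contents[i]);
    } else {
      std::printf("pair %d: skip (transition/null descriptor)\n", i / 2);
    }
  }
  return 0;
}
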
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(Heap::map_space());
while (iterator.has_next()) {
Object* next_object = iterator.next();
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
map->instance_type() <= JS_FUNCTION_TYPE) {
map->CreateBackPointers();
} else {
ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
}
}
}
}
@@ -675,6 +769,13 @@ void MarkCompactCollector::MarkLiveObjects() {
}
static int CountMarkedCallback(HeapObject* obj) {
MapWord map_word = obj->map_word();
map_word.ClearMark();
return obj->SizeFromMap(map_word.ToMap());
}
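
CountMarkedCallback works because, during marking, an object's map word doubles as the mark: the mark must be stripped from a copy before the word can be used as a map pointer to compute the object's size. A toy encoding of that trick (one low bit as the mark; an assumption for illustration, not V8's actual encoding). SafeIsMap, added further down, clears the mark the same way before reading the instance type:

#include <cassert>
#include <cstdint>

// Toy map word: a pointer-sized value whose low bit is borrowed as the
// mark during the marking phase.
struct MapWordModel {
  uintptr_t value;
  void SetMark()        { value |= 1u; }
  void ClearMark()      { value &= ~uintptr_t(1); }
  bool IsMarked() const { return (value & 1u) != 0; }
  void* ToMap() const   { return reinterpret_cast<void*>(value); }
};

int main() {
  static int fake_map;  // naturally aligned, so the low address bit is 0
  MapWordModel w{reinterpret_cast<uintptr_t>(&fake_map)};
  w.SetMark();
  assert(w.IsMarked());
  // To use the word as a map pointer again (e.g. to read the instance
  // size, as SizeFromMap does), first strip the mark from a copy.
  MapWordModel copy = w;
  copy.ClearMark();
  assert(copy.ToMap() == &fake_map);
  return 0;
}
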
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
@@ -697,13 +798,6 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
}
-static int CountMarkedCallback(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
-}
void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
Heap::new_space()->Verify();
Heap::old_pointer_space()->Verify();
@@ -755,6 +849,63 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
Heap::lo_space()->FreeUnmarkedObjects();
}
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
MapWord metamap = object->map_word();
metamap.ClearMark();
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
// dropping the back pointers temporarily stored in the prototype field.
// Setting the prototype field requires following the linked list of
// back pointers, reversing them all at once. This allows us to find
// those maps with map transitions that need to be nulled, and only
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
while (map_iterator.has_next()) {
Map* map = reinterpret_cast<Map*>(map_iterator.next());
if (!map->IsMarked() && map->IsByteArray()) continue;
ASSERT(SafeIsMap(map));
// Only JSObject and subtypes have map transitions and back pointers.
if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
if (map->instance_type() > JS_FUNCTION_TYPE) continue;
// Follow the chain of back pointers to find the prototype.
Map* current = map;
while (SafeIsMap(current)) {
current = reinterpret_cast<Map*>(current->prototype());
ASSERT(current->IsHeapObject());
}
Object* real_prototype = current;
// Follow back pointers, setting them to prototype,
// clearing map transitions when necessary.
current = map;
bool on_dead_path = !current->IsMarked();
Object *next;
while (SafeIsMap(current)) {
next = current->prototype();
// There should never be a dead map above a live map.
ASSERT(on_dead_path || current->IsMarked());
// A live map above a dead map indicates a dead transition.
// This test will always be false on the first iteration.
if (on_dead_path && current->IsMarked()) {
on_dead_path = false;
current->ClearNonLiveTransitions(real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
current = reinterpret_cast<Map*>(next);
}
}
}
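
The two walks in ClearNonLiveTransitions are easy to get lost in. Below is a self-contained model of the same idea, with hypothetical types rather than V8's: each map's prototype slot temporarily holds its parent map, the first walk runs off the end of the map chain to find the real prototype, and the second walk restores every slot while clearing transitions where a live map points down into dead maps:

#include <cstdio>

// 'parent_or_prototype' plays the role of the overloaded prototype field:
// it points at the parent map while back pointers are installed, and at
// the real prototype once they are dropped. 'is_map' distinguishes the
// two cases the way SafeIsMap does.
struct Node {
  const char* name;
  bool is_map;
  bool marked;
  Node* parent_or_prototype;
};

int main() {
  Node proto = {"real_prototype", false, true, nullptr};
  Node root  = {"root_map",  true, true,  &proto};  // live
  Node mid   = {"mid_map",   true, false, &root};   // dead
  Node leaf  = {"leaf_map",  true, false, &mid};    // dead

  // Walk 1: follow back pointers off the end of the map chain.
  Node* current = &leaf;
  while (current->is_map) current = current->parent_or_prototype;
  Node* real_prototype = current;  // == &proto

  // Walk 2: restore prototype slots; a live map reached while still on a
  // dead path owns a transition into dead maps that must be cleared.
  current = &leaf;
  bool on_dead_path = !current->marked;
  while (current->is_map) {
    Node* next = current->parent_or_prototype;
    if (on_dead_path && current->marked) {
      on_dead_path = false;
      std::printf("%s: clear transition into dead maps\n", current->name);
    }
    current->parent_or_prototype = real_prototype;
    current = next;
  }
  std::printf("real prototype: %s\n", real_prototype->name);
  return 0;
}
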
// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
@@ -155,6 +155,17 @@ class MarkCompactCollector : public AllStatic {
if (!obj->IsMarked()) MarkUnmarkedObject(obj);
}
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
// in ClearNonLiveTransitions(). All JSObject maps
// connected by map transitions have the same prototype object, which
// is why we can use this field temporarily for back pointers.
static void CreateBackPointers();
// Mark a Map and its DescriptorArray together, skipping transitions.
static void MarkMapContents(Map* map);
static void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
static void ProcessRoots(RootMarkingVisitor* visitor);
@@ -194,6 +205,13 @@ class MarkCompactCollector : public AllStatic {
// compacting or not, because the large object space is never compacted.
static void SweepLargeObjectSpace();
// Test whether a (possibly marked) object is a Map.
static inline bool SafeIsMap(HeapObject* object);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
static void ClearNonLiveTransitions();
// --------------------------------------------------------------------------
// Phase 2: functions related to computing and encoding forwarding pointers
// before: live objects' map pointers are marked as '00'
@@ -557,8 +557,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-Object* HeapObject::GetHeapObjectField(HeapObject* obj, int index) {
-  return READ_FIELD(obj, HeapObject::kHeaderSize + kPointerSize * index);
-}
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return &READ_FIELD(obj, byte_offset);
}
@@ -333,7 +333,7 @@ Object* Object::GetProperty(Object* receiver,
// Check if we're allowed to read from the current object. Note
// that even though we may not actually end up loading the named
// property from the current object, we still check that we have
-  // access to the it.
// access to it.
JSObject* checked = JSObject::cast(current);
if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
return checked->GetPropertyWithFailedAccessCheck(receiver,
@@ -4052,6 +4052,67 @@ void String::PrintOn(FILE* file) {
}
void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors();
for (DescriptorReader r(descriptors); !r.eos(); r.advance()) {
if (r.type() == MAP_TRANSITION) {
// Get target.
Map* target = Map::cast(r.GetValue());
#ifdef DEBUG
// Verify target.
Object* source_prototype = prototype();
Object* target_prototype = target->prototype();
ASSERT(source_prototype->IsJSObject() ||
source_prototype->IsMap() ||
source_prototype->IsNull());
ASSERT(target_prototype->IsJSObject() ||
target_prototype->IsNull());
ASSERT(source_prototype->IsMap() ||
source_prototype == target_prototype);
#endif
// Point target back to source. set_prototype() will not let us set
// the prototype to a map, as we do here.
*RawField(target, kPrototypeOffset) = this;
}
}
}
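
The stores above can be pictured with a toy type (hypothetical, not V8's): each transition target gets its prototype slot overwritten with a pointer back to the source map. Nothing is lost, because every map in the tree shares one prototype, recoverable from the root of the chain:

#include <cassert>

struct MapSketch {
  MapSketch* transition_target;  // single MAP_TRANSITION edge, for brevity
  void* prototype_slot;          // real prototype, or back pointer during GC
};

// Equivalent of the *RawField(target, kPrototypeOffset) = this store:
// a raw write, since set_prototype() would reject a map as a prototype.
void CreateBackPointer(MapSketch* source) {
  if (source->transition_target != nullptr) {
    source->transition_target->prototype_slot = source;
  }
}

int main() {
  int proto = 0;  // stands in for the shared prototype object
  MapSketch child = {nullptr, &proto};
  MapSketch parent = {&child, &proto};
  CreateBackPointer(&parent);
  assert(child.prototype_slot == &parent);  // back pointer installed
  return 0;
}
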
void Map::ClearNonLiveTransitions(Object* real_prototype) {
// Live DescriptorArray objects will be marked, so we must use
// low-level accessors to get and modify their data.
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
*RawField(this, Map::kInstanceDescriptorsOffset));
if (d == Heap::empty_descriptor_array()) return;
Smi* NullDescriptorDetails =
PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
FixedArray* contents = reinterpret_cast<FixedArray*>(
d->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->length() >= 2);
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) is a map transition,
// check if the target is live. If not, null the descriptor.
// Also drop the back pointer for that map transition, so that this
// map is not reached again by following a back pointer from a
// non-live object.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
if (details.type() == MAP_TRANSITION) {
Map* target = reinterpret_cast<Map*>(contents->get(i));
ASSERT(target->IsHeapObject());
if (!target->IsMarked()) {
ASSERT(target->IsMap());
contents->set(i + 1, NullDescriptorDetails, SKIP_WRITE_BARRIER);
contents->set(i, Heap::null_value(), SKIP_WRITE_BARRIER);
ASSERT(target->prototype() == this ||
target->prototype() == real_prototype);
// Getter prototype() is read-only, set_prototype() has side effects.
*RawField(target, Map::kPrototypeOffset) = real_prototype;
}
}
}
}
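
What the nulling above does to one (value, details) pair, in the same flattened-array picture used earlier (the integer tags are assumed stand-ins for the Smi-encoded PropertyDetails):

#include <cstdint>
#include <cstdio>

enum DetailsTag { MAP_TRANSITION_TAG = 2, NULL_DESCRIPTOR_TAG = 4 };

int main() {
  bool target_is_marked = false;  // an unmarked target map is dead
  int dead_target = 0;            // stands in for the dead map
  uintptr_t contents[2] = {
      reinterpret_cast<uintptr_t>(&dead_target), MAP_TRANSITION_TAG};

  if (!target_is_marked) {
    // Replace the transition with a null descriptor in place; the pair
    // stays well-formed, so descriptor iteration still works afterwards.
    contents[0] = 0;  // Heap::null_value() in the real code
    contents[1] = NULL_DESCRIPTOR_TAG;
  }
  std::printf("value=%p details=%d\n", (void*)contents[0], (int)contents[1]);
  return 0;
}
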
void Map::MapIterateBody(ObjectVisitor* v) {
// Assumes all Object* members are contiguously allocated!
IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
@@ -1043,7 +1043,11 @@ class HeapObject: public Object {
// object is overflowed (ie, partially restore the map pointer).
inline void ClearOverflow();
-  static inline Object* GetHeapObjectField(HeapObject* obj, int index);
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
// Does not update remembered sets, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
// Casting.
static inline HeapObject* cast(Object* obj);
@@ -2420,6 +2424,17 @@ class Map: public HeapObject {
// Removes a code object from the code cache at the given index.
void RemoveFromCodeCache(int index);
// For every transition in this map, makes the transition's
// target's prototype pointer point back to this map.
// This is undone in MarkCompactCollector::ClearNonLiveTransitions().
void CreateBackPointers();
// Set all map transitions from this map to dead maps to null.
// Also, restore the original prototype on the targets of these
// transitions, so that we do not process this map again while
// following back pointers.
void ClearNonLiveTransitions(Object* real_prototype);
// Dispatched behavior.
void MapIterateBody(ObjectVisitor* v);
#ifdef DEBUG
@@ -2804,7 +2819,7 @@ class GlobalObject: public JSObject {
// [builtins]: the object holding the runtime routines written in JS.
DECL_ACCESSORS(builtins, JSBuiltinsObject)
-  // [global context]: the global context corresponding to this global objet.
// [global context]: the global context corresponding to this global object.
DECL_ACCESSORS(global_context, Context)
// [global receiver]: the global receiver object of the context
@@ -99,7 +99,10 @@ class Descriptor BASE_EMBEDDED {
friend class DescriptorArray;
};
// A pointer from a map to the new map that is created by adding
// a named property. These are key to the speed and functioning of V8.
// The two maps should always have the same prototype, since
// MapSpace::CreateBackPointers depends on this.
class MapTransitionDescriptor: public Descriptor {
public:
MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
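
The comment above describes V8's hidden-class mechanism: adding a named property moves an object to a new map, and the edge between the two maps is a map transition. A standalone model of that tree (hypothetical types; real transitions live inside the descriptor array, not in a separate table):

#include <cassert>
#include <map>
#include <string>

struct ObjectMap {
  ObjectMap* prototype;                           // shared across the tree
  std::map<std::string, ObjectMap*> transitions;  // property name -> target
};

// Adding property 'name' either reuses the recorded transition or creates
// a fresh target map with the same prototype, which is the invariant
// CreateBackPointers depends on.
ObjectMap* AddProperty(ObjectMap* from, const std::string& name) {
  auto it = from->transitions.find(name);
  if (it != from->transitions.end()) return it->second;
  ObjectMap* target = new ObjectMap{from->prototype, {}};  // leaked; a sketch
  from->transitions[name] = target;
  return target;
}

int main() {
  ObjectMap proto{nullptr, {}};
  ObjectMap root{&proto, {}};
  ObjectMap* with_x = AddProperty(&root, "x");
  assert(with_x->prototype == root.prototype);  // same prototype
  assert(AddProperty(&root, "x") == with_x);    // transition is reused
  return 0;
}
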
@@ -320,9 +320,19 @@ static Object* Runtime_IsTemplate(Arguments args) {
static Object* Runtime_GetTemplateField(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(HeapObject, templ, args[0]);
-  RUNTIME_ASSERT(templ->IsStruct());
CONVERT_CHECKED(Smi, field, args[1]);
-  return HeapObject::GetHeapObjectField(templ, field->value());
int index = field->value();
int offset = index * kPointerSize + HeapObject::kHeaderSize;
InstanceType type = templ->map()->instance_type();
RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
type == OBJECT_TEMPLATE_INFO_TYPE);
RUNTIME_ASSERT(offset > 0);
if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
} else {
RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
}
return *HeapObject::RawField(templ, offset);
}
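
The replacement code above turns an arbitrary index into a byte offset and range-checks it against the concrete struct size before dereferencing, which is what made it safe to drop the generic GetHeapObjectField accessor. The arithmetic, with placeholder sizes (the kSize values here are assumptions, not V8's):

#include <cstdio>

const int kPointerSize = sizeof(void*);
const int kHeaderSize = kPointerSize;  // assumed header: one map word
const int kFunctionTemplateInfoSize = kHeaderSize + 14 * kPointerSize;

// Mirrors the RUNTIME_ASSERT chain: the offset must land strictly inside
// the object, past the header (offset > 0 also rejects negative indices).
bool TemplateFieldOffsetOk(int index) {
  int offset = index * kPointerSize + kHeaderSize;
  return offset > 0 && offset < kFunctionTemplateInfoSize;
}

int main() {
  std::printf("index 0:  %s\n", TemplateFieldOffsetOk(0) ? "ok" : "rejected");
  std::printf("index -5: %s\n", TemplateFieldOffsetOk(-5) ? "ok" : "rejected");
  std::printf("index 99: %s\n", TemplateFieldOffsetOk(99) ? "ok" : "rejected");
  return 0;
}
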