Commit e6224d27 authored by ulan@chromium.org

Make embedded maps in optimized code weak.

Each map keeps a weak array of dependent codes, in which it tracks all the optimized code objects that embed it.
Old-space GC either clears dead dependent codes from the array if the corresponding map is alive, or deoptimizes the live dependent codes if the map is dead.

BUG=v8:2073
R=mstarzinger@chromium.org

Review URL: https://chromiumcodereview.appspot.com/11575007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13490 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c500efe1
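
The decision described in the commit message is small but easy to lose in the diff, so here is a standalone toy model of it (plain C++11, not V8 code; Code, Map, and ProcessDependentCodes are invented names for illustration): a live map prunes dead entries from its dependent codes array, while a dead map flags its surviving entries for deoptimization and drops the array.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Code {
  bool alive = true;                     // stands in for the GC mark bit
  bool marked_for_deoptimization = false;
};

struct Map {
  bool alive = true;                     // stands in for the GC mark bit
  std::vector<Code*> dependent_codes;    // weak: not kept alive by the map
};

void ProcessDependentCodes(Map* map) {
  std::vector<Code*>& codes = map->dependent_codes;
  if (map->alive) {
    // Live map: drop entries whose code is dead or already marked for deopt.
    codes.erase(std::remove_if(codes.begin(), codes.end(),
                               [](Code* c) {
                                 return !c->alive ||
                                        c->marked_for_deoptimization;
                               }),
                codes.end());
  } else {
    // Dead map: every surviving code embeds a stale map, so flag it for
    // deoptimization and forget the whole list.
    for (Code* c : codes) {
      if (c->alive) c->marked_for_deoptimization = true;
    }
    codes.clear();
  }
}

int main() {
  Code live_code, dead_code;
  dead_code.alive = false;

  Map live_map;                          // survives GC: array is pruned
  live_map.dependent_codes = {&live_code, &dead_code};
  ProcessDependentCodes(&live_map);
  std::printf("live map keeps %zu code(s)\n", live_map.dependent_codes.size());

  Map dead_map;                          // dies in GC: its codes get flagged
  dead_map.alive = false;
  dead_map.dependent_codes = {&live_code};
  ProcessDependentCodes(&dead_map);
  std::printf("code marked for deopt: %d\n",
              (int)live_code.marked_for_deoptimization);
  return 0;
}

In the patch itself this logic lives in MarkCompactCollector::ClearNonLiveDependentCodes and ClearAndDeoptimizeDependentCodes (mark-compact.cc hunks below), and the codes flagged there are actually deoptimized in MarkCompactCollector::Finish() via Deoptimizer::DeoptimizeAllFunctionsWith().
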
......@@ -2199,6 +2199,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_codes(DependentCodes::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
......@@ -2334,14 +2336,18 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_dependent_codes(DependentCodes::cast(empty_fixed_array()));
meta_map()->init_back_pointer(undefined_value());
meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
fixed_array_map()->set_dependent_codes(
DependentCodes::cast(empty_fixed_array()));
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
oddball_map()->set_dependent_codes(DependentCodes::cast(empty_fixed_array()));
oddball_map()->init_back_pointer(undefined_value());
oddball_map()->set_instance_descriptors(empty_descriptor_array());
......@@ -3790,6 +3796,9 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
code->set_prologue_offset(kPrologueOffsetNotSet);
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
......
......@@ -429,6 +429,7 @@ Handle<Code> LChunk::Codegen(Code::Kind kind) {
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
RegisterDependentCodeForEmbeddedMaps(code);
CodeGenerator::PrintCode(code, info());
return code;
}
......@@ -436,4 +437,22 @@ Handle<Code> LChunk::Codegen(Code::Kind kind) {
}
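// Scan the relocation info of the freshly generated optimized code for
// embedded maps that can transition, and register the code in each such
// map's dependent codes array.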
void LChunk::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT &&
it.rinfo()->target_object()->IsMap()) {
Handle<Map> map(Map::cast(it.rinfo()->target_object()));
if (map->CanTransition()) {
maps.Add(map, zone());
}
}
}
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(code);
}
}
} } // namespace v8::internal
......@@ -695,6 +695,8 @@ class LChunk: public ZoneObject {
pointer_maps_(8, graph->zone()),
inlined_closures_(1, graph->zone()) { }
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
int spill_slot_count_;
private:
......
......@@ -82,6 +82,15 @@ class VerifyMarkingVisitor: public ObjectVisitor {
}
}
}
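// Embedded transitionable maps in optimized code are weak references now,
// so the verifier skips the liveness check for them.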
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
!rinfo->target_object()->IsMap() ||
!Map::cast(rinfo->target_object())->CanTransition()) {
VisitPointer(rinfo->target_object_address());
}
}
};
......@@ -382,7 +391,7 @@ void MarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
if (FLAG_collect_maps) ClearNonLiveTransitions();
if (FLAG_collect_maps) ClearNonLiveReferences();
ClearWeakMaps();
......@@ -823,6 +832,13 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
#endif
}
class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
public:
virtual bool TakeFunction(JSFunction* function) {
return function->code()->marked_for_deoptimization();
}
};
void MarkCompactCollector::Finish() {
#ifdef DEBUG
......@@ -834,6 +850,9 @@ void MarkCompactCollector::Finish() {
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
heap()->isolate()->stub_cache()->Clear();
DeoptimizeMarkedCodeFilter filter;
Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
}
......@@ -2165,7 +2184,7 @@ void MarkCompactCollector::ReattachInitialMaps() {
}
void MarkCompactCollector::ClearNonLiveTransitions() {
void MarkCompactCollector::ClearNonLiveReferences() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
......@@ -2177,9 +2196,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
if (map->IsFreeSpace()) continue;
ASSERT(map->IsMap());
// Only JSObject and subtypes have map transitions and back pointers.
STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
if (!map->CanTransition()) continue;
if (map_mark.Get() &&
map->attached_to_shared_function_info()) {
......@@ -2191,6 +2208,12 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
if (map_mark.Get()) {
ClearNonLiveDependentCodes(map);
} else {
ClearAndDeoptimizeDependentCodes(map);
}
}
}
......@@ -2259,6 +2282,46 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
}
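// Called for maps that did not survive this GC: every live dependent code
// embeds a dead map, so flag it for deoptimization and detach the array.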
void MarkCompactCollector::ClearAndDeoptimizeDependentCodes(Map* map) {
AssertNoAllocation no_allocation_scope;
DependentCodes* codes = map->dependent_codes();
int number_of_codes = codes->number_of_codes();
if (number_of_codes == 0) return;
for (int i = 0; i < number_of_codes; i++) {
Code* code = codes->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
}
codes->clear_code_at(i);
}
map->set_dependent_codes(DependentCodes::cast(heap()->empty_fixed_array()));
}
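// Called for maps that survived this GC: compact the dependent codes array
// in place, dropping codes that are dead or already marked for
// deoptimization.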
void MarkCompactCollector::ClearNonLiveDependentCodes(Map* map) {
AssertNoAllocation no_allocation_scope;
DependentCodes* codes = map->dependent_codes();
int number_of_codes = codes->number_of_codes();
if (number_of_codes == 0) return;
int new_number_of_codes = 0;
for (int i = 0; i < number_of_codes; i++) {
Code* code = codes->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
if (new_number_of_codes != i) {
codes->set_code_at(new_number_of_codes, code);
}
Object** slot = codes->code_slot_at(new_number_of_codes);
RecordSlot(slot, slot, code);
new_number_of_codes++;
}
}
for (int i = new_number_of_codes; i < number_of_codes; i++) {
codes->clear_code_at(i);
}
codes->set_number_of_codes(new_number_of_codes);
}
void MarkCompactCollector::ProcessWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
......
......@@ -797,10 +797,13 @@ class MarkCompactCollector {
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveTransitions();
void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
void ClearAndDeoptimizeDependentCodes(Map* map);
void ClearNonLiveDependentCodes(Map* map);
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
// back after collection. This is either done during
......
......@@ -584,6 +584,14 @@ bool Object::IsDeoptimizationOutputData() {
}
bool Object::IsDependentCodes() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a dependent codes array.
return true;
}
bool Object::IsTypeFeedbackCells() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
......@@ -2374,6 +2382,7 @@ CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCodes)
CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(JSFunctionResultCache)
......@@ -3406,6 +3415,47 @@ bool Map::is_observed() {
}
void Map::AddDependentCode(Handle<Code> code) {
Handle<DependentCodes> codes =
DependentCodes::Append(Handle<DependentCodes>(dependent_codes()), code);
if (*codes != dependent_codes()) {
set_dependent_codes(*codes);
}
}
int DependentCodes::number_of_codes() {
if (length() == 0) return 0;
return Smi::cast(get(kNumberOfCodesIndex))->value();
}
void DependentCodes::set_number_of_codes(int value) {
set(kNumberOfCodesIndex, Smi::FromInt(value));
}
Code* DependentCodes::code_at(int i) {
return Code::cast(get(kCodesIndex + i));
}
void DependentCodes::set_code_at(int i, Code* value) {
set(kCodesIndex + i, value);
}
Object** DependentCodes::code_slot_at(int i) {
return HeapObject::RawField(
this, FixedArray::OffsetOfElementAt(kCodesIndex + i));
}
void DependentCodes::clear_code_at(int i) {
set_undefined(kCodesIndex + i);
}
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
......@@ -3683,6 +3733,21 @@ void Code::set_has_function_cache(bool flag) {
}
bool Code::marked_for_deoptimization() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
return MarkedForDeoptimizationField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_marked_for_deoptimization(bool flag) {
ASSERT(kind() == OPTIMIZED_FUNCTION);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = MarkedForDeoptimizationField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
......@@ -4011,6 +4076,7 @@ HeapObject* Map::UncheckedPrototypeTransitions() {
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
ACCESSORS(Map, dependent_codes, DependentCodes, kDependentCodesOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
......
......@@ -175,8 +175,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
StaticVisitor::MarkObject(heap, object);
if (!FLAG_collect_maps || rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
!object->IsMap() || !Map::cast(object)->CanTransition()) {
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
StaticVisitor::MarkObject(heap, object);
}
}
......@@ -262,12 +265,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
map_object->ClearCodeCache(heap);
}
// When map collection is enabled we have to mark through map's
// transitions and back pointers in a special way to make these links
// weak. Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (FLAG_collect_maps &&
map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
// When map collection is enabled we have to mark through map's transitions
// and back pointers in a special way to make these links weak.
if (FLAG_collect_maps && map_object->CanTransition()) {
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(heap,
......@@ -395,6 +395,14 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
ASSERT(transitions->IsMap() || transitions->IsUndefined());
}
// Mark the dependent codes array but do not push it onto the marking
// stack; this keeps the references from it weak. Dead codes are cleared
// when we iterate over maps in ClearNonLiveReferences.
Object** slot = HeapObject::RawField(map, Map::kDependentCodesOffset);
HeapObject* obj = HeapObject::cast(*slot);
heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
StaticVisitor::MarkObjectWithoutPush(heap, obj);
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
......@@ -639,7 +647,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// There are two places where we iterate code bodies: here and the
// templated CodeIterateBody (below). They should be kept in sync.
IteratePointer(v, kRelocationInfoOffset);
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
......@@ -662,8 +670,8 @@ void Code::CodeIterateBody(Heap* heap) {
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// There are two places where we iterate code bodies: here and the
// non-templated CodeIterateBody (above). They should be kept in sync.
// There are two places where we iterate code bodies: here and the non-
// templated CodeIterateBody (above). They should be kept in sync.
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
......
......@@ -9480,6 +9480,34 @@ void Map::ZapPrototypeTransitions() {
}
Handle<DependentCodes> DependentCodes::Append(Handle<DependentCodes> codes,
Handle<Code> value) {
int append_index = codes->number_of_codes();
if (append_index > 0 && codes->code_at(append_index - 1) == *value) {
// Do not append the code if it is already in the array. It is sufficient
// to check only the last element because we process the embedded maps of
// an optimized code object in one batch.
return codes;
}
if (codes->length() < kCodesIndex + append_index + 1) {
Factory* factory = codes->GetIsolate()->factory();
int capacity = kCodesIndex + append_index + 1;
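// Grow by an extra 25% once past a handful of entries so that repeated
// appends do not have to copy the array every time.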
if (capacity > 5) capacity = capacity * 5 / 4;
Handle<DependentCodes> new_codes = Handle<DependentCodes>::cast(
factory->CopySizeFixedArray(codes, capacity));
// The number of codes can change after GC.
append_index = codes->number_of_codes();
for (int i = 0; i < append_index; i++) {
codes->clear_code_at(i);
}
codes = new_codes;
}
codes->set_code_at(append_index, *value);
codes->set_number_of_codes(append_index + 1);
return codes;
}
MaybeObject* JSReceiver::SetPrototype(Object* value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
......
......@@ -862,6 +862,7 @@ class MaybeObject BASE_EMBEDDED {
V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCodes) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
......@@ -4395,6 +4396,12 @@ class Code: public HeapObject {
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
bool allowed_in_shared_map_code_cache();
// Get the safepoint entry for the given pc.
......@@ -4600,11 +4607,16 @@ class Code: public HeapObject {
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
static const int kMarkedForDeoptimizationFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount + 1;
static const int kMarkedForDeoptimizationBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
kMarkedForDeoptimizationBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
......@@ -4614,6 +4626,9 @@ class Code: public HeapObject {
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
class MarkedForDeoptimizationField: public BitField<bool,
kMarkedForDeoptimizationFirstBit,
kMarkedForDeoptimizationBitCount> {}; // NOLINT
// KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStubMajorKeyFirstBit = 0;
......@@ -4661,6 +4676,27 @@ class Code: public HeapObject {
};
// This class describes the layout of the dependent codes array of a map.
// The first element contains the number of codes as a Smi. The subsequent
// elements contain code objects. If the number of codes is less than the
// length of the array, the remaining elements are filled with undefined.
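// For example, an array with room for three codes that currently holds two
// looks like this: [ Smi(2), code, code, undefined ].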
class DependentCodes: public FixedArray {
public:
inline int number_of_codes();
inline void set_number_of_codes(int value);
inline Code* code_at(int i);
inline void set_code_at(int i, Code* value);
inline Object** code_slot_at(int i);
inline void clear_code_at(int i);
static Handle<DependentCodes> Append(Handle<DependentCodes> codes,
Handle<Code> value);
static inline DependentCodes* cast(Object* object);
private:
static const int kNumberOfCodesIndex = 0;
static const int kCodesIndex = 1;
};
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
......@@ -4890,6 +4926,9 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
// [dependent codes]: list of optimized codes that have this map embedded.
DECL_ACCESSORS(dependent_codes, DependentCodes)
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with prototype transitions and the
// back pointer will be moved into the prototype transitions array if
......@@ -5099,6 +5138,14 @@ class Map: public HeapObject {
void ZapPrototypeTransitions();
void ZapTransitions();
bool CanTransition() {
// Only JSObject and subtypes have map transitions and back pointers.
STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
inline void AddDependentCode(Handle<Code> code);
// Dispatched behavior.
DECLARE_PRINTER(Map)
DECLARE_VERIFIER(Map)
......@@ -5147,7 +5194,8 @@ class Map: public HeapObject {
static const int kDescriptorsOffset =
kTransitionsOrBackPointerOffset + kPointerSize;
static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
static const int kDependentCodesOffset = kCodeCacheOffset + kPointerSize;
static const int kBitField3Offset = kDependentCodesOffset + kPointerSize;
static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
......
......@@ -83,6 +83,10 @@ tools/tickprocessor: PASS, SKIP if ($arch == android_arm || $arch == android_ia3
# This test is the same as math-floor-of-div for non ARM architectures.
math-floor-of-div-nosudiv: PASS, SKIP if ($arch != arm && $arch != android_arm)
##############################################################################
# Long-running test that reproduces a memory leak; it should be run manually.
regress/regress-2073: SKIP
##############################################################################
[ $arch == arm || $arch == android_arm ]
......
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Running this test with --trace_gc will show heap size growth due to
// leaking objects via embedded maps in optimized code.
var counter = 0;
function nextid() {
counter += 1;
return counter;
}
function Scope() {
this.id = nextid();
this.parent = null;
this.left = null;
this.right = null;
this.head = null;
this.tail = null;
this.counter = 0;
}
Scope.prototype = {
new: function() {
var Child,
child;
Child = function() {};
Child.prototype = this;
child = new Child();
child.id = nextid();
child.parent = this;
child.left = this.last;
child.right = null;
child.head = null;
child.tail = null;
child.counter = 0;
if (this.head) {
this.tail.right = child;
this.tail = child;
} else {
this.head = this.tail = child;
}
return child;
},
destroy: function() {
if ($root == this) return;
var parent = this.parent;
if (parent.head == this) parent.head = this.right;
if (parent.tail == this) parent.tail = this.left;
if (this.left) this.left.right = this.right;
if (this.right) this.right.left = this.left;
}
};
function inc(scope) {
scope.counter = scope.counter + 1;
}
var $root = new Scope();
n = 100000;
m = 10;
function doit() {
var a = $root.new();
var b = a.new();
inc(b);
if (i > m) $root.head.destroy();
}
for (var i = 0; i < n; i++) {
doit();
}