Commit 19875428 authored by erik.corry@gmail.com

Fix invariant so that we cannot record relocation slots for white objects
when compacting.  Add flag for incremental code compaction.

Review URL: https://chromiumcodereview.appspot.com/10907174

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12483 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a64410d9
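
The core of the change is an invariant on slot recording during incremental compaction: a relocation slot may only be recorded when the host object is already black, since grey objects will be rescanned anyway and white objects may not survive the cycle, leaving their typed slots uninterpretable. The sketch below is purely illustrative and uses hypothetical names (Color, ShouldRecordSlot); it is not V8 code.

// Illustrative sketch only (not V8 source): models the invariant this commit
// enforces in BaseRecordWrite. Slots are recorded only when compacting and
// only for black (already scanned) host objects.
#include <cassert>

enum class Color { kWhite, kGrey, kBlack };

// Mirrors the new BaseRecordWrite logic: bail out unless we are compacting
// and the host object has already been marked black.
bool ShouldRecordSlot(bool is_compacting, Color host_color) {
  if (!is_compacting) return false;
  return host_color == Color::kBlack;
}

int main() {
  // White and grey hosts never get slots recorded; black hosts do.
  assert(!ShouldRecordSlot(true, Color::kWhite));
  assert(!ShouldRecordSlot(true, Color::kGrey));
  assert(ShouldRecordSlot(true, Color::kBlack));
  // Outside a compacting cycle nothing is recorded at all.
  assert(!ShouldRecordSlot(false, Color::kBlack));
  return 0;
}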
@@ -412,6 +412,8 @@ DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
 DEFINE_bool(compact_code_space, true,
             "Compact code space on full non-incremental collections")
+DEFINE_bool(incremental_code_compaction, false,
+            "Compact code space on full incremental collections")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
......
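
For completeness, one way an embedder could turn the new flag on is V8's existing public flag-string entry point; the helper below is only a sketch and is not part of this commit (the wrapper function name is hypothetical, and the exact header path depends on the V8 checkout in use).

// Sketch: enable the flag added above from embedder code before marking
// starts. V8::SetFlagsFromString is the long-standing public API for passing
// "--flag" style options.
#include "v8.h"

void EnableIncrementalCodeCompaction() {
  static const char kFlags[] = "--incremental_code_compaction";
  v8::V8::SetFlagsFromString(kFlags, static_cast<int>(sizeof(kFlags) - 1));
}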
@@ -48,7 +48,9 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
     // Object is either grey or white. It will be scanned if survives.
     return false;
   }
-  return true;
+  if (!is_compacting_) return false;
+  MarkBit obj_bit = Marking::MarkBitFrom(obj);
+  return Marking::IsBlack(obj_bit);
 }
......
@@ -67,7 +67,7 @@ void IncrementalMarking::TearDown() {
 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                          Object** slot,
                                          Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
     MarkBit obj_bit = Marking::MarkBitFrom(obj);
     if (Marking::IsBlack(obj_bit)) {
       // Object is not going to be rescanned we need to record the slot.
@@ -127,9 +127,9 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                     Object** slot,
                                                     Code* value) {
-  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+  if (BaseRecordWrite(host, slot, value)) {
     ASSERT(slot != NULL);
     heap_->mark_compact_collector()->
         RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
......
@@ -132,6 +132,12 @@ class IncrementalMarking {
                                     Object** slot,
                                     Isolate* isolate);
+  // Record a slot for compaction.  Returns false for objects that are
+  // guaranteed to be rescanned or not guaranteed to survive.
+  //
+  // No slots in white objects should be recorded, as some slots are typed and
+  // cannot be interpreted correctly if the underlying object does not survive
+  // the incremental cycle (stays white).
   INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWriteIntoCode(HeapObject* obj,
......
@@ -343,7 +343,9 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
     CollectEvacuationCandidates(heap()->old_pointer_space());
     CollectEvacuationCandidates(heap()->old_data_space());
 
-    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
+    if (FLAG_compact_code_space &&
+        (mode == NON_INCREMENTAL_COMPACTION ||
+         FLAG_incremental_code_compaction)) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
@@ -1443,7 +1445,7 @@ class MarkCompactMarkingVisitor
     } else {
       // Don't visit code object.
 
-      // Visit shared function info to avoid double checking of it's
+      // Visit shared function info to avoid double checking of its
       // flushability.
       SharedFunctionInfo* shared_info = object->unchecked_shared();
       MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
@@ -1704,7 +1706,7 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
   // For optimized functions we should retain both non-optimized version
-  // of it's code and non-optimized version of all inlined functions.
+  // of its code and non-optimized version of all inlined functions.
   // This is required to support bailing out from inlined code.
   DeoptimizationInputData* data =
       DeoptimizationInputData::cast(code->deoptimization_data());
@@ -2300,7 +2302,7 @@ void MarkCompactCollector::MarkLiveObjects() {
     // non-incremental marker can deal with them as if overflow
     // occured during normal marking.
     // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy it's overflow state.
+    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
     incremental_marking_overflowed =
         incremental_marking->marking_deque()->overflowed();
......
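
The condition in StartCompaction above now reads: compact the code space on non-incremental collections as before, and on incremental collections only when --incremental_code_compaction is set. A standalone truth-table sketch of that decision (the names below are hypothetical, not V8's):

// Sketch (not V8 source): the decision StartCompaction now makes for the
// code space, written as a standalone predicate.
#include <cassert>

enum class CompactionMode { kIncremental, kNonIncremental };

bool ShouldCompactCodeSpace(bool flag_compact_code_space,
                            bool flag_incremental_code_compaction,
                            CompactionMode mode) {
  return flag_compact_code_space &&
         (mode == CompactionMode::kNonIncremental ||
          flag_incremental_code_compaction);
}

int main() {
  // Full (non-incremental) GCs keep compacting code space by default.
  assert(ShouldCompactCodeSpace(true, false, CompactionMode::kNonIncremental));
  // Incremental cycles compact code space only when the new flag is on.
  assert(!ShouldCompactCodeSpace(true, false, CompactionMode::kIncremental));
  assert(ShouldCompactCodeSpace(true, true, CompactionMode::kIncremental));
  return 0;
}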
@@ -304,6 +304,26 @@ class SlotsBuffer {
     NUMBER_OF_SLOT_TYPES
   };
 
+  static const char* SlotTypeToString(SlotType type) {
+    switch (type) {
+      case EMBEDDED_OBJECT_SLOT:
+        return "EMBEDDED_OBJECT_SLOT";
+      case RELOCATED_CODE_OBJECT:
+        return "RELOCATED_CODE_OBJECT";
+      case CODE_TARGET_SLOT:
+        return "CODE_TARGET_SLOT";
+      case CODE_ENTRY_SLOT:
+        return "CODE_ENTRY_SLOT";
+      case DEBUG_TARGET_SLOT:
+        return "DEBUG_TARGET_SLOT";
+      case JS_RETURN_SLOT:
+        return "JS_RETURN_SLOT";
+      case NUMBER_OF_SLOT_TYPES:
+        return "NUMBER_OF_SLOT_TYPES";
+    }
+    return "UNKNOWN SlotType";
+  }
+
   void UpdateSlots(Heap* heap);
   void UpdateSlotsWithFilter(Heap* heap);
......
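
The new SlotTypeToString helper has no callers in this diff; a plausible use is readable debug output when tracing which slots get recorded during compaction, as in the sketch below. The tracing function is hypothetical, assumes V8's internal headers are available, and uses std::printf in place of V8's own printing helpers.

// Sketch only: turn recorded slots into readable trace lines using the
// SlotTypeToString helper added above. TraceRecordedSlot is hypothetical
// and not part of this change.
#include <cstdio>

static void TraceRecordedSlot(SlotsBuffer::SlotType type, void* slot) {
  std::printf("recording %s at %p\n",
              SlotsBuffer::SlotTypeToString(type), slot);
}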