Commit aba09dcf authored by erik.corry@gmail.com

Allow partial scanning of large arrays in order to avoid
mark stack overflow.  This is a reland of r12609 -
https://chromiumcodereview.appspot.com/10959011 - but
this time VisitPointers has been fixed (it used to assume
that the first slot was on the first page of a large object).

Review URL: https://chromiumcodereview.appspot.com/10996018

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12619 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e8ffc2be
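
[Editor's note] Context for the fix, as an editorial summary rather than part of the commit: r12609 broke because VisitPointers derived the page used for slot recording from the first slot of the range it was given, and once a large array is scanned in chunks, a chunk can begin on one of the object's trailing pages. The patch therefore threads an explicit anchor slot (always the object's first slot) through every VisitPointers signature. A minimal sketch of the new shape, mirroring the StaticNewSpaceVisitor change further down; the RecordSlot call in the comment is an assumption about the collapsed marking visitor:

  // New signature: `anchor` is always the object's first slot, even when
  // [start, end) covers a chunk in the middle of a large object.
  static inline void VisitPointers(
      Heap* heap, Object** anchor, Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      StaticVisitor::VisitPointer(heap, p);
      // A full marking visitor would additionally record the slot against
      // the page of `anchor`, not the page of `p` (see RecordSlot below):
      //   heap->mark_compact_collector()->RecordSlot(anchor, p, *p);
    }
  }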
@@ -1359,11 +1359,12 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
   if (external_string_table_.new_space_strings_.is_empty()) return;
-  Object** start = &external_string_table_.new_space_strings_[0];
-  Object** end = start + external_string_table_.new_space_strings_.length();
-  Object** last = start;
+  Object** start_slot = &external_string_table_.new_space_strings_[0];
+  Object** end_slot =
+      start_slot + external_string_table_.new_space_strings_.length();
+  Object** last = start_slot;
-  for (Object** p = start; p < end; ++p) {
+  for (Object** p = start_slot; p < end_slot; ++p) {
     ASSERT(InFromSpace(*p));
     String* target = updater_func(this, p);
@@ -1381,8 +1382,8 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     }
   }
-  ASSERT(last <= end);
-  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
+  ASSERT(last <= end_slot);
+  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start_slot));
 }
@@ -1391,9 +1392,10 @@ void Heap::UpdateReferencesInExternalStringTable(
   // Update old space string references.
   if (external_string_table_.old_space_strings_.length() > 0) {
-    Object** start = &external_string_table_.old_space_strings_[0];
-    Object** end = start + external_string_table_.old_space_strings_.length();
-    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+    Object** start_slot = &external_string_table_.old_space_strings_[0];
+    Object** end_slot =
+        start_slot + external_string_table_.old_space_strings_.length();
+    for (Object** p = start_slot; p < end_slot; ++p) *p = updater_func(this, p);
   }
   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
@@ -6790,11 +6792,11 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   // Scan the object body.
   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
     // This is specialized to scan Context's properly.
-    Object** start = reinterpret_cast<Object**>(obj->address() +
-                                                Context::kHeaderSize);
-    Object** end = reinterpret_cast<Object**>(obj->address() +
-        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
-    mark_visitor->VisitPointers(start, end);
+    Object** start_slot = reinterpret_cast<Object**>(obj->address() +
+                                                     Context::kHeaderSize);
+    Object** end_slot = reinterpret_cast<Object**>(obj->address() +
+        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
+    mark_visitor->VisitPointers(start_slot, end_slot);
   } else {
     obj->IterateBody(map_p->instance_type(),
                      obj->SizeFromMap(map_p),
...
@@ -83,6 +83,9 @@ void MarkCompactCollector::RecordSlot(Object** anchor_slot,
                                       Object** slot,
                                       Object* object) {
   Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  // Ensure the anchor slot is on the first 'page' of a large object.
+  ASSERT(Page::FromAddress(reinterpret_cast<Address>(anchor_slot))->owner() !=
+         NULL);
   if (object_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
...
@@ -240,6 +240,35 @@ class MarkingDeque {
   int mask() { return mask_; }
   void set_top(int top) { top_ = top; }

+  int space_left() {
+    // If we already overflowed we may as well just say there is lots of
+    // space left.
+    if (overflowed_) return mask_ + 1;
+    if (IsEmpty()) return mask_ + 1;
+    if (IsFull()) return 0;
+    return (bottom_ - top_) & mask_;
+  }
+
+#ifdef DEBUG
+  const char* Status() {
+    if (overflowed_) return "Overflowed";
+    if (IsEmpty()) return "Empty";
+    if (IsFull()) return "Full";
+    int oct = (((top_ - bottom_) & mask_) * 8) / (mask_ + 1);
+    switch (oct) {
+      case 0: return "Almost empty";
+      case 1: return "1/8 full";
+      case 2: return "2/8 full";
+      case 3: return "3/8 full";
+      case 4: return "4/8 full";
+      case 5: return "5/8 full";
+      case 6: return "6/8 full";
+      case 7: return "7/8 full";
+    }
+    return "??";
+  }
+#endif
+
  private:
   HeapObject** array_;
   // array_[(top - 1) & mask_] is the top element in the deque.  The Deque is
@@ -566,6 +595,10 @@ class MarkCompactCollector {
   bool is_compacting() const { return compacting_; }

+  // Find the large objects that are not completely scanned, but have been
+  // postponed to later.
+  static void ProcessLargePostponedArrays(Heap* heap, MarkingDeque* deque);
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
...
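
[Editor's note] The implementation of ProcessLargePostponedArrays lives in the collapsed mark-compact.cc diff. A plausible sketch, assuming it walks the large object space with a LargeObjectIterator and requeues partially scanned objects; the body below is conjecture, not the commit's code:

  void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap,
                                                         MarkingDeque* deque) {
    // Walk the large object space and requeue every object whose scan was
    // cut short because the marking deque ran out of space.
    LargeObjectIterator it(heap->lo_space());
    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      if (chunk->IsPartiallyScanned()) {
        deque->PushBlack(obj);  // rescan resumes at PartiallyScannedProgress()
      }
    }
  }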
@@ -262,9 +262,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
       map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
     MarkMapContents(heap, map_object);
   } else {
-    StaticVisitor::VisitPointers(heap,
-        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
-        HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+    Object** start_slot =
+        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset);
+    Object** end_slot =
+        HeapObject::RawField(object, Map::kPointerFieldsEndOffset);
+    StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
   }
 }
@@ -286,9 +288,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
     Map* map, HeapObject* object) {
   int last_property_offset =
       JSRegExp::kSize + kPointerSize * map->inobject_properties();
-  StaticVisitor::VisitPointers(map->GetHeap(),
-      HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
-      HeapObject::RawField(object, last_property_offset));
+  Object** start_slot =
+      HeapObject::RawField(object, JSRegExp::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, last_property_offset);
+  StaticVisitor::VisitPointers(
+      map->GetHeap(), start_slot, start_slot, end_slot);
 }
@@ -315,9 +320,11 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
   // Mark the pointer fields of the Map. Since the transitions array has
   // been marked already, it is fine that one of these fields contains a
   // pointer to it.
-  StaticVisitor::VisitPointers(heap,
-      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-      HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+  Object** start_slot =
+      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
+  Object** end_slot =
+      HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
 }
...
@@ -213,7 +213,7 @@ class BodyVisitorBase : public AllStatic {
                                                   start_offset);
     Object** end_slot = reinterpret_cast<Object**>(object->address() +
                                                    end_offset);
-    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+    StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
   }
 };
@@ -283,21 +283,26 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
     return table_.GetVisitor(map)(map, obj);
   }

-  static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
+  static inline void VisitPointers(
+      Heap* heap, Object** anchor, Object** start, Object** end) {
     for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
   }

  private:
   static inline int VisitJSFunction(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
+    Object** start_slot =
+        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
     VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  start_slot,
+                  start_slot,
                   HeapObject::RawField(object, JSFunction::kCodeEntryOffset));

     // Don't visit code entry. We are using this visitor only during scavenges.
     VisitPointers(
         heap,
+        start_slot,
         HeapObject::RawField(object,
                              JSFunction::kCodeEntryOffset + kPointerSize),
         HeapObject::RawField(object,
...
@@ -2679,12 +2679,10 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   HeapObject* object = page->GetObject();

-#ifdef DEBUG
-  // Make the object consistent so the heap can be vefified in OldSpaceStep.
+  // Make the object consistent so the large object space can be traversed.
   reinterpret_cast<Object**>(object->address())[0] =
       heap()->fixed_array_map();
   reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-#endif

   heap()->incremental_marking()->OldSpaceStep(object_size);
   return object;
...
@@ -400,6 +400,15 @@ class MemoryChunk {
     WAS_SWEPT_PRECISELY,
     WAS_SWEPT_CONSERVATIVELY,

+    // Used for large objects only. Indicates that the object has been
+    // partially scanned by the incremental mark-sweep GC. Objects that have
+    // been partially scanned are marked black so that the write barrier
+    // triggers for them, and they are counted as live bytes. If the mutator
+    // writes to them they may be turned grey and subtracted from the live byte
+    // list. They move back to the marking deque either by an iteration over
+    // the large object space or in the write barrier.
+    IS_PARTIALLY_SCANNED,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -420,6 +429,25 @@ class MemoryChunk {
                                    (1 << IN_FROM_SPACE) |
                                    (1 << IN_TO_SPACE);

+  static const int kIsPartiallyScannedMask = 1 << IS_PARTIALLY_SCANNED;
+
+  void SetPartiallyScannedProgress(int progress) {
+    SetFlag(IS_PARTIALLY_SCANNED);
+    partially_scanned_progress_ = progress;
+  }
+
+  bool IsPartiallyScanned() {
+    return IsFlagSet(IS_PARTIALLY_SCANNED);
+  }
+
+  void SetCompletelyScanned() {
+    ClearFlag(IS_PARTIALLY_SCANNED);
+  }
+
+  int PartiallyScannedProgress() {
+    ASSERT(IsPartiallyScanned());
+    return partially_scanned_progress_;
+  }
+
   void SetFlag(int flag) {
     flags_ |= static_cast<uintptr_t>(1) << flag;
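
[Editor's note] Combined with MarkingDeque::space_left() above, these accessors make large-array scanning resumable. A hedged sketch of the visit path, assuming progress is tracked as an element index (the real code in the collapsed diffs may track byte offsets, and VisitLargeArray is a hypothetical name):

  template <class StaticVisitor>
  static inline void VisitLargeArray(Heap* heap, FixedArray* array,
                                     MarkingDeque* deque) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
    int from =
        chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
    // Scan only as many elements as the marking deque can absorb.
    int to = Min(array->length(), from + deque->space_left());
    Object** anchor = HeapObject::RawField(array, FixedArray::kHeaderSize);
    StaticVisitor::VisitPointers(heap, anchor, anchor + from, anchor + to);
    if (to < array->length()) {
      chunk->SetPartiallyScannedProgress(to);  // stays black; rescanned later
    } else {
      chunk->SetCompletelyScanned();
    }
  }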
@@ -506,8 +534,14 @@ class MemoryChunk {
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
+  static const size_t kPartiallyScannedProgress =
+      kWriteBarrierCounterOffset + kPointerSize;

-  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
+  // Actually the partially_scanned_progress_ member is only an int, but on
+  // 64 bit the size of MemoryChunk gets rounded up to a 64 bit size so we
+  // have to have the header start kPointerSize after the
+  // partially_scanned_progress_ member.
+  static const size_t kHeaderSize = kPartiallyScannedProgress + kPointerSize;

   static const int kBodyOffset =
       CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
@@ -644,6 +678,7 @@ class MemoryChunk {
   SlotsBuffer* slots_buffer_;
   SkipList* skip_list_;
   intptr_t write_barrier_counter_;
+  int partially_scanned_progress_;

   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
...
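
[Editor's note] A quick sanity check on the header arithmetic, not part of the patch: the int progress field is given a full pointer-sized cell so that kHeaderSize stays pointer-aligned on 64-bit targets.

  // Illustrative only: on 64-bit, kPartiallyScannedProgress ==
  // kWriteBarrierCounterOffset + 8 and kHeaderSize adds another 8,
  // although sizeof(int) == 4; the remaining 4 bytes are padding.
  STATIC_ASSERT(MemoryChunk::kHeaderSize ==
                MemoryChunk::kPartiallyScannedProgress + kPointerSize);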