Commit a54e007a authored by erik.corry@gmail.com

Allow partial scanning of large arrays in order to avoid
mark stack overflow.

Review URL: https://chromiumcodereview.appspot.com/10959011

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12609 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0f37cb0b
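
For readers skimming the diff below: the core idea of this change is to visit a large FixedArray in bounded chunks and record per-chunk progress on its page, so that a single huge array neither has to be rescanned from the start on every increment nor pushed through the marking deque in one step. What follows is a minimal standalone sketch of that pattern under assumed, simplified data structures; HugeArray and VisitHugeArrayChunk are illustrative names, not V8 API. The real implementation in this commit stores the progress on the MemoryChunk and interacts with the mark bits.

// Sketch only: chunked, resumable scanning of one large array.
#include <algorithm>
#include <cstdio>
#include <vector>

namespace {

const int kScanningChunk = 32 * 1024;  // Slots visited per increment.

struct HugeArray {
  std::vector<int> slots;         // Stand-in for the array's pointer fields.
  bool partially_scanned = false;
  int scan_progress = 0;          // Index of the first unvisited slot.
};

// Visits at most kScanningChunk slots and records where to resume.
// Returns the number of slots visited in this increment.
int VisitHugeArrayChunk(HugeArray* array) {
  int length = static_cast<int>(array->slots.size());
  int from = array->partially_scanned ? array->scan_progress : 0;
  int to = std::min(from + kScanningChunk, length);
  for (int i = from; i < to; i++) {
    // A real collector would mark the object referenced by slot i here.
  }
  if (to == length) {
    array->partially_scanned = false;  // Completely scanned.
    array->scan_progress = 0;
  } else {
    array->partially_scanned = true;   // Resume from 'to' next time.
    array->scan_progress = to;
  }
  return to - from;
}

}  // namespace

int main() {
  HugeArray array;
  array.slots.resize(100 * 1024);
  int increments = 0;
  do {
    VisitHugeArrayChunk(&array);
    increments++;
  } while (array.partially_scanned);
  std::printf("scanned %d slots in %d increments\n",
              static_cast<int>(array.slots.size()), increments);
}
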
@@ -192,6 +192,41 @@ class IncrementalMarkingMarkingVisitor
void>::Visit(map, object);
}
  static const int kScanningChunk = 32 * 1024;

  static int VisitHugeArray(FixedArray* array) {
    Heap* heap = array->GetHeap();
    MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
    Object** start = array->data_start();
    int length = array->length();
    if (chunk->owner()->identity() != LO_SPACE) {
      VisitPointers(heap, start, start + length);
      return length;
    }
    int from =
        chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
    int to = Min(from + kScanningChunk, length);
    VisitPointers(heap, start + from, start + to);
    if (to == length) {
      // If it went from black to grey while it was waiting for the next bit
      // to be scanned then we have to start the scan again.
      MarkBit mark_bit = Marking::MarkBitFrom(array);
      if (!Marking::IsBlack(mark_bit)) {
        ASSERT(Marking::IsGrey(mark_bit));
        chunk->SetPartiallyScannedProgress(0);
      } else {
        chunk->SetCompletelyScanned();
      }
    } else {
      chunk->SetPartiallyScannedProgress(to);
    }
    return to - from;
  }

  static inline void VisitJSFunction(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    // Iterate over all fields in the body but take care in dealing with
@@ -621,7 +656,8 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
#ifdef DEBUG
MarkBit mark_bit = Marking::MarkBitFrom(obj);
ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)));
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
#endif
}
}
@@ -644,36 +680,57 @@ void IncrementalMarking::Hurry() {
// was stopped.
Map* filler_map = heap_->one_pointer_filler_map();
Map* native_context_map = heap_->native_context_map();
while (!marking_deque_.IsEmpty()) {
HeapObject* obj = marking_deque_.Pop();
// Explicitly skip one word fillers. Incremental markbit patterns are
// correct only for objects that occupy at least two words.
Map* map = obj->map();
if (map == filler_map) {
continue;
} else if (map == native_context_map) {
// Native contexts have weak fields.
IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
} else {
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
if (Marking::IsWhite(map_mark_bit)) {
WhiteToGreyAndPush(map, map_mark_bit);
do {
while (!marking_deque_.IsEmpty()) {
HeapObject* obj = marking_deque_.Pop();
// Explicitly skip one word fillers. Incremental markbit patterns are
// correct only for objects that occupy at least two words.
Map* map = obj->map();
if (map == filler_map) {
continue;
} else if (map == native_context_map) {
// Native contexts have weak fields.
IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
} else if (map->instance_type() == FIXED_ARRAY_TYPE &&
FixedArray::cast(obj)->length() >
IncrementalMarkingMarkingVisitor::kScanningChunk) {
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
if (Marking::IsWhite(map_mark_bit)) {
WhiteToGreyAndPush(map, map_mark_bit);
}
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!Marking::IsBlack(mark_bit)) {
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
} else {
ASSERT(
MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
}
IncrementalMarkingMarkingVisitor::VisitHugeArray(
FixedArray::cast(obj));
} else {
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
if (Marking::IsWhite(map_mark_bit)) {
WhiteToGreyAndPush(map, map_mark_bit);
}
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
}
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
MarkBit mark_bit = Marking::MarkBitFrom(obj);
ASSERT(!Marking::IsBlack(mark_bit));
Marking::MarkBlack(mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
}
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
double end = OS::TimeCurrentMillis();
PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
static_cast<int>(end - start));
}
MarkBit mark_bit = Marking::MarkBitFrom(obj);
Marking::MarkBlack(mark_bit);
}
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
double end = OS::TimeCurrentMillis();
PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
static_cast<int>(end - start));
}
MarkCompactCollector::ProcessLargePostponedArrays(heap_, &marking_deque_);
} while (!marking_deque_.IsEmpty());
}
if (FLAG_cleanup_code_caches_at_gc) {
@@ -794,42 +851,71 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
} else if (state_ == MARKING) {
Map* filler_map = heap_->one_pointer_filler_map();
Map* native_context_map = heap_->native_context_map();
while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
HeapObject* obj = marking_deque_.Pop();
// Explicitly skip one word fillers. Incremental markbit patterns are
// correct only for objects that occupy at least two words.
Map* map = obj->map();
if (map == filler_map) continue;
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
if (Marking::IsWhite(map_mark_bit)) {
WhiteToGreyAndPush(map, map_mark_bit);
}
while (true) {
while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
HeapObject* obj = marking_deque_.Pop();
// TODO(gc) switch to static visitor instead of normal visitor.
if (map == native_context_map) {
// Native contexts have weak fields.
Context* ctx = Context::cast(obj);
// Explicitly skip one word fillers. Incremental markbit patterns are
// correct only for objects that occupy at least two words.
Map* map = obj->map();
if (map == filler_map) continue;
// We will mark cache black with a separate pass
// when we finish marking.
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
int size = obj->SizeFromMap(map);
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
if (Marking::IsWhite(map_mark_bit)) {
WhiteToGreyAndPush(map, map_mark_bit);
}
IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
} else {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
// TODO(gc) switch to static visitor instead of normal visitor.
if (map == native_context_map) {
// Native contexts have weak fields.
Context* ctx = Context::cast(obj);
// We will mark cache black with a separate pass
// when we finish marking.
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
bytes_to_process -= size;
SLOW_ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
} else if (map->instance_type() == FIXED_ARRAY_TYPE &&
FixedArray::cast(obj)->length() >
IncrementalMarkingMarkingVisitor::kScanningChunk) {
SLOW_ASSERT(
Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
bytes_to_process -=
IncrementalMarkingMarkingVisitor::VisitHugeArray(
FixedArray::cast(obj));
MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
if (!Marking::IsBlack(obj_mark_bit)) {
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
}
} else {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
bytes_to_process -= size;
SLOW_ASSERT(
Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
(obj->IsFiller() && Marking::IsWhite(Marking::MarkBitFrom(obj))));
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
}
MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
Marking::MarkBlack(obj_mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
Marking::MarkBlack(obj_mark_bit);
}
if (marking_deque_.IsEmpty()) {
MarkCompactCollector::ProcessLargePostponedArrays(heap_,
&marking_deque_);
if (marking_deque_.IsEmpty()) {
MarkingComplete(action);
break;
}
} else {
ASSERT(bytes_to_process <= 0);
break;
}
}
if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
allocated_ = 0;
......
@@ -1066,6 +1066,30 @@ class MarkCompactMarkingVisitor
}
}
  static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length);

  // The deque is contiguous and allocated in new space, so it is contained in
  // one page minus the header. It also has a size that is a power of two, so
  // it is half the size of a page. We want to scan a number of array entries
  // that is less than the number of entries in the deque, so we divide by 2
  // once more.
  static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize;
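  // Worked example (illustrative values only, not a guarantee of either
  // constant): with a 1 MB page and 8-byte pointers this gives
  // 1 MB / 4 / 8 = 32768 entries per increment, i.e. half of the roughly
  // 1 MB / 2 / 8 = 65536 entries the deque itself can hold.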
  INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) {
    FixedArray* array = FixedArray::cast(object);
    int length = array->length();
    Heap* heap = map->GetHeap();
    if (length < kScanningChunk ||
        MemoryChunk::FromAddress(array->address())->owner()->identity() !=
            LO_SPACE) {
      Object** start = array->data_start();
      VisitPointers(heap, start, start + length);
    } else {
      VisitHugeFixedArray(heap, array, length);
    }
  }
// Marks the object black and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
MarkBit mark = Marking::MarkBitFrom(object);
@@ -1504,6 +1528,27 @@ class MarkCompactMarkingVisitor
};
void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap,
                                                    FixedArray* array,
                                                    int length) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
  ASSERT(chunk->owner()->identity() == LO_SPACE);
  Object** start = array->data_start();
  int from =
      chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
  int to = Min(from + kScanningChunk, length);
  VisitPointers(heap, start + from, start + to);
  if (to == length) {
    chunk->SetCompletelyScanned();
  } else {
    chunk->SetPartiallyScannedProgress(to);
  }
}
void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
FixedArrayBase* fixed_array,
FixedArraySubInstanceType fast_type,
@@ -1645,6 +1690,9 @@ void MarkCompactMarkingVisitor::Initialize() {
  table_.Register(kVisitJSRegExp,
                  &VisitRegExpAndFlushCode);

  table_.Register(kVisitFixedArray,
                  &VisitFixedArray);
if (FLAG_track_gc_object_stats) {
// Copy the visitor table to make call-through possible.
non_count_table_.CopyFrom(&table_);
@@ -2128,6 +2176,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
MarkCompactMarkingVisitor::IterateBody(map, object);
}
ProcessLargePostponedArrays(heap(), &marking_deque_);
// Process encountered weak maps, mark objects only reachable by those
// weak maps and repeat until fix-point is reached.
@@ -2136,12 +2185,29 @@ void MarkCompactCollector::EmptyMarkingDeque() {
}
void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap,
                                                       MarkingDeque* deque) {
  ASSERT(deque->IsEmpty());
  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (!obj->IsFixedArray()) continue;
    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
    if (p->IsPartiallyScanned()) {
      deque->PushBlack(obj);
    }
  }
}
// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
  if (FLAG_trace_gc) {
    PrintPID("Marking queue overflowed\n");
  }
  ASSERT(marking_deque_.overflowed());

  SemiSpaceIterator new_it(heap()->new_space());
......
@@ -240,6 +240,35 @@ class MarkingDeque {
int mask() { return mask_; }
void set_top(int top) { top_ = top; }
  int space_left() {
    // If we already overflowed we may as well just say there is lots of
    // space left.
    if (overflowed_) return mask_ + 1;
    if (IsEmpty()) return mask_ + 1;
    if (IsFull()) return 0;
    return (bottom_ - top_) & mask_;
  }

#ifdef DEBUG
  const char* Status() {
    if (overflowed_) return "Overflowed";
    if (IsEmpty()) return "Empty";
    if (IsFull()) return "Full";
    int oct = (((top_ - bottom_) & mask_) * 8) / (mask_ + 1);
    switch (oct) {
      case 0: return "Almost empty";
      case 1: return "1/8 full";
      case 2: return "2/8 full";
      case 3: return "3/8 full";
      case 4: return "4/8 full";
      case 5: return "5/8 full";
      case 6: return "6/8 full";
      case 7: return "7/8 full";
    }
    return "??";
  }
#endif
private:
HeapObject** array_;
// array_[(top - 1) & mask_] is the top element in the deque. The Deque is
@@ -566,6 +595,10 @@ class MarkCompactCollector {
bool is_compacting() const { return compacting_; }
// Find the large objects that are not completely scanned, but have been
// postponed to later.
static void ProcessLargePostponedArrays(Heap* heap, MarkingDeque* deque);
private:
MarkCompactCollector();
~MarkCompactCollector();
......
@@ -2678,12 +2678,10 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
HeapObject* object = page->GetObject();
#ifdef DEBUG
// Make the object consistent so the heap can be verified in OldSpaceStep.
// Make the object consistent so the large object space can be traversed.
reinterpret_cast<Object**>(object->address())[0] =
heap()->fixed_array_map();
reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
#endif
heap()->incremental_marking()->OldSpaceStep(object_size);
return object;
......
@@ -393,6 +393,15 @@ class MemoryChunk {
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Used for large objects only. Indicates that the object has been
    // partially scanned by the incremental mark-sweep GC. Objects that have
    // been partially scanned are marked black so that the write barrier
    // triggers for them, and they are counted as live bytes. If the mutator
    // writes to them they may be turned grey and subtracted from the live
    // byte count. They move back to the marking deque either through an
    // iteration over the large object space or through the write barrier.
    IS_PARTIALLY_SCANNED,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };
@@ -413,6 +422,25 @@ class MemoryChunk {
(1 << IN_FROM_SPACE) |
(1 << IN_TO_SPACE);
  static const int kIsPartiallyScannedMask = 1 << IS_PARTIALLY_SCANNED;

  void SetPartiallyScannedProgress(int progress) {
    SetFlag(IS_PARTIALLY_SCANNED);
    partially_scanned_progress_ = progress;
  }

  bool IsPartiallyScanned() {
    return IsFlagSet(IS_PARTIALLY_SCANNED);
  }

  void SetCompletelyScanned() {
    ClearFlag(IS_PARTIALLY_SCANNED);
  }

  int PartiallyScannedProgress() {
    ASSERT(IsPartiallyScanned());
    return partially_scanned_progress_;
  }
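
  // Illustrative caller pattern for these accessors (a sketch of the contract
  // used by the visitors in this change, not additional API):
  //   int from = chunk->IsPartiallyScanned()
  //                  ? chunk->PartiallyScannedProgress() : 0;
  //   int to = Min(from + kScanningChunk, length);
  //   ... visit slots [from, to) ...
  //   if (to == length) chunk->SetCompletelyScanned();
  //   else chunk->SetPartiallyScannedProgress(to);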
void SetFlag(int flag) {
flags_ |= static_cast<uintptr_t>(1) << flag;
@@ -488,8 +516,10 @@ class MemoryChunk {
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
static const size_t kHeaderSize =
kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kPartiallyScannedProgress =
kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kHeaderSize = kPartiallyScannedProgress + kIntSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
@@ -625,6 +655,7 @@ class MemoryChunk {
int live_byte_count_;
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
int partially_scanned_progress_;
static MemoryChunk* Initialize(Heap* heap,
Address base,
......