Commit 610281f4 authored by lrn@chromium.org's avatar lrn@chromium.org

Fix calculation of live-bytes in pages.

The "live bytes" count is *really* a "marked black" count - i.e., the count of bytes *known* to be live.

Fix aggravating bug on X64 where assembler code used a value that was off
by a factor of 2^31.

Ensure that sweeping clears live-bytes. Added other missing increments.

Added print statements to trace live-byte modifications, under a flag.

Still a few cases of undercounting left.

(New issue to merge from GC branch to bleeding_edge)

Review URL: http://codereview.chromium.org/7970009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9338 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 76c86943
......@@ -459,6 +459,9 @@ DEFINE_bool(collect_heap_spill_statistics, false,
DEFINE_bool(trace_isolates, false, "trace isolate state changes")
DEFINE_bool(trace_live_byte_count, false,
"trace updates to page live byte count")
// VM state
DEFINE_bool(log_state_changes, false, "Log state changes.")
......
......@@ -2595,7 +2595,7 @@ void MacroAssembler::EnsureNotWhite(
ASSERT(SeqAsciiString::kMaxSize <=
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize);
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
add(Operand(length),
Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(Operand(length),
......@@ -2609,6 +2609,11 @@ void MacroAssembler::EnsureNotWhite(
and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
length);
if (FLAG_debug_code) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
Check(less_equal, "Live Bytes Count overflow chunk size");
}
bind(&done);
}
......
......@@ -114,8 +114,10 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
ASSERT(obj->Size() >= 2*kPointerSize);
ASSERT(IsMarking());
Marking::BlackToGrey(mark_bit);
int obj_size = obj->Size();
MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj->Size();
bytes_rescanned_ = old_bytes_rescanned + obj_size;
if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
// If we have queued twice the heap size for rescanning then we are
......
......@@ -403,7 +403,12 @@ void IncrementalMarking::Start() {
// Marks |obj| grey without pushing it onto the marking deque.
// If the object is currently marked black, its size is first subtracted
// from the owning page's live-byte count: black bytes are counted as
// "known live", and downgrading to grey means the object will be
// re-counted when it is blackened again.
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      // Undo this object's contribution to the live-byte counter before
      // changing its color away from black.
      MemoryChunk::IncrementLiveBytes(heap_obj->address(),
                                      -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}
......@@ -569,14 +574,20 @@ void IncrementalMarking::Hurry() {
}
if (FLAG_cleanup_code_caches_at_gc) {
Marking::GreyToBlack(Marking::MarkBitFrom(heap_->polymorphic_code_cache()));
PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
MemoryChunk::IncrementLiveBytes(poly_cache->address(),
PolymorphicCodeCache::kSize);
}
Object* context = heap_->global_contexts_list();
while (!context->IsUndefined()) {
NormalizedMapCache* cache = Context::cast(context)->normalized_map_cache();
MarkBit mark_bit = Marking::MarkBitFrom(cache);
if (Marking::IsGrey(mark_bit)) Marking::GreyToBlack(mark_bit);
if (Marking::IsGrey(mark_bit)) {
Marking::GreyToBlack(mark_bit);
MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
}
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
}
......
......@@ -138,13 +138,16 @@ class IncrementalMarking {
inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
// Does white->black or grey->grey
// Does white->black or keeps gray or black color. Returns true if converting
// white to black.
inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
ASSERT(!Marking::IsImpossible(mark_bit));
if (mark_bit.Get()) return false;
if (mark_bit.Get()) {
// Grey or black: Keep the color.
return false;
}
mark_bit.Set();
ASSERT(!Marking::IsWhite(mark_bit));
ASSERT(!Marking::IsImpossible(mark_bit));
ASSERT(Marking::IsBlack(mark_bit));
return true;
}
......
......@@ -295,7 +295,8 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
ASSERT(p->markbits()->IsClean());
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
}
......@@ -304,7 +305,8 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
while (it.has_next()) {
NewSpacePage* p = it.next();
ASSERT(p->markbits()->IsClean());
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
}
......@@ -402,7 +404,7 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
}
static const char* AllocationSpaceName(AllocationSpace space) {
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE: return "NEW_SPACE";
case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
......@@ -2528,9 +2530,10 @@ void MarkCompactCollector::EvacuateNewSpace() {
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
// Don't bother decrementing live bytes count. We'll discard the
// entire page at the end.
int size = object->Size();
survivors_size += size;
MemoryChunk::IncrementLiveBytes(object->address(), -size);
// Aggressively promote young survivors to the old space.
if (TryPromoteObject(object, size)) {
......@@ -2621,6 +2624,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
// Clear marking bits for current cell.
cells[cell_index] = 0;
}
p->ResetLiveBytes();
}
......@@ -2819,6 +2823,7 @@ static void SweepPrecisely(PagedSpace* space,
if (free_start != p->ObjectAreaEnd()) {
space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
}
p->ResetLiveBytes();
}
......@@ -3308,6 +3313,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
if (cell_index == last_cell_index) {
freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
static_cast<int>(size)));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
// Grow the size of the start-of-page free space a little to get up to the
......@@ -3364,6 +3370,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
static_cast<int>(block_address - free_start));
}
p->ResetLiveBytes();
return freed_bytes;
}
......
......@@ -765,6 +765,8 @@ class MarkCompactCollector {
};
const char* AllocationSpaceName(AllocationSpace space);
} } // namespace v8::internal
#endif // V8_MARK_COMPACT_H_
......@@ -432,6 +432,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
chunk->SetFlag(WAS_SWEPT_PRECISELY);
......@@ -822,8 +823,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
ASSERT(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
// TODO(1672): Page live bytes are off for some tests.
// CHECK_LE(black_size, page->LiveBytes());
CHECK_LE(black_size, page->LiveBytes());
}
}
#endif
......@@ -2504,6 +2505,31 @@ void LargeObjectSpace::CollectCodeStatistics() {
}
}
}
// Debug helper: dumps every object on this page, flagging marked (black)
// objects with '!', and prints the total marked byte size next to the
// page's LiveBytes() counter so discrepancies between the two can be
// spotted by eye.
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  // Uses the GC-safe size function since this may run mid-collection.
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  // NOTE(review): if the page's live-byte bookkeeping is correct, the
  // two values below should match.
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}
#endif // DEBUG
} } // namespace v8::internal
......@@ -452,12 +452,27 @@ class MemoryChunk {
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
// Resets the live-byte ("marked black") counter to zero, e.g. after the
// page has been swept or evacuated.  When --trace_live_byte_count is
// set, logs the chunk address and the value being discarded.
void ResetLiveBytes() {
  if (FLAG_trace_live_byte_count) {
    PrintF("ResetLiveBytes:%p:%x->0\n",
           static_cast<void*>(this), live_byte_count_);
  }
  live_byte_count_ = 0;
}
// Adjusts the live-byte ("marked black") counter by |by|, which may be
// negative.  When --trace_live_byte_count is set, logs the old value,
// the signed delta, and the new value.
void IncrementLiveBytes(int by) {
  // The unsigned cast makes any negative count compare greater than
  // size_, so these asserts catch underflow as well as overflow —
  // before and after the update.
  ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  if (FLAG_trace_live_byte_count) {
    printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
           static_cast<void*>(this), live_byte_count_,
           ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
           live_byte_count_ + by);
  }
  live_byte_count_ += by;
  ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
}
int LiveBytes() {
ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
return live_byte_count_;
}
int LiveBytes() { return live_byte_count_; }
// Convenience overload: resolves the chunk that contains |address| and
// forwards to the instance method above.
static void IncrementLiveBytes(Address address, int by) {
  MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
}
......@@ -467,10 +482,11 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
static const intptr_t kLiveBytesOffset =
kPointerSize + kPointerSize + kPointerSize + kPointerSize +
kPointerSize + kPointerSize + kPointerSize + kPointerSize +
kIntSize;
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
kPointerSize + kPointerSize + kPointerSize + kIntSize;
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
......@@ -700,6 +716,10 @@ class Page : public MemoryChunk {
void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
#ifdef DEBUG
void Print();
#endif // DEBUG
friend class MemoryAllocator;
};
......
......@@ -4184,7 +4184,7 @@ void MacroAssembler::EnsureNotWhite(
addq(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, Immediate(2 + kSmiTagSize));
shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(length, Immediate(~kObjectAlignmentMask));
......
......@@ -66,12 +66,7 @@ debug-liveedit-patch-positions-replace: SKIP
# Known failures after merge from the bleeding edge (issue 1672)
debug-scopes: PASS, SKIP if ($mode == debug)
mirror-object: PASS, SKIP if ($mode == debug)
debug-return-value: PASS, SKIP if ($mode == debug)
tickprocessor: PASS, SKIP if ($mode == debug)
debug-blockscopes: PASS, SKIP if ($mode == debug)
mirror-error: PASS, SKIP if ($mode == debug)
mirror-array: PASS, SKIP if ($mode == debug)
debug-return-value: PASS, SKIP if ($mode == debug)
harmony/debug-blockscopes: PASS, SKIP if ($mode == debug)
##############################################################################
[ $arch == arm ]
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.