Commit 1cca5468 authored by erik.corry@gmail.com

Remove some asserts to speed up debug mode.

Review URL: http://codereview.chromium.org/8256012

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9606 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent fae807b3
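
The patch below downgrades hot-path ASSERTs to SLOW_ASSERTs or deletes them outright. For orientation, here is a minimal sketch of the distinction the change relies on: a plain ASSERT fires in every debug build, while a SLOW_ASSERT is additionally gated behind an opt-in runtime flag, so expensive checks drop out of ordinary debug runs. The CHECK stand-in and the FLAG_enable_slow_asserts variable below are illustrative assumptions for this sketch, not V8's actual checks.h definitions.

    #include <cstdio>
    #include <cstdlib>

    // Stand-ins for this sketch only; V8 defines its own CHECK and flags.
    static bool FLAG_enable_slow_asserts = false;

    #define CHECK(condition)                                      \
      do {                                                        \
        if (!(condition)) {                                       \
          std::fprintf(stderr, "Check failed: %s\n", #condition); \
          std::abort();                                           \
        }                                                         \
      } while (false)

    #ifdef DEBUG
    // ASSERT runs in every debug build.
    #define ASSERT(condition) CHECK(condition)
    // SLOW_ASSERT only fires when slow checks are explicitly enabled, so
    // costly predicates (e.g. recomputing object->Size()) stay off hot paths.
    #define SLOW_ASSERT(condition)                      \
      do {                                              \
        if (FLAG_enable_slow_asserts) CHECK(condition); \
      } while (false)
    #else
    #define ASSERT(condition) ((void) 0)
    #define SLOW_ASSERT(condition) ((void) 0)
    #endif

With that split, each rewritten site below trades per-operation checking for debug-mode speed while keeping the check available on request.
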
@@ -359,7 +359,6 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
   CopyWords(reinterpret_cast<Object**>(dst),
             reinterpret_cast<Object**>(src),
             byte_size / kPointerSize);
@@ -1443,9 +1443,9 @@ class ScavengingVisitor : public StaticVisitorBase {
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
-    ASSERT((size_restriction != SMALL) ||
-           (object_size <= Page::kMaxHeapObjectSize));
-    ASSERT(object->Size() == object_size);
+    SLOW_ASSERT((size_restriction != SMALL) ||
+                (object_size <= Page::kMaxHeapObjectSize));
+    SLOW_ASSERT(object->Size() == object_size);
     Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
@@ -1678,9 +1678,9 @@ void Heap::SelectScavengingVisitorsTable() {
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  ASSERT(HEAP->InFromSpace(object));
+  SLOW_ASSERT(HEAP->InFromSpace(object));
   MapWord first_word = object->map_word();
-  ASSERT(!first_word.IsForwardingAddress());
+  SLOW_ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
   map->GetHeap()->DoScavengeObject(map, p, object);
 }
@@ -3688,7 +3688,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions. If functions need to be copied we
   // have to be careful to clear the literals array.
-  ASSERT(!source->IsJSFunction());
+  SLOW_ASSERT(!source->IsJSFunction());
   // Make the clone.
   Map* map = source->map();
@@ -3714,7 +3714,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
-    ASSERT(InNewSpace(clone));
+    SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
@@ -3722,7 +3722,8 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
               object_size);
   }
-  ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+  SLOW_ASSERT(
+      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -4802,12 +4803,12 @@ void Heap::IterateAndMarkPointersToFromSpace(Address start,
                    HeapObject::cast(object));
           Object* new_object = *slot;
           if (InNewSpace(new_object)) {
-            ASSERT(Heap::InToSpace(new_object));
-            ASSERT(new_object->IsHeapObject());
+            SLOW_ASSERT(Heap::InToSpace(new_object));
+            SLOW_ASSERT(new_object->IsHeapObject());
             store_buffer_.EnterDirectlyIntoStoreBuffer(
                 reinterpret_cast<Address>(slot));
           }
-          ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+          SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
         } else if (record_slots &&
                    MarkCompactCollector::IsOnEvacuationCandidate(object)) {
           mark_compact_collector()->RecordSlot(slot, slot, object);
@@ -143,9 +143,6 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
 void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  ASSERT(obj->Size() >= 2*kPointerSize);
-  ASSERT(IsMarking());
   Marking::WhiteToGrey(mark_bit);
 }
@@ -739,8 +739,8 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
     }
     MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-    ASSERT(Marking::IsGrey(obj_mark_bit) ||
-           (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+    SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+                (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
     Marking::MarkBlack(obj_mark_bit);
     MemoryChunk::IncrementLiveBytes(obj->address(), size);
   }
@@ -38,7 +38,7 @@ namespace internal {
 MarkBit Marking::MarkBitFrom(Address addr) {
-  MemoryChunk *p = MemoryChunk::FromAddress(addr);
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
   return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
                                          p->ContainsOnlyData());
 }
@@ -61,68 +61,52 @@ class Marking {
   // Impossible markbits: 01
   static const char* kImpossibleBitPattern;
   static inline bool IsImpossible(MarkBit mark_bit) {
-    ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
     return !mark_bit.Get() && mark_bit.Next().Get();
   }

   // Black markbits: 10 - this is required by the sweeper.
   static const char* kBlackBitPattern;
   static inline bool IsBlack(MarkBit mark_bit) {
-    ASSERT(strcmp(kBlackBitPattern, "10") == 0);
-    ASSERT(!IsImpossible(mark_bit));
     return mark_bit.Get() && !mark_bit.Next().Get();
   }

   // White markbits: 00 - this is required by the mark bit clearer.
   static const char* kWhiteBitPattern;
   static inline bool IsWhite(MarkBit mark_bit) {
-    ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
-    ASSERT(!IsImpossible(mark_bit));
     return !mark_bit.Get();
   }

   // Grey markbits: 11
   static const char* kGreyBitPattern;
   static inline bool IsGrey(MarkBit mark_bit) {
-    ASSERT(strcmp(kGreyBitPattern, "11") == 0);
-    ASSERT(!IsImpossible(mark_bit));
     return mark_bit.Get() && mark_bit.Next().Get();
   }

   static inline void MarkBlack(MarkBit mark_bit) {
     mark_bit.Set();
     mark_bit.Next().Clear();
-    ASSERT(Marking::IsBlack(mark_bit));
   }

   static inline void BlackToGrey(MarkBit markbit) {
-    ASSERT(IsBlack(markbit));
     markbit.Next().Set();
-    ASSERT(IsGrey(markbit));
   }

   static inline void WhiteToGrey(MarkBit markbit) {
-    ASSERT(IsWhite(markbit));
     markbit.Set();
     markbit.Next().Set();
-    ASSERT(IsGrey(markbit));
   }

   static inline void GreyToBlack(MarkBit markbit) {
-    ASSERT(IsGrey(markbit));
     markbit.Next().Clear();
-    ASSERT(IsBlack(markbit));
   }

   static inline void BlackToGrey(HeapObject* obj) {
-    ASSERT(obj->Size() >= 2 * kPointerSize);
     BlackToGrey(MarkBitFrom(obj));
   }

   static inline void AnyToGrey(MarkBit markbit) {
     markbit.Set();
     markbit.Next().Set();
-    ASSERT(IsGrey(markbit));
   }

   // Returns true if the object whose mark is transferred is marked black.
@@ -173,8 +157,6 @@ class Marking {
       to_mark_bit.Next().Set();
       is_black = false;  // Was actually gray.
     }
-    ASSERT(Color(from) == Color(to));
-    ASSERT(is_black == (Color(to) == BLACK_OBJECT));
     return is_black;
   }
@@ -227,7 +209,6 @@ class MarkingDeque {
   inline void PushGrey(HeapObject* object) {
     ASSERT(object->IsHeapObject());
     if (IsFull()) {
-      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
       SetOverflowed();
     } else {
       array_[top_] = object;
@@ -246,7 +227,6 @@
   inline void UnshiftGrey(HeapObject* object) {
     ASSERT(object->IsHeapObject());
     if (IsFull()) {
-      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
       SetOverflowed();
     } else {
       bottom_ = ((bottom_ - 1) & mask_);
@@ -1300,7 +1300,6 @@ ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
-  ASSERT(array->HasValidElements());
   return static_cast<FixedArrayBase*>(array);
 }
@@ -257,16 +257,12 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;
   allocation_info_.top = new_top;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  ASSERT(current_top != NULL);
   return HeapObject::FromAddress(current_top);
 }

 // Raw allocation.
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
@@ -642,7 +642,6 @@ class Page : public MemoryChunk {
   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
   INLINE(static Page* FromAllocationTop(Address top)) {
     Page* p = FromAddress(top - kPointerSize);
-    ASSERT_PAGE_OFFSET(p->Offset(top));
     return p;
   }
@@ -666,7 +665,6 @@ class Page : public MemoryChunk {
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
-    ASSERT_PAGE_OFFSET(offset);
     return offset;
   }
@@ -1741,7 +1739,6 @@ class NewSpacePage : public MemoryChunk {
         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                   ~Page::kPageAlignmentMask);
     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    ASSERT(page->InNewSpace());
     return page;
   }
@@ -1818,7 +1815,6 @@ class SemiSpace : public Space {
   // Returns the start address of the current page of the space.
   Address page_low() {
-    ASSERT(anchor_.next_page() != &anchor_);
     return current_page_->body();
   }
@@ -2084,7 +2080,7 @@ class NewSpace : public Space {
   // Return the current capacity of a semispace.
   intptr_t EffectiveCapacity() {
-    ASSERT(to_space_.Capacity() == from_space_.Capacity());
+    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
   }
@@ -2317,9 +2313,9 @@ class OldSpace : public PagedSpace {
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space).page_low() <= (info).top             \
-         && (info).top <= (space).page_high()         \
-         && (info).limit <= (space).page_high())
+  SLOW_ASSERT((space).page_low() <= (info).top        \
+              && (info).top <= (space).page_high()    \
+              && (info).limit <= (space).page_high())

 // -----------------------------------------------------------------------------
@@ -55,10 +55,10 @@ void StoreBuffer::Mark(Address addr) {
 void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
   if (store_buffer_rebuilding_enabled_) {
-    ASSERT(!heap_->cell_space()->Contains(addr));
-    ASSERT(!heap_->code_space()->Contains(addr));
-    ASSERT(!heap_->old_data_space()->Contains(addr));
-    ASSERT(!heap_->new_space()->Contains(addr));
+    SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
+                !heap_->code_space()->Contains(addr) &&
+                !heap_->old_data_space()->Contains(addr) &&
+                !heap_->new_space()->Contains(addr));
     Address* top = old_top_;
     *top++ = addr;
     old_top_ = top;
@@ -168,7 +168,6 @@ static inline uint32_t RoundDownToPowerOf2(uint32_t x) {
 template <typename T, typename U>
 static inline bool IsAligned(T value, U alignment) {
-  ASSERT(IsPowerOf2(alignment));
   return (value & (alignment - 1)) == 0;
 }