Commit 0b13c6be authored by kasperl@chromium.org

Strengthen a few assertions and add zapping of allocated memory
blocks, filling them with a recognizable non-zero bit pattern in
debug mode.

Review URL: http://codereview.chromium.org/558016

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3729 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f07a0230
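
Context for the zapping half of this change: in debug builds, freshly allocated and freed blocks are overwritten with a recognizable constant, so a stale read surfaces as an obvious pattern instead of zeros or leftover data. A minimal standalone sketch of the idea follows; the Word type and the 0xdeadbeef-style constant are stand-ins for illustration, not V8's actual Address type or kZapValue definition.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Illustrative stand-ins; V8 defines its own Address type and kZapValue.
typedef uintptr_t Word;
static const Word kZapValue = 0xdeadbeef;

// Fill a block with the zap pattern, one pointer-sized word at a time.
static void Zap(void* start, size_t size) {
  Word* words = static_cast<Word*>(start);
  for (size_t s = 0; s + sizeof(Word) <= size; s += sizeof(Word)) {
    words[s / sizeof(Word)] = kZapValue;
  }
}

int main() {
  Word* block = static_cast<Word*>(malloc(64));
  Zap(block, 64);
  // A stale read now yields the recognizable pattern, not garbage.
  printf("%llx\n", static_cast<unsigned long long>(block[0]));
  free(block);
  return 0;
}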
......@@ -152,7 +152,11 @@ Object* Heap::AllocateRawCell() {
 bool Heap::InNewSpace(Object* object) {
-  return new_space_.Contains(object);
+  bool result = new_space_.Contains(object);
+  ASSERT(!result ||                  // Either not in new space
+         gc_state_ != NOT_IN_GC ||   // ... or in the middle of GC
+         InToSpace(object));         // ... or in to-space (where we allocate).
+  return result;
 }
......
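
The strengthened assertion encodes a scavenger invariant: outside of a GC, from-space holds no live objects, so anything found in new space must lie in to-space, where allocation happens; only mid-scavenge may an object legitimately sit in from-space. A self-contained model of the check, with all names and the address-range layout invented for illustration:

#include <cassert>
#include <cstdint>

// Hypothetical model: new space as a pair of semispaces (illustration only).
enum GCState { NOT_IN_GC, SCAVENGE };

struct NewSpace {
  uintptr_t to_start, to_end;      // to-space: where allocation happens
  uintptr_t from_start, from_end;  // from-space: only populated during GC

  bool InToSpace(uintptr_t a) const { return a >= to_start && a < to_end; }
  bool Contains(uintptr_t a) const {
    return InToSpace(a) || (a >= from_start && a < from_end);
  }
};

bool InNewSpace(const NewSpace& ns, GCState gc_state, uintptr_t addr) {
  bool result = ns.Contains(addr);
  // Mirrors the new ASSERT: a hit outside GC must come from to-space.
  assert(!result || gc_state != NOT_IN_GC || ns.InToSpace(addr));
  return result;
}

int main() {
  NewSpace ns = {0x1000, 0x2000, 0x2000, 0x3000};
  assert(InNewSpace(ns, NOT_IN_GC, 0x1800));   // to-space hit: invariant holds
  assert(!InNewSpace(ns, NOT_IN_GC, 0x4000));  // not in new space at all
  return 0;
}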
......@@ -1367,6 +1367,7 @@ void FixedArray::set(int index,
 void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
   ASSERT(index >= 0 && index < array->length());
+  ASSERT(!Heap::InNewSpace(value));
   WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
 }
......
......@@ -1669,7 +1669,8 @@ class FixedArray: public Array {
   void SortPairs(FixedArray* numbers, uint32_t len);
 
  protected:
-  // Set operation on FixedArray without using write barriers.
+  // Set operation on FixedArray without using write barriers. Can
+  // only be used for storing old space objects or smis.
   static inline void fast_set(FixedArray* array, int index, Object* value);
 
  private:
......
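
For context on the objects.h comment: fast_set skips the write barrier, and the barrier is what records old-to-new pointers so the scavenger can find them without scanning all of old space; restricting fast_set to old space objects and smis, now enforced by the ASSERT above, keeps that bookkeeping sound. A hedged sketch of the barrier idea, with the types and remembered set invented for illustration and in no way V8's real API:

#include <cassert>
#include <set>

// Hypothetical types for illustration only.
struct Object { bool in_new_space; };

// Slots in old space that point into new space; the scavenger visits these
// instead of scanning every old-space object.
static std::set<Object**> remembered_set;

// Normal store: runs the write barrier.
void WriteWithBarrier(Object** slot, Object* value) {
  *slot = value;
  if (value->in_new_space) remembered_set.insert(slot);
}

// Barrier-free store, in the spirit of fast_set: safe only when the value
// cannot be in new space, so there is nothing for the barrier to record.
void FastWrite(Object** slot, Object* value) {
  assert(!value->in_new_space);  // mirrors ASSERT(!Heap::InNewSpace(value))
  *slot = value;
}

int main() {
  Object old_obj = {false};
  Object young_obj = {true};
  Object* slot = nullptr;
  FastWrite(&slot, &old_obj);           // old-space value needs no barrier
  assert(remembered_set.empty());
  WriteWithBarrier(&slot, &young_obj);  // new-space value: slot is recorded
  assert(remembered_set.count(&slot) == 1);
  return 0;
}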
......@@ -357,12 +357,18 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
   }
   int alloced = static_cast<int>(*allocated);
   size_ += alloced;
+#ifdef DEBUG
+  ZapBlock(reinterpret_cast<Address>(mem), alloced);
+#endif
   Counters::memory_allocated.Increment(alloced);
   return mem;
 }
 
 
 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+#ifdef DEBUG
+  ZapBlock(reinterpret_cast<Address>(mem), length);
+#endif
   if (CodeRange::contains(static_cast<Address>(mem))) {
     CodeRange::FreeRawMemory(mem, length);
   } else {
......@@ -446,6 +452,9 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
   if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
     return Page::FromAddress(NULL);
   }
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
   Counters::memory_allocated.Increment(static_cast<int>(size));
 
   // So long as we correctly overestimated the number of chunks we should not
......@@ -467,10 +476,14 @@ bool MemoryAllocator::CommitBlock(Address start,
   ASSERT(InInitialChunk(start + size - 1));
 
   if (!initial_chunk_->Commit(start, size, executable)) return false;
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
   Counters::memory_allocated.Increment(static_cast<int>(size));
   return true;
 }
 
+
 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
   ASSERT(start != NULL);
   ASSERT(size > 0);
......@@ -483,6 +496,14 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
   return true;
 }
 
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
+}
+
+
 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                               PagedSpace* owner) {
   ASSERT(IsValidChunk(chunk_id));
......@@ -1599,9 +1620,7 @@ void OldSpaceFreeList::RebuildSizeList() {
 int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
 #ifdef DEBUG
-  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
-    Memory::Address_at(start + i) = kZapValue;
-  }
+  MemoryAllocator::ZapBlock(start, size_in_bytes);
 #endif
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(size_in_bytes);
......@@ -1733,9 +1752,7 @@ void FixedSizeFreeList::Reset() {
 void FixedSizeFreeList::Free(Address start) {
 #ifdef DEBUG
-  for (int i = 0; i < object_size_; i += kPointerSize) {
-    Memory::Address_at(start + i) = kZapValue;
-  }
+  MemoryAllocator::ZapBlock(start, object_size_);
 #endif
   // We only use the freelists with mark-sweep.
   ASSERT(!MarkCompactCollector::IsCompacting());
......
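
With the helper in place, both freelists and the raw allocate/commit/free paths share one zapping routine instead of open-coding the loop. What that buys in practice: a debug check or a crash dump can recognize freed memory at a glance, as in this small sketch, where the constant is again an assumed stand-in for kZapValue:

#include <cassert>
#include <cstddef>
#include <cstdint>

static const uintptr_t kZap = 0xdeadbeef;  // assumed stand-in for kZapValue

// Debug-only helper one might write in a test: was this block zapped?
static bool IsZapped(const void* start, size_t size) {
  const uintptr_t* words = static_cast<const uintptr_t*>(start);
  for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t)) {
    if (words[s / sizeof(uintptr_t)] != kZap) return false;
  }
  return true;
}

int main() {
  uintptr_t block[8];
  for (int i = 0; i < 8; i++) block[i] = kZap;  // what the free paths now do
  assert(IsZapped(block, sizeof(block)));
  // Any stale "pointer" loaded from the block equals the zap value, so a
  // use-after-free dereference faults at a recognizable address.
  return 0;
}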
......@@ -438,13 +438,16 @@ class MemoryAllocator : public AllStatic {
   // and false otherwise.
   static bool CommitBlock(Address start, size_t size, Executability executable);
 
   // Uncommit a contiguous block of memory [start..(start+size)[.
   // start is not NULL, the size is greater than zero, and the
   // block is contained in the initial chunk. Returns true if it succeeded
   // and false otherwise.
   static bool UncommitBlock(Address start, size_t size);
 
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  static void ZapBlock(Address start, size_t size);
+
   // Attempts to allocate the requested (non-zero) number of pages from the
   // OS. Fewer pages might be allocated than requested. If it fails to
   // allocate memory for the OS or cannot allocate a single page, this
......