Skip the write barrier for global property cell writes. The heap
verification code was refactored to avoid verifying that property
cells have correct remembered sets.

Review URL: http://codereview.chromium.org/149392

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2423 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
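For context: a write barrier records old-to-new-space pointers in a remembered set so the scavenger can find them without scanning all of old space. A space whose objects are all visited during every scavenge has no need for the barrier, which is presumably what permits skipping it for global property cell writes here. A minimal sketch of the distinction, with illustrative names rather than V8's:

// Illustrative sketch only, not V8 code.  Assumes a generational GC
// whose scavenger visits every cell in cell space unconditionally.
struct RememberedSet { void Record(void** slot); };

void StoreWithBarrier(void** slot, void* value, RememberedSet* rset) {
  *slot = value;
  rset->Record(slot);  // Otherwise this slot might never be rescanned.
}

void StoreToCell(void** slot, void* value) {
  *slot = value;  // No barrier: an old-to-new pointer in a cell is
                  // found anyway when the scavenger walks cell space.
}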
@@ -913,8 +913,6 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
   // Store the value in the cell.
   __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-  __ mov(r1, Operand(JSGlobalPropertyCell::kValueOffset));
-  __ RecordWrite(r2, r1, r3);

   __ Ret();
@@ -2768,12 +2768,20 @@ void Heap::Verify() {
   ASSERT(HasBeenSetup());

   VerifyPointersVisitor visitor;
-  Heap::IterateRoots(&visitor);
+  IterateRoots(&visitor);

-  AllSpaces spaces;
-  while (Space* space = spaces.next()) {
-    space->Verify();
-  }
+  new_space_.Verify();
+
+  VerifyPointersAndRSetVisitor rset_visitor;
+  old_pointer_space_->Verify(&rset_visitor);
+  map_space_->Verify(&rset_visitor);
+
+  VerifyPointersVisitor no_rset_visitor;
+  old_data_space_->Verify(&no_rset_visitor);
+  code_space_->Verify(&no_rset_visitor);
+  cell_space_->Verify(&no_rset_visitor);
+
+  lo_space_->Verify();
 }
 #endif  // DEBUG
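The two visitor classes used above live in heap.cc and differ only in whether they additionally check remembered set bits for pointers into new space. Roughly (a sketch from memory of this era's code, not the verbatim source):

class VerifyPointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(Heap::Contains(object));
        ASSERT(object->map()->IsMap());
      }
    }
  }
};

// The RSet variant additionally asserts something along the lines of:
//   if (Heap::InNewSpace(object)) {
//     ASSERT(<remembered set bit for this slot is set>);
//   }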
@@ -975,11 +975,6 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
   __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
   __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
-  // RecordWrite clobbers the value register. Pass the value being stored in
-  // edx.
-  __ mov(edx, eax);
-  __ RecordWrite(ecx, JSGlobalPropertyCell::kValueOffset, edx, ebx);
-
   // Return the value (register eax).
   __ ret(0);
@@ -1062,7 +1062,16 @@ ACCESSORS(Oddball, to_string, String, kToStringOffset)
 ACCESSORS(Oddball, to_number, Object, kToNumberOffset)

-ACCESSORS(JSGlobalPropertyCell, value, Object, kValueOffset)
+Object* JSGlobalPropertyCell::value() {
+  return READ_FIELD(this, kValueOffset);
+}
+
+
+void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
+  // The write barrier is not used for global property cells.
+  WRITE_FIELD(this, kValueOffset, val);
+}

 int JSObject::GetHeaderSize() {
   switch (map()->instance_type()) {
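The ignored WriteBarrierMode parameter keeps set_value signature-compatible with setters generated by the ACCESSORS macro, which (if memory of this era's objects-inl.h serves) pairs the raw field write with a conditional barrier, roughly:

// Approximate reconstruction of the macro, for comparison only.
#define ACCESSORS(holder, name, type, offset)                           \
  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
    WRITE_FIELD(this, offset, value);                                   \
    CONDITIONAL_WRITE_BARRIER(this, offset, mode);                      \
  }

The hand-written replacement above drops exactly the CONDITIONAL_WRITE_BARRIER step.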
@@ -786,6 +786,77 @@ void PagedSpace::Print() { }
 #endif


+#ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
+void PagedSpace::Verify(ObjectVisitor* visitor) {
+  // The allocation pointer should be valid, and it should be in a page in the
+  // space.
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+
+  // Loop over all the pages.
+  bool above_allocation_top = false;
+  Page* current_page = first_page_;
+  while (current_page->is_valid()) {
+    if (above_allocation_top) {
+      // We don't care what's above the allocation top.
+    } else {
+      // Unless this is the last page in the space containing allocated
+      // objects, the allocation top should be at a constant offset from the
+      // object area end.
+      Address top = current_page->AllocationTop();
+      if (current_page == top_page) {
+        ASSERT(top == allocation_info_.top);
+        // The next page will be above the allocation top.
+        above_allocation_top = true;
+      } else {
+        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+      }
+
+      // It should be packed with objects from the bottom to the top.
+      Address current = current_page->ObjectAreaStart();
+      while (current < top) {
+        HeapObject* object = HeapObject::FromAddress(current);
+
+        // The first word should be a map, and we expect all map pointers to
+        // be in map space.
+        Map* map = object->map();
+        ASSERT(map->IsMap());
+        ASSERT(Heap::map_space()->Contains(map));
+
+        // Perform space-specific object verification.
+        VerifyObject(object);
+
+        // The object itself should look OK.
+        object->Verify();
+
+        // All the interior pointers should be contained in the heap and
+        // have their remembered set bits set if required as determined
+        // by the visitor.
+        int size = object->Size();
+        if (object->IsCode()) {
+          Code::cast(object)->ConvertICTargetsFromAddressToObject();
+          object->IterateBody(map->instance_type(), size, visitor);
+          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+        } else {
+          object->IterateBody(map->instance_type(), size, visitor);
+        }
+
+        current += size;
+      }
+
+      // The allocation pointer should not be in the middle of an object.
+      ASSERT(current == top);
+    }
+
+    current_page = current_page->next_page();
+  }
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // NewSpace implementation
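The next two hunks delete the per-space Verify implementations that this function generalizes: OldSpace::Verify assumed the allocation top sits exactly at the object area end and hard-coded the choice of visitor, while FixedSpace::Verify used the page_extra_ offset and a VerifyObject hook. The unified version absorbs both differences via page_extra_ (zero for OldSpace, see the constructor change below) and the caller-supplied visitor.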
@@ -1615,76 +1686,6 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,

 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
-void OldSpace::Verify() {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
-
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      // Unless this is the last page in the space containing allocated
-      // objects, the allocation top should be at the object area end.
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      } else {
-        ASSERT(top == current_page->ObjectAreaEnd());
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(Heap::map_space()->Contains(map));
-
-        // The object should not be a map.
-        ASSERT(!object->IsMap());
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and have
-        // their remembered set bits set if they point to new space.  Code
-        // objects do not have remembered set bits that we care about.
-        VerifyPointersAndRSetVisitor rset_visitor;
-        VerifyPointersVisitor no_rset_visitor;
-        int size = object->Size();
-        if (object->IsCode()) {
-          Code::cast(object)->ConvertICTargetsFromAddressToObject();
-          object->IterateBody(map->instance_type(), size, &no_rset_visitor);
-          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-        } else {
-          object->IterateBody(map->instance_type(), size, &rset_visitor);
-        }
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
-    }
-
-    current_page = current_page->next_page();
-  }
-}
-
-
 struct CommentStatistic {
   const char* comment;
   int size;
@@ -2086,69 +2087,6 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,

 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
-void FixedSpace::Verify() {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
-
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      // Unless this is the last page in the space containing allocated
-      // objects, the allocation top should be at a constant offset from the
-      // object area end.
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(Heap::map_space()->Contains(map));
-
-        // Verify the object in the space.
-        VerifyObject(object);
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and
-        // have their remembered set bits set if they point to new space.
-        VerifyPointersAndRSetVisitor visitor;
-        int size = object->Size();
-        object->IterateBody(map->instance_type(), size, &visitor);
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
-    }
-
-    current_page = current_page->next_page();
-  }
-}
-
-
 void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
@@ -302,7 +302,6 @@ class Space : public Malloced {
   virtual int Size() = 0;

 #ifdef DEBUG
-  virtual void Verify() = 0;
   virtual void Print() = 0;
 #endif
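Verify is dropped from the abstract Space interface entirely: PagedSpace redeclares it below with an ObjectVisitor parameter, while NewSpace and LargeObjectSpace keep their own argument-less versions (Heap::Verify above still calls new_space_.Verify() and lo_space_->Verify() directly).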
@@ -836,6 +835,13 @@ class PagedSpace : public Space {
   // Print meta info and objects in this space.
   virtual void Print();

+  // Verify integrity of this space.
+  virtual void Verify(ObjectVisitor* visitor);
+
+  // Overridden by subclasses to verify space-specific object
+  // properties (e.g., only maps or free-list nodes are in map space).
+  virtual void VerifyObject(HeapObject* obj) {}
+
   // Report code object related statistics
   void CollectCodeStatistics();
   static void ReportCodeStatistics();
@@ -862,6 +868,12 @@ class PagedSpace : public Space {
   // Relocation information during mark-compact collections.
   AllocationInfo mc_forwarding_info_;

+  // Bytes of each page that cannot be allocated.  Possibly non-zero
+  // for pages in spaces with only fixed-size objects.  Always zero
+  // for pages in spaces with variable sized objects (those pages are
+  // padded with free-list nodes).
+  int page_extra_;
+
   // Sets allocation pointer to a page bottom.
   static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
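A worked example of the page_extra_ computation, using illustrative numbers rather than the real constants from spaces.h:

// Hypothetical values for illustration only.
const int kObjectAreaSize = 8192;  // usable bytes per page
const int kFixedObjectSize = 88;   // e.g. a fixed-size object in map space

// 88 * 93 == 8184, so 8 bytes at the end of each page can never hold
// an object.
const int page_extra = kObjectAreaSize % kFixedObjectSize;  // == 8

// On a full page the allocation top therefore sits at
// ObjectAreaEnd() - page_extra, which is exactly what the ASSERT in
// PagedSpace::Verify checks.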
@@ -1439,6 +1451,7 @@ class OldSpace : public PagedSpace {
            AllocationSpace id,
            Executability executable)
       : PagedSpace(max_capacity, id, executable), free_list_(id) {
+    page_extra_ = 0;
   }

   // The bytes available on the free list (ie, not above the linear allocation
@@ -1467,9 +1480,6 @@ class OldSpace : public PagedSpace {
   virtual void MCCommitRelocationInfo();

 #ifdef DEBUG
-  // Verify integrity of this space.
-  virtual void Verify();
-
   // Reports statistics for the space
   void ReportStatistics();
   // Dump the remembered sets in the space to stdout.
@@ -1505,8 +1515,9 @@ class FixedSpace : public PagedSpace {
       : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
         name_(name),
-        free_list_(id, object_size_in_bytes),
-        page_extra_(Page::kObjectAreaSize % object_size_in_bytes) { }
+        free_list_(id, object_size_in_bytes) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+  }

   // The top of allocation in a page in this space. Undefined if page is unused.
   virtual Address PageAllocationTop(Page* page) {
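Note the initializer for page_extra_ had to move from the member-initializer list into the constructor body: page_extra_ is now inherited from PagedSpace, and C++ does not allow a derived class to initialize a base-class member in its initializer list. The same applies to the OldSpace constructor above.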
@@ -1530,12 +1541,6 @@ class FixedSpace : public PagedSpace {
   virtual void MCCommitRelocationInfo();

 #ifdef DEBUG
-  // Verify integrity of this space.
-  virtual void Verify();
-
-  // Implement by subclasses to verify an actual object in the space.
-  virtual void VerifyObject(HeapObject* obj) = 0;
-
   // Reports statistic info of the space
   void ReportStatistics();
@@ -1560,9 +1565,6 @@ class FixedSpace : public PagedSpace {
   // The space's free list.
   FixedSizeFreeList free_list_;
-
-  // Bytes of each page that cannot be allocated.
-  int page_extra_;
 };