Commit 930ca1b5 authored by hpayer@chromium.org

Synchronize store buffer processing and concurrent sweeping.

BUG=
R=jarin@chromium.org, mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/227533006

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20582 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent df4ee493
@@ -2988,25 +2988,30 @@ class PointersUpdatingVisitor: public ObjectVisitor {
 };
-static void UpdatePointer(HeapObject** p, HeapObject* object) {
-  ASSERT(*p == object);
-  Address old_addr = object->address();
-  Address new_addr = Memory::Address_at(old_addr);
+static void UpdatePointer(HeapObject** address, HeapObject* object) {
+  Address new_addr = Memory::Address_at(object->address());
   // The new space sweep will overwrite the map word of dead objects
   // with NULL. In this case we do not need to transfer this entry to
   // the store buffer which we are rebuilding.
+  // We perform the pointer update with a no barrier compare-and-swap. The
+  // compare and swap may fail in the case where the pointer update tries to
+  // update garbage memory which was concurrently accessed by the sweeper.
   if (new_addr != NULL) {
-    *p = HeapObject::FromAddress(new_addr);
+    NoBarrier_CompareAndSwap(
+        reinterpret_cast<AtomicWord*>(address),
+        reinterpret_cast<AtomicWord>(object),
+        reinterpret_cast<AtomicWord>(HeapObject::FromAddress(new_addr)));
   } else {
     // We have to zap this pointer, because the store buffer may overflow later,
     // and then we have to scan the entire heap and we don't want to find
     // spurious newspace pointers in the old space.
     // TODO(mstarzinger): This was changed to a sentinel value to track down
     // rare crashes, change it back to Smi::FromInt(0) later.
-    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
+    NoBarrier_CompareAndSwap(
+        reinterpret_cast<AtomicWord*>(address),
+        reinterpret_cast<AtomicWord>(object),
+        reinterpret_cast<AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
   }
 }
......
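In outline, the hunk above swaps a plain pointer store for a no-barrier compare-and-swap so that a racing sweeper thread wins cleanly. A minimal standalone sketch of that pattern, using std::atomic and a relaxed compare_exchange_strong in place of V8's NoBarrier_CompareAndSwap (the slot type and function name below are illustrative, not V8's):

// Sketch (not V8 code): update a slot from `expected_old` to `new_value`,
// tolerating a concurrent writer (e.g. a sweeper thread) that may have
// already overwritten the slot with something else.
#include <atomic>
#include <cstdint>

void UpdateSlotRelaxed(std::atomic<uintptr_t>* slot,
                       uintptr_t expected_old,
                       uintptr_t new_value) {
  uintptr_t expected = expected_old;
  // Relaxed compare-and-swap: if another thread changed the slot in the
  // meantime, the CAS simply fails and the stale update is dropped.
  slot->compare_exchange_strong(expected, new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
}

If the sweeper has already rewritten the slot, the CAS fails and the stale update is dropped, which is exactly the outcome the comment in the hunk describes as acceptable.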
@@ -134,6 +134,14 @@ PropertyDetails PropertyDetails::AsDeleted() const {
     RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
   }
+#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
+  int holder::nobarrier_##name() { \
+    Object* value = NOBARRIER_READ_FIELD(this, offset); \
+    return Smi::cast(value)->value(); \
+  } \
+  void holder::nobarrier_set_##name(int value) { \
+    NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+  }
 #define BOOL_GETTER(holder, field, name, offset) \
   bool holder::name() { \
@@ -1108,7 +1116,7 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
   reinterpret_cast<Object*>( \
       Acquire_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
-#define NO_BARRIER_READ_FIELD(p, offset) \
+#define NOBARRIER_READ_FIELD(p, offset) \
   reinterpret_cast<Object*>( \
       NoBarrier_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
@@ -1119,7 +1127,7 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
   Release_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \
                 reinterpret_cast<AtomicWord>(value));
-#define NO_BARRIER_WRITE_FIELD(p, offset, value) \
+#define NOBARRIER_WRITE_FIELD(p, offset, value) \
   NoBarrier_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \
                   reinterpret_cast<AtomicWord>(value));
@@ -1388,6 +1396,11 @@ void HeapObject::synchronized_set_map(Map* value) {
 }
+void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
+  synchronized_set_map_word(MapWord::FromMap(value));
+}
 // Unsafe accessor omitting write barrier.
 void HeapObject::set_map_no_write_barrier(Map* value) {
   set_map_word(MapWord::FromMap(value));
@@ -1396,12 +1409,12 @@ void HeapObject::set_map_no_write_barrier(Map* value) {
 MapWord HeapObject::map_word() {
   return MapWord(
-      reinterpret_cast<uintptr_t>(NO_BARRIER_READ_FIELD(this, kMapOffset)));
+      reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
 }
 void HeapObject::set_map_word(MapWord map_word) {
-  NO_BARRIER_WRITE_FIELD(
+  NOBARRIER_WRITE_FIELD(
       this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
 }
@@ -3024,6 +3037,7 @@ SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
+NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 SMI_ACCESSORS(String, length, kLengthOffset)
 SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
@@ -4169,7 +4183,7 @@ int HeapObject::SizeFromMap(Map* map) {
     return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
   }
   if (instance_type == FREE_SPACE_TYPE) {
-    return reinterpret_cast<FreeSpace*>(this)->size();
+    return reinterpret_cast<FreeSpace*>(this)->nobarrier_size();
   }
   if (instance_type == STRING_TYPE ||
       instance_type == INTERNALIZED_STRING_TYPE) {
......
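The NOBARRIER_SMI_ACCESSORS addition gives FreeSpace a size accessor that may race with the sweeper without relying on plain non-atomic loads and stores. A rough sketch of the idea, with made-up type and field names and std::atomic standing in for V8's NoBarrier_Load/NoBarrier_Store:

// Sketch only: a free-space-like object whose size field is accessed with
// relaxed (no-barrier) atomics, so a reader such as SizeFromMap can race
// with the sweeper. Names are illustrative, not V8's object layout.
#include <atomic>

struct FreeSpaceLike {
  std::atomic<int> size_field;

  int nobarrier_size() const {
    return size_field.load(std::memory_order_relaxed);
  }
  void nobarrier_set_size(int value) {
    size_field.store(value, std::memory_order_relaxed);
  }
};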
@@ -1807,6 +1807,7 @@ class HeapObject: public Object {
   // Set the map using release store
   inline void synchronized_set_map(Map* value);
+  inline void synchronized_set_map_no_write_barrier(Map* value);
   inline void synchronized_set_map_word(MapWord map_word);
   // During garbage collection, the map word of a heap object does not
@@ -4830,6 +4831,9 @@ class FreeSpace: public HeapObject {
   inline int size();
   inline void set_size(int value);
+  inline int nobarrier_size();
+  inline void nobarrier_set_size(int value);
   inline int Size() { return size(); }
   // Casting.
......
@@ -2017,10 +2017,13 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
+    // We have to set the size first with a release store before we store
+    // the map because a concurrent store buffer scan on scavenge must not
+    // observe a map with an invalid size.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->set_size(size_in_bytes);
+    this_as_free_space->nobarrier_set_size(size_in_bytes);
+    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
   } else if (size_in_bytes == kPointerSize) {
     set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
@@ -2064,11 +2067,11 @@ void FreeListNode::set_next(FreeListNode* next) {
   // stage.
   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
     ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    Memory::Address_at(address() + kNextOffset) =
-        reinterpret_cast<Address>(next);
+    NoBarrier_Store(reinterpret_cast<AtomicWord*>(address() + kNextOffset),
+                    reinterpret_cast<AtomicWord>(next));
   } else {
-    Memory::Address_at(address() + kPointerSize) =
-        reinterpret_cast<Address>(next);
+    NoBarrier_Store(reinterpret_cast<AtomicWord*>(address() + kPointerSize),
+                    reinterpret_cast<AtomicWord>(next));
   }
 }
......
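The ordering contract introduced in FreeListNode::set_size (size written first, map word published last with a release store, paired with an acquire load on the scanning side) can be sketched as follows, using simplified placeholder types and a placeholder map value, with std::atomic standing in for V8's Release_Store/Acquire_Load:

// Sketch: the sweeper publishes a free-space block by writing its size with
// a relaxed store and then storing the map word with release semantics.
// A scanner that loads the map word with acquire semantics is then
// guaranteed to see a size consistent with that map.
#include <atomic>
#include <cstdint>

struct BlockSketch {
  std::atomic<uintptr_t> map_word;  // published last, with release
  std::atomic<int> size;            // written first, relaxed
};

constexpr uintptr_t kFreeSpaceMap = 0x1;  // placeholder map value

void SweeperPublish(BlockSketch* block, int size_in_bytes) {
  block->size.store(size_in_bytes, std::memory_order_relaxed);
  block->map_word.store(kFreeSpaceMap, std::memory_order_release);
}

int ScannerReadSize(BlockSketch* block) {
  // Pairs with the release store above; after observing the free-space map,
  // the relaxed size load cannot see a stale value.
  if (block->map_word.load(std::memory_order_acquire) == kFreeSpaceMap) {
    return block->size.load(std::memory_order_relaxed);
  }
  return -1;  // not (yet) a published free-space block
}

This is the same pairing the store-buffer hunks below rely on: Acquire_Load of the map word in FindPointersToNewSpaceOnPage matches the release store performed by the sweeper in set_size.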
@@ -388,7 +388,9 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
       // When we are not in GC the Heap::InNewSpace() predicate
       // checks that pointers which satisfy predicate point into
       // the active semispace.
-      heap_->InNewSpace(*slot);
+      Object* object = reinterpret_cast<Object*>(
+          NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
+      heap_->InNewSpace(object);
       slot_address += kPointerSize;
     }
   }
@@ -427,14 +429,18 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
        slot_address < end;
        slot_address += kPointerSize) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap_->InNewSpace(*slot)) {
-      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
-      ASSERT(object->IsHeapObject());
+    Object* object = reinterpret_cast<Object*>(
+        NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
+    if (heap_->InNewSpace(object)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+      ASSERT(heap_object->IsHeapObject());
       // The new space object was not promoted if it still contains a map
       // pointer. Clear the map field now lazily.
-      if (clear_maps) ClearDeadObject(object);
-      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
-      if (heap_->InNewSpace(*slot)) {
+      if (clear_maps) ClearDeadObject(heap_object);
+      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+      object = reinterpret_cast<Object*>(
+          NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
+      if (heap_->InNewSpace(object)) {
         EnterDirectlyIntoStoreBuffer(slot_address);
       }
     }
@@ -531,7 +537,11 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
   Object* constant_pool_array_map = heap_->constant_pool_array_map();
   while (visitable_end < end_of_page) {
-    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // The sweeper thread concurrently may write free space maps and size to
+    // this page. We need acquire load here to make sure that we get a
+    // consistent view of maps and their sizes.
+    Object* o = reinterpret_cast<Object*>(
+        Acquire_Load(reinterpret_cast<AtomicWord*>(visitable_end)));
     // Skip fillers or constant pool arrays (which never contain new-space
     // pointers but can contain pointers which can be confused for fillers)
     // but not things that look like fillers in the special garbage section
@@ -595,14 +605,17 @@ void StoreBuffer::IteratePointersInStoreBuffer(
     Address* saved_top = old_top_;
 #endif
     Object** slot = reinterpret_cast<Object**>(*current);
-    Object* object = *slot;
+    Object* object = reinterpret_cast<Object*>(
+        NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
     if (heap_->InFromSpace(object)) {
       HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
       // The new space object was not promoted if it still contains a map
       // pointer. Clear the map field now lazily.
       if (clear_maps) ClearDeadObject(heap_object);
       slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-      if (heap_->InNewSpace(*slot)) {
+      object = reinterpret_cast<Object*>(
+          NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
+      if (heap_->InNewSpace(object)) {
         EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
       }
     }
......
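The store-buffer changes all follow the same shape: read the slot through an atomic load, act on it, then re-read it before deciding whether to re-record it. A condensed sketch of that flow (the predicate and callbacks are hypothetical placeholders to be supplied by the caller, not V8 APIs):

// Sketch: scan a recorded slot with relaxed atomic loads. The sweeper may
// rewrite the slot concurrently, so every read goes through an atomic load,
// and the slot is re-read after the callback before it is re-recorded.
#include <atomic>
#include <cstdint>

// Placeholder declarations; real implementations would live elsewhere.
bool InNewSpace(uintptr_t value);
void ScavengeSlot(std::atomic<uintptr_t>* slot);
void EnterIntoStoreBuffer(std::atomic<uintptr_t>* slot);

void ProcessSlot(std::atomic<uintptr_t>* slot) {
  uintptr_t object = slot->load(std::memory_order_relaxed);
  if (!InNewSpace(object)) return;
  ScavengeSlot(slot);  // may promote the object and rewrite the slot
  // Re-read the slot: only re-record it if it still points into new space.
  object = slot->load(std::memory_order_relaxed);
  if (InNewSpace(object)) {
    EnterIntoStoreBuffer(slot);
  }
}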