Commit 4c3a1725 authored by hpayer, committed by Commit bot

[heap] Reland concurrent slot filtering in sweeper threads.

BUG=chromium:648568

Review-Url: https://codereview.chromium.org/2403423007
Cr-Commit-Position: refs/heads/master@{#40221}
parent 9b5a1cf5
@@ -302,24 +302,6 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
   return compacting_;
 }
 
-void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
-  {
-    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
-    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
-  }
-  // There is not need to filter the old to old set because
-  // it is completely cleared after the mark-compact GC.
-  // The slots that become invalid due to runtime transitions are
-  // cleared eagerly immediately after the transition.
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
-    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
-  }
-#endif
-}
-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.

@@ -2410,8 +2392,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   MarkDependentCodeForDeoptimization(dependent_code_list);
 
   ClearWeakCollections();
-
-  ClearInvalidRememberedSetSlots();
 }
@@ -3832,11 +3812,12 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
       Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
   if (identity == NEW_SPACE) {
     RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
-  } else if (identity == OLD_SPACE) {
-    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
-  } else if (identity == CODE_SPACE) {
-    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
   } else {
+    if (identity == OLD_SPACE || identity == MAP_SPACE) {
+      RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap_, page);
+    } else {
+      RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(heap_, page);
+    }
     max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
   }
......
@@ -502,7 +502,6 @@ class MarkCompactCollector {
   explicit MarkCompactCollector(Heap* heap);
 
   bool WillBeDeoptimized(Code* code);
-  void ClearInvalidRememberedSetSlots();
 
   void ComputeEvacuationHeuristics(int area_size,
                                    int* target_fragmentation_percent,
......
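Taken together, the mark-compact changes move stale-slot filtering out of the single heap-wide ClearInvalidRememberedSetSlots pass and into the sweeper: each sweeper task now drops invalid OLD_TO_NEW slots for the page it is about to sweep. Below is a simplified, standalone sketch of that per-page dispatch, not part of the commit; the enum, the page struct and the filter functions are stand-ins for V8's AllocationSpace, Page and RememberedSet<OLD_TO_NEW>.

#include <cstdio>

// Stand-ins for this sketch only; not V8's declarations.
enum SpaceIdentity { NEW_SPACE, OLD_SPACE, MAP_SPACE, CODE_SPACE };
struct PageStub { SpaceIdentity identity; };

void ClearInvalidSlots(PageStub*) { std::puts("filter untyped old-to-new slots"); }
void ClearInvalidTypedSlots(PageStub*) { std::puts("filter typed old-to-new slots"); }
void RawSweep(PageStub*) { std::puts("sweep page"); }

// Mirrors the control flow added to ParallelSweepPage: new-space pages are
// swept unchanged, every other page first sheds its stale old-to-new slots.
void SweepPage(PageStub* page) {
  if (page->identity == NEW_SPACE) {
    RawSweep(page);
  } else {
    if (page->identity == OLD_SPACE || page->identity == MAP_SPACE) {
      ClearInvalidSlots(page);       // regular (untyped) slot set
    } else {
      ClearInvalidTypedSlots(page);  // code space records typed slots
    }
    RawSweep(page);
  }
}

int main() {
  PageStub old_page{OLD_SPACE};
  SweepPage(&old_page);
  return 0;
}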
@@ -15,93 +15,55 @@ namespace v8 {
 namespace internal {
 
 template <PointerDirection direction>
-void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
+void RememberedSet<direction>::ClearInvalidSlots(Heap* heap,
+                                                 MemoryChunk* chunk) {
   STATIC_ASSERT(direction == OLD_TO_NEW);
-  for (MemoryChunk* chunk : *heap->old_space()) {
-    SlotSet* slots = GetSlotSet(chunk);
-    if (slots != nullptr) {
-      slots->Iterate(
-          [heap, chunk](Address addr) {
-            Object** slot = reinterpret_cast<Object**>(addr);
-            return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-          },
-          SlotSet::PREFREE_EMPTY_BUCKETS);
-    }
-  }
-  for (MemoryChunk* chunk : *heap->code_space()) {
-    TypedSlotSet* slots = GetTypedSlotSet(chunk);
-    if (slots != nullptr) {
-      slots->Iterate(
-          [heap, chunk](SlotType type, Address host_addr, Address addr) {
-            if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
-              return KEEP_SLOT;
-            } else {
-              return REMOVE_SLOT;
-            }
-          },
-          TypedSlotSet::PREFREE_EMPTY_CHUNKS);
-    }
-  }
-  for (MemoryChunk* chunk : *heap->map_space()) {
-    SlotSet* slots = GetSlotSet(chunk);
-    if (slots != nullptr) {
-      slots->Iterate(
-          [heap, chunk](Address addr) {
-            Object** slot = reinterpret_cast<Object**>(addr);
-            // TODO(mlippautz): In map space all allocations would ideally be
-            // map
-            // aligned. After establishing this invariant IsValidSlot could just
-            // refer to the containing object using alignment and check the mark
-            // bits.
-            return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
-          },
-          SlotSet::PREFREE_EMPTY_BUCKETS);
-    }
+  DCHECK(chunk->owner()->identity() == OLD_SPACE ||
+         chunk->owner()->identity() == MAP_SPACE);
+  SlotSet* slots = GetSlotSet(chunk);
+  if (slots != nullptr) {
+    slots->Iterate(
+        [heap, chunk](Address addr) {
+          Object** slot = reinterpret_cast<Object**>(addr);
+          return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
+        },
+        SlotSet::KEEP_EMPTY_BUCKETS);
   }
 }
 
 template <PointerDirection direction>
-void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
-  Iterate(heap, [heap](Address addr) {
-    HeapObject* obj =
-        heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
-    if (obj == nullptr) {
-      // The slot is in dead object.
-      MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
-      AllocationSpace owner = chunk->owner()->identity();
-      // The old to old remembered set should not have dead slots.
-      CHECK_NE(direction, OLD_TO_OLD);
-      // The old to new remembered set is allowed to have slots in dead
-      // objects only in map and large object space because these space
-      // cannot have raw untagged pointers.
-      CHECK(owner == MAP_SPACE || owner == LO_SPACE);
-    } else {
-      int offset = static_cast<int>(addr - obj->address());
-      CHECK(obj->IsValidSlot(offset));
-    }
-    return KEEP_SLOT;
-  });
+void RememberedSet<direction>::ClearInvalidTypedSlots(Heap* heap,
+                                                      MemoryChunk* chunk) {
+  STATIC_ASSERT(direction == OLD_TO_NEW);
+  DCHECK(chunk->owner()->identity() == CODE_SPACE);
+  TypedSlotSet* slots = GetTypedSlotSet(chunk);
+  if (slots != nullptr) {
+    slots->Iterate(
+        [heap, chunk](SlotType type, Address host_addr, Address addr) {
+          if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
+            return KEEP_SLOT;
+          } else {
+            return REMOVE_SLOT;
+          }
+        },
+        TypedSlotSet::KEEP_EMPTY_CHUNKS);
+  }
 }
 
 template <PointerDirection direction>
 bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
                                            Object** slot) {
   STATIC_ASSERT(direction == OLD_TO_NEW);
-  Object* object = *slot;
-  if (!heap->InNewSpace(object)) {
-    return false;
-  }
-  HeapObject* heap_object = HeapObject::cast(object);
   // If the target object is not black, the source slot must be part
   // of a non-black (dead) object.
-  return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
-         heap->mark_compact_collector()->IsSlotInBlackObject(
-             chunk, reinterpret_cast<Address>(slot));
+  return heap->mark_compact_collector()->IsSlotInBlackObject(
+      chunk, reinterpret_cast<Address>(slot));
 }
 
-template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
+template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap,
+                                                           MemoryChunk* chunk);
+template void RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(
+    Heap* heap, MemoryChunk* chunk);
 
 }  // namespace internal
 }  // namespace v8
@@ -204,6 +204,9 @@ class RememberedSet {
   // must be called before sweeping when mark bits are still intact.
-  static void ClearInvalidSlots(Heap* heap);
+  static void ClearInvalidSlots(Heap* heap, MemoryChunk* chunk);
+  static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
 
   static void VerifyValidSlots(Heap* heap);
 
  private:
......
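Both filters above reduce to the same question: is the recorded slot still inside an object that survived marking? The following is a condensed sketch, not part of the commit; the two predicates are hypothetical stand-ins, hard-wired so the sketch runs on its own, for the mark-bit lookups the diff actually uses (MarkCompactCollector::IsSlotInBlackObject for untyped slots, Marking::IsBlack on the host object for typed slots).

#include <cstdint>
#include <cstdio>

using Address = uintptr_t;
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Hypothetical stand-ins; V8 consults the mark bitmap instead.
bool SlotIsInsideBlackObject(Address) { return true; }
bool HostObjectIsBlack(Address) { return false; }

// Old/map-space pages record untyped slots: keep a slot only if its address
// still lies inside a live (black) object; a slot inside a dead object holds
// garbage and must be dropped before the page is swept.
SlotCallbackResult FilterUntypedSlot(Address slot) {
  return SlotIsInsideBlackObject(slot) ? KEEP_SLOT : REMOVE_SLOT;
}

// Code-space pages record typed slots: the decision is made per host code
// object, which either survived marking (black) or did not.
SlotCallbackResult FilterTypedSlot(Address host_object) {
  return HostObjectIsBlack(host_object) ? KEEP_SLOT : REMOVE_SLOT;
}

int main() {
  std::printf("untyped: %d, typed: %d\n", FilterUntypedSlot(0x1000),
              FilterTypedSlot(0x2000));
  return 0;
}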
@@ -179,7 +179,7 @@ class SlotSet : public Malloced {
       if (current_bucket[i].Value()) {
         uint32_t cell = current_bucket[i].Value();
         uint32_t old_cell = cell;
-        uint32_t new_cell = cell;
+        uint32_t mask = 0;
         while (cell) {
           int bit_offset = base::bits::CountTrailingZeros32(cell);
           uint32_t bit_mask = 1u << bit_offset;

@@ -187,10 +187,11 @@ class SlotSet : public Malloced {
           if (callback(page_start_ + slot) == KEEP_SLOT) {
             ++in_bucket_count;
           } else {
-            new_cell ^= bit_mask;
+            mask |= bit_mask;
           }
           cell ^= bit_mask;
         }
+        uint32_t new_cell = old_cell & ~mask;
         if (old_cell != new_cell) {
           while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
             // If TrySetValue fails, the cell must have changed. We just

@@ -199,7 +200,7 @@ class SlotSet : public Malloced {
             // method will only be called on the main thread and filtering
             // threads will only remove slots.
             old_cell = current_bucket[i].Value();
-            new_cell &= old_cell;
+            new_cell = old_cell & ~mask;
           }
         }
       }
......
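The slot-set change is what makes the retry loop safe under concurrency: the old code edited new_cell directly and, when TrySetValue failed, re-derived it with new_cell &= old_cell, which can drop bits that another thread set in the meantime; keeping a removal mask and recomputing old_cell & ~mask on every retry clears exactly the filtered slots and leaves concurrently inserted ones (e.g. fresh write-barrier entries) alone. Below is a minimal standalone sketch of that pattern, using std::atomic rather than V8's bucket cells.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Clears exactly the bits in `mask`, preserving any bits that other threads
// set after we took our first snapshot of the cell.
void ClearBits(std::atomic<uint32_t>* cell, uint32_t mask) {
  uint32_t old_value = cell->load(std::memory_order_relaxed);
  uint32_t new_value = old_value & ~mask;
  while (!cell->compare_exchange_weak(old_value, new_value,
                                      std::memory_order_relaxed)) {
    // compare_exchange_weak reloads `old_value` on failure; recompute the
    // target from the fresh snapshot instead of reusing a stale value.
    new_value = old_value & ~mask;
  }
}

int main() {
  std::atomic<uint32_t> cell{0x0Bu};  // slots 0, 1 and 3 recorded
  ClearBits(&cell, 0x02u);            // filter out slot 1
  std::printf("0x%02X\n", static_cast<unsigned>(cell.load()));  // prints 0x09
  return 0;
}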