Commit 1f89d369 authored by hpayer, committed by Commit bot

[heap] Concurrent slot filtering in sweeper threads.

This is an intermediate step towards concurrent slot filtering. This CL already makes filtering concurrent, but does not yet integrate it into the actual sweeping. That integration will be done in two follow-up CLs: one for the regular slot set and one for the typed slot set.
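For orientation, the change below retires the heap-wide MarkCompactCollector::ClearInvalidRememberedSetSlots pass and instead has each sweeper task filter the remembered set of its page before the page is swept (the filtering is still a separate pass over the slot set, not yet folded into the sweep itself, per the note above). A minimal standalone sketch of that shape follows; it is not V8 code: Page, is_slot_live and filter_then_sweep are hypothetical stand-ins for the real SlotSet, mark-bit queries and Sweeper::ParallelSweepPage.

// Illustrative sketch only: each worker filters the recorded slots of the
// page it owns, then sweeps it. No locking is needed for the filtering step
// because a page is processed by exactly one sweeper task.
#include <algorithm>
#include <cstdint>
#include <thread>
#include <vector>

struct Page {
  std::vector<uintptr_t> recorded_slots;  // toy remembered-set entries
};

// Stand-in for "the slot still lies in a live object and points into new
// space"; here a fake parity check so the example runs.
bool is_slot_live(uintptr_t slot_address) { return (slot_address & 1) == 0; }

void filter_then_sweep(Page* page) {
  // 1. Filter: drop stale remembered-set entries for this page.
  auto& slots = page->recorded_slots;
  slots.erase(std::remove_if(slots.begin(), slots.end(),
                             [](uintptr_t a) { return !is_slot_live(a); }),
              slots.end());
  // 2. Sweep: free dead objects on the page (elided in this sketch).
}

int main() {
  std::vector<Page> pages(4);
  for (auto& p : pages) p.recorded_slots = {0x1000, 0x1001, 0x2000, 0x2003};
  std::vector<std::thread> sweepers;
  for (auto& p : pages) sweepers.emplace_back(filter_then_sweep, &p);
  for (auto& t : sweepers) t.join();
  return 0;
}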

BUG=chromium:648568

Review-Url: https://codereview.chromium.org/2401563002
Cr-Commit-Position: refs/heads/master@{#40040}
parent a943c9e4
@@ -302,24 +302,6 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
return compacting_;
}
void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
}
// There is no need to filter the old to old set because
// it is completely cleared after the mark-compact GC.
// The slots that become invalid due to runtime transitions are
// cleared eagerly immediately after the transition.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
}
#endif
}
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -2410,8 +2392,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization(dependent_code_list);
ClearWeakCollections();
ClearInvalidRememberedSetSlots();
}
@@ -3824,11 +3804,12 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
} else if (identity == OLD_SPACE) {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else if (identity == CODE_SPACE) {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else {
if (identity == OLD_SPACE || identity == MAP_SPACE) {
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap_, page);
} else {
RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(heap_, page);
}
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
......
@@ -502,7 +502,6 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
void ClearInvalidRememberedSetSlots();
void ComputeEvacuationHeuristics(int area_size,
int* target_fragmentation_percent,
......
@@ -15,93 +15,55 @@ namespace v8 {
namespace internal {
template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap,
MemoryChunk* chunk) {
STATIC_ASSERT(direction == OLD_TO_NEW);
for (MemoryChunk* chunk : *heap->old_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
for (MemoryChunk* chunk : *heap->code_space()) {
TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](SlotType type, Address host_addr, Address addr) {
if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
},
TypedSlotSet::PREFREE_EMPTY_CHUNKS);
}
}
for (MemoryChunk* chunk : *heap->map_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
// TODO(mlippautz): In map space all allocations would ideally be map
// aligned. After establishing this invariant IsValidSlot could just
// refer to the containing object using alignment and check the mark
// bits.
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
DCHECK(chunk->owner()->identity() == OLD_SPACE ||
chunk->owner()->identity() == MAP_SPACE);
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
}
}
template <PointerDirection direction>
void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
Iterate(heap, [heap](Address addr) {
HeapObject* obj =
heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
if (obj == nullptr) {
// The slot is in a dead object.
MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
AllocationSpace owner = chunk->owner()->identity();
// The old to old remembered set should not have dead slots.
CHECK_NE(direction, OLD_TO_OLD);
// The old to new remembered set is allowed to have slots in dead
// objects only in map and large object space because these spaces
// cannot have raw untagged pointers.
CHECK(owner == MAP_SPACE || owner == LO_SPACE);
} else {
int offset = static_cast<int>(addr - obj->address());
CHECK(obj->IsValidSlot(offset));
}
return KEEP_SLOT;
});
void RememberedSet<direction>::ClearInvalidTypedSlots(Heap* heap,
MemoryChunk* chunk) {
STATIC_ASSERT(direction == OLD_TO_NEW);
DCHECK(chunk->owner()->identity() == CODE_SPACE);
TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](SlotType type, Address host_addr, Address addr) {
if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
}
}
template <PointerDirection direction>
bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
Object** slot) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object* object = *slot;
if (!heap->InNewSpace(object)) {
return false;
}
HeapObject* heap_object = HeapObject::cast(object);
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
heap->mark_compact_collector()->IsSlotInBlackObject(
chunk, reinterpret_cast<Address>(slot));
return heap->mark_compact_collector()->IsSlotInBlackObject(
chunk, reinterpret_cast<Address>(slot));
}
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap,
MemoryChunk* chunk);
template void RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(
Heap* heap, MemoryChunk* chunk);
} // namespace internal
} // namespace v8
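The two per-chunk filters above apply different liveness tests: the untyped filter keeps a slot only if it points into new space and still lies inside a black (marked) object, while the typed filter keeps a slot only if its host code object is black. Below is a toy sketch of those predicates, not V8 code; MarkState and the helper names are hypothetical stand-ins for the mark-bit and new-space queries.

// Illustrative sketch only.
#include <cstdint>
#include <unordered_set>

struct MarkState {
  std::unordered_set<uintptr_t> black_objects;  // objects marked live
  bool is_black(uintptr_t obj) const { return black_objects.count(obj) != 0; }
};

// Untyped slot: keep it only if the target is in new space and the slot
// still lies inside a live (black) object; otherwise the entry is stale.
bool keep_untyped_slot(const MarkState& marks, uintptr_t containing_object,
                       bool target_in_new_space) {
  return target_in_new_space && marks.is_black(containing_object);
}

// Typed slot (code space): keep it only if the host code object survived
// marking; a dead code object takes all of its recorded slots with it.
bool keep_typed_slot(const MarkState& marks, uintptr_t host_object) {
  return marks.is_black(host_object);
}

int main() {
  MarkState marks;
  marks.black_objects = {0x1000};
  bool keep = keep_untyped_slot(marks, 0x1000, /*target_in_new_space=*/true);
  bool drop = !keep_typed_slot(marks, 0x2000);
  return (keep && drop) ? 0 : 1;
}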
@@ -203,6 +203,9 @@ class RememberedSet {
// must be called before sweeping when mark bits are still intact.
static void ClearInvalidSlots(Heap* heap);
static void ClearInvalidSlots(Heap* heap, MemoryChunk* chunk);
static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
static void VerifyValidSlots(Heap* heap);
private:
......
@@ -160,7 +160,7 @@ class SlotSet : public Malloced {
if (current_bucket[i].Value()) {
uint32_t cell = current_bucket[i].Value();
uint32_t old_cell = cell;
uint32_t new_cell = cell;
uint32_t mask = 0;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros32(cell);
uint32_t bit_mask = 1u << bit_offset;
@@ -168,10 +168,11 @@ class SlotSet : public Malloced {
if (callback(page_start_ + slot) == KEEP_SLOT) {
++in_bucket_count;
} else {
new_cell ^= bit_mask;
mask |= bit_mask;
}
cell ^= bit_mask;
}
uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
// If TrySetValue fails, the cell must have changed. We just
@@ -180,7 +181,7 @@ class SlotSet : public Malloced {
// method will only be called on the main thread and filtering
// threads will only remove slots.
old_cell = current_bucket[i].Value();
new_cell &= old_cell;
new_cell = old_cell & ~mask;
}
}
}
......
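The slot-set change above adjusts how removed bits are applied: instead of toggling them out of a private copy of the cell, the loop accumulates a removal mask and computes the new cell as old_cell & ~mask, so the retry after a failed TrySetValue just re-reads the cell and re-applies the same mask. This keeps removal correct when other threads clear bits in the same cell concurrently. Below is a minimal standalone sketch of that compare-and-swap retry pattern, using std::atomic<uint32_t> in place of V8's atomic cell type; the names are illustrative, not V8's.

// Illustrative sketch only: clear every bit in `mask` from `cell`, tolerating
// concurrent clears by other threads. As in the diff, the assumption is that
// concurrent filtering threads only ever remove bits, never set them.
#include <atomic>
#include <cstdint>
#include <cstdio>

void ClearBits(std::atomic<uint32_t>* cell, uint32_t mask) {
  uint32_t old_value = cell->load(std::memory_order_relaxed);
  uint32_t new_value = old_value & ~mask;
  while (!cell->compare_exchange_weak(old_value, new_value,
                                      std::memory_order_relaxed)) {
    // compare_exchange_weak reloads old_value on failure; recompute the
    // target from the fresh value so bits cleared by others stay cleared.
    new_value = old_value & ~mask;
  }
}

int main() {
  std::atomic<uint32_t> cell{0xFFFFFFFFu};
  ClearBits(&cell, 0x0000000Fu);  // drop the four lowest "slots"
  std::printf("%08x\n", static_cast<unsigned>(cell.load()));  // fffffff0
  return 0;
}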