Commit 0083c093 authored by hpayer, committed by Commit bot

Revert of [heap] Concurrent slot filtering in sweeper threads. (patchset #4 id:60001 of https://codereview.chromium.org/2401563002/ )

Reason for revert:
Crashing.

Original issue's description:
> [heap] Concurrent slot filtering in sweeper threads.
>
> This is an intermediate step for concurrent slot filtering. This CL already makes filtering concurrent, but does not integrate it in the actual sweeping. This will be done in two follow up CLs. One for the regular slot set and one for the typed slot set.
>
> BUG=chromium:648568
>
> Committed: https://crrev.com/1f89d369fc952a2826f9f62901fb84fcf30920d7
> Cr-Commit-Position: refs/heads/master@{#40040}

TBR=ulan@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:648568

Review-Url: https://codereview.chromium.org/2399003002
Cr-Commit-Position: refs/heads/master@{#40043}
parent 8f5d1c1d
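Per the CL description quoted above, the reverted change let each sweeper thread filter the remembered-set slots of the page it is working on, instead of the main thread making one pass over the whole heap (which is what this revert restores). Below is a minimal stand-alone sketch of that difference, using hypothetical Page and SlotPredicate stand-ins rather than V8's actual SlotSet and sweeper classes:

// Illustrative sketch only, not V8's API: hypothetical Page/SlotPredicate
// types showing a whole-heap main-thread filtering pass versus per-page
// filtering that a sweeper thread could run on its own page.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <thread>
#include <vector>

struct Page {
  std::vector<uintptr_t> slots;  // recorded old-to-new slot addresses
};

using SlotPredicate = std::function<bool(uintptr_t)>;  // true = keep the slot

// Restored behaviour: one pass over every page on the main thread.
void ClearInvalidSlotsWholeHeap(std::vector<Page>& pages,
                                const SlotPredicate& is_valid) {
  for (Page& page : pages) {
    auto& s = page.slots;
    s.erase(std::remove_if(s.begin(), s.end(),
                           [&](uintptr_t a) { return !is_valid(a); }),
            s.end());
  }
}

// Shape of the reverted CL: filter a single page, so each sweeper thread can
// process its own page independently of the others.
void ClearInvalidSlotsOnPage(Page& page, const SlotPredicate& is_valid) {
  auto& s = page.slots;
  s.erase(std::remove_if(s.begin(), s.end(),
                         [&](uintptr_t a) { return !is_valid(a); }),
          s.end());
}

int main() {
  std::vector<Page> pages(4);
  SlotPredicate is_valid = [](uintptr_t a) { return (a & 1) == 0; };
  // One worker thread per page, as a sweeper pool might do; each thread
  // touches a distinct page, so there is no shared mutable state.
  std::vector<std::thread> workers;
  for (Page& page : pages) {
    workers.emplace_back(
        [&page, &is_valid] { ClearInvalidSlotsOnPage(page, is_valid); });
  }
  for (auto& t : workers) t.join();
  ClearInvalidSlotsWholeHeap(pages, is_valid);  // restored main-thread path
  return 0;
}

The diff below puts back the single main-thread pass (ClearInvalidRememberedSetSlots / ClearInvalidSlots(Heap*)) and drops the per-page entry points.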
@@ -302,6 +302,24 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
return compacting_;
}
void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
}
// There is no need to filter the old to old set because
// it is completely cleared after the mark-compact GC.
// The slots that become invalid due to runtime transitions are
// cleared eagerly immediately after the transition.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
}
#endif
}
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -2392,6 +2410,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization(dependent_code_list);
ClearWeakCollections();
ClearInvalidRememberedSetSlots();
}
@@ -3804,12 +3824,11 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
} else if (identity == OLD_SPACE) {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else if (identity == CODE_SPACE) {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else {
if (identity == OLD_SPACE || identity == MAP_SPACE) {
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap_, page);
} else {
RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(heap_, page);
}
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
......
@@ -502,6 +502,7 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
void ClearInvalidRememberedSetSlots();
void ComputeEvacuationHeuristics(int area_size,
int* target_fragmentation_percent,
......
@@ -15,55 +15,93 @@ namespace v8 {
namespace internal {
template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap,
MemoryChunk* chunk) {
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_NEW);
DCHECK(chunk->owner()->identity() == OLD_SPACE ||
chunk->owner()->identity() == MAP_SPACE);
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
for (MemoryChunk* chunk : *heap->old_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
for (MemoryChunk* chunk : *heap->code_space()) {
TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](SlotType type, Address host_addr, Address addr) {
if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
},
TypedSlotSet::PREFREE_EMPTY_CHUNKS);
}
}
for (MemoryChunk* chunk : *heap->map_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
// TODO(mlippautz): In map space all allocations would ideally be map
// aligned. After establishing this invariant IsValidSlot could just
// refer to the containing object using alignment and check the mark
// bits.
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
}
template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidTypedSlots(Heap* heap,
MemoryChunk* chunk) {
STATIC_ASSERT(direction == OLD_TO_NEW);
DCHECK(chunk->owner()->identity() == CODE_SPACE);
TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](SlotType type, Address host_addr, Address addr) {
if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
}
void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
Iterate(heap, [heap](Address addr) {
HeapObject* obj =
heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
if (obj == nullptr) {
// The slot is in a dead object.
MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
AllocationSpace owner = chunk->owner()->identity();
// The old to old remembered set should not have dead slots.
CHECK_NE(direction, OLD_TO_OLD);
// The old to new remembered set is allowed to have slots in dead
// objects only in map and large object space because these spaces
// cannot have raw untagged pointers.
CHECK(owner == MAP_SPACE || owner == LO_SPACE);
} else {
int offset = static_cast<int>(addr - obj->address());
CHECK(obj->IsValidSlot(offset));
}
return KEEP_SLOT;
});
}
template <PointerDirection direction>
bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
Object** slot) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object* object = *slot;
if (!heap->InNewSpace(object)) {
return false;
}
HeapObject* heap_object = HeapObject::cast(object);
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
return heap->mark_compact_collector()->IsSlotInBlackObject(
chunk, reinterpret_cast<Address>(slot));
return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
heap->mark_compact_collector()->IsSlotInBlackObject(
chunk, reinterpret_cast<Address>(slot));
}
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap,
MemoryChunk* chunk);
template void RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(
Heap* heap, MemoryChunk* chunk);
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
} // namespace internal
} // namespace v8
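Stripped of the per-space loops, the rule the helpers above apply is small: an old-to-new slot is kept only if it still points into new space and the slot itself lies inside an object the marker found live. A minimal sketch of that decision, with hypothetical predicates standing in for heap->InNewSpace() and IsSlotInBlackObject():

// Minimal sketch, not V8's API: the two stand-in predicates below are
// assumptions; a real heap would consult the space layout and the mark
// bits left behind by the marker.
#include <cstdint>
#include <cstdio>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

static bool PointsIntoNewSpace(uintptr_t target) { return target >= 0x1000; }
static bool SlotIsInsideLiveObject(uintptr_t slot) { return (slot % 8) == 0; }

static SlotCallbackResult FilterOldToNewSlot(uintptr_t slot, uintptr_t target) {
  if (!PointsIntoNewSpace(target)) return REMOVE_SLOT;    // target died or moved
  if (!SlotIsInsideLiveObject(slot)) return REMOVE_SLOT;  // recording object is dead
  return KEEP_SLOT;
}

int main() {
  std::printf("%d\n", FilterOldToNewSlot(0x2000, 0x4000));  // KEEP_SLOT
  std::printf("%d\n", FilterOldToNewSlot(0x2001, 0x0040));  // REMOVE_SLOT
  return 0;
}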
@@ -203,9 +203,6 @@ class RememberedSet {
// must be called before sweeping when mark bits are still intact.
static void ClearInvalidSlots(Heap* heap);
static void ClearInvalidSlots(Heap* heap, MemoryChunk* chunk);
static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
static void VerifyValidSlots(Heap* heap);
private:
......
@@ -160,7 +160,7 @@ class SlotSet : public Malloced {
if (current_bucket[i].Value()) {
uint32_t cell = current_bucket[i].Value();
uint32_t old_cell = cell;
uint32_t mask = 0;
uint32_t new_cell = cell;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros32(cell);
uint32_t bit_mask = 1u << bit_offset;
@@ -168,11 +168,10 @@ class SlotSet : public Malloced {
if (callback(page_start_ + slot) == KEEP_SLOT) {
++in_bucket_count;
} else {
mask |= bit_mask;
new_cell ^= bit_mask;
}
cell ^= bit_mask;
}
uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
// If TrySetValue fails, the cell must have changed. We just
@@ -181,7 +180,7 @@ class SlotSet : public Malloced {
// method will only be called on the main thread and filtering
// threads will only remove slots.
old_cell = current_bucket[i].Value();
new_cell = old_cell & ~mask;
new_cell &= old_cell;
}
}
}
......
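On the SlotSet side, the question behind the reverted CL is how to delete bits from a bucket cell that other threads may be touching at the same time. As far as the diff shows, the CL's variant tracked the removed bits in a separate mask so that every compare-and-swap retry could rebuild new_cell from the freshly re-read old_cell; the restored code instead reuses new_cell, which, per the in-code comment, is only safe while Iterate() runs on the main thread and filtering threads only remove slots. A generic sketch of the mask-based pattern, written against std::atomic rather than V8's base::AtomicValue/TrySetValue (names and values here are illustrative assumptions):

// Sketch under assumptions: clear a set of bits out of a shared 32-bit cell
// with a compare-and-swap retry loop, tolerating concurrent modifications.
#include <atomic>
#include <cstdint>
#include <cstdio>

uint32_t RemoveBits(std::atomic<uint32_t>& cell, uint32_t mask) {
  uint32_t old_cell = cell.load(std::memory_order_relaxed);
  uint32_t new_cell = old_cell & ~mask;
  // compare_exchange_weak reloads old_cell on failure, so new_cell is always
  // derived from the latest observed value of the cell.
  while (!cell.compare_exchange_weak(old_cell, new_cell,
                                     std::memory_order_relaxed)) {
    new_cell = old_cell & ~mask;
  }
  return new_cell;
}

int main() {
  std::atomic<uint32_t> cell{0b1011'0110u};
  uint32_t result = RemoveBits(cell, 0b0010'0010u);  // drop two recorded slots
  std::printf("0x%x\n", result);                     // prints 0x94
  return 0;
}

In the diff above, the same recompute-from-old_cell step is the `new_cell = old_cell & ~mask;` line inside the TrySetValue retry loop.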