Commit cdc3459a authored by machenbach's avatar machenbach Committed by Commit bot

Revert of [heap] Move slot filtering logic into sweeper. (patchset #4 id:60001...

Revert of [heap] Move slot filtering logic into sweeper. (patchset #4 id:60001 of https://codereview.chromium.org/2418773002/ )

Reason for revert:
[Sheriff] Speculative revert for heap corruption on all platforms, e.g.:
https://build.chromium.org/p/client.v8/builders/V8%20Linux64%20-%20debug/builds/12377
https://build.chromium.org/p/client.v8/builders/V8%20Linux64%20-%20debug/builds/12379
https://build.chromium.org/p/client.v8/builders/V8%20Win32/builds/4819
https://build.chromium.org/p/client.v8/builders/V8%20Win32%20-%20nosnap%20-%20shared/builds/16783
https://build.chromium.org/p/client.v8/builders/V8%20Mac64%20-%20debug/builds/10007

Original issue's description:
> [heap] Move slot filtering logic into sweeper.
>
> BUG=chromium:648568
>
> Committed: https://crrev.com/18db69c38c93450c1ae957999fc48c465f111f00
> Cr-Commit-Position: refs/heads/master@{#40267}

TBR=ulan@chromium.org,mlippautz@chromium.org,hpayer@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:648568

Review-Url: https://codereview.chromium.org/2418053002
Cr-Commit-Position: refs/heads/master@{#40292}
parent 73a40d46
...@@ -3320,10 +3320,9 @@ int MarkCompactCollector::Sweeper::RawSweep( ...@@ -3320,10 +3320,9 @@ int MarkCompactCollector::Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode, Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) { FreeSpaceTreatmentMode free_space_mode) {
Space* space = p->owner(); Space* space = p->owner();
AllocationSpace identity = space->identity();
DCHECK_NOT_NULL(space); DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || identity == OLD_SPACE || DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
identity == CODE_SPACE || identity == MAP_SPACE); space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
// Before we sweep objects on the page, we free dead array buffers which // Before we sweep objects on the page, we free dead array buffers which
...@@ -3352,8 +3351,6 @@ int MarkCompactCollector::Sweeper::RawSweep( ...@@ -3352,8 +3351,6 @@ int MarkCompactCollector::Sweeper::RawSweep(
LiveObjectIterator<kBlackObjects> it(p); LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = NULL; HeapObject* object = NULL;
bool clear_slots =
p->old_to_new_slots() && (identity == OLD_SPACE || identity == MAP_SPACE);
while ((object = it.Next()) != NULL) { while ((object = it.Next()) != NULL) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object))); DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
Address free_end = object->address(); Address free_end = object->address();
...@@ -3371,11 +3368,6 @@ int MarkCompactCollector::Sweeper::RawSweep( ...@@ -3371,11 +3368,6 @@ int MarkCompactCollector::Sweeper::RawSweep(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size), p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo); ClearRecordedSlots::kNo);
} }
if (clear_slots) {
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
}
} }
Map* map = object->synchronized_map(); Map* map = object->synchronized_map();
int size = object->SizeFromMap(map); int size = object->SizeFromMap(map);
...@@ -3391,6 +3383,9 @@ int MarkCompactCollector::Sweeper::RawSweep( ...@@ -3391,6 +3383,9 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start = free_end + size; free_start = free_end + size;
} }
// Clear the mark bits of that page and reset live bytes count.
p->ClearLiveness();
if (free_start != p->area_end()) { if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start); CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start); size_t size = static_cast<size_t>(p->area_end() - free_start);
...@@ -3405,16 +3400,7 @@ int MarkCompactCollector::Sweeper::RawSweep( ...@@ -3405,16 +3400,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size), p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo); ClearRecordedSlots::kNo);
} }
if (clear_slots) {
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
}
} }
// Clear the mark bits of that page and reset live bytes count.
p->ClearLiveness();
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0; if (free_list_mode == IGNORE_FREE_LIST) return 0;
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
...@@ -3830,7 +3816,9 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, ...@@ -3830,7 +3816,9 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
if (identity == NEW_SPACE) { if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode); RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
} else { } else {
if (identity == CODE_SPACE) { if (identity == OLD_SPACE || identity == MAP_SPACE) {
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap_, page);
} else {
RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(heap_, page); RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(heap_, page);
} }
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
......
...@@ -14,6 +14,23 @@ ...@@ -14,6 +14,23 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Removes OLD_TO_NEW slots on |chunk| that no longer lie inside live objects.
// Must run after marking (mark bits intact) and before sweeping, since
// IsValidSlot relies on the page's liveness information. Buckets that become
// empty are kept allocated (KEEP_EMPTY_BUCKETS) so this can run concurrently
// with other slot-set users without freeing memory under them —
// TODO(review): confirm the concurrency assumption against callers.
template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap,
MemoryChunk* chunk) {
// Only the old-to-new remembered set is filtered this way.
STATIC_ASSERT(direction == OLD_TO_NEW);
// Caller (ParallelSweepPage) only invokes this for these two spaces.
DCHECK(chunk->owner()->identity() == OLD_SPACE ||
chunk->owner()->identity() == MAP_SPACE);
SlotSet* slots = GetSlotSet(chunk);
// A page with no recorded old-to-new slots has nothing to clear.
if (slots != nullptr) {
slots->Iterate(
[heap, chunk](Address addr) {
// Keep the slot only if it still points into a live object.
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
}
}
template <PointerDirection direction> template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidTypedSlots(Heap* heap, void RememberedSet<direction>::ClearInvalidTypedSlots(Heap* heap,
MemoryChunk* chunk) { MemoryChunk* chunk) {
...@@ -43,6 +60,8 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk, ...@@ -43,6 +60,8 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
chunk, reinterpret_cast<Address>(slot)); chunk, reinterpret_cast<Address>(slot));
} }
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap,
MemoryChunk* chunk);
template void RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots( template void RememberedSet<OLD_TO_NEW>::ClearInvalidTypedSlots(
Heap* heap, MemoryChunk* chunk); Heap* heap, MemoryChunk* chunk);
......
...@@ -202,8 +202,13 @@ class RememberedSet { ...@@ -202,8 +202,13 @@ class RememberedSet {
// slots that are not part of live objects anymore. This method must be // slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and // called after marking, when the whole transitive closure is known and
// must be called before sweeping when mark bits are still intact. // must be called before sweeping when mark bits are still intact.
static void ClearInvalidSlots(Heap* heap);
static void ClearInvalidSlots(Heap* heap, MemoryChunk* chunk);
static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk); static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
static void VerifyValidSlots(Heap* heap);
private: private:
static SlotSet* GetSlotSet(MemoryChunk* chunk) { static SlotSet* GetSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) { if (direction == OLD_TO_OLD) {
......
...@@ -80,6 +80,15 @@ class SlotSet : public Malloced { ...@@ -80,6 +80,15 @@ class SlotSet : public Malloced {
} }
} }
// Detaches the bucket at |bucket_index| and queues it for deferred freeing
// instead of deleting it immediately. Concurrent readers may still hold the
// old pointer, so actual deallocation is postponed (the queued buckets are
// released later via to_be_freed_buckets_). The pointer check happens before
// taking the lock; nulling the slot happens while the mutex is held.
void PreFreeEmptyBucket(int bucket_index) {
base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
if (bucket_ptr != nullptr) {
// Guard the deferred-free queue; multiple sweeper tasks may race here.
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
to_be_freed_buckets_.push(bucket_ptr);
bucket[bucket_index].SetValue(nullptr);
}
}
// The slot offsets specify a range of slots at addresses: // The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset). // [page_start_ + start_offset ... page_start_ + end_offset).
void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) { void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
...@@ -99,10 +108,12 @@ class SlotSet : public Malloced { ...@@ -99,10 +108,12 @@ class SlotSet : public Malloced {
int current_cell = start_cell; int current_cell = start_cell;
ClearCell(current_bucket, current_cell, ~start_mask); ClearCell(current_bucket, current_cell, ~start_mask);
current_cell++; current_cell++;
base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
if (current_bucket < end_bucket) { if (current_bucket < end_bucket) {
if (bucket_ptr != nullptr) { if (bucket[current_bucket].Value() != nullptr) {
ClearBucket(bucket_ptr, current_cell, kCellsPerBucket); while (current_cell < kCellsPerBucket) {
bucket[current_bucket].Value()[current_cell].SetValue(0);
current_cell++;
}
} }
// The rest of the current bucket is cleared. // The rest of the current bucket is cleared.
// Move on to the next bucket. // Move on to the next bucket.
...@@ -116,23 +127,17 @@ class SlotSet : public Malloced { ...@@ -116,23 +127,17 @@ class SlotSet : public Malloced {
PreFreeEmptyBucket(current_bucket); PreFreeEmptyBucket(current_bucket);
} else if (mode == FREE_EMPTY_BUCKETS) { } else if (mode == FREE_EMPTY_BUCKETS) {
ReleaseBucket(current_bucket); ReleaseBucket(current_bucket);
} else {
DCHECK(mode == KEEP_EMPTY_BUCKETS);
bucket_ptr = bucket[current_bucket].Value();
if (bucket_ptr) {
ClearBucket(bucket_ptr, 0, kCellsPerBucket);
}
} }
current_bucket++; current_bucket++;
} }
// All buckets between start_bucket and end_bucket are cleared. // All buckets between start_bucket and end_bucket are cleared.
bucket_ptr = bucket[current_bucket].Value();
DCHECK(current_bucket == end_bucket && current_cell <= end_cell); DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
if (current_bucket == kBuckets || bucket_ptr == nullptr) { if (current_bucket == kBuckets ||
bucket[current_bucket].Value() == nullptr) {
return; return;
} }
while (current_cell < end_cell) { while (current_cell < end_cell) {
bucket_ptr[current_cell].SetValue(0); bucket[current_bucket].Value()[current_cell].SetValue(0);
current_cell++; current_cell++;
} }
// All cells between start_cell and end_cell are cleared. // All cells between start_cell and end_cell are cleared.
...@@ -237,26 +242,6 @@ class SlotSet : public Malloced { ...@@ -237,26 +242,6 @@ class SlotSet : public Malloced {
return result; return result;
} }
void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
int end_cell) {
DCHECK_GE(start_cell, 0);
DCHECK_LE(end_cell, kCellsPerBucket);
int current_cell = start_cell;
while (current_cell < kCellsPerBucket) {
bucket[current_cell].SetValue(0);
current_cell++;
}
}
void PreFreeEmptyBucket(int bucket_index) {
base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
if (bucket_ptr != nullptr) {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
to_be_freed_buckets_.push(bucket_ptr);
bucket[bucket_index].SetValue(nullptr);
}
}
void ReleaseBucket(int bucket_index) { void ReleaseBucket(int bucket_index) {
DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value()); DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
bucket[bucket_index].SetValue(nullptr); bucket[bucket_index].SetValue(nullptr);
......
...@@ -101,12 +101,10 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) { ...@@ -101,12 +101,10 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
set.SetPageStart(0); set.SetPageStart(0);
uint32_t first = start == 0 ? 0 : start - kPointerSize; uint32_t first = start == 0 ? 0 : start - kPointerSize;
uint32_t last = end == Page::kPageSize ? end - kPointerSize : end; uint32_t last = end == Page::kPageSize ? end - kPointerSize : end;
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
for (uint32_t i = first; i <= last; i += kPointerSize) { for (uint32_t i = first; i <= last; i += kPointerSize) {
set.Insert(i); set.Insert(i);
} }
set.RemoveRange(start, end, mode); set.RemoveRange(start, end, SlotSet::FREE_EMPTY_BUCKETS);
if (first != start) { if (first != start) {
EXPECT_TRUE(set.Lookup(first)); EXPECT_TRUE(set.Lookup(first));
} }
...@@ -116,7 +114,6 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) { ...@@ -116,7 +114,6 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
for (uint32_t i = start; i < end; i += kPointerSize) { for (uint32_t i = start; i < end; i += kPointerSize) {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set.Lookup(i));
} }
}
} }
TEST(SlotSet, RemoveRange) { TEST(SlotSet, RemoveRange) {
...@@ -137,14 +134,11 @@ TEST(SlotSet, RemoveRange) { ...@@ -137,14 +134,11 @@ TEST(SlotSet, RemoveRange) {
} }
SlotSet set; SlotSet set;
set.SetPageStart(0); set.SetPageStart(0);
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
set.Insert(Page::kPageSize / 2); set.Insert(Page::kPageSize / 2);
set.RemoveRange(0, Page::kPageSize, mode); set.RemoveRange(0, Page::kPageSize, SlotSet::FREE_EMPTY_BUCKETS);
for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) { for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set.Lookup(i));
} }
}
} }
TEST(TypedSlotSet, Iterate) { TEST(TypedSlotSet, Iterate) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment