Commit 017d128b authored by ulan's avatar ulan Committed by Commit bot

Filter invalid slots after array trimming.

If sweeping is in progress, then we need to filter out slots in the freed space after
array trimming, because the sweeper will add that freed space to the free list.

This CL also fixes a bug in SlotSet::RemoveRange.

BUG=chromium:587004
LOG=NO
TBR=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1701963003

Cr-Commit-Position: refs/heads/master@{#34071}
parent 9eb49295
......@@ -3141,6 +3141,11 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Maintain consistency of live bytes during incremental marking
Marking::TransferMark(this, object->address(), new_start);
if (mark_compact_collector()->sweeping_in_progress()) {
// Array trimming during sweeping can add invalid slots in free list.
ClearRecordedSlotRange(object, former_start,
HeapObject::RawField(new_object, 0));
}
AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
// Notify the heap profiler of change in object layout.
......@@ -3190,7 +3195,8 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
}
// Calculate location of new array end.
Address new_end = object->address() + object->Size() - bytes_to_trim;
Address old_end = object->address() + object->Size();
Address new_end = old_end - bytes_to_trim;
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
......@@ -3200,6 +3206,11 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// of the object changed significantly.
if (!lo_space()->Contains(object)) {
CreateFillerObjectAt(new_end, bytes_to_trim);
if (mark_compact_collector()->sweeping_in_progress()) {
// Array trimming during sweeping can add invalid slots in free list.
ClearRecordedSlotRange(object, reinterpret_cast<Object**>(new_end),
reinterpret_cast<Object**>(old_end));
}
}
// Initialize header of the trimmed array. We are storing the new length
......@@ -5530,6 +5541,18 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
}
}
// Drops every recorded old-to-new slot in the half-open range [start, end)
// belonging to |object|. Called when part of an object is released (e.g.
// after array trimming) so that stale slots do not linger in the
// remembered set.
void Heap::ClearRecordedSlotRange(HeapObject* object, Object** start,
                                  Object** end) {
  // New-space objects have no entries in the old-to-new remembered set.
  if (InNewSpace(object)) return;
  // Flush pending store-buffer entries first so the removal below sees
  // every recorded slot.
  store_buffer()->MoveEntriesToRememberedSet();
  Address range_start = reinterpret_cast<Address>(start);
  Address range_end = reinterpret_cast<Address>(end);
  Page* page = Page::FromAddress(range_start);
  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
  RememberedSet<OLD_TO_NEW>::RemoveRange(page, range_start, range_end);
}
Space* AllSpaces::next() {
switch (counter_++) {
case NEW_SPACE:
......
......@@ -1059,6 +1059,7 @@ class Heap {
}
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end);
// ===========================================================================
// Incremental marking API. ==================================================
......
......@@ -41,6 +41,20 @@ class RememberedSet {
}
}
// Removes from the remembered set all slots of |page| that fall into the
// half-open address range [start, end).
static void RemoveRange(Page* page, Address start, Address end) {
  SlotSet* slots = GetSlotSet(page);
  if (slots == nullptr) return;
  // Translate absolute addresses into offsets relative to the page start,
  // which is the coordinate system SlotSet works in.
  const uintptr_t from = start - page->address();
  const uintptr_t to = end - page->address();
  DCHECK_LT(from, to);
  DCHECK_LE(to, static_cast<uintptr_t>(Page::kPageSize));
  slots->RemoveRange(static_cast<uint32_t>(from), static_cast<uint32_t>(to));
}
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotSet::CallbackResult.
template <typename Callback>
......
......@@ -74,28 +74,40 @@ class SlotSet : public Malloced {
MaskCell(start_bucket, start_cell, start_mask | end_mask);
return;
}
MaskCell(start_bucket, start_cell, start_mask);
start_cell++;
if (bucket[start_bucket] != nullptr && start_bucket < end_bucket) {
while (start_cell < kCellsPerBucket) {
bucket[start_bucket][start_cell] = 0;
start_cell++;
int current_bucket = start_bucket;
int current_cell = start_cell;
MaskCell(current_bucket, current_cell, start_mask);
current_cell++;
if (current_bucket < end_bucket) {
if (bucket[current_bucket] != nullptr) {
while (current_cell < kCellsPerBucket) {
bucket[current_bucket][current_cell] = 0;
current_cell++;
}
}
// The rest of the current bucket is cleared.
// Move on to the next bucket.
current_bucket++;
current_cell = 0;
}
while (start_bucket < end_bucket) {
delete[] bucket[start_bucket];
bucket[start_bucket] = nullptr;
start_bucket++;
DCHECK(current_bucket == end_bucket ||
(current_bucket < end_bucket && current_cell == 0));
while (current_bucket < end_bucket) {
ReleaseBucket(current_bucket);
current_bucket++;
}
if (start_bucket < kBuckets && bucket[start_bucket] != nullptr) {
while (start_cell < end_cell) {
bucket[start_bucket][start_cell] = 0;
start_cell++;
}
// All buckets between start_bucket and end_bucket are cleared.
DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) {
return;
}
if (end_bucket < kBuckets) {
MaskCell(end_bucket, end_cell, end_mask);
while (current_cell < end_cell) {
bucket[current_bucket][current_cell] = 0;
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
DCHECK(current_bucket == end_bucket && current_cell == end_cell);
MaskCell(end_bucket, end_cell, end_mask);
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
......
......@@ -28,9 +28,9 @@
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress587004) \
V(WriteBarriersInCopyJSObject)
#define HEAP_TEST(Name) \
CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \
#Name, NULL, true, true); \
......
......@@ -6492,6 +6492,43 @@ HEAP_TEST(TestMemoryReducerSampleJsCalls) {
CheckDoubleEquals(2, calls_per_ms);
}
// Regression test for chromium:587004: trimming an array while sweeping is
// in progress must clear the recorded slots in the trimmed-off tail, since
// the sweeper will hand that memory to the free list (see commit message).
HEAP_TEST(Regress587004) {
  // Run sweeping on the main thread only; presumably this keeps the
  // interleaving with the trim below deterministic — TODO confirm.
  FLAG_concurrent_sweeping = false;
#ifdef VERIFY_HEAP
  FLAG_verify_heap = false;
#endif
  CcTest::InitializeVM();
  v8::HandleScope scope(CcTest::isolate());
  Heap* heap = CcTest::heap();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  // Largest array of pointers that still fits a regular old-space page.
  const int N = (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
                kPointerSize;
  Handle<FixedArray> array = factory->NewFixedArray(N, TENURED);
  CHECK(heap->old_space()->Contains(*array));
  // Fill the array with a new-space object so every element is a recorded
  // old-to-new slot.
  Handle<Object> number = factory->NewHeapNumber(1.0);
  CHECK(heap->InNewSpace(*number));
  for (int i = 0; i < N; i++) {
    array->set(i, *number);
  }
  // Trim the array down to one element while sweeping from the full GC is
  // still pending, then let sweeping finish over the trimmed region.
  heap->CollectGarbage(OLD_SPACE);
  SimulateFullSpace(heap->old_space());
  heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, N - 1);
  heap->mark_compact_collector()->EnsureSweepingCompleted();
  ByteArray* byte_array;
  const int M = 256;
  // Don't allow old space expansion. The test works without this flag too,
  // but becomes very slow.
  heap->set_force_oom(true);
  // Reuse the freed tail with byte arrays full of non-pointer payload; any
  // stale recorded slot would now point into raw byte data.
  while (heap->AllocateByteArray(M, TENURED).To(&byte_array)) {
    for (int j = 0; j < M; j++) {
      byte_array->set(j, 0x31);
    }
  }
  // Re-enable old space expansion to avoid OOM crash.
  heap->set_force_oom(false);
  // A scavenge visits recorded old-to-new slots; leftover invalid slots in
  // the reused region would be interpreted as pointers here.
  heap->CollectGarbage(NEW_SPACE);
}
} // namespace internal
} // namespace v8
......@@ -94,36 +94,49 @@ TEST(SlotSet, Remove) {
}
}
TEST(SlotSet, RemoveRange) {
void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
uint32_t first = start == 0 ? 0 : start - kPointerSize;
uint32_t last = end == Page::kPageSize ? end - kPointerSize : end;
for (uint32_t i = first; i <= last; i += kPointerSize) {
set.Insert(i);
}
set.RemoveRange(0, Page::kPageSize);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
EXPECT_FALSE(set.Lookup(i));
set.RemoveRange(start, end);
if (first != start) {
EXPECT_TRUE(set.Lookup(first));
}
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
set.Insert(i);
if (last == end) {
EXPECT_TRUE(set.Lookup(last));
}
for (uint32_t i = start; i < end; i += kPointerSize) {
EXPECT_FALSE(set.Lookup(i));
}
}
set.RemoveRange(10 * kPointerSize, 10 * kPointerSize);
EXPECT_TRUE(set.Lookup(9 * kPointerSize));
EXPECT_TRUE(set.Lookup(10 * kPointerSize));
EXPECT_TRUE(set.Lookup(11 * kPointerSize));
set.RemoveRange(10 * kPointerSize, 1000 * kPointerSize);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (10 * kPointerSize <= i && i < 1000 * kPointerSize) {
EXPECT_FALSE(set.Lookup(i));
} else {
EXPECT_TRUE(set.Lookup(i));
// Exercises SlotSet::RemoveRange on a grid of [start, end) ranges chosen to
// straddle cell and bucket boundaries, plus a whole-page removal at the end.
// Each case is verified by CheckRemoveRangeOn.
TEST(SlotSet, RemoveRange) {
  // Full-page and large interior ranges.
  CheckRemoveRangeOn(0, Page::kPageSize);
  CheckRemoveRangeOn(1 * kPointerSize, 1023 * kPointerSize);
  for (uint32_t start = 0; start <= 32; start++) {
    // Minimal one- and two-slot ranges near the beginning of the page.
    CheckRemoveRangeOn(start * kPointerSize, (start + 1) * kPointerSize);
    CheckRemoveRangeOn(start * kPointerSize, (start + 2) * kPointerSize);
    // End points around powers of two and a few odd values, shifted by
    // -3..+3 to hit off-by-one boundaries on both sides.
    const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
    // Use size_t for the index: comparing a signed int against the
    // unsigned result of sizeof triggers a sign-compare warning.
    for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
      for (int k = -3; k <= 3; k++) {
        uint32_t end = (kEnds[i] + k);
        if (start < end) {
          CheckRemoveRangeOn(start * kPointerSize, end * kPointerSize);
        }
      }
    }
  }
  // Removing the whole page must clear a slot recorded anywhere inside it.
  SlotSet set;
  set.SetPageStart(0);
  set.Insert(Page::kPageSize / 2);
  set.RemoveRange(0, Page::kPageSize);
  for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
    EXPECT_FALSE(set.Lookup(i));
  }
}
} // namespace internal
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment