Commit 719d23c0 authored by Ulan Degenbaev, committed by Commit Bot

Fix invalidation of old-to-old slots after object trimming.

A recorded old-to-old slot may be overwritten with a pointer to a new
space object. If the object containing the slot is trimmed later on,
then the mark-compactor may crash on a stale pointer to new space.
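
For illustration, here is a minimal standalone sketch of that failing sequence; a std::set of raw addresses stands in for V8's remembered set, and every name in it is hypothetical rather than V8 API:

```cpp
#include <cstdint>
#include <iostream>
#include <set>

int main() {
  uintptr_t old_object[4] = {};     // "old-space" array with 4 tagged slots
  std::set<uintptr_t*> old_to_old;  // stand-in for the old-to-old slot set

  // 1. A slot is recorded while it holds an old-to-old pointer.
  old_to_old.insert(&old_object[3]);

  // 2. The slot is overwritten with a "new-space" pointer; the recorded
  //    old-to-old entry is not removed.
  uintptr_t new_space_value = 42;
  old_object[3] = reinterpret_cast<uintptr_t>(&new_space_value);

  // 3. The array is right-trimmed to 2 slots: slot 3 is now freed memory,
  //    yet the recorded entry still points into it. A mark-compactor
  //    walking the remembered set would chase that stale new-space
  //    pointer unless the slot is filtered out.
  constexpr int kNewLength = 2;
  for (uintptr_t* slot : old_to_old) {
    bool in_live_object = slot < old_object + kNewLength;
    std::cout << "recorded slot in live object: " << in_live_object << "\n";
  }
  return 0;
}
```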

This patch ensures that:
1) On trimming of an object we add it to the invalidated_slots set.
2) InvalidatedSlotsFilter::IsValid returns false for slots outside
   the invalidated object unless the page was already swept (a
   simplified model of this check is sketched below).
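
For reference, a simplified standalone model of check (2), assuming integer addresses and a single invalidated range; FilterModel is hypothetical, and the real InvalidatedSlotsFilter iterates the per-chunk set of invalidated ranges:

```cpp
#include <cstdint>
#include <iostream>

struct FilterModel {
  uintptr_t invalidated_start;
  uintptr_t invalidated_end;
  int invalidated_object_size;         // live size of the trimmed object
  bool slots_in_free_space_are_valid;  // chunk->SweepingDone() in the patch

  bool IsValid(uintptr_t slot) const {
    if (slot < invalidated_start || slot >= invalidated_end) return true;
    int offset = static_cast<int>(slot - invalidated_start);
    // Slots past the live part of the invalidated object lie in freed
    // memory; they become trustworthy only after the sweeper has run,
    // since the freed memory can then hold newly allocated objects.
    if (offset >= invalidated_object_size)
      return slots_in_free_space_are_valid;
    return false;  // stand-in for invalidated_object_->IsValidSlot(...)
  }
};

int main() {
  FilterModel unswept{0x1000, 0x1100, 0x40, /*SweepingDone=*/false};
  FilterModel swept{0x1000, 0x1100, 0x40, /*SweepingDone=*/true};
  std::cout << unswept.IsValid(0x1080) << "\n";  // 0: slot filtered out
  std::cout << swept.IsValid(0x1080) << "\n";    // 1: slot kept
  return 0;
}
```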

Array left-trimming is handled as a special case because the object
start moves and thus cannot be added to the invalidated set. Instead,
we clear the freed memory so that the recorded slots contain Smi
values (see the byte-level sketch below).
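
The Smi trick works at the byte level: memset repeats a single byte, so the filled words carry the Smi tag exactly when that byte keeps the low bit clear (V8 tags Smis with a zero low bit, which is what the patch's HAS_SMI_TAG DCHECK verifies). A standalone sketch, assuming kClearedFreeMemoryValue is zero as a stand-in for V8's actual constant:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Assumed value; the patch only requires that this byte, repeated
// across a word, yields a Smi-tagged pattern (low bit clear).
constexpr int kClearedFreeMemoryValue = 0;

// Stand-in for V8's HAS_SMI_TAG: Smis have tag bit 0 equal to zero.
bool HasSmiTag(uintptr_t word) { return (word & 1) == 0; }

int main() {
  uintptr_t freed[4];
  // Clear the freed memory the way the patch does for left-trimmed arrays.
  std::memset(freed, kClearedFreeMemoryValue, sizeof(freed));
  // Any stale recorded slot now reads back a Smi, which the collector
  // skips instead of dereferencing as a heap object.
  for (uintptr_t word : freed) {
    std::cout << "word has Smi tag: " << HasSmiTag(word) << "\n";
  }
  return 0;
}
```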

Bug: chromium:870226,chromium:816426
Change-Id: Iffc05a58fcf52ece45fdb085b5d1fd4b3acb5d53
Reviewed-on: https://chromium-review.googlesource.com/1163784
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54953}
parent bbe8db59
......@@ -2882,6 +2882,17 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
} // namespace
#endif // ENABLE_SLOW_DCHECKS
namespace {
bool MayContainRecordedSlots(HeapObject* object) {
// New space objects do not have recorded slots.
if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
// Whitelist objects that definitely do not have pointers.
if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
return true;
}
} // namespace
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
CHECK_NOT_NULL(object);
......@@ -2918,7 +2929,17 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Technically in new space this write might be omitted (except for
// debug mode, which iterates through the heap), but to play it safe
// we still do it.
CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
// For arrays that can have recorded slots we clear the freed memory to
// avoid invalid old-to-old remembered set slots. Note that we cannot
// register the object as invalidated because the object start can move
// if the array is left-trimmed again later on.
DCHECK(HAS_SMI_TAG(static_cast<intptr_t>(kClearedFreeMemoryValue)));
ClearFreedMemoryMode clear_memory_mode =
MayContainRecordedSlots(object)
? ClearFreedMemoryMode::kClearFreedMemory
: ClearFreedMemoryMode::kDontClearFreedMemory;
CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes,
clear_memory_mode);
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
......@@ -2999,9 +3020,20 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
}
// Calculate location of new array end.
Address old_end = object->address() + object->Size();
int old_size = object->Size();
Address old_end = object->address() + old_size;
Address new_end = old_end - bytes_to_trim;
// For arrays that can have recorded slots we clear the freed memory to
// avoid invalid old-to-old remembered set slots. Note that we cannot
// register the object as invalidated because the object start can move
// if the array is left-trimmed again later on.
ClearFreedMemoryMode clear_memory_mode =
MayContainRecordedSlots(object)
? ClearFreedMemoryMode::kClearFreedMemory
: ClearFreedMemoryMode::kDontClearFreedMemory;
DCHECK(HAS_SMI_TAG(static_cast<intptr_t>(kClearedFreeMemoryValue)));
// Technically in new space this write might be omitted (except for
// debug mode, which iterates through the heap), but to play it safe
// we still do it.
......@@ -3009,8 +3041,8 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
if (!lo_space()->Contains(object)) {
HeapObject* filler =
CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
HeapObject* filler = CreateFillerObjectAt(
new_end, bytes_to_trim, ClearRecordedSlots::kYes, clear_memory_mode);
DCHECK_NOT_NULL(filler);
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
......@@ -3021,6 +3053,11 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
page->AddressToMarkbitIndex(new_end),
page->AddressToMarkbitIndex(new_end + bytes_to_trim));
}
} else if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
// Even though we do not create a filler for large object space arrays,
// we still have to clear the freed memory if old-to-old slots may have
// been recorded.
memset(reinterpret_cast<void*>(new_end), kClearedFreeMemoryValue,
bytes_to_trim);
}
// Initialize header of the trimmed array. We are storing the new length
......@@ -3290,20 +3327,12 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
// In large object space, we only allow changing from non-pointers to
// pointers, so that there are no slots in the invalidated slots buffer.
// TODO(ulan) Make the IsString assertion stricter, we only want to allow
// strings that cannot have slots in them (seq-strings and such).
DCHECK(InOldSpace(object) || InNewSpace(object) ||
(lo_space()->Contains(object) &&
(object->IsString() || object->IsByteArray())));
if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
// The concurrent marker might have recorded slots for the object.
// Register this object as invalidated to filter out the slots.
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
chunk->RegisterObjectWithInvalidatedSlots(object, size);
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
object, size);
}
}
#ifdef VERIFY_HEAP
......
......@@ -56,10 +56,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
static_cast<int>(invalidated_end_ - invalidated_start_));
if (offset >= invalidated_object_size_) {
// A new object could have been allocated during evacuation in the free
// space outside the object. Since objects are not invalidated in GC pause
// we can return true here.
return true;
return slots_in_free_space_are_valid_;
}
return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
}
......
......@@ -9,8 +9,12 @@ namespace v8 {
namespace internal {
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
chunk->owner()->identity() == OLD_SPACE);
// Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr, chunk->InOldSpace());
// The sweeper removes invalid slots and makes free space available for
// allocation. Slots for new objects can then be recorded in that free
// space.
slots_in_free_space_are_valid_ = chunk->SweepingDone();
InvalidatedSlots* invalidated_slots =
chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
......
......@@ -42,6 +42,7 @@ class InvalidatedSlotsFilter {
Address invalidated_end_;
HeapObject* invalidated_object_;
int invalidated_object_size_;
bool slots_in_free_space_are_valid_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
......
......@@ -2195,6 +2195,8 @@ static inline SlotCallbackResult UpdateSlot(
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
DCHECK(heap_obj->map()->IsMap());
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
......
......@@ -768,6 +768,14 @@ bool MemoryChunk::IsPagedSpace() const {
return owner()->identity() != LO_SPACE;
}
bool MemoryChunk::InOldSpace() const {
return owner()->identity() == OLD_SPACE;
}
bool MemoryChunk::InLargeObjectSpace() const {
return owner()->identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
......
......@@ -626,6 +626,10 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
bool InOldSpace() const;
bool InLargeObjectSpace() const;
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
......
......@@ -21,6 +21,10 @@
V(InvalidatedSlotsEvacuationCandidate) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsResetObjectRegression) \
V(InvalidatedSlotsRightTrimFixedArray) \
V(InvalidatedSlotsRightTrimLargeFixedArray) \
V(InvalidatedSlotsLeftTrimFixedArray) \
V(InvalidatedSlotsFastToSlow) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
......
......@@ -109,6 +109,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
}
HEAP_TEST(InvalidatedSlotsAfterTrimming) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
......@@ -119,9 +120,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
byte_arrays[i]->Size());
}
// Trim byte arrays and check that the slots outside the byte arrays are
// considered valid. Free space outside invalidated object can be reused
// during evacuation for allocation of the evacuated objects. That can
// add new valid slots to evacuation candidates.
// considered valid only if the old space page has been swept.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray* byte_array = byte_arrays[i];
......@@ -129,7 +128,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Address end = byte_array->address() + byte_array->Size();
heap->RightTrimFixedArray(byte_array, byte_array->length());
for (Address addr = start; addr < end; addr += kPointerSize) {
CHECK(filter.IsValid(addr));
CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
}
}
}
......@@ -184,6 +183,185 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
}
}
Handle<FixedArray> AllocateArrayOnFreshPage(Isolate* isolate,
PagedSpace* old_space, int length) {
AlwaysAllocateScope always_allocate(isolate);
heap::SimulateFullSpace(old_space);
return isolate->factory()->NewFixedArray(length, TENURED);
}
Handle<FixedArray> AllocateArrayOnEvacuationCandidate(Isolate* isolate,
PagedSpace* old_space,
int length) {
Handle<FixedArray> object =
AllocateArrayOnFreshPage(isolate, old_space, length);
heap::ForceEvacuationCandidate(Page::FromHeapObject(*object));
return object;
}
HEAP_TEST(InvalidatedSlotsRightTrimFixedArray) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
PagedSpace* old_space = heap->old_space();
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
heap::SimulateIncrementalMarking(heap);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *evacuated);
}
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *dead);
}
heap->RightTrimFixedArray(*trimmed, trimmed->length() - 1);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsRightTrimLargeFixedArray) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
PagedSpace* old_space = heap->old_space();
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed;
{
AlwaysAllocateScope always_allocate(isolate);
trimmed =
factory->NewFixedArray(kMaxRegularHeapObjectSize / kPointerSize + 100);
DCHECK(MemoryChunk::FromHeapObject(*trimmed)->InLargeObjectSpace());
}
heap::SimulateIncrementalMarking(heap);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *evacuated);
}
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *dead);
}
heap->RightTrimFixedArray(*trimmed, trimmed->length() - 1);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsLeftTrimFixedArray) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
PagedSpace* old_space = heap->old_space();
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
heap::SimulateIncrementalMarking(heap);
for (int i = 0; i + 1 < trimmed->length(); i++) {
trimmed->set(i, *evacuated);
}
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *dead);
}
heap->LeftTrimFixedArray(*trimmed, trimmed->length() - 1);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsFastToSlow) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
PagedSpace* old_space = heap->old_space();
HandleScope scope(isolate);
Handle<String> name = factory->InternalizeUtf8String("TestObject");
Handle<String> prop_name1 = factory->InternalizeUtf8String("prop1");
Handle<String> prop_name2 = factory->InternalizeUtf8String("prop2");
Handle<String> prop_name3 = factory->InternalizeUtf8String("prop3");
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
// Allocate a dummy page to ensure that the JSObject is allocated on
// a fresh page.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<JSObject> obj;
{
AlwaysAllocateScope always_allocate(isolate);
Handle<JSFunction> function = factory->NewFunctionForTest(name);
function->shared()->set_expected_nof_properties(3);
obj = factory->NewJSObject(function, TENURED);
}
// Start incremental marking.
heap::SimulateIncrementalMarking(heap);
// Set properties to point to the evacuation candidate.
JSReceiver::SetProperty(isolate, obj, prop_name1, evacuated,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name2, evacuated,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name3, evacuated,
LanguageMode::kSloppy)
.Check();
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
JSReceiver::SetProperty(isolate, obj, prop_name1, dead,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name2, dead,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name3, dead,
LanguageMode::kSloppy)
.Check();
Handle<Map> map(obj->map(), isolate);
Handle<Map> normalized_map =
Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, "testing");
JSObject::MigrateToMap(obj, normalized_map);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
} // namespace heap
} // namespace internal
} // namespace v8