Commit 51e6ecb9 authored by Ulan Degenbaev, committed by Commit Bot

Reland "Fix invalidation of old-to-old slots after object trimming."

This reverts commit 5b434929.

Changes after the original CL:
- Right-trimming registers the array as an object with invalidated
  slots.
- Left-trimming moves the array start in the invalidated slots map.
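
In effect (a minimal, standalone sketch of the bookkeeping, not the actual
V8 code; the std::map model and free functions below are illustrative only,
the real logic lives in MemoryChunk::RegisterObjectWithInvalidatedSlots and
MemoryChunk::MoveObjectWithInvalidatedSlots in the diff):

  #include <cstdint>
  #include <map>

  using Address = uintptr_t;
  // Model of MemoryChunk::invalidated_slots(): object start -> size in bytes
  // whose recorded old-to-old slots must be re-validated before use.
  using InvalidatedSlots = std::map<Address, int>;

  // Right-trimming keeps the object start, so the pre-trim size is recorded
  // under the unchanged start address.
  void RegisterObjectWithInvalidatedSlots(InvalidatedSlots* slots,
                                          Address object_start, int size) {
    int& registered = (*slots)[object_start];  // value-initialized to 0
    if (registered < size) registered = size;
  }

  // Left-trimming moves the object start forward, so an existing entry is
  // re-keyed to the new start and its size shrinks by the trimmed bytes.
  void MoveObjectWithInvalidatedSlots(InvalidatedSlots* slots,
                                      Address old_start, Address new_start) {
    auto it = slots->find(old_start);
    if (it == slots->end()) return;  // array was never right-trimmed
    int old_size = it->second;
    int delta = static_cast<int>(new_start - old_start);
    slots->erase(it);
    (*slots)[new_start] = old_size - delta;
  }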

Original change's description:
> Fix invalidation of old-to-old slots after object trimming.
>
> A recorded old-to-old slot may be overwritten with a pointer to a new
> space object. If the object containing the slot is trimmed later on,
> then the mark-compactor may crash on a stale pointer to new space.
>
> This patch ensures that:
> 1) On trimming of an object we add it to the invalidated_slots sets.
> 2) The InvalidatedSlotsFilter::IsValid returns false for slots outside
>    the invalidated object unless the page was already swept.
>
> Array left-trimming is handled as a special case because object start
> moves and cannot be added to the invalidated set. Instead, we clear
> the freed memory so that the recorded slots contain Smi values.
>
> Bug: chromium:870226,chromium:816426
> Change-Id: Iffc05a58fcf52ece45fdb085b5d1fd4b3acb5d53
> Reviewed-on: https://chromium-review.googlesource.com/1163784
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54953}
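
Point 2) can be summarized with this simplified, standalone sketch (it mirrors
the slots_in_free_space_are_valid_ logic added to InvalidatedSlotsFilter in the
diff below; the free functions are illustrative only, not V8 API):

  // A slot past the object's current size lies in free space produced by
  // trimming. Such slots are trusted only once the sweeper has processed the
  // page, because sweeping removes stale slots before the free space can be
  // reused for newly allocated objects. Large-object pages report
  // SweepingDone() without ever being swept, so only swept old-space pages
  // qualify.
  bool SlotsInFreeSpaceAreValid(bool sweeping_done, bool in_old_space) {
    return sweeping_done && in_old_space;
  }

  bool IsValidSlotOffset(int offset, int invalidated_object_size,
                         bool valid_inside_object,
                         bool slots_in_free_space_are_valid) {
    if (offset >= invalidated_object_size) return slots_in_free_space_are_valid;
    return valid_inside_object;  // V8 asks the object's map for this part
  }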

Change-Id: I1f1080f680196c581f62aef8d3a00a595f9bb9b0
Reviewed-on: https://chromium-review.googlesource.com/1165555
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55066}
parent 4b92f35b
@@ -2881,8 +2881,23 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
} // namespace
#endif // ENABLE_SLOW_DCHECKS
namespace {
bool MayContainRecordedSlots(HeapObject* object) {
// New space objects do not have recorded slots.
if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
// Whitelist objects that definitely do not have pointers.
if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
return true;
}
} // namespace
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
if (elements_to_trim == 0) {
// This simplifies reasoning in the rest of the function.
return object;
}
CHECK_NOT_NULL(object);
DCHECK(CanMoveObjectStart(object));
// Add custom visitor to concurrent marker if new left-trimmable type
@@ -2917,6 +2932,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
HeapObject* filler =
CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
// Initialize header of the trimmed array. Since left trimming is only
@@ -2934,6 +2950,23 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
ClearRecordedSlot(new_object, HeapObject::RawField(
new_object, FixedArrayBase::kLengthOffset));
// Handle invalidated old-to-old slots.
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(new_object)) {
// If the array was right-trimmed before, then it is registered in
// the invalidated_slots.
MemoryChunk::FromHeapObject(new_object)
->MoveObjectWithInvalidatedSlots(filler, new_object);
// We have to clear slots in the free space to avoid stale old-to-old slots.
// Note that we cannot use the ClearFreedMemoryMode of CreateFillerObjectAt
// because we need pointer-granularity writes to avoid racing with the
// concurrent marker.
if (filler->Size() > FreeSpace::kSize) {
MemsetPointer(HeapObject::RawField(filler, FreeSpace::kSize),
ReadOnlyRoots(this).undefined_value(),
(filler->Size() - FreeSpace::kSize) / kPointerSize);
}
}
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
@@ -2998,9 +3031,23 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
}
// Calculate location of new array end.
- Address old_end = object->address() + object->Size();
+ int old_size = object->Size();
+ Address old_end = object->address() + old_size;
Address new_end = old_end - bytes_to_trim;
// Register the array as an object with invalidated old-to-old slots. We
// cannot use NotifyObjectLayoutChange as it would mark the array black,
// which is not safe for left-trimming because left-trimming re-pushes
// only grey arrays onto the marking worklist.
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(object)) {
// Ensure that the object survives because the InvalidatedSlotsFilter will
// compute its size from its map during the pointers-updating phase.
incremental_marking()->WhiteToGreyAndPush(object);
MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
object, old_size);
}
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
@@ -3289,20 +3336,12 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
- // In large object space, we only allow changing from non-pointers to
- // pointers, so that there are no slots in the invalidated slots buffer.
- // TODO(ulan) Make the IsString assertion stricter, we only want to allow
- // strings that cannot have slots in them (seq-strings and such).
- DCHECK(InOldSpace(object) || InNewSpace(object) ||
- (lo_space()->Contains(object) &&
- (object->IsString() || object->IsByteArray())));
- if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
+ if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
- if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
- // The concurrent marker might have recorded slots for the object.
- // Register this object as invalidated to filter out the slots.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- chunk->RegisterObjectWithInvalidatedSlots(object, size);
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(object)) {
+ MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
+ object, size);
}
}
#ifdef VERIFY_HEAP
...
@@ -130,6 +130,9 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
}
void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
// Marking left-trimmable fixed array black is unsafe because left-trimming
// re-pushes only grey arrays onto the marking worklist.
DCHECK(!obj->IsFixedArrayBase());
// Color the object black and push it into the bailout deque.
marking_state()->WhiteToGrey(obj);
if (marking_state()->GreyToBlack(obj)) {
@@ -197,7 +200,10 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
DCHECK(success);
USE(success);
}
- marking_worklist()->Push(to);
+ // Subsequent left-trimming will re-push only grey arrays.
+ // Ensure that this array is grey.
+ DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
+ marking_worklist()->PushBailout(to);
RestartIfNotMarking();
}
}
...
@@ -47,6 +47,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// Ask the object if the slot is valid.
if (invalidated_object_ == nullptr) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
DCHECK(!invalidated_object_->IsFiller());
invalidated_object_size_ =
invalidated_object_->SizeFromMap(invalidated_object_->map());
}
@@ -56,10 +57,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
static_cast<int>(invalidated_end_ - invalidated_start_));
if (offset >= invalidated_object_size_) {
- // A new object could have been allocated during evacuation in the free
- // space outside the object. Since objects are not invalidated in GC pause
- // we can return true here.
- return true;
+ return slots_in_free_space_are_valid_;
}
return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
}
...
@@ -9,8 +9,15 @@ namespace v8 {
namespace internal {
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
// Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
- chunk->owner()->identity() == OLD_SPACE);
+ chunk->InOldSpace() || chunk->InLargeObjectSpace());
// The sweeper removes invalid slots and makes free space available for
// allocation. Slots for new objects can be recorded in the free space.
// Note that we cannot simply check for SweepingDone because pages in large
// object space are not swept but have SweepingDone() == true.
slots_in_free_space_are_valid_ = chunk->SweepingDone() && chunk->InOldSpace();
InvalidatedSlots* invalidated_slots =
chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
...
@@ -42,6 +42,7 @@ class InvalidatedSlotsFilter {
Address invalidated_end_;
HeapObject* invalidated_object_;
int invalidated_object_size_;
bool slots_in_free_space_are_valid_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
...
@@ -2207,6 +2207,8 @@ static inline SlotCallbackResult UpdateSlot(
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
DCHECK(heap_obj->map()->IsMap());
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
...
@@ -768,6 +768,14 @@ bool MemoryChunk::IsPagedSpace() const {
return owner()->identity() != LO_SPACE;
}
bool MemoryChunk::InOldSpace() const {
return owner()->identity() == OLD_SPACE;
}
bool MemoryChunk::InLargeObjectSpace() const {
return owner()->identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -1399,6 +1407,22 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
HeapObject* new_start) {
DCHECK_LT(old_start, new_start);
DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
MemoryChunk::FromHeapObject(new_start));
if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
auto it = invalidated_slots()->find(old_start);
if (it != invalidated_slots()->end()) {
int old_size = it->second;
int delta = static_cast<int>(new_start->address() - old_start->address());
invalidated_slots()->erase(it);
(*invalidated_slots())[new_start] = old_size - delta;
}
}
}
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
...
@@ -515,6 +515,9 @@ class MemoryChunk {
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
// Updates invalidated_slots after array left-trimming.
void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
HeapObject* new_start);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void ReleaseLocalTracker();
@@ -611,6 +614,10 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
bool InOldSpace() const;
bool InLargeObjectSpace() const;
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
...
@@ -21,6 +21,10 @@
V(InvalidatedSlotsEvacuationCandidate) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsResetObjectRegression) \
V(InvalidatedSlotsRightTrimFixedArray) \
V(InvalidatedSlotsRightTrimLargeFixedArray) \
V(InvalidatedSlotsLeftTrimFixedArray) \
V(InvalidatedSlotsFastToSlow) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
...
@@ -109,6 +109,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
}
HEAP_TEST(InvalidatedSlotsAfterTrimming) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
@@ -119,9 +120,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
byte_arrays[i]->Size());
}
// Trim byte arrays and check that the slots outside the byte arrays are
- // considered valid. Free space outside invalidated object can be reused
- // during evacuation for allocation of the evacuated objects. That can
- // add new valid slots to evacuation candidates.
+ // considered invalid if the old space page was swept.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray* byte_array = byte_arrays[i];
@@ -129,7 +128,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Address end = byte_array->address() + byte_array->Size();
heap->RightTrimFixedArray(byte_array, byte_array->length());
for (Address addr = start; addr < end; addr += kPointerSize) {
- CHECK(filter.IsValid(addr));
+ CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
}
}
}
@@ -184,6 +183,185 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
}
}
Handle<FixedArray> AllocateArrayOnFreshPage(Isolate* isolate,
PagedSpace* old_space, int length) {
AlwaysAllocateScope always_allocate(isolate);
heap::SimulateFullSpace(old_space);
return isolate->factory()->NewFixedArray(length, TENURED);
}
Handle<FixedArray> AllocateArrayOnEvacuationCandidate(Isolate* isolate,
PagedSpace* old_space,
int length) {
Handle<FixedArray> object =
AllocateArrayOnFreshPage(isolate, old_space, length);
heap::ForceEvacuationCandidate(Page::FromHeapObject(*object));
return object;
}
HEAP_TEST(InvalidatedSlotsRightTrimFixedArray) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
PagedSpace* old_space = heap->old_space();
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
heap::SimulateIncrementalMarking(heap);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *evacuated);
}
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *dead);
}
heap->RightTrimFixedArray(*trimmed, trimmed->length() - 1);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsRightTrimLargeFixedArray) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
PagedSpace* old_space = heap->old_space();
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed;
{
AlwaysAllocateScope always_allocate(isolate);
trimmed =
factory->NewFixedArray(kMaxRegularHeapObjectSize / kPointerSize + 100);
DCHECK(MemoryChunk::FromHeapObject(*trimmed)->InLargeObjectSpace());
}
heap::SimulateIncrementalMarking(heap);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *evacuated);
}
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *dead);
}
heap->RightTrimFixedArray(*trimmed, trimmed->length() - 1);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsLeftTrimFixedArray) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
PagedSpace* old_space = heap->old_space();
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
heap::SimulateIncrementalMarking(heap);
for (int i = 0; i + 1 < trimmed->length(); i++) {
trimmed->set(i, *evacuated);
}
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
for (int i = 1; i < trimmed->length(); i++) {
trimmed->set(i, *dead);
}
heap->LeftTrimFixedArray(*trimmed, trimmed->length() - 1);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsFastToSlow) {
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = CcTest::heap();
PagedSpace* old_space = heap->old_space();
HandleScope scope(isolate);
Handle<String> name = factory->InternalizeUtf8String("TestObject");
Handle<String> prop_name1 = factory->InternalizeUtf8String("prop1");
Handle<String> prop_name2 = factory->InternalizeUtf8String("prop2");
Handle<String> prop_name3 = factory->InternalizeUtf8String("prop3");
// Allocate a dummy page to be swept by the sweeper during evacuation.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<FixedArray> evacuated =
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
// Allocate a dummy page to ensure that the JSObject is allocated on
// a fresh page.
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<JSObject> obj;
{
AlwaysAllocateScope always_allocate(isolate);
Handle<JSFunction> function = factory->NewFunctionForTest(name);
function->shared()->set_expected_nof_properties(3);
obj = factory->NewJSObject(function, TENURED);
}
// Start incremental marking.
heap::SimulateIncrementalMarking(heap);
// Set properties to point to the evacuation candidate.
JSReceiver::SetProperty(isolate, obj, prop_name1, evacuated,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name2, evacuated,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name3, evacuated,
LanguageMode::kSloppy)
.Check();
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
JSReceiver::SetProperty(isolate, obj, prop_name1, dead,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name2, dead,
LanguageMode::kSloppy)
.Check();
JSReceiver::SetProperty(isolate, obj, prop_name3, dead,
LanguageMode::kSloppy)
.Check();
Handle<Map> map(obj->map(), isolate);
Handle<Map> normalized_map =
Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, "testing");
JSObject::MigrateToMap(obj, normalized_map);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
}
} // namespace heap
} // namespace internal
} // namespace v8