Commit f584f7cc authored by Dominik Inführ, committed by Commit Bot

Revert "[heap] Introduce old-to-new invalidation set"

This reverts commit 604b0e1e.

Reason for revert: Clusterfuzz found an issue.

Original change's description:
> [heap] Introduce old-to-new invalidation set
> 
> Introduce list of invalidated objects for old-to-new slots. Objects
> are registered as invalidated in NotifyObjectLayoutChange, however
> no slots are filtered right now. Slots are still deleted, so all
> recorded slots are valid.
> 
> Bug: v8:9454
> Change-Id: Ic0ea15283c4075f4051fae6a5b148721265339f7
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1765528
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#63367}

TBR=ulan@chromium.org,dinfuehr@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:9454
Change-Id: Ic898db38f297824aa54744123f85cd75df957159
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1770676
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63390}
parent e68a51f7
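For context, the reverted CL kept one set of invalidated objects per remembered-set type on each MemoryChunk and registered objects in it from NotifyObjectLayoutChange. The sketch below models that bookkeeping with plain addresses; Address, ChunkModel and Register are illustrative stand-ins for V8's HeapObject, MemoryChunk and RegisterObjectWithInvalidatedSlots, not the actual implementation.

#include <cstdint>
#include <map>

using Address = uintptr_t;
// Stand-in for V8's InvalidatedSlots: object start address -> invalidated size.
using InvalidatedSlots = std::map<Address, int>;

enum RememberedSetType { OLD_TO_NEW, OLD_TO_OLD, NUMBER_OF_REMEMBERED_SET_TYPES };

struct ChunkModel {
  // The reverted CL kept one set per remembered-set type; after the revert a
  // single set (used only for old-to-old) remains.
  InvalidatedSlots invalidated[NUMBER_OF_REMEMBERED_SET_TYPES];

  // Record an object whose layout changed, keeping the largest known size
  // (this mirrors the std::max update in the restored code further below).
  void Register(RememberedSetType type, Address object, int size) {
    int& recorded = invalidated[type][object];
    if (size > recorded) recorded = size;
  }
};

In the real code the recorded size lets InvalidatedSlotsFilter recompute the object's bounds and re-validate any recorded slot that falls inside it against the object's current map (see the IsValidSlot call in the filter below).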
......@@ -1108,15 +1108,6 @@ void Heap::GarbageCollectionEpilogue() {
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
// Old-to-new slot sets must be empty after each collection.
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
chunk = chunk->list_node().next())
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
}
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
......@@ -3007,20 +2998,13 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
FixedArrayBase new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
#ifdef DEBUG
if (MayContainRecordedSlots(object)) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
}
#endif
// Handle invalidated old-to-old slots.
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(new_object)) {
// If the array was right-trimmed before, then it is registered in
// the invalidated_slots.
MemoryChunk::FromHeapObject(new_object)
->MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(filler, new_object);
->MoveObjectWithInvalidatedSlots(filler, new_object);
// We have to clear slots in the free space to avoid stale old-to-old slots.
// Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
// we need pointer granularity writes to avoid race with the concurrent
......@@ -3099,13 +3083,6 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
Address old_end = object.address() + old_size;
Address new_end = old_end - bytes_to_trim;
#ifdef DEBUG
if (MayContainRecordedSlots(object)) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
}
#endif
// Register the array as an object with invalidated old-to-old slots. We
// cannot use NotifyObjectLayoutChange as it would mark the array black,
// which is not safe for left-trimming because left-trimming re-pushes
......@@ -3115,8 +3092,8 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// Ensure that the object survives because the InvalidatedSlotsFilter will
// compute its size from its map during pointers updating phase.
incremental_marking()->WhiteToGreyAndPush(object);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
chunk->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, old_size);
MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
object, old_size);
}
// Technically in new space this write might be omitted (except for
......@@ -3408,14 +3385,10 @@ void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
object, size);
}
}
if (MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
DCHECK(pending_layout_change_object_.is_null());
......@@ -5564,7 +5537,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
page->RegisteredObjectWithInvalidatedSlots(object));
}
#endif
......
......@@ -62,53 +62,6 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
#ifdef DEBUG
DCHECK_LT(free_start, free_end);
// Free regions should come in increasing order and must not overlap
DCHECK_LE(last_free_, free_start);
last_free_ = free_start;
#endif
if (iterator_ == iterator_end_) return;
// Ignore invalidated objects before free region
while (free_start >= invalidated_end_) {
++iterator_;
NextInvalidatedObject();
}
// Loop here: Free region might contain multiple invalidated objects
while (free_end > invalidated_start_) {
// Case: Free region starts before current invalidated object
if (free_start <= invalidated_start_) {
CHECK(invalidated_end_ <= free_end);
iterator_ = invalidated_slots_->erase(iterator_);
} else {
// Case: Free region starts within current invalidated object
// (Can happen for right-trimmed objects)
iterator_->second =
static_cast<int>(free_start - iterator_->first.address());
CHECK(free_end >= invalidated_end_);
iterator_++;
}
NextInvalidatedObject();
}
}
void InvalidatedSlotsCleanup::NextInvalidatedObject() {
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
}
} // namespace internal
} // namespace v8
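The Free() helper removed above shrinks or drops invalidated entries that fall into freed memory. A stateless, self-contained sketch of the same idea follows; plain addresses replace HeapObject, FreeRegion is a hypothetical name, and the real helper keeps an iterator across successive calls instead of searching the map each time.

#include <cstdint>
#include <iterator>
#include <map>

using Address = uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> size

void FreeRegion(InvalidatedSlots* slots, Address free_start, Address free_end) {
  auto it = slots->lower_bound(free_start);
  // An entry starting before free_start may have been right-trimmed into the
  // freed region; shrink it to the surviving prefix (second branch of Free()).
  if (it != slots->begin()) {
    auto pred = std::prev(it);
    if (pred->first + static_cast<Address>(pred->second) > free_start) {
      pred->second = static_cast<int>(free_start - pred->first);
    }
  }
  // Entries starting inside the freed region are assumed to also end inside it
  // (the removed code CHECKs this) and are dropped entirely.
  while (it != slots->end() && it->first < free_end) {
    it = slots->erase(it);
  }
}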
......
......@@ -8,18 +8,9 @@
namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
// Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(invalidated_slots != nullptr,
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
chunk->InOldSpace() || chunk->InLargeObjectSpace());
// The sweeper removes invalid slots and makes free space available for
// allocation. Slots for new objects can be recorded in the free space.
......@@ -27,8 +18,8 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
// object space are not swept but have SweepingDone() == true.
slots_in_free_space_are_valid_ = chunk->SweepingDone() && chunk->InOldSpace();
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
InvalidatedSlots* invalidated_slots =
chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
......@@ -46,37 +37,5 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
#endif
}
InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToOld(MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
}
InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToNew(MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
InvalidatedSlotsCleanup InvalidatedSlotsCleanup::NoCleanup(MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk, nullptr);
}
InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
invalidated_slots_ = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots_->begin();
iterator_end_ = invalidated_slots_->end();
sentinel_ = chunk->area_end();
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
#ifdef DEBUG
last_free_ = chunk->area_start();
#endif
}
} // namespace internal
} // namespace v8
......@@ -30,11 +30,7 @@ using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
// n is the number of IsValid queries.
class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
public:
static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
InvalidatedSlots* invalidated_slots);
explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
inline bool IsValid(Address slot);
private:
......@@ -52,33 +48,6 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
#endif
};
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
public:
static InvalidatedSlotsCleanup OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsCleanup OldToNew(MemoryChunk* chunk);
static InvalidatedSlotsCleanup NoCleanup(MemoryChunk* chunk);
explicit InvalidatedSlotsCleanup(MemoryChunk* chunk,
InvalidatedSlots* invalidated_slots);
inline void Free(Address free_start, Address free_end);
private:
InvalidatedSlots::iterator iterator_;
InvalidatedSlots::iterator iterator_end_;
InvalidatedSlots* invalidated_slots_;
InvalidatedSlots empty_;
Address sentinel_;
Address invalidated_start_;
Address invalidated_end_;
inline void NextInvalidatedObject();
#ifdef DEBUG
Address last_free_;
#endif
};
} // namespace internal
} // namespace v8
......
......@@ -2689,8 +2689,7 @@ void MarkCompactCollector::EvacuateEpilogue() {
for (Page* p : *heap()->old_space()) {
DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
DCHECK_NULL(p->invalidated_slots());
}
#endif
}
......@@ -3411,32 +3410,16 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
[this](MaybeObjectSlot slot) {
return CheckAndUpdateOldToNewSlot(slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots<OLD_TO_NEW>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-new slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
InvalidatedSlotsFilter filter(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter](MaybeObjectSlot slot) {
......@@ -3446,9 +3429,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
chunk_->invalidated_slots() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
for (auto object_size : *chunk_->invalidated_slots()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
......@@ -3456,7 +3439,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
#endif
// The invalidated slots are not needed after old-to-old slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
chunk_->ReleaseInvalidatedSlots();
}
}
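The pattern in UpdateUntypedPointers above is: filter old-to-new slots through the invalidated set, then release the set once those slots have been processed. A standalone sketch of that pattern follows, with a vector of addresses in place of a SlotSet and illustrative names; it treats every slot inside an invalidated object as stale, whereas the real filter re-checks such slots against the object's current map via IsValidSlot.

#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

using Address = uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> size

// True if the slot lies inside a registered invalidated object.
bool InsideInvalidatedObject(const InvalidatedSlots& invalidated, Address slot) {
  auto it = invalidated.upper_bound(slot);
  if (it == invalidated.begin()) return false;
  --it;
  return slot < it->first + static_cast<Address>(it->second);
}

// Drop stale old-to-new slots, then release the invalidated set: it is only
// needed while the corresponding remembered-set slots are being processed.
void ProcessOldToNewSlots(std::vector<Address>* slots,
                          InvalidatedSlots* invalidated) {
  slots->erase(std::remove_if(slots->begin(), slots->end(),
                              [&](Address slot) {
                                return InsideInvalidatedObject(*invalidated, slot);
                              }),
               slots->end());
  invalidated->clear();  // Stand-in for ReleaseInvalidatedSlots<OLD_TO_NEW>().
}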
......@@ -3570,17 +3553,13 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
const bool contains_invalidated_slots =
chunk->invalidated_slots() != nullptr;
if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
!contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots)
!contains_invalidated_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
contains_invalidated_slots) {
job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
pages++;
}
......@@ -4657,11 +4636,9 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, task, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
[this, task](MaybeObjectSlot slot) {
return CheckAndMarkObject(task, slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
......
......@@ -122,7 +122,7 @@ class RememberedSet : public AllStatic {
SlotSet* slots = chunk->slot_set<type>();
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
if (slots != nullptr || typed_slots != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
chunk->invalidated_slots() != nullptr) {
callback(chunk);
}
}
......@@ -256,7 +256,7 @@ class RememberedSet : public AllStatic {
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
chunk->ReleaseInvalidatedSlots();
}
}
......
......@@ -8,7 +8,6 @@
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
......@@ -432,28 +431,12 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(page);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
[this, &filter](MaybeObjectSlot addr) {
if (!filter.IsValid(addr.address())) return REMOVE_SLOT;
[this](MaybeObjectSlot addr) {
return CheckAndScavengeObject(heap_, addr);
},
SlotSet::KEEP_EMPTY_BUCKETS);
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *page->invalidated_slots<OLD_TO_NEW>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-new slots were
// processed.
page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
}
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [=](SlotType type, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
......
......@@ -703,8 +703,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
chunk->invalidated_slots_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->set_concurrent_sweeping_state(kSweepingDone);
......@@ -1380,8 +1379,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots<OLD_TO_NEW>();
ReleaseInvalidatedSlots<OLD_TO_OLD>();
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
......@@ -1463,107 +1461,53 @@ void MemoryChunk::ReleaseTypedSlotSet() {
}
}
template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
template <RememberedSetType type>
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
DCHECK_NULL(invalidated_slots_[type]);
invalidated_slots_[type] = new InvalidatedSlots();
return invalidated_slots_[type];
DCHECK_NULL(invalidated_slots_);
invalidated_slots_ = new InvalidatedSlots();
return invalidated_slots_;
}
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseInvalidatedSlots() {
if (invalidated_slots_[type]) {
delete invalidated_slots_[type];
invalidated_slots_[type] = nullptr;
if (invalidated_slots_) {
delete invalidated_slots_;
invalidated_slots_ = nullptr;
}
}
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
int size);
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
int size);
template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
int size) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
skip_slot_recording = InYoungGeneration();
} else {
skip_slot_recording = ShouldSkipEvacuationSlotRecording();
}
if (skip_slot_recording) {
return;
}
if (invalidated_slots<type>() == nullptr) {
AllocateInvalidatedSlots<type>();
}
InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
if (it != invalidated_slots->end() && it->first == object) {
// object was already inserted
CHECK_LE(size, it->second);
return;
}
it = invalidated_slots->insert(it, std::make_pair(object, size));
// prevent overlapping invalidated objects for old-to-new.
if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
HeapObject pred = (--it)->first;
int pred_size = it->second;
DCHECK_LT(pred.address(), object.address());
if (pred.address() + pred_size > object.address()) {
it->second = static_cast<int>(object.address() - pred.address());
if (!ShouldSkipEvacuationSlotRecording()) {
if (invalidated_slots() == nullptr) {
AllocateInvalidatedSlots();
}
int old_size = (*invalidated_slots())[object];
(*invalidated_slots())[object] = std::max(old_size, size);
}
}
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
HeapObject object);
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
HeapObject object);
template <RememberedSetType type>
bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
if (invalidated_slots<type>() == nullptr) {
if (ShouldSkipEvacuationSlotRecording()) {
// Invalidated slots do not matter if we are not recording slots.
return true;
}
if (invalidated_slots() == nullptr) {
return false;
}
return invalidated_slots<type>()->find(object) !=
invalidated_slots<type>()->end();
return invalidated_slots()->find(object) != invalidated_slots()->end();
}
template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
HeapObject old_start, HeapObject new_start);
template <RememberedSetType type>
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start) {
DCHECK_LT(old_start, new_start);
DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
MemoryChunk::FromHeapObject(new_start));
static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
auto it = invalidated_slots<type>()->find(old_start);
if (it != invalidated_slots<type>()->end()) {
if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
auto it = invalidated_slots()->find(old_start);
if (it != invalidated_slots()->end()) {
int old_size = it->second;
int delta = static_cast<int>(new_start.address() - old_start.address());
invalidated_slots<type>()->erase(it);
(*invalidated_slots<type>())[new_start] = old_size - delta;
invalidated_slots()->erase(it);
(*invalidated_slots())[new_start] = old_size - delta;
}
}
}
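The reverted registration code above also kept old-to-new invalidated ranges non-overlapping by trimming the predecessor entry after an insertion (overlap can arise after left-trimming). A simplified standalone version of that insertion step, with illustrative names and plain addresses:

#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using Address = uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> size

void RegisterNonOverlapping(InvalidatedSlots* slots, Address object, int size) {
  auto it = slots->lower_bound(object);
  if (it != slots->end() && it->first == object) {
    return;  // Already registered with at least this size.
  }
  it = slots->insert(it, std::make_pair(object, size));
  // If the previous invalidated object reaches into the new one, shrink it so
  // the recorded ranges do not overlap (the old-to-new case in the removed code).
  if (it != slots->begin()) {
    auto pred = std::prev(it);
    Address pred_end = pred->first + static_cast<Address>(pred->second);
    if (pred_end > object) {
      pred->second = static_cast<int>(object - pred->first);
    }
  }
}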
......@@ -3749,17 +3693,9 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
static_cast<size_t>(size_in_bytes), origin))
return true;
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::FreeSpaceMayContainInvalidatedSlots
invalidated_slots_in_free_space =
is_local() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
: Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep,
invalidated_slots_in_free_space);
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
if (RefillLinearAllocationAreaFromFreeList(
......
......@@ -636,8 +636,7 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
+ kSystemPointerSize // InvalidatedSlots* invalidated_slots_
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
......@@ -723,7 +722,7 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
invalidated_slots<type>() != nullptr;
invalidated_slots() != nullptr;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
......@@ -751,23 +750,15 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseTypedSlotSet();
template <RememberedSetType type>
InvalidatedSlots* AllocateInvalidatedSlots();
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
int size);
// Updates invalidated_slots after array left-trimming.
template <RememberedSetType type>
void MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>
InvalidatedSlots* invalidated_slots() {
return invalidated_slots_[type];
}
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void ReleaseLocalTracker();
......@@ -943,7 +934,7 @@ class MemoryChunk : public BasicMemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_;
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
......
......@@ -8,7 +8,6 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
......@@ -251,10 +250,8 @@ void Sweeper::EnsureCompleted() {
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
int Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
......@@ -277,15 +274,6 @@ int Sweeper::RawSweep(
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
// Clean invalidated slots during the final atomic pause. After resuming
// execution this isn't necessary, invalid old-to-new refs were already
// removed by mark compact's update pointers phase.
if (invalidated_slots_in_free_space ==
FreeSpaceMayContainInvalidatedSlots::kYes)
old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
intptr_t live_bytes = 0;
intptr_t freed_bytes = 0;
......@@ -330,8 +318,6 @@ int Sweeper::RawSweep(
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(free_end - p->address())));
}
old_to_new_cleanup.Free(free_start, free_end);
}
Map map = object.synchronized_map();
int size = object.SizeFromMap(map);
......@@ -364,8 +350,6 @@ int Sweeper::RawSweep(
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(p->area_end() - p->address())));
}
old_to_new_cleanup.Free(free_start, p->area_end());
}
// Clear invalid typed slots after collecting all free ranges.
......@@ -415,15 +399,13 @@ bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
int Sweeper::ParallelSweepSpace(
AllocationSpace identity, int required_freed_bytes, int max_pages,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
int required_freed_bytes, int max_pages) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed =
ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
int freed = ParallelSweepPage(page, identity);
if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// Free list of a never-allocate page will be dropped later on.
continue;
......@@ -437,9 +419,7 @@ int Sweeper::ParallelSweepSpace(
return max_freed;
}
int Sweeper::ParallelSweepPage(
Page* page, AllocationSpace identity,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// Early bailout for pages that are swept outside of the regular sweeping
// path. This check here avoids taking the lock first, avoiding deadlocks.
if (page->SweepingDone()) return 0;
......@@ -459,8 +439,7 @@ int Sweeper::ParallelSweepPage(
page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
invalidated_slots_in_free_space);
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
......@@ -616,8 +595,7 @@ void Sweeper::MakeIterable(Page* page) {
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
FreeSpaceMayContainInvalidatedSlots::kNo);
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
}
} // namespace internal
......
......@@ -70,8 +70,12 @@ class Sweeper {
};
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
CLEAR_TYPED_SLOTS
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
enum class FreeSpaceMayContainInvalidatedSlots { kYes, kNo };
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
......@@ -79,21 +83,14 @@ class Sweeper {
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(
AllocationSpace identity, int required_freed_bytes, int max_pages = 0,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
FreeSpaceMayContainInvalidatedSlots::kNo);
int ParallelSweepPage(
Page* page, AllocationSpace identity,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
FreeSpaceMayContainInvalidatedSlots::kNo);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
void ScheduleIncrementalSweepingTask();
int RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space);
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
......
......@@ -19,10 +19,6 @@
V(CompactionSpaceDivideSinglePage) \
V(InvalidatedSlotsAfterTrimming) \
V(InvalidatedSlotsAllInvalidatedRanges) \
V(InvalidatedSlotsCleanupEachObject) \
V(InvalidatedSlotsCleanupFull) \
V(InvalidatedSlotsCleanupRightTrim) \
V(InvalidatedSlotsCleanupOverlapRight) \
V(InvalidatedSlotsEvacuationCandidate) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsResetObjectRegression) \
......
......@@ -44,7 +44,7 @@ Page* HeapTester::AllocateByteArraysOnPage(
CHECK_EQ(page, Page::FromHeapObject(byte_array));
}
}
CHECK_NULL(page->invalidated_slots<OLD_TO_OLD>());
CHECK_NULL(page->invalidated_slots());
return page;
}
......@@ -53,7 +53,7 @@ HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
Heap* heap = CcTest::heap();
std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter(page);
for (ByteArray byte_array : byte_arrays) {
Address start = byte_array.address() + ByteArray::kHeaderSize;
Address end = byte_array.address() + byte_array.Size();
......@@ -70,10 +70,10 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register every second byte array as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i].Size());
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
......@@ -95,10 +95,10 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i].Size());
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
......@@ -117,12 +117,12 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i].Size());
}
// Trim byte arrays and check that the slots outside the byte arrays are
// considered invalid if the old space page was swept.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
......@@ -145,11 +145,11 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
// This should be a no-op because the page is marked as an evacuation
// candidate.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i].Size());
}
// All slots must still be valid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
......@@ -169,11 +169,11 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i].Size());
}
// All slots must still be invalid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
Address start = byte_array.address() + ByteArray::kHeaderSize;
......@@ -351,78 +351,6 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
CcTest::CollectGarbage(i::OLD_SPACE);
}
HEAP_TEST(InvalidatedSlotsCleanupFull) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
byte_arrays[i].Size());
}
// Mark full page as free
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
cleanup.Free(page->area_start(), page->area_end());
// After cleanup there should be no invalidated objects on page left
CHECK(page->invalidated_slots<OLD_TO_NEW>()->empty());
}
HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
byte_arrays[i].Size());
}
// Mark each object as free on page
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
Address free_start = byte_arrays[i].address();
Address free_end = free_start + byte_arrays[i].Size();
cleanup.Free(free_start, free_end);
}
// After cleanup there should be no invalidated objects on page left
CHECK(page->invalidated_slots<OLD_TO_NEW>()->empty());
}
HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
CHECK_GT(byte_arrays.size(), 1);
ByteArray& invalidated = byte_arrays[1];
int invalidated_size = invalidated.Size();
heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
invalidated_size);
// Free memory at end of invalidated object
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
Address free_start = invalidated.address() + invalidated.Size();
cleanup.Free(free_start, page->area_end());
// After cleanup the invalidated object should be smaller
InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
CHECK_EQ((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
invalidated.Size());
CHECK_EQ(invalidated_slots->size(), 1);
}
} // namespace heap
} // namespace internal
} // namespace v8