Commit 95dab014 authored by Nico Hartmann, committed by V8 LUCI CQ

Revert "[heap] Store size with invalidated object"

This reverts commit 5d235def.

Reason for revert: Speculative revert because of https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux%20-%20gc%20stress/38153/overview

Original change's description:
> [heap] Store size with invalidated object
>
> When updating pointers during a full GC, a page might not have been swept
> yet. In such cases there might be invalid objects and slots recorded
> in free memory. Updating tagged slots in free memory is fine even though
> it is superfluous work.
>
> However, the GC also needs to calculate the size of potentially dead
> invalid objects in order to be able to check whether a slot is within
> that object. But since that object is dead, its map might be dead as
> well, which makes size calculation impossible on such objects. The CL
> changes this to cache the size of invalid objects. A follow-up CL will
> also check the marking bit of invalid objects.
>
> Bug: v8:12578, chromium:1316289
> Change-Id: Ie773d0862a565982957e0dc409630d76552d1a32
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3599482
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Reviewed-by: Jakob Linke <jgruber@chromium.org>
> Reviewed-by: Patrick Thier <pthier@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80169}

Bug: v8:12578, chromium:1316289
Change-Id: I6949412c5d6e1aa15718d027043d9528137a60a0
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3605812
Auto-Submit: Nico Hartmann <nicohartmann@chromium.org>
Owners-Override: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/main@{#80173}
parent acd134de
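For readers coming to this revert cold, the mechanism at stake is easiest to see in a small sketch. The reverted CL records, next to each invalidated object's start address, the object's size as it was when the layout change was announced, so that the slot filter never has to ask a possibly dead map for the object's size. The C++17 sketch below illustrates only that idea: the names echo V8's InvalidatedSlots and InvalidatedSlotsFilter, but the types and logic are simplified stand-ins, and it conservatively treats every slot inside an invalidated object as stale, whereas the real filter still consults the object's layout where it can.

#include <cstdint>
#include <iostream>
#include <map>

using Address = std::uintptr_t;

// Start address of each invalidated object -> its size in bytes, captured
// while the size was still known (i.e. before its map could become stale).
using InvalidatedSlots = std::map<Address, int>;

// Called at the point where the layout change is announced; afterwards the
// object's map, and therefore its size, may no longer be trustworthy.
void RegisterObjectWithInvalidatedSlots(InvalidatedSlots& slots, Address start,
                                        int size) {
  slots.insert_or_assign(start, size);
}

class InvalidatedSlotsFilter {
 public:
  explicit InvalidatedSlotsFilter(const InvalidatedSlots& slots)
      : it_(slots.begin()), end_(slots.end()) {}

  // Returns true if `slot` lies outside every invalidated object. Slots must
  // be queried in non-decreasing address order, mirroring the real filter.
  bool IsValid(Address slot) {
    while (it_ != end_ &&
           slot >= it_->first + static_cast<Address>(it_->second)) {
      ++it_;  // Advance past objects that end before this slot.
    }
    if (it_ == end_ || slot < it_->first) return true;
    return false;  // Slot is inside an invalidated (possibly dead) object.
  }

 private:
  InvalidatedSlots::const_iterator it_;
  InvalidatedSlots::const_iterator end_;
};

int main() {
  InvalidatedSlots invalidated;
  RegisterObjectWithInvalidatedSlots(invalidated, 0x1000, 32);
  RegisterObjectWithInvalidatedSlots(invalidated, 0x2000, 16);
  InvalidatedSlotsFilter filter(invalidated);
  std::cout << filter.IsValid(0x0ff8) << "\n";  // 1: before any invalidated object
  std::cout << filter.IsValid(0x1010) << "\n";  // 0: inside [0x1000, 0x1020)
  std::cout << filter.IsValid(0x2020) << "\n";  // 1: past the last invalidated object
}

With the size captured at registration time, whether a recorded slot falls inside an invalidated object can be decided without touching the object's map, which is what the reverted change needed for not-yet-swept pages.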
@@ -13,7 +13,6 @@
 #include "src/diagnostics/disasm.h"
 #include "src/execution/frames.h"
 #include "src/execution/isolate.h"
-#include "src/heap/heap.h"
 #include "src/numbers/conversions.h"
 #include "src/objects/arguments.h"
 #include "src/objects/heap-number-inl.h"
@@ -1851,9 +1850,7 @@ void TranslatedState::InitializeJSObjectAt(
   CHECK_GE(children_count, 2);
   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(
-      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
-      slot->GetChildrenCount() * kTaggedSize);
+  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
   // Fill the property array field.
   {
@@ -1905,9 +1902,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
   }
   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(
-      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
-      slot->GetChildrenCount() * kTaggedSize);
+  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
   // Write the fields to the object.
   for (int i = 1; i < children_count; i++) {
@@ -3954,24 +3954,22 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
 void Heap::NotifyObjectLayoutChange(
     HeapObject object, const DisallowGarbageCollection&,
-    InvalidateRecordedSlots invalidate_recorded_slots, int new_size) {
-  DCHECK_IMPLIES(invalidate_recorded_slots == InvalidateRecordedSlots::kYes,
-                 new_size > 0);
+    InvalidateRecordedSlots invalidate_recorded_slots) {
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
-          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
     }
   }
   if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
       MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -4648,34 +4646,11 @@ void Heap::Verify() {
   if (new_lo_space_) new_lo_space_->Verify(isolate());
   isolate()->string_table()->VerifyIfOwnedBy(isolate());
-  VerifyInvalidatedObjectSize();
 #if DEBUG
   VerifyCommittedPhysicalMemory();
 #endif  // DEBUG
 }
-namespace {
-void VerifyInvalidatedSlots(InvalidatedSlots* invalidated_slots) {
-  if (!invalidated_slots) return;
-  for (std::pair<HeapObject, int> object_and_size : *invalidated_slots) {
-    HeapObject object = object_and_size.first;
-    int size = object_and_size.second;
-    CHECK_EQ(object.Size(), size);
-  }
-}
-}  // namespace
-void Heap::VerifyInvalidatedObjectSize() {
-  OldGenerationMemoryChunkIterator chunk_iterator(this);
-  MemoryChunk* chunk;
-  while ((chunk = chunk_iterator.next()) != nullptr) {
-    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_NEW>());
-    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_SHARED>());
-  }
-}
 void Heap::VerifyReadOnlyHeap() {
   CHECK(!read_only_space_->writable());
   read_only_space_->Verify(isolate());
@@ -1114,7 +1114,8 @@ class Heap {
   // manually.
   void NotifyObjectLayoutChange(
       HeapObject object, const DisallowGarbageCollection&,
-      InvalidateRecordedSlots invalidate_recorded_slots, int new_size = 0);
+      InvalidateRecordedSlots invalidate_recorded_slots =
+          InvalidateRecordedSlots::kYes);
 #ifdef VERIFY_HEAP
   // This function checks that either
@@ -1583,9 +1584,6 @@
   // created.
   void VerifyReadOnlyHeap();
   void VerifyRememberedSetFor(HeapObject object);
-  // Verify that cached size of invalidated object is up-to-date.
-  void VerifyInvalidatedObjectSize();
 #endif
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
@@ -28,18 +28,22 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
     NextInvalidatedObject();
   }
+  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+  if (invalidated_size_ == 0) {
+    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
+    invalidated_size_ = invalidated_object.Size();
+  }
   int offset = static_cast<int>(slot - invalidated_start_);
   // OLD_TO_OLD can have slots in map word unlike other remembered sets.
   DCHECK_GE(offset, 0);
   DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);
-  if (offset < invalidated_size_) {
-    if (offset == 0) return true;
-    HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
-    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
-    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
-  }
+  if (offset < invalidated_size_)
+    return offset == 0 ||
+           invalidated_object.IsValidSlot(invalidated_object.map(), offset);
   NextInvalidatedObject();
   return true;
@@ -47,14 +51,12 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
 void InvalidatedSlotsFilter::NextInvalidatedObject() {
   invalidated_start_ = next_invalidated_start_;
-  invalidated_size_ = next_invalidated_size_;
+  invalidated_size_ = 0;
   if (iterator_ == iterator_end_) {
     next_invalidated_start_ = sentinel_;
-    next_invalidated_size_ = 0;
   } else {
-    next_invalidated_start_ = iterator_->first.address();
-    next_invalidated_size_ = iterator_->second;
+    next_invalidated_start_ = iterator_->address();
     iterator_++;
   }
 }
@@ -85,7 +87,7 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
 void InvalidatedSlotsCleanup::NextInvalidatedObject() {
   if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first.address();
+    invalidated_start_ = iterator_->address();
   } else {
     invalidated_start_ = sentinel_;
   }
@@ -21,7 +21,7 @@ namespace internal {
 // that potentially invalidates slots recorded concurrently. The second part
 // of each element is the size of the corresponding object before the layout
 // change.
-using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
+using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
 // This class provides IsValid predicate that takes into account the set
 // of invalidated objects in the given memory chunk.
@@ -45,10 +45,9 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   InvalidatedSlots::const_iterator iterator_;
   InvalidatedSlots::const_iterator iterator_end_;
   Address sentinel_;
-  Address invalidated_start_{kNullAddress};
-  Address next_invalidated_start_{kNullAddress};
-  int invalidated_size_{0};
-  int next_invalidated_size_{0};
+  Address invalidated_start_;
+  Address next_invalidated_start_;
+  int invalidated_size_;
   InvalidatedSlots empty_;
 #ifdef DEBUG
   Address last_slot_;
@@ -364,17 +364,14 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
 }
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
-                                                            int new_size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
-                                                            int new_size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
 template V8_EXPORT_PRIVATE void MemoryChunk::RegisterObjectWithInvalidatedSlots<
-    OLD_TO_SHARED>(HeapObject object, int new_size);
+    OLD_TO_SHARED>(HeapObject object);
 template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
-                                                     int new_size) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
   bool skip_slot_recording;
   switch (type) {
@@ -402,27 +399,23 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
     AllocateInvalidatedSlots<type>();
   }
-  DCHECK_GT(new_size, 0);
-  InvalidatedSlots& invalidated_slots = *this->invalidated_slots<type>();
-  DCHECK_IMPLIES(invalidated_slots[object] > 0,
-                 new_size <= invalidated_slots[object]);
-  invalidated_slots.insert_or_assign(object, new_size);
+  invalidated_slots<type>()->insert(object);
 }
-void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int new_size) {
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
   if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
   if (heap()->incremental_marking()->IsCompacting()) {
     // We cannot check slot_set_[OLD_TO_OLD] here, since the
     // concurrent markers might insert slots concurrently.
-    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
   }
   if (slot_set_[OLD_TO_NEW] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
   }
   if (slot_set_[OLD_TO_SHARED] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
   }
 }
@@ -143,9 +143,8 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
   template <RememberedSetType type>
-  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
-                                                            int new_size);
-  void InvalidateRecordedSlots(HeapObject object, int new_size);
+  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+  void InvalidateRecordedSlots(HeapObject object);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
@@ -11,7 +11,6 @@
 #include "src/common/globals.h"
 #include "src/handles/handles-inl.h"
 #include "src/heap/heap-write-barrier-inl.h"
-#include "src/heap/heap.h"
 #include "src/objects/debug-objects-inl.h"
 #include "src/objects/feedback-vector-inl.h"
 #include "src/objects/scope-info-inl.h"
@@ -827,8 +826,7 @@ void SharedFunctionInfo::ClearPreparseData() {
   Heap* heap = GetHeapFromWritableObject(data);
   // Swap the map.
-  heap->NotifyObjectLayoutChange(data, no_gc, InvalidateRecordedSlots::kYes,
-                                 UncompiledDataWithoutPreparseData::kSize);
+  heap->NotifyObjectLayoutChange(data, no_gc);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
                 UncompiledDataWithPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
@@ -391,6 +391,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();
+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
+                                              InvalidateRecordedSlots::kYes);
+  }
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -414,12 +419,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(
-        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
-  }
   if (!isolate->heap()->IsLargeObject(*this)) {
     isolate->heap()->CreateFillerObjectAt(
         this->address() + new_size, size - new_size,
@@ -476,6 +475,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();
+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
+                                              InvalidateRecordedSlots::kYes);
+  }
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -500,11 +504,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(
-        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
-  }
   isolate->heap()->CreateFillerObjectAt(
       this->address() + new_size, size - new_size,
       has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
@@ -219,8 +219,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
                               receiver->RawField(index.offset()));
     if (!FLAG_enable_third_party_heap) {
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
-      int new_size = parent_map->instance_size();
-      chunk->InvalidateRecordedSlots(*receiver, new_size);
+      chunk->InvalidateRecordedSlots(*receiver);
     }
   }
 }
@@ -234,15 +234,13 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
         }
       }
       // First object is going to be evacuated.
-      HeapObject front_object = *compaction_page_handles.front();
       to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-          front_object, front_object.Size());
+          *compaction_page_handles.front());
       // Last object is NOT going to be evacuated.
       // This happens since not all objects fit on the only other page in the
      // old space, the GC isn't allowed to allocate another page.
-      HeapObject back_object = *compaction_page_handles.back();
       to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-          back_object, back_object.Size());
+          *compaction_page_handles.back());
       to_be_aborted_page->SetFlag(
           MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
@@ -71,9 +71,7 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register every second byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -98,9 +96,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -121,18 +117,16 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(
-        byte_arrays[i], ByteArray::kHeaderSize);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
-  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
     Address end = byte_array.address() + byte_array.Size();
     heap->RightTrimFixedArray(byte_array, byte_array.length());
+    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
     for (Address addr = start; addr < end; addr += kTaggedSize) {
       CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
     }
@@ -150,9 +144,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be no-op because the page is marked as evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // All slots must still be valid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -176,9 +168,7 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // All slots must still be invalid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -371,9 +361,7 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
   }
   // Mark full page as free
@@ -392,9 +380,7 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    ByteArray byte_array = byte_arrays[i];
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
-                                                         byte_array.Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
   }
   // Mark each object as free on page
@@ -421,8 +407,7 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   ByteArray& invalidated = byte_arrays[1];
   heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
-  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
-                                                       invalidated.Size());
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
   // Free memory at end of invalidated object
   InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);