Commit 23b2d571 authored by Dominik Inführ, committed by V8 LUCI CQ

Reland "[heap] Store size with invalidated object"

This is a reland of commit 5d235def

The previous version of this CL got reverted because the cached
size of an invalidated object wasn't up-to-date when performing a GC.

Not all size changes go through NotifyObjectLayoutChange, so
https://crrev.com/c/3607992 introduced NotifyObjectSizeChange as a
single bottleneck for object size changes (e.g. right-trimming). This
method is now used to update the cached size of invalidated objects.
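
As a minimal sketch of that flow (illustrative standalone types only,
not the actual V8 heap API):

  #include <cstdint>
  #include <map>

  using Address = std::uintptr_t;
  // Each invalidated object is stored together with its cached size.
  using InvalidatedSlots = std::map<Address, int>;

  struct Page {
    InvalidatedSlots invalidated;

    // Called from the single size-change bottleneck (NotifyObjectSizeChange
    // in the real code): if the object was registered as invalidated,
    // refresh its cached size so a later GC never reads a stale value.
    void UpdateInvalidatedObjectSize(Address object_start, int new_size) {
      auto it = invalidated.find(object_start);
      if (it != invalidated.end()) it->second = new_size;
    }
  };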

Original change's description:
> [heap] Store size with invalidated object
>
> When updating pointers during a full GC, a page might not be swept
> already. In such cases there might be invalid objects and slots recorded
> in free memory. Updating tagged slots in free memory is fine even though
> it is superfluous work.
>
> However, the GC also needs to calculate the size of potentially dead
> invalid objects in order to be able to check whether a slot is within
> that object. But since that object is dead, its map might be dead as
> well which makes size calculation impossible on such objects. The CL
> changes this to cache the size of invalid objects. A follow-up CL will
> also check the marking bit of invalid objects.
>
> Bug: v8:12578, chromium:1316289
> Change-Id: Ie773d0862a565982957e0dc409630d76552d1a32
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3599482
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Reviewed-by: Jakob Linke <jgruber@chromium.org>
> Reviewed-by: Patrick Thier <pthier@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80169}
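
To make the motivation concrete, a sketch of how the cached size lets the
slot filter bound-check a slot against a possibly-dead object without ever
dereferencing its map (again simplified standalone types, not V8's
implementation):

  #include <cstdint>
  #include <map>

  using Address = std::uintptr_t;
  // Object start address -> size cached at invalidation time.
  using InvalidatedSlots = std::map<Address, int>;

  // Returns true if |slot| lies inside a registered invalidated object.
  bool SlotIsInsideInvalidatedObject(const InvalidatedSlots& invalidated,
                                     Address slot) {
    auto it = invalidated.upper_bound(slot);  // first object after slot
    if (it == invalidated.begin()) return false;
    --it;  // last object starting at or before slot
    return slot < it->first + static_cast<Address>(it->second);
  }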

Bug: v8:12578, chromium:1316289
Change-Id: I1f7c6070b8e7d116aeb1a8d03d4f87927ab40872
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3608632
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Patrick Thier <pthier@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80262}
parent 56adca8e
@@ -13,6 +13,7 @@
 #include "src/diagnostics/disasm.h"
 #include "src/execution/frames.h"
 #include "src/execution/isolate.h"
+#include "src/heap/heap.h"
 #include "src/numbers/conversions.h"
 #include "src/objects/arguments.h"
 #include "src/objects/heap-number-inl.h"
@@ -1850,7 +1851,9 @@ void TranslatedState::InitializeJSObjectAt(
   CHECK_GE(children_count, 2);

   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
+  isolate()->heap()->NotifyObjectLayoutChange(
+      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
+      slot->GetChildrenCount() * kTaggedSize);

   // Fill the property array field.
   {
@@ -1902,7 +1905,9 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
   }

   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
+  isolate()->heap()->NotifyObjectLayoutChange(
+      *object_storage, no_gc, InvalidateRecordedSlots::kYes,
+      slot->GetChildrenCount() * kTaggedSize);

   // Write the fields to the object.
   for (int i = 1; i < children_count; i++) {
......
@@ -3535,8 +3535,9 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
 #ifdef DEBUG
   if (MayContainRecordedSlots(object)) {
     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
-    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
     DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
+    DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_SHARED>(object));
   }
 #endif
@@ -3953,22 +3954,24 @@ void Heap::FinalizeIncrementalMarkingIncrementally(

 void Heap::NotifyObjectLayoutChange(
     HeapObject object, const DisallowGarbageCollection&,
-    InvalidateRecordedSlots invalidate_recorded_slots) {
+    InvalidateRecordedSlots invalidate_recorded_slots, int new_size) {
+  DCHECK_IMPLIES(invalidate_recorded_slots == InvalidateRecordedSlots::kYes,
+                 new_size > 0);
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
-          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
     }
   }
   if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
       MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -3983,6 +3986,8 @@ void Heap::NotifyObjectSizeChange(HeapObject object, int old_size, int new_size,
   DCHECK_LE(new_size, old_size);
   if (new_size == old_size) return;

+  UpdateInvalidatedObjectSize(object, new_size);
+
   const bool is_background = LocalHeap::Current() != nullptr;
   DCHECK_IMPLIES(is_background,
                  clear_recorded_slots == ClearRecordedSlots::kNo);
@@ -3999,6 +4004,20 @@ void Heap::NotifyObjectSizeChange(HeapObject object, int old_size, int new_size,
                      clear_recorded_slots, verify_no_slots_recorded);
 }

+void Heap::UpdateInvalidatedObjectSize(HeapObject object, int new_size) {
+  if (!MayContainRecordedSlots(object)) return;
+
+  if (incremental_marking()->IsCompacting()) {
+    MemoryChunk::FromHeapObject(object)
+        ->UpdateInvalidatedObjectSize<OLD_TO_OLD>(object, new_size);
+  }
+
+  MemoryChunk::FromHeapObject(object)->UpdateInvalidatedObjectSize<OLD_TO_NEW>(
+      object, new_size);
+  MemoryChunk::FromHeapObject(object)
+      ->UpdateInvalidatedObjectSize<OLD_TO_SHARED>(object, new_size);
+}
+
 #ifdef VERIFY_HEAP
 // Helper class for collecting slot addresses.
 class SlotCollectingVisitor final : public ObjectVisitor {
@@ -4668,11 +4687,35 @@ void Heap::Verify() {
   if (new_lo_space_) new_lo_space_->Verify(isolate());
   isolate()->string_table()->VerifyIfOwnedBy(isolate());

+  VerifyInvalidatedObjectSize();
+
 #if DEBUG
   VerifyCommittedPhysicalMemory();
 #endif  // DEBUG
 }

+namespace {
+void VerifyInvalidatedSlots(InvalidatedSlots* invalidated_slots) {
+  if (!invalidated_slots) return;
+  for (std::pair<HeapObject, int> object_and_size : *invalidated_slots) {
+    HeapObject object = object_and_size.first;
+    int size = object_and_size.second;
+    CHECK_EQ(object.Size(), size);
+  }
+}
+}  // namespace
+
+void Heap::VerifyInvalidatedObjectSize() {
+  OldGenerationMemoryChunkIterator chunk_iterator(this);
+  MemoryChunk* chunk;
+
+  while ((chunk = chunk_iterator.next()) != nullptr) {
+    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_NEW>());
+    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_OLD>());
+    VerifyInvalidatedSlots(chunk->invalidated_slots<OLD_TO_SHARED>());
+  }
+}
+
 void Heap::VerifyReadOnlyHeap() {
   CHECK(!read_only_space_->writable());
   read_only_space_->Verify(isolate());
......
@@ -1127,8 +1127,7 @@ class Heap {
   // manually.
   void NotifyObjectLayoutChange(
       HeapObject object, const DisallowGarbageCollection&,
-      InvalidateRecordedSlots invalidate_recorded_slots =
-          InvalidateRecordedSlots::kYes);
+      InvalidateRecordedSlots invalidate_recorded_slots, int new_size = 0);

   // The runtime uses this function to inform the GC of object size changes. The
   // GC will fill this area with a filler object and might clear recorded slots
@@ -1603,6 +1602,9 @@ class Heap {
   // created.
   void VerifyReadOnlyHeap();
   void VerifyRememberedSetFor(HeapObject object);

+  // Verify that cached size of invalidated object is up-to-date.
+  void VerifyInvalidatedObjectSize();
+
 #endif

 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
@@ -1819,6 +1821,9 @@ class Heap {
   V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
                                        int size_in_bytes);

+  // Updates invalidated object size in all remembered sets.
+  void UpdateInvalidatedObjectSize(HeapObject object, int new_size);
+
   enum class VerifyNoSlotsRecorded { kYes, kNo };

   // This method is used by the sweeper on free memory ranges to make the page
......
@@ -28,22 +28,18 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
     NextInvalidatedObject();
   }

-  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
-
-  if (invalidated_size_ == 0) {
-    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
-    invalidated_size_ = invalidated_object.Size();
-  }
-
   int offset = static_cast<int>(slot - invalidated_start_);

   // OLD_TO_OLD can have slots in map word unlike other remembered sets.
   DCHECK_GE(offset, 0);
   DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);

-  if (offset < invalidated_size_)
-    return offset == 0 ||
-           invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+  if (offset < invalidated_size_) {
+    if (offset == 0) return true;
+    HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+    DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
+    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+  }

   NextInvalidatedObject();
   return true;
@@ -51,12 +47,14 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {

 void InvalidatedSlotsFilter::NextInvalidatedObject() {
   invalidated_start_ = next_invalidated_start_;
-  invalidated_size_ = 0;
+  invalidated_size_ = next_invalidated_size_;

   if (iterator_ == iterator_end_) {
     next_invalidated_start_ = sentinel_;
+    next_invalidated_size_ = 0;
   } else {
-    next_invalidated_start_ = iterator_->address();
+    next_invalidated_start_ = iterator_->first.address();
+    next_invalidated_size_ = iterator_->second;
     iterator_++;
   }
 }
@@ -87,7 +85,7 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {

 void InvalidatedSlotsCleanup::NextInvalidatedObject() {
   if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->address();
+    invalidated_start_ = iterator_->first.address();
   } else {
     invalidated_start_ = sentinel_;
   }
......
@@ -21,7 +21,7 @@ namespace internal {
 // that potentially invalidates slots recorded concurrently. The second part
 // of each element is the size of the corresponding object before the layout
 // change.
-using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
+using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;

 // This class provides IsValid predicate that takes into account the set
 // of invalidated objects in the given memory chunk.
@@ -45,9 +45,10 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   InvalidatedSlots::const_iterator iterator_;
   InvalidatedSlots::const_iterator iterator_end_;
   Address sentinel_;
-  Address invalidated_start_;
-  Address next_invalidated_start_;
-  int invalidated_size_;
+  Address invalidated_start_{kNullAddress};
+  Address next_invalidated_start_{kNullAddress};
+  int invalidated_size_{0};
+  int next_invalidated_size_{0};
   InvalidatedSlots empty_;
 #ifdef DEBUG
   Address last_slot_;
......
@@ -370,14 +370,17 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
 }

 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
+                                                            int new_size);
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
+                                                            int new_size);
 template V8_EXPORT_PRIVATE void MemoryChunk::RegisterObjectWithInvalidatedSlots<
-    OLD_TO_SHARED>(HeapObject object);
+    OLD_TO_SHARED>(HeapObject object, int new_size);

 template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
+                                                     int new_size) {
   bool skip_slot_recording;

   switch (type) {
@@ -405,23 +408,51 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
     AllocateInvalidatedSlots<type>();
   }

-  invalidated_slots<type>()->insert(object);
+  DCHECK_GT(new_size, 0);
+  InvalidatedSlots& invalidated_slots = *this->invalidated_slots<type>();
+  DCHECK_IMPLIES(invalidated_slots[object] > 0,
+                 new_size <= invalidated_slots[object]);
+  invalidated_slots.insert_or_assign(object, new_size);
+}
+
+template V8_EXPORT_PRIVATE void
+MemoryChunk::UpdateInvalidatedObjectSize<OLD_TO_NEW>(HeapObject object,
+                                                     int new_size);
+template V8_EXPORT_PRIVATE void
+MemoryChunk::UpdateInvalidatedObjectSize<OLD_TO_OLD>(HeapObject object,
+                                                     int new_size);
+template V8_EXPORT_PRIVATE void
+MemoryChunk::UpdateInvalidatedObjectSize<OLD_TO_SHARED>(HeapObject object,
+                                                        int new_size);
+
+template <RememberedSetType type>
+void MemoryChunk::UpdateInvalidatedObjectSize(HeapObject object, int new_size) {
+  DCHECK_GT(new_size, 0);
+
+  if (invalidated_slots<type>() == nullptr) return;
+
+  InvalidatedSlots& invalidated_slots = *this->invalidated_slots<type>();
+  DCHECK_IMPLIES(invalidated_slots[object] > 0,
+                 new_size <= invalidated_slots[object]);
+  if (invalidated_slots.count(object) > 0) {
+    invalidated_slots.insert_or_assign(object, new_size);
+  }
 }

-void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int new_size) {
   if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;

   if (heap()->incremental_marking()->IsCompacting()) {
     // We cannot check slot_set_[OLD_TO_OLD] here, since the
     // concurrent markers might insert slots concurrently.
-    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
   }

   if (slot_set_[OLD_TO_NEW] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, new_size);
   }

   if (slot_set_[OLD_TO_SHARED] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object, new_size);
   }
 }
@@ -429,6 +460,8 @@ template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
     HeapObject object);
 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
     HeapObject object);
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_SHARED>(
+    HeapObject object);

 template <RememberedSetType type>
 bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
......
@@ -143,8 +143,12 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
   template <RememberedSetType type>
-  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
-  void InvalidateRecordedSlots(HeapObject object);
+  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
+                                                            int new_size);
+  template <RememberedSetType type>
+  V8_EXPORT_PRIVATE void UpdateInvalidatedObjectSize(HeapObject object,
+                                                     int new_size);
+  void InvalidateRecordedSlots(HeapObject object, int new_size);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
......
@@ -11,6 +11,7 @@
 #include "src/common/globals.h"
 #include "src/handles/handles-inl.h"
 #include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/heap.h"
 #include "src/objects/debug-objects-inl.h"
 #include "src/objects/feedback-vector-inl.h"
 #include "src/objects/scope-info-inl.h"
@@ -826,7 +827,8 @@ void SharedFunctionInfo::ClearPreparseData() {
   Heap* heap = GetHeapFromWritableObject(data);

   // Swap the map.
-  heap->NotifyObjectLayoutChange(data, no_gc);
+  heap->NotifyObjectLayoutChange(data, no_gc, InvalidateRecordedSlots::kYes,
+                                 UncompiledDataWithoutPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
                 UncompiledDataWithPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
......
@@ -395,11 +395,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();

-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
-                                              InvalidateRecordedSlots::kYes);
-  }
-
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -423,6 +418,12 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);

+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(
+        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
+  }
+
   if (!isolate->heap()->IsLargeObject(*this)) {
     isolate->heap()->NotifyObjectSizeChange(
         *this, size, new_size,
@@ -479,11 +480,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool is_internalized = this->IsInternalizedString();
   bool has_pointers = StringShape(*this).IsIndirect();

-  if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
-                                              InvalidateRecordedSlots::kYes);
-  }
-
   base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
       isolate->internalized_string_access());
   // Morph the string to an external string by replacing the map and
@@ -508,6 +504,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);

+  if (has_pointers) {
+    isolate->heap()->NotifyObjectLayoutChange(
+        *this, no_gc, InvalidateRecordedSlots::kYes, new_size);
+  }
+
   isolate->heap()->NotifyObjectSizeChange(
       *this, size, new_size,
       has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
......
@@ -219,7 +219,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
                               receiver->RawField(index.offset()));
     if (!FLAG_enable_third_party_heap) {
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
-      chunk->InvalidateRecordedSlots(*receiver);
+      int new_size = parent_map->instance_size();
+      chunk->InvalidateRecordedSlots(*receiver, new_size);
     }
   }
 }
......
@@ -234,13 +234,15 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
       }
     }
     // First object is going to be evacuated.
+    HeapObject front_object = *compaction_page_handles.front();
     to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-        *compaction_page_handles.front());
+        front_object, front_object.Size());
     // Last object is NOT going to be evacuated.
     // This happens since not all objects fit on the only other page in the
     // old space, the GC isn't allowed to allocate another page.
+    HeapObject back_object = *compaction_page_handles.back();
     to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
-        *compaction_page_handles.back());
+        back_object, back_object.Size());
     to_be_aborted_page->SetFlag(
         MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
......
@@ -71,7 +71,9 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register every second byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -96,7 +98,9 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -117,16 +121,18 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(
+        byte_arrays[i], ByteArray::kHeaderSize);
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
-  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
     ByteArray byte_array = byte_arrays[i];
     Address start = byte_array.address() + ByteArray::kHeaderSize;
     Address end = byte_array.address() + byte_array.Size();
     heap->RightTrimFixedArray(byte_array, byte_array.length());
+    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
     for (Address addr = start; addr < end; addr += kTaggedSize) {
       CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
     }
@@ -144,7 +150,9 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be no-op because the page is marked as evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   // All slots must still be valid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -168,7 +176,9 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_array,
+                                                         byte_array.Size());
   }
   // All slots must still be invalid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -361,7 +371,9 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
+                                                         byte_array.Size());
   }

   // Mark full page as free
@@ -380,7 +392,9 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
+    ByteArray byte_array = byte_arrays[i];
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_array,
+                                                         byte_array.Size());
  }

   // Mark each object as free on page
@@ -407,7 +421,8 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   ByteArray& invalidated = byte_arrays[1];

   heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
-  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
+                                                       invalidated.Size());

   // Free memory at end of invalidated object
   InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
......