Commit d4e168a3 authored by Dominik Inführ and committed by Commit Bot

Revert "[heap] Remove size from invalidated slots"

This reverts commit 93063ade.

Reason for revert: Clusterfuzz found an issue.

Original change's description:
> [heap] Remove size from invalidated slots
> 
> Slots are always valid inside an invalidated area when outside the
> respective object's current size. This allows us to remove the size
> from the InvalidatedSlots data structure.
> 
> This change was enabled by https://crrev.com/c/1771793.
> 
> Bug: v8:9454
> Change-Id: I2b5a7234d47227cb6ad8d67de20e9b5a2028ae83
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1773242
> Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#63510}

TBR=ulan@chromium.org,sigurds@chromium.org,tebbi@chromium.org,dinfuehr@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:9454
Change-Id: I7daf96cf50aaedd4dbdab48fd550182df94e54bf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1783106
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63535}
parent dc51c15b
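
Note: the diff below restores the (object, size) bookkeeping that the reverted CL had removed. A minimal standalone sketch of the restored idea, using plain integer addresses and sizes instead of V8's HeapObject/Address types (not V8 code):

#include <cstdint>
#include <iostream>
#include <map>

int main() {
  // Entry: start address of an invalidated object -> its size before the
  // layout change (the int that this revert puts back into InvalidatedSlots).
  std::map<uintptr_t, int> invalidated = {{0x1000, 0x40}, {0x2000, 0x80}};

  auto classify = [&](uintptr_t slot) -> const char* {
    auto it = invalidated.upper_bound(slot);
    if (it == invalidated.begin()) return "valid: before all invalidated areas";
    --it;  // greatest entry whose start address is <= slot
    if (slot < it->first + static_cast<uintptr_t>(it->second))
      return "inside the object's old extent: ask the object";
    // In V8 this case depends on slots_in_free_space_are_valid_ (see below).
    return "past the recorded size: treated as free space";
  };

  std::cout << classify(0x0800) << "\n";  // before the first entry
  std::cout << classify(0x1010) << "\n";  // offset 0x10 < size 0x40
  std::cout << classify(0x1050) << "\n";  // offset 0x50 >= size 0x40
}

With the size stored per entry, a slot that lies inside an invalidated area but past the object's pre-change size can be classified without consulting the possibly already-shrunk object.
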
@@ -3712,7 +3712,8 @@ void TranslatedState::InitializeJSObjectAt(
   CHECK_GE(slot->GetChildrenCount(), 2);

   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
+  isolate()->heap()->NotifyObjectLayoutChange(
+      *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);

   // Fill the property array field.
   {
@@ -3771,7 +3772,8 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
   }

   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
+  isolate()->heap()->NotifyObjectLayoutChange(
+      *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);

   // Write the fields to the object.
   for (int i = 1; i < slot->GetChildrenCount(); i++) {
...
@@ -3387,19 +3387,19 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   }
 }

-void Heap::NotifyObjectLayoutChange(HeapObject object,
+void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
                                     const DisallowHeapAllocation&) {
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
-          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
     }
   }
   if (MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
...
@@ -895,7 +895,8 @@ class Heap {
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
-  void NotifyObjectLayoutChange(HeapObject object,
+  // The old size is the size of the object before layout change.
+  void NotifyObjectLayoutChange(HeapObject object, int old_size,
                                 const DisallowHeapAllocation&);

 #ifdef VERIFY_HEAP
...
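
The restored signature above makes every caller measure the object before mutating its layout and pass that old size through (see the string.cc and js-objects.cc hunks further down). A compilable toy of that call discipline, with mock types standing in for V8's (not the real API):

#include <cassert>

struct MockObject {
  int size;  // current size in bytes
};

struct MockHeap {
  int last_invalidated_size = 0;
  // Stands in for Heap::NotifyObjectLayoutChange(object, old_size, no_gc).
  void NotifyLayoutChange(MockObject&, int old_size) {
    last_invalidated_size = old_size;
  }
};

int main() {
  MockHeap heap;
  MockObject obj{128};
  int old_size = obj.size;  // measure BEFORE mutating the layout
  heap.NotifyLayoutChange(obj, old_size);
  obj.size = 64;  // shrink in place, e.g. a map swap to a smaller layout
  // The heap still knows the old 128-byte extent for slot filtering.
  assert(heap.last_invalidated_size == 128);
}
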
@@ -24,39 +24,42 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
   DCHECK_LE(last_slot_, slot);
   last_slot_ = slot;
 #endif
+  while (slot >= invalidated_end_) {
+    ++iterator_;
+    if (iterator_ != iterator_end_) {
+      // Invalidated ranges must not overlap.
+      DCHECK_LE(invalidated_end_, iterator_->first.address());
+      invalidated_start_ = iterator_->first.address();
+      invalidated_end_ = invalidated_start_ + iterator_->second;
+      invalidated_object_ = HeapObject();
+      invalidated_object_size_ = 0;
+    } else {
+      invalidated_start_ = sentinel_;
+      invalidated_end_ = sentinel_;
+    }
+  }
+  // Now the invalidated region ends after the slot.
   if (slot < invalidated_start_) {
+    // The invalidated region starts after the slot.
     return true;
   }
-
-  while (slot >= next_invalidated_start_) {
-    NextInvalidatedObject();
-  }
-
-  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
-
-  if (invalidated_size_ == 0) {
-    invalidated_size_ = invalidated_object.Size();
+  // The invalidated region includes the slot.
+  // Ask the object if the slot is valid.
+  if (invalidated_object_.is_null()) {
+    invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
+    DCHECK(!invalidated_object_.IsFiller());
+    invalidated_object_size_ =
+        invalidated_object_.SizeFromMap(invalidated_object_.map());
   }

   int offset = static_cast<int>(slot - invalidated_start_);
   DCHECK_GT(offset, 0);
-  if (offset < invalidated_size_)
-    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
-
-  NextInvalidatedObject();
-  return true;
-}
-
-void InvalidatedSlotsFilter::NextInvalidatedObject() {
-  invalidated_start_ = next_invalidated_start_;
-  invalidated_size_ = 0;
-
-  if (iterator_ == iterator_end_) {
-    next_invalidated_start_ = sentinel_;
-  } else {
-    next_invalidated_start_ = iterator_->address();
-    iterator_++;
+  DCHECK_LE(invalidated_object_size_,
+            static_cast<int>(invalidated_end_ - invalidated_start_));
+
+  if (offset >= invalidated_object_size_) {
+    return slots_in_free_space_are_valid_;
   }
+
+  return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
 }

 void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
@@ -69,25 +72,35 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
   if (iterator_ == iterator_end_) return;

-  // Ignore invalidated objects that start before free region
-  while (invalidated_start_ < free_start) {
+  // Ignore invalidated objects before free region
+  while (free_start >= invalidated_end_) {
     ++iterator_;
     NextInvalidatedObject();
   }

-  // Remove all invalidated objects that start within
-  // free region.
-  while (invalidated_start_ < free_end) {
-    iterator_ = invalidated_slots_->erase(iterator_);
+  // Loop here: Free region might contain multiple invalidated objects
+  while (free_end > invalidated_start_) {
+    // Case: Free region starts before current invalidated object
+    if (free_start <= invalidated_start_) {
+      iterator_ = invalidated_slots_->erase(iterator_);
+    } else {
+      // Case: Free region starts within current invalidated object
+      // (Can happen for right-trimmed objects)
+      iterator_++;
+    }
     NextInvalidatedObject();
   }
 }

 void InvalidatedSlotsCleanup::NextInvalidatedObject() {
   if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->address();
+    invalidated_start_ = iterator_->first.address();
+    invalidated_end_ = invalidated_start_ + iterator_->second;
   } else {
     invalidated_start_ = sentinel_;
+    invalidated_end_ = sentinel_;
   }
 }
...
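
The restored InvalidatedSlotsCleanup::Free above distinguishes two cases: an entry that starts inside the freed region is erased, while an entry whose start precedes free_start survives because only its tail was freed (a right-trimmed object). A standalone toy of that walk, on plain integers rather than V8 types:

#include <cstdint>
#include <iostream>
#include <map>

int main() {
  std::map<uintptr_t, int> invalidated = {
      {0x1000, 0x40}, {0x1100, 0x40}, {0x1200, 0x40}};

  uintptr_t free_start = 0x1020, free_end = 0x1180;

  for (auto it = invalidated.begin(); it != invalidated.end();) {
    uintptr_t start = it->first;
    uintptr_t end = start + static_cast<uintptr_t>(it->second);
    if (end <= free_start || start >= free_end) {
      ++it;  // entirely outside the freed region
    } else if (start >= free_start) {
      it = invalidated.erase(it);  // starts inside the freed region: drop
    } else {
      ++it;  // right-trimmed object: entry survives, only its tail was freed
    }
  }

  for (const auto& entry : invalidated)
    std::cout << std::hex << entry.first << " size " << entry.second << "\n";
  // Prints 0x1000 (kept, right-trimmed) and 0x1200 (untouched).
}
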
@@ -3,35 +3,52 @@
 // found in the LICENSE file.

 #include "src/heap/invalidated-slots.h"
-#include "src/heap/invalidated-slots-inl.h"
 #include "src/heap/spaces.h"
+#include "src/objects/objects-inl.h"

 namespace v8 {
 namespace internal {

 InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
-  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
+  // The sweeper removes invalid slots and makes free space available for
+  // allocation. Slots for new objects can be recorded in the free space.
+  // Note that we cannot simply check for SweepingDone because pages in large
+  // object space are not swept but have SweepingDone() == true.
+  bool slots_in_free_space_are_valid =
+      chunk->SweepingDone() && chunk->InOldSpace();
+  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
+                                slots_in_free_space_are_valid);
 }

 InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
-  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
+  // Always treat these slots as valid for old-to-new for now. Invalid
+  // old-to-new slots are always cleared.
+  bool slots_in_free_space_are_valid = true;
+  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
+                                slots_in_free_space_are_valid);
 }

 InvalidatedSlotsFilter::InvalidatedSlotsFilter(
-    MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
+    MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
+    bool slots_in_free_space_are_valid) {
+  // Adjust slots_in_free_space_are_valid_ if more spaces are added.
+  DCHECK_IMPLIES(invalidated_slots != nullptr,
+                 chunk->InOldSpace() || chunk->InLargeObjectSpace());
+  slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
   invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;

   iterator_ = invalidated_slots->begin();
   iterator_end_ = invalidated_slots->end();
   sentinel_ = chunk->area_end();
-
-  // Invoke NextInvalidatedObject twice, to initialize
-  // invalidated_start_ to the first invalidated object and
-  // next_invalidated_object_ to the second one.
-  NextInvalidatedObject();
-  NextInvalidatedObject();
+  if (iterator_ != iterator_end_) {
+    invalidated_start_ = iterator_->first.address();
+    invalidated_end_ = invalidated_start_ + iterator_->second;
+  } else {
+    invalidated_start_ = sentinel_;
+    invalidated_end_ = sentinel_;
+  }
+  // These values will be lazily set when needed.
+  invalidated_object_size_ = 0;
 #ifdef DEBUG
   last_slot_ = chunk->area_start();
 #endif
@@ -52,7 +69,13 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
   iterator_end_ = invalidated_slots_->end();
   sentinel_ = chunk->area_end();

-  NextInvalidatedObject();
+  if (iterator_ != iterator_end_) {
+    invalidated_start_ = iterator_->first.address();
+    invalidated_end_ = invalidated_start_ + iterator_->second;
+  } else {
+    invalidated_start_ = sentinel_;
+    invalidated_end_ = sentinel_;
+  }

 #ifdef DEBUG
   last_free_ = chunk->area_start();
...
@@ -5,7 +5,7 @@
 #ifndef V8_HEAP_INVALIDATED_SLOTS_H_
 #define V8_HEAP_INVALIDATED_SLOTS_H_

-#include <set>
+#include <map>
 #include <stack>

 #include "src/base/atomic-utils.h"
@@ -20,7 +20,7 @@ namespace internal {
 // that potentially invalidates slots recorded concurrently. The second part
 // of each element is the size of the corresponding object before the layout
 // change.
-using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
+using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;

 // This class provides IsValid predicate that takes into account the set
 // of invalidated objects in the given memory chunk.
@@ -34,7 +34,8 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);

   explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
-                                  InvalidatedSlots* invalidated_slots);
+                                  InvalidatedSlots* invalidated_slots,
+                                  bool slots_in_free_space_are_valid);
   inline bool IsValid(Address slot);

  private:
@@ -42,15 +43,14 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   InvalidatedSlots::const_iterator iterator_end_;
   Address sentinel_;
   Address invalidated_start_;
-  Address next_invalidated_start_;
-  int invalidated_size_;
+  Address invalidated_end_;
+  HeapObject invalidated_object_;
+  int invalidated_object_size_;
+  bool slots_in_free_space_are_valid_;
   InvalidatedSlots empty_;
 #ifdef DEBUG
   Address last_slot_;
 #endif
-
- private:
-  inline void NextInvalidatedObject();
 };

 class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
@@ -71,6 +71,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
   Address sentinel_;
   Address invalidated_start_;
+  Address invalidated_end_;

   inline void NextInvalidatedObject();

 #ifdef DEBUG
...
@@ -3420,6 +3420,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
     if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+#ifdef DEBUG
+      for (auto object_size : *chunk_->invalidated_slots<OLD_TO_NEW>()) {
+        HeapObject object = object_size.first;
+        int size = object_size.second;
+        DCHECK_LE(object.SizeFromMap(object.map()), size);
+      }
+#endif
       // The invalidated slots are not needed after old-to-new slots were
       // processed.
       chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
@@ -3438,6 +3445,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
         chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
+#ifdef DEBUG
+      for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
+        HeapObject object = object_size.first;
+        int size = object_size.second;
+        DCHECK_LE(object.SizeFromMap(object.map()), size);
+      }
+#endif
       // The invalidated slots are not needed after old-to-old slots were
       // processsed.
       chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
...
@@ -440,6 +440,13 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
                                     SlotSet::KEEP_EMPTY_BUCKETS);
   if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
+#ifdef DEBUG
+    for (auto object_size : *page->invalidated_slots<OLD_TO_NEW>()) {
+      HeapObject object = object_size.first;
+      int size = object_size.second;
+      DCHECK_LE(object.SizeFromMap(object.map()), size);
+    }
+#endif
     // The invalidated slots are not needed after old-to-new slots were
     // processed.
     page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
...
@@ -1484,12 +1484,15 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
 }

 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
+                                                            int size);
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
+                                                            int size);

 template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
+                                                     int size) {
   bool skip_slot_recording;

   if (type == OLD_TO_NEW) {
@@ -1506,7 +1509,27 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
     AllocateInvalidatedSlots<type>();
   }

-  invalidated_slots<type>()->insert(object);
+  InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
+  InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
+
+  if (it != invalidated_slots->end() && it->first == object) {
+    // object was already inserted
+    CHECK_LE(size, it->second);
+    return;
+  }
+
+  it = invalidated_slots->insert(it, std::make_pair(object, size));
+
+  // prevent overlapping invalidated objects for old-to-new.
+  if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
+    HeapObject pred = (--it)->first;
+    int pred_size = it->second;
+    DCHECK_LT(pred.address(), object.address());
+
+    if (pred.address() + pred_size > object.address()) {
+      it->second = static_cast<int>(object.address() - pred.address());
+    }
+  }
 }
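
The restored RegisterObjectWithInvalidatedSlots above keeps old-to-new entries non-overlapping: when a newly registered object starts inside the predecessor's recorded extent, the predecessor's stored size is trimmed back. The same arithmetic on a toy map with plain integer addresses (not V8 code):

#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>

int main() {
  std::map<uintptr_t, int> invalidated;
  invalidated.emplace(0x1000, 0x100);  // predecessor: [0x1000, 0x1100)

  // Register an object that starts inside the predecessor's old extent,
  // as can happen after the predecessor was right-trimmed.
  uintptr_t obj = 0x1080;
  int obj_size = 0x40;
  auto it = invalidated.emplace(obj, obj_size).first;

  // Trim the predecessor so the recorded ranges no longer overlap (the
  // restored code does this only for OLD_TO_NEW and only when a
  // predecessor exists).
  auto pred = std::prev(it);
  if (pred->first + static_cast<uintptr_t>(pred->second) > obj)
    pred->second = static_cast<int>(obj - pred->first);

  assert(invalidated[0x1000] == 0x80);  // now ends exactly at 0x1080
  assert(invalidated[0x1080] == 0x40);
}
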
 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
@@ -1523,6 +1546,27 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
          invalidated_slots<type>()->end();
 }

+template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
+    HeapObject old_start, HeapObject new_start);
+
+template <RememberedSetType type>
+void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
+                                                 HeapObject new_start) {
+  DCHECK_LT(old_start, new_start);
+  DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
+            MemoryChunk::FromHeapObject(new_start));
+  static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
+  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
+    auto it = invalidated_slots<type>()->find(old_start);
+    if (it != invalidated_slots<type>()->end()) {
+      int old_size = it->second;
+      int delta = static_cast<int>(new_start.address() - old_start.address());
+      invalidated_slots<type>()->erase(it);
+      (*invalidated_slots<type>())[new_start] = old_size - delta;
+    }
+  }
+}
+
 void MemoryChunk::ReleaseLocalTracker() {
   DCHECK_NOT_NULL(local_tracker_);
   delete local_tracker_;
...
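
MoveObjectWithInvalidatedSlots above handles left-trimming: the entry keyed by the old start is re-keyed to the new start, and the stored size shrinks by exactly the distance the start moved, so the recorded range keeps its end address. Worked on a toy map with plain integers (not V8 code):

#include <cassert>
#include <cstdint>
#include <map>

int main() {
  std::map<uintptr_t, int> invalidated = {{0x1000, 0x100}};  // [0x1000, 0x1100)

  uintptr_t old_start = 0x1000, new_start = 0x1040;  // left-trim by 0x40
  auto it = invalidated.find(old_start);
  assert(it != invalidated.end());
  int old_size = it->second;
  int delta = static_cast<int>(new_start - old_start);
  invalidated.erase(it);
  invalidated[new_start] = old_size - delta;

  // The recorded range still ends at 0x1100; only its start moved.
  assert(invalidated.begin()->first +
             static_cast<uintptr_t>(invalidated.begin()->second) ==
         0x1100);
}
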
@@ -729,7 +729,12 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
   template <RememberedSetType type>
-  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
+                                                            int size);
+  // Updates invalidated_slots after array left-trimming.
+  template <RememberedSetType type>
+  void MoveObjectWithInvalidatedSlots(HeapObject old_start,
+                                      HeapObject new_start);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
...
@@ -2776,7 +2776,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
   Heap* heap = isolate->heap();

-  heap->NotifyObjectLayoutChange(*object, no_allocation);
+  int old_instance_size = old_map->instance_size();
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);

   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
@@ -2807,7 +2809,6 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
   object->SetProperties(*array);

   // Create filler object past the new instance size.
-  int old_instance_size = old_map->instance_size();
   int new_instance_size = new_map->instance_size();
   int instance_size_delta = old_instance_size - new_instance_size;
   DCHECK_GE(instance_size_delta, 0);
@@ -2890,10 +2891,10 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
   DisallowHeapAllocation no_allocation;

   Heap* heap = isolate->heap();
-  heap->NotifyObjectLayoutChange(*object, no_allocation);
+  int old_instance_size = map->instance_size();
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);

   // Resize the object in the heap if necessary.
-  int old_instance_size = map->instance_size();
   int new_instance_size = new_map->instance_size();
   int instance_size_delta = old_instance_size - new_instance_size;
   DCHECK_GE(instance_size_delta, 0);
...
@@ -613,7 +613,8 @@ void SharedFunctionInfo::ClearPreparseData() {
   Heap* heap = GetHeapFromWritableObject(data);

   // Swap the map.
-  heap->NotifyObjectLayoutChange(data, no_gc);
+  heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
+                                 no_gc);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
                 UncompiledDataWithPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
...
@@ -113,7 +113,7 @@ void String::MakeThin(Isolate* isolate, String internalized) {
   bool has_pointers = StringShape(*this).IsIndirect();

   int old_size = this->Size();
-  isolate->heap()->NotifyObjectLayoutChange(*this, no_gc);
+  isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
   bool one_byte = internalized.IsOneByteRepresentation();
   Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
                              : isolate->factory()->thin_string_map();
@@ -158,7 +158,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool has_pointers = StringShape(*this).IsIndirect();

   if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
   }
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields. This won't work if the space the existing
@@ -232,7 +232,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool has_pointers = StringShape(*this).IsIndirect();

   if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
   }
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields. This won't work if the space the existing
...
@@ -132,7 +132,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
     DisallowHeapAllocation no_allocation;
-    isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(
+        *receiver, receiver_map->instance_size(), no_allocation);
     FieldIndex index =
         FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
     // Special case deleting the last out-of object property.
...
@@ -70,7 +70,8 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register every second byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -94,7 +95,8 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -115,7 +117,8 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
@@ -142,7 +145,8 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be no-op because the page is marked as evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // All slots must still be valid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -165,7 +169,8 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // All slots must still be invalid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -354,7 +359,8 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // Mark full page as free
@@ -373,7 +379,8 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
+                                                         byte_arrays[i].Size());
   }
   // Mark each object as free on page
@@ -398,9 +405,11 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   CHECK_GT(byte_arrays.size(), 1);

   ByteArray& invalidated = byte_arrays[1];
+  int invalidated_size = invalidated.Size();

   heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
-  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
+                                                       invalidated_size);

   // Free memory at end of invalidated object
   InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
@@ -409,6 +418,8 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   // After cleanup the invalidated object should be smaller
   InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
+  CHECK_GE((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
+           invalidated.Size());
   CHECK_EQ(invalidated_slots->size(), 1);
 }
...