Commit c4d3e9bd authored by Dominik Inführ, committed by Commit Bot

[heap] Remove size from invalidated slots

Slots inside an invalidated area are always valid when they lie outside the
respective object's current size. This allows us to remove the size field
from the InvalidatedSlots data structure.

This change was enabled by https://crrev.com/c/1771793. Reland after the
revert in https://crrev.com/c/1783106; this CL was not the culprit of the
issue (chromium:1000404).

Bug: v8:9454
Change-Id: I823d34670515924bf74200daa21a834044087310
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1787431
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63607}
parent eaa0bb4c
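In other words (an editor's sketch of the invariant with toy types, not V8 code): a slot that falls inside a registered invalidated area but beyond the object's current size must point into free space or into a freshly allocated object, so it can be treated as valid without consulting a stored pre-change size.

// Standalone sketch of the invariant this CL exploits; toy types, not V8
// code. Layout changes only ever shrink the invalidated object, so the
// filter can recompute the current size on demand instead of storing the
// pre-change size per entry.
#include <cstdint>

using Address = uintptr_t;

struct ToyObject {
  Address start;
  int current_size;  // in V8 recomputed via invalidated_object.Size()

  // True if the slot is valid without inspecting the object's map.
  bool SlotTriviallyValid(Address slot) const {
    if (slot < start) return true;  // before the invalidated area
    int offset = static_cast<int>(slot - start);
    return offset >= current_size;  // past the current size: always valid
  }
};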
@@ -3712,8 +3712,7 @@ void TranslatedState::InitializeJSObjectAt(
   CHECK_GE(slot->GetChildrenCount(), 2);

   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(
-      *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
+  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);

   // Fill the property array field.
   {
@@ -3772,8 +3771,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
   }

   // Notify the concurrent marker about the layout change.
-  isolate()->heap()->NotifyObjectLayoutChange(
-      *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
+  isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);

   // Write the fields to the object.
   for (int i = 1; i < slot->GetChildrenCount(); i++) {
......
@@ -3388,7 +3388,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
 }

 void Heap::NotifyObjectLayoutChange(
-    HeapObject object, int size, const DisallowHeapAllocation&,
+    HeapObject object, const DisallowHeapAllocation&,
     InvalidateRecordedSlots invalidate_recorded_slots) {
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
@@ -3396,13 +3396,13 @@ void Heap::NotifyObjectLayoutChange(
         invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
-          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
+          ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
     }
   }
   if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
       MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
-        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
+        ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
......
@@ -904,7 +904,7 @@ class Heap {
   // InvalidateRecordedSlots::kNo if this is not necessary or to perform this
   // manually.
   void NotifyObjectLayoutChange(
-      HeapObject object, int old_size, const DisallowHeapAllocation&,
+      HeapObject object, const DisallowHeapAllocation&,
       InvalidateRecordedSlots invalidate_recorded_slots =
           InvalidateRecordedSlots::kYes);
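The kNo path referenced in the comment above pairs with manual invalidation at the call sites. Condensed from the MigrateFastToFast hunks later in this diff (surrounding migration logic elided, so this is an excerpt-style sketch rather than a complete function):

// Condensed from the MigrateFastToFast changes in this CL; with
// InvalidateRecordedSlots::kNo the caller takes over slot invalidation.
DisallowHeapAllocation no_allocation;
heap->NotifyObjectLayoutChange(*object, no_allocation,
                               InvalidateRecordedSlots::kNo);
// ... transition a tagged field to an untagged representation ...
heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
chunk->InvalidateRecordedSlots(*object);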
......
@@ -24,42 +24,39 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
   DCHECK_LE(last_slot_, slot);
   last_slot_ = slot;
 #endif
-  while (slot >= invalidated_end_) {
-    ++iterator_;
-    if (iterator_ != iterator_end_) {
-      // Invalidated ranges must not overlap.
-      DCHECK_LE(invalidated_end_, iterator_->first.address());
-      invalidated_start_ = iterator_->first.address();
-      invalidated_end_ = invalidated_start_ + iterator_->second;
-      invalidated_object_ = HeapObject();
-      invalidated_object_size_ = 0;
-    } else {
-      invalidated_start_ = sentinel_;
-      invalidated_end_ = sentinel_;
-    }
-  }
-  // Now the invalidated region ends after the slot.
   if (slot < invalidated_start_) {
-    // The invalidated region starts after the slot.
     return true;
   }
-  // The invalidated region includes the slot.
-  // Ask the object if the slot is valid.
-  if (invalidated_object_.is_null()) {
-    invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
-    DCHECK(!invalidated_object_.IsFiller());
-    invalidated_object_size_ =
-        invalidated_object_.SizeFromMap(invalidated_object_.map());
+
+  while (slot >= next_invalidated_start_) {
+    NextInvalidatedObject();
+  }
+
+  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
+
+  if (invalidated_size_ == 0) {
+    invalidated_size_ = invalidated_object.Size();
   }
+
   int offset = static_cast<int>(slot - invalidated_start_);
   DCHECK_GT(offset, 0);
-  DCHECK_LE(invalidated_object_size_,
-            static_cast<int>(invalidated_end_ - invalidated_start_));
-  if (offset >= invalidated_object_size_) {
-    return slots_in_free_space_are_valid_;
-  }
-  return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
+  if (offset < invalidated_size_)
+    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+
+  NextInvalidatedObject();
+  return true;
+}
+
+void InvalidatedSlotsFilter::NextInvalidatedObject() {
+  invalidated_start_ = next_invalidated_start_;
+  invalidated_size_ = 0;
+
+  if (iterator_ == iterator_end_) {
+    next_invalidated_start_ = sentinel_;
+  } else {
+    next_invalidated_start_ = iterator_->address();
+    iterator_++;
+  }
 }

 void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
@@ -72,35 +69,25 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
   if (iterator_ == iterator_end_) return;

-  // Ignore invalidated objects before free region
-  while (free_start >= invalidated_end_) {
+  // Ignore invalidated objects that start before free region
+  while (invalidated_start_ < free_start) {
     ++iterator_;
     NextInvalidatedObject();
   }

-  // Loop here: Free region might contain multiple invalidated objects
-  while (free_end > invalidated_start_) {
-    // Case: Free region starts before current invalidated object
-    if (free_start <= invalidated_start_) {
-      iterator_ = invalidated_slots_->erase(iterator_);
-    } else {
-      // Case: Free region starts within current invalidated object
-      // (Can happen for right-trimmed objects)
-      iterator_++;
-    }
-
+  // Remove all invalidated objects that start within free region.
+  while (invalidated_start_ < free_end) {
+    iterator_ = invalidated_slots_->erase(iterator_);
     NextInvalidatedObject();
   }
 }

 void InvalidatedSlotsCleanup::NextInvalidatedObject() {
   if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first.address();
-    invalidated_end_ = invalidated_start_ + iterator_->second;
+    invalidated_start_ = iterator_->address();
   } else {
     invalidated_start_ = sentinel_;
-    invalidated_end_ = sentinel_;
   }
 }
......
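For orientation, a usage sketch of the filter contract, modeled on the cctest code at the end of this diff. The names below that are not in the diff (the slot list) are hypothetical:

// Sketch modeled on the heap tests in this CL. Assumes `page` is a Page*
// with objects registered via RegisterObjectWithInvalidatedSlots. Slots
// must be queried in non-decreasing address order (enforced in debug
// builds by DCHECK_LE(last_slot_, slot)), since the filter walks the
// sorted invalidated set in a single pass.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (Address slot : recorded_slots_in_ascending_order) {  // hypothetical
  if (filter.IsValid(slot)) {
    // Slot still points at a live tagged field; safe to process it.
  }
}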
@@ -3,52 +3,35 @@
 // found in the LICENSE file.

 #include "src/heap/invalidated-slots.h"
+#include "src/heap/invalidated-slots-inl.h"
 #include "src/heap/spaces.h"
-#include "src/objects/objects-inl.h"

 namespace v8 {
 namespace internal {

 InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
-  // The sweeper removes invalid slots and makes free space available for
-  // allocation. Slots for new objects can be recorded in the free space.
-  // Note that we cannot simply check for SweepingDone because pages in large
-  // object space are not swept but have SweepingDone() == true.
-  bool slots_in_free_space_are_valid =
-      chunk->SweepingDone() && chunk->InOldSpace();
-  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
-                                slots_in_free_space_are_valid);
+  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
 }

 InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
-  // Always treat these slots as valid for old-to-new for now. Invalid
-  // old-to-new slots are always cleared.
-  bool slots_in_free_space_are_valid = true;
-  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
-                                slots_in_free_space_are_valid);
+  return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
 }

 InvalidatedSlotsFilter::InvalidatedSlotsFilter(
-    MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
-    bool slots_in_free_space_are_valid) {
-  // Adjust slots_in_free_space_are_valid_ if more spaces are added.
-  DCHECK_IMPLIES(invalidated_slots != nullptr,
-                 chunk->InOldSpace() || chunk->InLargeObjectSpace());
-  slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
+    MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
   invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;

   iterator_ = invalidated_slots->begin();
   iterator_end_ = invalidated_slots->end();
   sentinel_ = chunk->area_end();

-  if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first.address();
-    invalidated_end_ = invalidated_start_ + iterator_->second;
-  } else {
-    invalidated_start_ = sentinel_;
-    invalidated_end_ = sentinel_;
-  }
-  // These values will be lazily set when needed.
-  invalidated_object_size_ = 0;
+  // Invoke NextInvalidatedObject twice, to initialize
+  // invalidated_start_ to the first invalidated object and
+  // next_invalidated_start_ to the second one.
+  NextInvalidatedObject();
+  NextInvalidatedObject();
+
 #ifdef DEBUG
   last_slot_ = chunk->area_start();
 #endif
@@ -69,13 +52,7 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
   iterator_end_ = invalidated_slots_->end();
   sentinel_ = chunk->area_end();

-  if (iterator_ != iterator_end_) {
-    invalidated_start_ = iterator_->first.address();
-    invalidated_end_ = invalidated_start_ + iterator_->second;
-  } else {
-    invalidated_start_ = sentinel_;
-    invalidated_end_ = sentinel_;
-  }
+  NextInvalidatedObject();

 #ifdef DEBUG
   last_free_ = chunk->area_start();
......
@@ -5,7 +5,7 @@
 #ifndef V8_HEAP_INVALIDATED_SLOTS_H_
 #define V8_HEAP_INVALIDATED_SLOTS_H_

-#include <map>
+#include <set>
 #include <stack>

 #include "src/base/atomic-utils.h"
@@ -20,7 +20,7 @@ namespace internal {
 // This data structure stores objects that went through object layout change
 // that potentially invalidates slots recorded concurrently. The second part
 // of each element is the size of the corresponding object before the layout
 // change.
-using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
+using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;

 // This class provides IsValid predicate that takes into account the set
 // of invalidated objects in the given memory chunk.
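A standalone toy model of the new container shape (plain addresses stand in for HeapObject; Object::Comparer likewise orders entries by address). It shows how a sorted set alone suffices to find the invalidated object that could contain a given slot:

// Toy model of the set-based InvalidatedSlots; uintptr_t stands in for
// HeapObject. No sizes are stored: if a slot falls inside an entry, the
// current size is recomputed from the object itself.
#include <cstdint>
#include <set>

using ToyInvalidatedSlots = std::set<uintptr_t>;  // ordered by address

// Returns the start of the last invalidated object at or before `slot`,
// or 0 if there is none.
uintptr_t ContainingEntry(const ToyInvalidatedSlots& slots, uintptr_t slot) {
  auto it = slots.upper_bound(slot);
  if (it == slots.begin()) return 0;
  return *--it;
}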
@@ -34,8 +34,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);

   explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
-                                  InvalidatedSlots* invalidated_slots,
-                                  bool slots_in_free_space_are_valid);
+                                  InvalidatedSlots* invalidated_slots);
   inline bool IsValid(Address slot);

  private:
@@ -43,14 +42,15 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
   InvalidatedSlots::const_iterator iterator_end_;
   Address sentinel_;
   Address invalidated_start_;
-  Address invalidated_end_;
-  HeapObject invalidated_object_;
-  int invalidated_object_size_;
-  bool slots_in_free_space_are_valid_;
+  Address next_invalidated_start_;
+  int invalidated_size_;
   InvalidatedSlots empty_;
 #ifdef DEBUG
   Address last_slot_;
 #endif
+
+ private:
+  inline void NextInvalidatedObject();
 };
@@ -71,7 +71,6 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
   Address sentinel_;
   Address invalidated_start_;
-  Address invalidated_end_;

   inline void NextInvalidatedObject();

 #ifdef DEBUG
......
@@ -3422,13 +3422,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }

     if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
-#ifdef DEBUG
-      for (auto object_size : *chunk_->invalidated_slots<OLD_TO_NEW>()) {
-        HeapObject object = object_size.first;
-        int size = object_size.second;
-        DCHECK_LE(object.SizeFromMap(object.map()), size);
-      }
-#endif
       // The invalidated slots are not needed after old-to-new slots were
       // processed.
       chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
@@ -3447,13 +3440,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }

     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
         chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
-#ifdef DEBUG
-      for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
-        HeapObject object = object_size.first;
-        int size = object_size.second;
-        DCHECK_LE(object.SizeFromMap(object.map()), size);
-      }
-#endif
       // The invalidated slots are not needed after old-to-old slots were
       // processsed.
       chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
......
@@ -442,13 +442,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
                                        SlotSet::KEEP_EMPTY_BUCKETS);

   if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
-#ifdef DEBUG
-    for (auto object_size : *page->invalidated_slots<OLD_TO_NEW>()) {
-      HeapObject object = object_size.first;
-      int size = object_size.second;
-      DCHECK_LE(object.SizeFromMap(object.map()), size);
-    }
-#endif
     // The invalidated slots are not needed after old-to-new slots were
     // processed.
     page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
......
@@ -1484,15 +1484,12 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
 }

 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
-                                                            int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
 template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
-                                                            int size);
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);

 template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
-                                                     int size) {
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
   bool skip_slot_recording;

   if (type == OLD_TO_NEW) {
@@ -1509,40 +1506,20 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
     AllocateInvalidatedSlots<type>();
   }

-  InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
-  InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
-
-  if (it != invalidated_slots->end() && it->first == object) {
-    // object was already inserted
-    CHECK_LE(size, it->second);
-    return;
-  }
-
-  it = invalidated_slots->insert(it, std::make_pair(object, size));
-
-  // prevent overlapping invalidated objects for old-to-new.
-  if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
-    HeapObject pred = (--it)->first;
-    int pred_size = it->second;
-    DCHECK_LT(pred.address(), object.address());
-
-    if (pred.address() + pred_size > object.address()) {
-      it->second = static_cast<int>(object.address() - pred.address());
-    }
-  }
+  invalidated_slots<type>()->insert(object);
 }

-void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int size) {
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
   if (heap()->incremental_marking()->IsCompacting()) {
     // We cannot check slot_set_[OLD_TO_OLD] here, since the
     // concurrent markers might insert slots concurrently.
-    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
   }

   heap()->MoveStoreBufferEntriesToRememberedSet();

   if (slot_set_[OLD_TO_NEW] != nullptr) {
-    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
   }
 }
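Because the set is keyed only by the object's start, double registration is naturally idempotent, which is why the old lower_bound bookkeeping and the old-to-new overlap trimming above disappear. A standalone illustration (toy types, not V8 code):

// Inserting the same key into a std::set twice leaves a single entry, so
// RegisterObjectWithInvalidatedSlots no longer needs an "already inserted"
// branch or size reconciliation.
#include <cassert>
#include <cstdint>
#include <set>

int main() {
  std::set<uintptr_t> invalidated;
  invalidated.insert(0x1000);
  invalidated.insert(0x1000);  // duplicate registration: no-op
  assert(invalidated.size() == 1);
  return 0;
}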
@@ -1560,27 +1537,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
          invalidated_slots<type>()->end();
 }

-template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
-    HeapObject old_start, HeapObject new_start);
-
-template <RememberedSetType type>
-void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
-                                                 HeapObject new_start) {
-  DCHECK_LT(old_start, new_start);
-  DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
-            MemoryChunk::FromHeapObject(new_start));
-  static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
-
-  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
-    auto it = invalidated_slots<type>()->find(old_start);
-    if (it != invalidated_slots<type>()->end()) {
-      int old_size = it->second;
-      int delta = static_cast<int>(new_start.address() - old_start.address());
-      invalidated_slots<type>()->erase(it);
-      (*invalidated_slots<type>())[new_start] = old_size - delta;
-    }
-  }
-}
-
 void MemoryChunk::ReleaseLocalTracker() {
   DCHECK_NOT_NULL(local_tracker_);
   delete local_tracker_;
......
@@ -729,13 +729,8 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseInvalidatedSlots();
   template <RememberedSetType type>
-  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
-                                                            int size);
-  // Updates invalidated_slots after array left-trimming.
-  template <RememberedSetType type>
-  void MoveObjectWithInvalidatedSlots(HeapObject old_start,
-                                      HeapObject new_start);
-  void InvalidateRecordedSlots(HeapObject object, int size);
+  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+  void InvalidateRecordedSlots(HeapObject object);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
......
@@ -2776,11 +2776,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
   Heap* heap = isolate->heap();

-  int old_instance_size = old_map->instance_size();
-
   // Invalidate slots manually later in case of tagged to untagged translation.
   // In all other cases the recorded slot remains dereferenceable.
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
+  heap->NotifyObjectLayoutChange(*object, no_allocation,
                                  InvalidateRecordedSlots::kNo);

   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
@@ -2800,7 +2798,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
         // Transition from tagged to untagged slot.
         heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
         MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
-        chunk->InvalidateRecordedSlots(*object, old_instance_size);
+        chunk->InvalidateRecordedSlots(*object);
       } else {
 #ifdef DEBUG
         heap->VerifyClearedSlot(*object, object->RawField(index.offset()));
@@ -2814,6 +2812,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
   object->SetProperties(*array);

   // Create filler object past the new instance size.
+  int old_instance_size = old_map->instance_size();
   int new_instance_size = new_map->instance_size();
   int instance_size_delta = old_instance_size - new_instance_size;
   DCHECK_GE(instance_size_delta, 0);
@@ -2896,15 +2895,15 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
   DisallowHeapAllocation no_allocation;

   Heap* heap = isolate->heap();
-  int old_instance_size = map->instance_size();

   // Invalidate slots manually later in case the new map has in-object
   // properties. If not, it is not possible to store an untagged value
   // in a recorded slot.
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
+  heap->NotifyObjectLayoutChange(*object, no_allocation,
                                  InvalidateRecordedSlots::kNo);

   // Resize the object in the heap if necessary.
+  int old_instance_size = map->instance_size();
   int new_instance_size = new_map->instance_size();
   int instance_size_delta = old_instance_size - new_instance_size;
   DCHECK_GE(instance_size_delta, 0);
@@ -2929,7 +2928,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
                        object->address() + map->GetInObjectPropertyOffset(0),
                        object->address() + new_instance_size);
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
-  chunk->InvalidateRecordedSlots(*object, old_instance_size);
+  chunk->InvalidateRecordedSlots(*object);

   for (int i = 0; i < inobject_properties; i++) {
     FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
......
@@ -599,8 +599,7 @@ void SharedFunctionInfo::ClearPreparseData() {
   Heap* heap = GetHeapFromWritableObject(data);

   // Swap the map.
-  heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
-                                 no_gc);
+  heap->NotifyObjectLayoutChange(data, no_gc);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
                 UncompiledDataWithPreparseData::kSize);
   STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
......
@@ -113,7 +113,7 @@ void String::MakeThin(Isolate* isolate, String internalized) {
   bool has_pointers = StringShape(*this).IsIndirect();
   int old_size = this->Size();

-  isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
+  isolate->heap()->NotifyObjectLayoutChange(*this, no_gc);
   bool one_byte = internalized.IsOneByteRepresentation();
   Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
                              : isolate->factory()->thin_string_map();
@@ -158,7 +158,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   bool has_pointers = StringShape(*this).IsIndirect();

   if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
   }

   // Morph the string to an external string by replacing the map and
   // reinitializing the fields. This won't work if the space the existing
@@ -232,7 +232,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   bool has_pointers = StringShape(*this).IsIndirect();

   if (has_pointers) {
-    isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
+    isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
   }

   // Morph the string to an external string by replacing the map and
   // reinitializing the fields. This won't work if the space the existing
......
@@ -132,13 +132,12 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
     DisallowHeapAllocation no_allocation;
-    int receiver_size = receiver_map->instance_size();

     // Invalidate slots manually later in case we delete an in-object tagged
     // property. In this case we might later store an untagged value in the
     // recorded slot.
-    isolate->heap()->NotifyObjectLayoutChange(
-        *receiver, receiver_size, no_allocation, InvalidateRecordedSlots::kNo);
+    isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation,
+                                              InvalidateRecordedSlots::kNo);
     FieldIndex index =
         FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());

     // Special case deleting the last out-of object property.
@@ -157,7 +156,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
       isolate->heap()->ClearRecordedSlot(*receiver,
                                          receiver->RawField(index.offset()));
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
-      chunk->InvalidateRecordedSlots(*receiver, receiver_size);
+      chunk->InvalidateRecordedSlots(*receiver);
     }
   }
 }
......
@@ -70,8 +70,7 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register every second byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i += 2) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -95,8 +94,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
   for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -117,8 +115,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // Trim byte arrays and check that the slots outside the byte arrays are
   // considered invalid if the old space page was swept.
@@ -145,8 +142,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
   // This should be no-op because the page is marked as evacuation
   // candidate.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // All slots must still be valid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -169,8 +165,7 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
   heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
   // Register the all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
   }
   // All slots must still be invalid.
   InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
@@ -359,8 +354,7 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
   }
   // Mark full page as free
@@ -379,8 +373,7 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
   Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
   // Register all byte arrays as invalidated.
   for (size_t i = 0; i < byte_arrays.size(); i++) {
-    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
-                                                         byte_arrays[i].Size());
+    page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
   }
   // Mark each object as free on page
@@ -405,11 +398,9 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   CHECK_GT(byte_arrays.size(), 1);

   ByteArray& invalidated = byte_arrays[1];
-  int invalidated_size = invalidated.Size();

   heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
-  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
-                                                       invalidated_size);
+  page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);

   // Free memory at end of invalidated object
   InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);

@@ -418,8 +409,6 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
   // After cleanup the invalidated object should be smaller
   InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
-  CHECK_GE((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
-           invalidated.Size());
   CHECK_EQ(invalidated_slots->size(), 1);
 }
......