Commit c4d3e9bd authored by Dominik Inführ, committed by Commit Bot

[heap] Remove size from invalidated slots

Slots inside an invalidated area are always valid when they lie outside the
respective object's current size. This allows us to remove the size from the
InvalidatedSlots data structure.

This change was enabled by https://crrev.com/c/1771793. Reland after the
revert in https://crrev.com/c/1783106; this CL was not the culprit of the
issue (chromium:1000404).
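
A minimal sketch of the rule described above (simplified; the helper name and signature are illustrative, not part of this CL):

```cpp
// Sketch: decide validity of a recorded slot inside an invalidated area.
bool IsValidSlotInInvalidatedArea(HeapObject invalidated_object,
                                  Address invalidated_start, Address slot) {
  int offset = static_cast<int>(slot - invalidated_start);
  if (offset < invalidated_object.Size()) {
    // Inside the object's current size: the object itself decides.
    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
  }
  // Past the current size (e.g. after right-trimming): always valid,
  // which is why the registered size can be dropped.
  return true;
}
```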

Bug: v8:9454
Change-Id: I823d34670515924bf74200daa21a834044087310
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1787431
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63607}
parent eaa0bb4c
......@@ -3712,8 +3712,7 @@ void TranslatedState::InitializeJSObjectAt(
CHECK_GE(slot->GetChildrenCount(), 2);
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(
*object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
// Fill the property array field.
{
......@@ -3772,8 +3771,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
}
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(
*object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
// Write the fields to the object.
for (int i = 1; i < slot->GetChildrenCount(); i++) {
......
......@@ -3388,7 +3388,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
void Heap::NotifyObjectLayoutChange(
HeapObject object, int size, const DisallowHeapAllocation&,
HeapObject object, const DisallowHeapAllocation&,
InvalidateRecordedSlots invalidate_recorded_slots) {
if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
......@@ -3396,13 +3396,13 @@ void Heap::NotifyObjectLayoutChange(
invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
}
if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
......
......@@ -904,7 +904,7 @@ class Heap {
// InvalidateRecordedSlots::kNo if this is not necessary or to perform this
// manually.
void NotifyObjectLayoutChange(
HeapObject object, int old_size, const DisallowHeapAllocation&,
HeapObject object, const DisallowHeapAllocation&,
InvalidateRecordedSlots invalidate_recorded_slots =
InvalidateRecordedSlots::kYes);
......
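
For reference, a hedged usage sketch of the updated signature (the caller context below is illustrative; both call shapes appear in the call sites changed by this CL):

```cpp
DisallowHeapAllocation no_gc;
// Default: the heap invalidates recorded slots for the object itself.
isolate->heap()->NotifyObjectLayoutChange(object, no_gc);
// Callers that clear or invalidate slots manually opt out explicitly.
isolate->heap()->NotifyObjectLayoutChange(object, no_gc,
                                          InvalidateRecordedSlots::kNo);
```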
......@@ -24,42 +24,39 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
DCHECK_LE(last_slot_, slot);
last_slot_ = slot;
#endif
while (slot >= invalidated_end_) {
++iterator_;
if (iterator_ != iterator_end_) {
// Invalidated ranges must not overlap.
DCHECK_LE(invalidated_end_, iterator_->first.address());
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
invalidated_object_ = HeapObject();
invalidated_object_size_ = 0;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
}
// Now the invalidated region ends after the slot.
if (slot < invalidated_start_) {
// The invalidated region starts after the slot.
return true;
}
// The invalidated region includes the slot.
// Ask the object if the slot is valid.
if (invalidated_object_.is_null()) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
DCHECK(!invalidated_object_.IsFiller());
invalidated_object_size_ =
invalidated_object_.SizeFromMap(invalidated_object_.map());
while (slot >= next_invalidated_start_) {
NextInvalidatedObject();
}
HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
if (invalidated_size_ == 0) {
invalidated_size_ = invalidated_object.Size();
}
int offset = static_cast<int>(slot - invalidated_start_);
DCHECK_GT(offset, 0);
DCHECK_LE(invalidated_object_size_,
static_cast<int>(invalidated_end_ - invalidated_start_));
if (offset < invalidated_size_)
return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
NextInvalidatedObject();
return true;
}
void InvalidatedSlotsFilter::NextInvalidatedObject() {
invalidated_start_ = next_invalidated_start_;
invalidated_size_ = 0;
if (offset >= invalidated_object_size_) {
return slots_in_free_space_are_valid_;
if (iterator_ == iterator_end_) {
next_invalidated_start_ = sentinel_;
} else {
next_invalidated_start_ = iterator_->address();
iterator_++;
}
return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
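
Since the removed and added lines in the hunk above are interleaved, the resulting filter logic is restated below as a sketch (reassembled from the added lines; illustrative, not the authoritative file contents):

```cpp
bool InvalidatedSlotsFilter::IsValid(Address slot) {
  // Advance to the invalidated object whose area could contain the slot.
  while (slot >= next_invalidated_start_) {
    NextInvalidatedObject();
  }
  HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
  // The object's size is computed lazily, only when a slot falls in its area.
  if (invalidated_size_ == 0) {
    invalidated_size_ = invalidated_object.Size();
  }
  int offset = static_cast<int>(slot - invalidated_start_);
  DCHECK_GT(offset, 0);
  if (offset < invalidated_size_)
    return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
  // The slot lies past the object's current size: always valid.
  NextInvalidatedObject();
  return true;
}

void InvalidatedSlotsFilter::NextInvalidatedObject() {
  invalidated_start_ = next_invalidated_start_;
  invalidated_size_ = 0;
  if (iterator_ == iterator_end_) {
    next_invalidated_start_ = sentinel_;
  } else {
    next_invalidated_start_ = iterator_->address();
    iterator_++;
  }
}
```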
void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
......@@ -72,35 +69,25 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
if (iterator_ == iterator_end_) return;
// Ignore invalidated objects before free region
while (free_start >= invalidated_end_) {
// Ignore invalidated objects that start before free region
while (invalidated_start_ < free_start) {
++iterator_;
NextInvalidatedObject();
}
// Loop here: Free region might contain multiple invalidated objects
while (free_end > invalidated_start_) {
// Case: Free region starts before current invalidated object
if (free_start <= invalidated_start_) {
iterator_ = invalidated_slots_->erase(iterator_);
} else {
// Case: Free region starts within current invalidated object
// (Can happen for right-trimmed objects)
iterator_++;
}
// Remove all invalidated objects that start within
// free region.
while (invalidated_start_ < free_end) {
iterator_ = invalidated_slots_->erase(iterator_);
NextInvalidatedObject();
}
}
void InvalidatedSlotsCleanup::NextInvalidatedObject() {
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
invalidated_start_ = iterator_->address();
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
}
......
......@@ -3,52 +3,35 @@
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
// The sweeper removes invalid slots and makes free space available for
// allocation. Slots for new objects can be recorded in the free space.
// Note that we cannot simply check for SweepingDone because pages in large
// object space are not swept but have SweepingDone() == true.
bool slots_in_free_space_are_valid =
chunk->SweepingDone() && chunk->InOldSpace();
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
slots_in_free_space_are_valid);
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
// Always treat these slots as valid for old-to-new for now. Invalid
// old-to-new slots are always cleared.
bool slots_in_free_space_are_valid = true;
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
slots_in_free_space_are_valid);
return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
bool slots_in_free_space_are_valid) {
// Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(invalidated_slots != nullptr,
chunk->InOldSpace() || chunk->InLargeObjectSpace());
slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
// These values will be lazily set when needed.
invalidated_object_size_ = 0;
// Invoke NextInvalidatedObject twice, to initialize
// invalidated_start_ to the first invalidated object and
// next_invalidated_start_ to the second one.
NextInvalidatedObject();
NextInvalidatedObject();
#ifdef DEBUG
last_slot_ = chunk->area_start();
#endif
......@@ -69,13 +52,7 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
iterator_end_ = invalidated_slots_->end();
sentinel_ = chunk->area_end();
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
NextInvalidatedObject();
#ifdef DEBUG
last_free_ = chunk->area_start();
......
......@@ -5,7 +5,7 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
#define V8_HEAP_INVALIDATED_SLOTS_H_
#include <map>
#include <set>
#include <stack>
#include "src/base/atomic-utils.h"
......@@ -20,7 +20,7 @@ namespace internal {
// that potentially invalidates slots recorded concurrently. The second part
// of each element is the size of the corresponding object before the layout
// change.
using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
using InvalidatedSlots = std::set<HeapObject, Object::Comparer>;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
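
A hedged sketch of what registration looks like with the set-based type (the chunk and object variables are illustrative; the actual insertion happens in MemoryChunk::RegisterObjectWithInvalidatedSlots further down):

```cpp
// Illustrative only: with the size dropped, registering an invalidated
// object becomes a plain set insert keyed by the object's start address.
InvalidatedSlots* slots = chunk->invalidated_slots<OLD_TO_NEW>();
slots->insert(object);  // previously: (*slots)[object] = size;
```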
......@@ -34,8 +34,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
InvalidatedSlots* invalidated_slots,
bool slots_in_free_space_are_valid);
InvalidatedSlots* invalidated_slots);
inline bool IsValid(Address slot);
private:
......@@ -43,14 +42,15 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
Address invalidated_start_;
Address invalidated_end_;
HeapObject invalidated_object_;
int invalidated_object_size_;
bool slots_in_free_space_are_valid_;
Address next_invalidated_start_;
int invalidated_size_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
#endif
private:
inline void NextInvalidatedObject();
};
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
......@@ -71,7 +71,6 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
Address sentinel_;
Address invalidated_start_;
Address invalidated_end_;
inline void NextInvalidatedObject();
#ifdef DEBUG
......
......@@ -3422,13 +3422,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots<OLD_TO_NEW>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-new slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
......@@ -3447,13 +3440,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-old slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
......
......@@ -442,13 +442,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
SlotSet::KEEP_EMPTY_BUCKETS);
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
#ifdef DEBUG
for (auto object_size : *page->invalidated_slots<OLD_TO_NEW>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-new slots were
// processed.
page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
......
......@@ -1484,15 +1484,12 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
}
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
int size);
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
int size);
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
int size) {
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
......@@ -1509,40 +1506,20 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
AllocateInvalidatedSlots<type>();
}
InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
if (it != invalidated_slots->end() && it->first == object) {
// object was already inserted
CHECK_LE(size, it->second);
return;
}
it = invalidated_slots->insert(it, std::make_pair(object, size));
// prevent overlapping invalidated objects for old-to-new.
if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
HeapObject pred = (--it)->first;
int pred_size = it->second;
DCHECK_LT(pred.address(), object.address());
if (pred.address() + pred_size > object.address()) {
it->second = static_cast<int>(object.address() - pred.address());
}
}
invalidated_slots<type>()->insert(object);
}
void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int size) {
void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
if (heap()->incremental_marking()->IsCompacting()) {
// We cannot check slot_set_[OLD_TO_OLD] here, since the
// concurrent markers might insert slots concurrently.
RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
heap()->MoveStoreBufferEntriesToRememberedSet();
if (slot_set_[OLD_TO_NEW] != nullptr) {
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
}
......@@ -1560,27 +1537,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
invalidated_slots<type>()->end();
}
template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
HeapObject old_start, HeapObject new_start);
template <RememberedSetType type>
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start) {
DCHECK_LT(old_start, new_start);
DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
MemoryChunk::FromHeapObject(new_start));
static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
auto it = invalidated_slots<type>()->find(old_start);
if (it != invalidated_slots<type>()->end()) {
int old_size = it->second;
int delta = static_cast<int>(new_start.address() - old_start.address());
invalidated_slots<type>()->erase(it);
(*invalidated_slots<type>())[new_start] = old_size - delta;
}
}
}
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
......
......@@ -729,13 +729,8 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
int size);
// Updates invalidated_slots after array left-trimming.
template <RememberedSetType type>
void MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start);
void InvalidateRecordedSlots(HeapObject object, int size);
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
void InvalidateRecordedSlots(HeapObject object);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>
......
......@@ -2776,11 +2776,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
Heap* heap = isolate->heap();
int old_instance_size = old_map->instance_size();
// Invalidate slots manually later in case of tagged to untagged translation.
// In all other cases the recorded slot remains dereferenceable.
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
heap->NotifyObjectLayoutChange(*object, no_allocation,
InvalidateRecordedSlots::kNo);
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
......@@ -2800,7 +2798,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
chunk->InvalidateRecordedSlots(*object, old_instance_size);
chunk->InvalidateRecordedSlots(*object);
} else {
#ifdef DEBUG
heap->VerifyClearedSlot(*object, object->RawField(index.offset()));
......@@ -2814,6 +2812,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
object->SetProperties(*array);
// Create filler object past the new instance size.
int old_instance_size = old_map->instance_size();
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK_GE(instance_size_delta, 0);
......@@ -2896,15 +2895,15 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
int old_instance_size = map->instance_size();
// Invalidate slots manually later in case the new map has in-object
// properties. If not, it is not possible to store an untagged value
// in a recorded slot.
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
heap->NotifyObjectLayoutChange(*object, no_allocation,
InvalidateRecordedSlots::kNo);
// Resize the object in the heap if necessary.
int old_instance_size = map->instance_size();
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK_GE(instance_size_delta, 0);
......@@ -2929,7 +2928,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
object->address() + map->GetInObjectPropertyOffset(0),
object->address() + new_instance_size);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
chunk->InvalidateRecordedSlots(*object, old_instance_size);
chunk->InvalidateRecordedSlots(*object);
for (int i = 0; i < inobject_properties; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
......
......@@ -599,8 +599,7 @@ void SharedFunctionInfo::ClearPreparseData() {
Heap* heap = GetHeapFromWritableObject(data);
// Swap the map.
heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
no_gc);
heap->NotifyObjectLayoutChange(data, no_gc);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
UncompiledDataWithPreparseData::kSize);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
......
......@@ -113,7 +113,7 @@ void String::MakeThin(Isolate* isolate, String internalized) {
bool has_pointers = StringShape(*this).IsIndirect();
int old_size = this->Size();
isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
isolate->heap()->NotifyObjectLayoutChange(*this, no_gc);
bool one_byte = internalized.IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
......@@ -158,7 +158,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
......@@ -232,7 +232,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
......
......@@ -132,13 +132,12 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// for properties stored in the descriptor array.
if (details.location() == kField) {
DisallowHeapAllocation no_allocation;
int receiver_size = receiver_map->instance_size();
// Invalidate slots manually later in case we delete an in-object tagged
// property. In this case we might later store an untagged value in the
// recorded slot.
isolate->heap()->NotifyObjectLayoutChange(
*receiver, receiver_size, no_allocation, InvalidateRecordedSlots::kNo);
isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation,
InvalidateRecordedSlots::kNo);
FieldIndex index =
FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
// Special case deleting the last out-of object property.
......@@ -157,7 +156,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
isolate->heap()->ClearRecordedSlot(*receiver,
receiver->RawField(index.offset()));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
chunk->InvalidateRecordedSlots(*receiver, receiver_size);
chunk->InvalidateRecordedSlots(*receiver);
}
}
}
......
......@@ -70,8 +70,7 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register every second byte array as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
......@@ -95,8 +94,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
......@@ -117,8 +115,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
// Trim byte arrays and check that the slots outside the byte arrays are
// considered invalid if the old space page was swept.
......@@ -145,8 +142,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
// This should be no-op because the page is marked as evacuation
// candidate.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
// All slots must still be valid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
......@@ -169,8 +165,7 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(byte_arrays[i]);
}
// All slots must still be invalid.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(page);
......@@ -359,8 +354,7 @@ HEAP_TEST(InvalidatedSlotsCleanupFull) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
}
// Mark full page as free
......@@ -379,8 +373,7 @@ HEAP_TEST(InvalidatedSlotsCleanupEachObject) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i],
byte_arrays[i].Size());
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(byte_arrays[i]);
}
// Mark each object as free on page
......@@ -405,11 +398,9 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
CHECK_GT(byte_arrays.size(), 1);
ByteArray& invalidated = byte_arrays[1];
int invalidated_size = invalidated.Size();
heap->RightTrimFixedArray(invalidated, invalidated.length() - 8);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated,
invalidated_size);
page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(invalidated);
// Free memory at end of invalidated object
InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::OldToNew(page);
......@@ -418,8 +409,6 @@ HEAP_TEST(InvalidatedSlotsCleanupRightTrim) {
// After cleanup the invalidated object should be smaller
InvalidatedSlots* invalidated_slots = page->invalidated_slots<OLD_TO_NEW>();
CHECK_GE((*invalidated_slots)[HeapObject::FromAddress(invalidated.address())],
invalidated.Size());
CHECK_EQ(invalidated_slots->size(), 1);
}
......