Commit a14e2f12 authored by Dominik Inführ, committed by Commit Bot

[heap] Reduce old-to-new invalidations

Reduce the number of old-to-new invalidations. MigrateFastToFast,
MigrateFastToSlow and DeleteObjectPropertyFast only need to invalidate
objects in some cases, not in all of them.

Bug: v8:9454
Change-Id: I901eecb9409c6dfa30cf6b4ee0bdd597862fc229
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1781042
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63557}
parent e4e86b53
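
For context on the diff below: V8's old-to-new remembered set records slots in old-generation objects that point into the young generation. After an unsafe layout change, a recorded slot inside the object may no longer hold a tagged pointer, so the object must be registered as having invalidated slots, which the GC then filters. What follows is a minimal standalone sketch of that bookkeeping; it is illustrative only, with made-up names, not V8 code.

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Toy object: a start address plus a size, mirroring the (object, size)
// pairs passed to RegisterObjectWithInvalidatedSlots in this CL.
struct ToyObject {
  uintptr_t address;
  int size;
};

class ToyRememberedSet {
 public:
  void RecordSlot(uintptr_t slot) { slots_.insert(slot); }

  // Registering an object marks every recorded slot inside it as suspect.
  // The point of this CL is to call this only when really needed.
  void RegisterInvalidatedObject(const ToyObject& object) {
    invalidated_.push_back(object);
  }

  bool IsSlotValid(uintptr_t slot) const {
    for (const ToyObject& object : invalidated_) {
      if (slot >= object.address &&
          slot < object.address + static_cast<uintptr_t>(object.size)) {
        return false;  // Slot lies inside an invalidated object.
      }
    }
    return slots_.count(slot) > 0;
  }

 private:
  std::set<uintptr_t> slots_;
  std::vector<ToyObject> invalidated_;
};

int main() {
  ToyRememberedSet old_to_new;
  old_to_new.RecordSlot(0x1008);
  std::cout << old_to_new.IsSlotValid(0x1008) << "\n";   // 1: valid
  old_to_new.RegisterInvalidatedObject({0x1000, 0x40});  // layout change
  std::cout << old_to_new.IsSlotValid(0x1008) << "\n";   // 0: invalidated
}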
@@ -3387,17 +3387,20 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   }
 }
 
-void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
-                                    const DisallowHeapAllocation&) {
+void Heap::NotifyObjectLayoutChange(
+    HeapObject object, int size, const DisallowHeapAllocation&,
+    InvalidateRecordedSlots invalidate_recorded_slots) {
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
+        invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
           ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
     }
   }
-  if (MayContainRecordedSlots(object)) {
+  if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
+      MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
         ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
   }
@@ -5529,6 +5532,10 @@ Address Heap::store_buffer_overflow_function_address() {
   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
 }
 
+void Heap::MoveStoreBufferEntriesToRememberedSet() {
+  store_buffer()->MoveAllEntriesToRememberedSet();
+}
+
 void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
   DCHECK(!IsLargeObject(object));
   Page* page = Page::FromAddress(slot.address());
...
@@ -86,6 +86,8 @@ enum ArrayStorageAllocationMode {
 
 enum class ClearRecordedSlots { kYes, kNo };
 
+enum class InvalidateRecordedSlots { kYes, kNo };
+
 enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
 
 enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
@@ -843,6 +845,8 @@ class Heap {
   static intptr_t store_buffer_mask_constant();
   static Address store_buffer_overflow_function_address();
 
+  void MoveStoreBufferEntriesToRememberedSet();
+
   void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
   void ClearRecordedSlotRange(Address start, Address end);
@@ -896,8 +900,13 @@ class Heap {
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
   // The old size is the size of the object before layout change.
-  void NotifyObjectLayoutChange(HeapObject object, int old_size,
-                                const DisallowHeapAllocation&);
+  // By default recorded slots in the object are invalidated. Pass
+  // InvalidateRecordedSlots::kNo if this is not necessary or to perform this
+  // manually.
+  void NotifyObjectLayoutChange(
+      HeapObject object, int old_size, const DisallowHeapAllocation&,
+      InvalidateRecordedSlots invalidate_recorded_slots =
+          InvalidateRecordedSlots::kYes);
 
 #ifdef VERIFY_HEAP
   // This function checks that either
...
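
A small aside on the heap.h signature above: the defaulted enum parameter keeps every existing NotifyObjectLayoutChange caller on the safe, eager path, while only the call sites touched in this CL opt out explicitly. Below is a standalone sketch of that pattern with made-up names; it is not V8 code.

#include <iostream>

enum class InvalidateRecordedSlots { kYes, kNo };

// Defaulting to kYes means untouched call sites keep the old behavior,
// while hot paths can opt out and invalidate manually only when needed.
void NotifyLayoutChange(
    int old_size,
    InvalidateRecordedSlots invalidate = InvalidateRecordedSlots::kYes) {
  if (invalidate == InvalidateRecordedSlots::kYes) {
    std::cout << "eagerly invalidate " << old_size << " bytes\n";
  } else {
    std::cout << "caller invalidates manually if required\n";
  }
}

int main() {
  NotifyLayoutChange(64);                                // existing callers
  NotifyLayoutChange(64, InvalidateRecordedSlots::kNo);  // opted-out paths
}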
@@ -1532,6 +1532,20 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
   }
 }
 
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int size) {
+  if (heap()->incremental_marking()->IsCompacting()) {
+    // We cannot check slot_set_[OLD_TO_OLD] here, since the
+    // concurrent markers might insert slots concurrently.
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
+  }
+
+  heap()->MoveStoreBufferEntriesToRememberedSet();
+
+  if (slot_set_[OLD_TO_NEW] != nullptr) {
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
+  }
+}
+
 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
     HeapObject object);
 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
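
Note the ordering inside MemoryChunk::InvalidateRecordedSlots above: the store buffer is flushed into the remembered set before slot_set_[OLD_TO_NEW] is tested for null, since pending old-to-new entries may still live only in the buffer. Here is a standalone sketch of why the flush must come first, with made-up names; it is not V8 code.

#include <cstdint>
#include <memory>
#include <set>
#include <utility>
#include <vector>

// The OLD_TO_NEW slot set is allocated lazily, as in V8.
struct ToyChunk {
  std::unique_ptr<std::set<uintptr_t>> old_to_new;
};

struct ToyStoreBuffer {
  std::vector<std::pair<ToyChunk*, uintptr_t>> pending;

  // Flushing materializes the slot set; only afterwards can a null check
  // on old_to_new be trusted.
  void MoveAllEntriesToRememberedSet() {
    for (auto& entry : pending) {
      ToyChunk* chunk = entry.first;
      if (!chunk->old_to_new) {
        chunk->old_to_new = std::make_unique<std::set<uintptr_t>>();
      }
      chunk->old_to_new->insert(entry.second);
    }
    pending.clear();
  }
};

int main() {
  ToyChunk chunk;
  ToyStoreBuffer buffer;
  buffer.pending.push_back({&chunk, 0x1008});
  // Checking chunk.old_to_new before flushing would see nullptr and wrongly
  // skip registering the invalidated object.
  buffer.MoveAllEntriesToRememberedSet();
  bool must_register = (chunk.old_to_new != nullptr);  // true after the flush
  return must_register ? 0 : 1;
}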
...
@@ -735,6 +735,7 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void MoveObjectWithInvalidatedSlots(HeapObject old_start,
                                       HeapObject new_start);
+  void InvalidateRecordedSlots(HeapObject object, int size);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>
...
@@ -2778,7 +2778,10 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
 
   int old_instance_size = old_map->instance_size();
 
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+  // Invalidate slots manually later in case of tagged to untagged translation.
+  // In all other cases the recorded slot remains dereferenceable.
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
+                                 InvalidateRecordedSlots::kNo);
 
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
@@ -2796,6 +2799,8 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
     if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
       // Transition from tagged to untagged slot.
       heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
+      MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
+      chunk->InvalidateRecordedSlots(*object, old_instance_size);
     } else {
 #ifdef DEBUG
       heap->VerifyClearedSlot(*object, object->RawField(index.offset()));
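
The manual invalidation added above covers the tagged-to-untagged case: once an unboxed double overwrites a formerly tagged field, a stale recorded slot would hand the GC raw double bits as if they were a heap pointer. A standalone illustration of the bit-level hazard follows; it is not V8 code.

#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  int dummy_heap_object = 42;
  // The slot initially holds a pointer to a live object.
  uint64_t slot =
      static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&dummy_heap_object));
  // A migration to an unboxed double rewrites the same memory with raw
  // floating-point bits.
  double unboxed = 3.14;
  std::memcpy(&slot, &unboxed, sizeof(unboxed));
  // Dereferencing the slot as a pointer now would be undefined behavior,
  // which is why the CL clears the recorded slot and registers the object
  // as having invalidated slots.
  std::cout << std::hex << slot << "\n";
}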
@@ -2892,7 +2897,12 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
 
   Heap* heap = isolate->heap();
   int old_instance_size = map->instance_size();
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+
+  // Invalidate slots manually later in case the new map has in-object
+  // properties. If not, it is not possible to store an untagged value
+  // in a recorded slot.
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
+                                 InvalidateRecordedSlots::kNo);
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
@@ -2918,6 +2928,8 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
     heap->ClearRecordedSlotRange(
         object->address() + map->GetInObjectPropertyOffset(0),
         object->address() + new_instance_size);
+    MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
+    chunk->InvalidateRecordedSlots(*object, old_instance_size);
 
     for (int i = 0; i < inobject_properties; i++) {
       FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
...
@@ -132,8 +132,13 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
     DisallowHeapAllocation no_allocation;
+
+    int receiver_size = receiver_map->instance_size();
+    // Invalidate slots manually later in case we delete an in-object tagged
+    // property. In this case we might later store an untagged value in the
+    // recorded slot.
     isolate->heap()->NotifyObjectLayoutChange(
-        *receiver, receiver_map->instance_size(), no_allocation);
+        *receiver, receiver_size, no_allocation, InvalidateRecordedSlots::kNo);
     FieldIndex index =
         FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
     // Special case deleting the last out-of object property.
@@ -151,6 +156,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
     if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
       isolate->heap()->ClearRecordedSlot(*receiver,
                                          receiver->RawField(index.offset()));
+      MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
+      chunk->InvalidateRecordedSlots(*receiver, receiver_size);
     }
   }
 }
...