Commit 11960b13 authored by Camillo Bruni, committed by V8 LUCI CQ

[runtime] Dehandlify some Map and Descriptor code

Avoiding handles in tight loops and setup code improves performance
and reduces code size.

This CL also makes non-allocating code more explicit by adding
more DisallowGarbageCollection scopes.

Change-Id: I95b5b1a29204c27a23c42ccd67fff150b3fa4a3e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3460740
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79142}
parent 6f8b501c
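
The pattern this CL applies throughout is sketched below, condensed from the RawCopy hunk further down (illustrative rather than literal commit code). A Handle<T> adds a level of indirection so the GC can relocate the object; dereferencing the handles once and working on raw values inside a DisallowGarbageCollection scope removes the repeated indirection and, in debug builds, asserts that no allocation can move objects out from under the raw pointers.

    // Before: each statement dereferences the handles again, and each
    // access must individually remain safe across a potential GC.
    result->set_bit_field(map->bit_field());
    result->set_bit_field2(map->bit_field2());

    // After: dereference once, then use raw Map values. The no_gc scope
    // makes the non-allocating region explicit and checked in debug builds.
    {
      DisallowGarbageCollection no_gc;
      Map src = *map;
      Map raw = *result;
      raw.set_bit_field(src.bit_field());
      raw.set_bit_field2(src.bit_field2());
    }
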
@@ -688,8 +688,8 @@ bool Map::IsBooleanMap() const {
 }
 bool Map::IsNullOrUndefinedMap() const {
-  return *this == GetReadOnlyRoots().null_map() ||
-         *this == GetReadOnlyRoots().undefined_map();
+  auto roots = GetReadOnlyRoots();
+  return *this == roots.null_map() || *this == roots.undefined_map();
 }
 bool Map::IsPrimitiveMap() const {
@@ -924,6 +924,7 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
 static Map FindClosestElementsTransition(Isolate* isolate, Map map,
                                          ElementsKind to_kind,
                                          ConcurrencyMode cmode) {
+  DisallowGarbageCollection no_gc;
   // Ensure we are requested to search elements kind transition "near the root".
   DCHECK_EQ(map.FindRootMap(isolate).NumberOfOwnDescriptors(),
             map.NumberOfOwnDescriptors());
@@ -1129,29 +1130,36 @@ bool Map::MayHaveReadOnlyElementsInPrototypeChain(Isolate* isolate) {
   return false;
 }
-Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
-                         int inobject_properties) {
+Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> src_handle,
+                         int instance_size, int inobject_properties) {
   Handle<Map> result = isolate->factory()->NewMap(
-      map->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
+      src_handle->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
       inobject_properties);
-  Handle<HeapObject> prototype(map->prototype(), isolate);
+  Handle<HeapObject> prototype(src_handle->prototype(), isolate);
   Map::SetPrototype(isolate, result, prototype);
-  result->set_constructor_or_back_pointer(map->GetConstructor());
-  result->set_bit_field(map->bit_field());
-  result->set_bit_field2(map->bit_field2());
-  int new_bit_field3 = map->bit_field3();
-  new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
-  new_bit_field3 = Bits3::NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
-  new_bit_field3 =
-      Bits3::EnumLengthBits::update(new_bit_field3, kInvalidEnumCacheSentinel);
-  new_bit_field3 = Bits3::IsDeprecatedBit::update(new_bit_field3, false);
-  new_bit_field3 = Bits3::IsInRetainedMapListBit::update(new_bit_field3, false);
-  if (!map->is_dictionary_map()) {
-    new_bit_field3 = Bits3::IsUnstableBit::update(new_bit_field3, false);
-  }
-  // Same as bit_field comment above.
-  result->set_bit_field3(new_bit_field3);
-  result->clear_padding();
+  {
+    DisallowGarbageCollection no_gc;
+    Map src = *src_handle;
+    Map raw = *result;
+    raw.set_constructor_or_back_pointer(src.GetConstructor());
+    raw.set_bit_field(src.bit_field());
+    raw.set_bit_field2(src.bit_field2());
+    int new_bit_field3 = src.bit_field3();
+    new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
+    new_bit_field3 =
+        Bits3::NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
+    new_bit_field3 = Bits3::EnumLengthBits::update(new_bit_field3,
+                                                   kInvalidEnumCacheSentinel);
+    new_bit_field3 = Bits3::IsDeprecatedBit::update(new_bit_field3, false);
+    new_bit_field3 =
+        Bits3::IsInRetainedMapListBit::update(new_bit_field3, false);
+    if (!src.is_dictionary_map()) {
+      new_bit_field3 = Bits3::IsUnstableBit::update(new_bit_field3, false);
+    }
+    // Same as bit_field comment above.
+    raw.set_bit_field3(new_bit_field3);
+    raw.clear_padding();
+  }
   return result;
 }
@@ -1242,13 +1250,17 @@ Handle<Map> Map::CopyNormalized(Isolate* isolate, Handle<Map> map,
   Handle<Map> result = RawCopy(
       isolate, map, new_instance_size,
       mode == CLEAR_INOBJECT_PROPERTIES ? 0 : map->GetInObjectProperties());
-  // Clear the unused_property_fields explicitly as this field should not
-  // be accessed for normalized maps.
-  result->SetInObjectUnusedPropertyFields(0);
-  result->set_is_dictionary_map(true);
-  result->set_is_migration_target(false);
-  result->set_may_have_interesting_symbols(true);
-  result->set_construction_counter(kNoSlackTracking);
+  {
+    DisallowGarbageCollection no_gc;
+    Map raw = *result;
+    // Clear the unused_property_fields explicitly as this field should not
+    // be accessed for normalized maps.
+    raw.SetInObjectUnusedPropertyFields(0);
+    raw.set_is_dictionary_map(true);
+    raw.set_is_migration_target(false);
+    raw.set_may_have_interesting_symbols(true);
+    raw.set_construction_counter(kNoSlackTracking);
+  }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) result->DictionaryMapVerify(isolate);
@@ -1635,9 +1647,11 @@ Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
 }
 Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
-  Handle<Map> copy =
+  Handle<Map> copy_handle =
       Copy(isolate, handle(isolate->object_function()->initial_map(), isolate),
            "MapCreate");
+  DisallowGarbageCollection no_gc;
+  Map copy = *copy_handle;
   // Check that we do not overflow the instance size when adding the extra
   // inobject properties. If the instance size overflows, we allocate as many
@@ -1650,12 +1664,13 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
       JSObject::kHeaderSize + kTaggedSize * inobject_properties;
   // Adjust the map with the extra inobject properties.
-  copy->set_instance_size(new_instance_size);
-  copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
-  DCHECK_EQ(copy->GetInObjectProperties(), inobject_properties);
-  copy->SetInObjectUnusedPropertyFields(inobject_properties);
-  copy->set_visitor_id(Map::GetVisitorId(*copy));
-  return copy;
+  copy.set_instance_size(new_instance_size);
+  copy.SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
+  DCHECK_EQ(copy.GetInObjectProperties(), inobject_properties);
+  copy.SetInObjectUnusedPropertyFields(inobject_properties);
+  copy.set_visitor_id(Map::GetVisitorId(copy));
+  return copy_handle;
 }
 Handle<Map> Map::CopyForPreventExtensions(
@@ -3816,22 +3816,25 @@ Handle<DescriptorArray> DescriptorArray::CopyUpTo(Isolate* isolate,
 }
 Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
-    Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
-    PropertyAttributes attributes, int slack) {
+    Isolate* isolate, Handle<DescriptorArray> source_handle,
+    int enumeration_index, PropertyAttributes attributes, int slack) {
   if (enumeration_index + slack == 0) {
     return isolate->factory()->empty_descriptor_array();
   }
   int size = enumeration_index;
-  Handle<DescriptorArray> descriptors =
+  Handle<DescriptorArray> copy_handle =
       DescriptorArray::Allocate(isolate, size, slack);
+  DisallowGarbageCollection no_gc;
+  auto source = *source_handle;
+  auto copy = *copy_handle;
   if (attributes != NONE) {
     for (InternalIndex i : InternalIndex::Range(size)) {
-      MaybeObject value_or_field_type = desc->GetValue(i);
-      Name key = desc->GetKey(i);
-      PropertyDetails details = desc->GetDetails(i);
+      MaybeObject value_or_field_type = source.GetValue(i);
+      Name key = source.GetKey(i);
+      PropertyDetails details = source.GetDetails(i);
       // Bulk attribute changes never affect private properties.
       if (!key.IsPrivate()) {
         int mask = DONT_DELETE | DONT_ENUM;
@@ -3845,35 +3848,39 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
         details = details.CopyAddAttributes(
             static_cast<PropertyAttributes>(attributes & mask));
       }
-      descriptors->Set(i, key, value_or_field_type, details);
+      copy.Set(i, key, value_or_field_type, details);
     }
   } else {
     for (InternalIndex i : InternalIndex::Range(size)) {
-      descriptors->CopyFrom(i, *desc);
+      copy.CopyFrom(i, source);
     }
   }
-  if (desc->number_of_descriptors() != enumeration_index) descriptors->Sort();
-  return descriptors;
+  if (source.number_of_descriptors() != enumeration_index) copy.Sort();
+  return copy_handle;
 }
 // Create a new descriptor array with only enumerable, configurable, writeable
 // data properties, but identical field locations.
 Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
-    Isolate* isolate, Handle<DescriptorArray> src, int enumeration_index,
+    Isolate* isolate, Handle<DescriptorArray> src_handle, int enumeration_index,
     int slack) {
   if (enumeration_index + slack == 0) {
     return isolate->factory()->empty_descriptor_array();
   }
   int size = enumeration_index;
-  Handle<DescriptorArray> descriptors =
+  Handle<DescriptorArray> descriptors_handle =
       DescriptorArray::Allocate(isolate, size, slack);
+  DisallowGarbageCollection no_gc;
+  auto src = *src_handle;
+  auto descriptors = *descriptors_handle;
   for (InternalIndex i : InternalIndex::Range(size)) {
-    Name key = src->GetKey(i);
-    PropertyDetails details = src->GetDetails(i);
+    Name key = src.GetKey(i);
+    PropertyDetails details = src.GetDetails(i);
     Representation new_representation = details.representation();
     DCHECK(!key.IsPrivateName());
@@ -3882,7 +3889,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
     // If the new representation is an in-place changeable field, make it
     // generic as possible (under in-place changes) to avoid type confusion if
     // the source representation changes after this feedback has been collected.
-    MaybeObject type = src->GetValue(i);
+    MaybeObject type = src.GetValue(i);
     if (details.location() == PropertyLocation::kField) {
       type = MaybeObject::FromObject(FieldType::Any());
       // TODO(bmeurer,ishell): Igor suggested to use some kind of dynamic
@@ -3899,12 +3906,12 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
                                 details.constness(), new_representation,
                                 details.field_index());
-    descriptors->Set(i, key, type, new_details);
+    descriptors.Set(i, key, type, new_details);
   }
-  descriptors->Sort();
+  descriptors.Sort();
-  return descriptors;
+  return descriptors_handle;
 }
 bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {