Commit 1b22e6fb authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] Fix multiple races in Map::FindElementsKindTransitionedMap

The concurrent version was added recently in crrev.com/c/3085262.

- UnusedPropertyFields requires the MapUpdater lock.
- instance_descriptors must be read atomically on the bg thread.

Finally, there appears to be a false positive report for the pattern:

 x = is_concurrent ? foo(kAcquireLoad) : foo();

Here, clang emits code that executes both the atomic and nonatomic
reads when is_concurrent is true. Needs more investigation.

Bug: v8:7790, chromium:1239009
Change-Id: I07d442e72cf0278f79f202a267e8d246f8abca1b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3090341
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#76261}
parent 2e006255
......@@ -889,6 +889,10 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
// Don't generate elements kind transitions from stable maps.
if (!map.is_stable()) {
// The lock is needed for UnusedPropertyFields (called deep inside
// FindElementsKindTransitionedMap).
MapUpdaterGuardIfNeeded mumd_scope(this);
transition_target = map.object()->FindElementsKindTransitionedMap(
isolate(), possible_transition_targets, ConcurrencyMode::kConcurrent);
}
......
......@@ -2867,14 +2867,16 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
}
int old_number_of_fields;
int number_of_fields = new_map->NumberOfFields();
int number_of_fields =
new_map->NumberOfFields(ConcurrencyMode::kNotConcurrent);
int inobject = new_map->GetInObjectProperties();
int unused = new_map->UnusedPropertyFields();
// Nothing to do if no functions were converted to fields and no smis were
// converted to doubles.
if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
unused, &old_number_of_fields)) {
unused, &old_number_of_fields,
ConcurrencyMode::kNotConcurrent)) {
object->set_map(*new_map, kReleaseStore);
return;
}
......@@ -3201,7 +3203,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
map = MapUpdater{isolate, map}.ReconfigureElementsKind(to_kind);
}
int number_of_fields = map->NumberOfFields();
int number_of_fields = map->NumberOfFields(ConcurrencyMode::kNotConcurrent);
int inobject = map->GetInObjectProperties();
int unused = map->UnusedPropertyFields();
int total_size = number_of_fields + unused;
......
......@@ -190,7 +190,8 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
return external > limit || counts.GetTotal() > kMaxNumberOfDescriptors;
} else {
int limit = std::max({kFastPropertiesSoftLimit, GetInObjectProperties()});
int external = NumberOfFields() - GetInObjectProperties();
int external = NumberOfFields(ConcurrencyMode::kNotConcurrent) -
GetInObjectProperties();
return external > limit;
}
}
......
......@@ -355,7 +355,7 @@ base::Optional<Map> MapUpdater::TryUpdateNoLock(Isolate* isolate, Map old_map,
}
return constructor.initial_map();
}
if (!old_map.EquivalentToForTransition(root_map)) return {};
if (!old_map.EquivalentToForTransition(root_map, cmode)) return {};
ElementsKind from_kind = root_map.elements_kind();
ElementsKind to_kind = old_map.elements_kind();
......@@ -542,7 +542,8 @@ MapUpdater::State MapUpdater::FindRootMap() {
return state_;
}
if (!old_map_->EquivalentToForTransition(*root_map_)) {
if (!old_map_->EquivalentToForTransition(*root_map_,
ConcurrencyMode::kNotConcurrent)) {
return Normalize("Normalize_NotEquivalent");
} else if (old_map_->is_extensible() != root_map_->is_extensible()) {
DCHECK(!old_map_->is_extensible());
......
......@@ -447,28 +447,33 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
PropertyConstness::kConst, representation, flag);
}
bool Map::InstancesNeedRewriting(Map target) const {
int target_number_of_fields = target.NumberOfFields();
bool Map::InstancesNeedRewriting(Map target, ConcurrencyMode cmode) const {
int target_number_of_fields = target.NumberOfFields(cmode);
int target_inobject = target.GetInObjectProperties();
int target_unused = target.UnusedPropertyFields();
int old_number_of_fields;
return InstancesNeedRewriting(target, target_number_of_fields,
target_inobject, target_unused,
&old_number_of_fields);
&old_number_of_fields, cmode);
}
bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields) const {
int* old_number_of_fields,
ConcurrencyMode cmode) const {
// If fields were added (or removed), rewrite the instance.
*old_number_of_fields = NumberOfFields();
*old_number_of_fields = NumberOfFields(cmode);
DCHECK(target_number_of_fields >= *old_number_of_fields);
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
DescriptorArray old_desc = instance_descriptors();
DescriptorArray new_desc = target.instance_descriptors();
DescriptorArray old_desc = cmode == ConcurrencyMode::kConcurrent
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
DescriptorArray new_desc = cmode == ConcurrencyMode::kConcurrent
? target.instance_descriptors(kAcquireLoad)
: target.instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
if (new_desc.GetDetails(i).representation().IsDouble() !=
old_desc.GetDetails(i).representation().IsDouble()) {
......@@ -491,8 +496,10 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
return true;
}
int Map::NumberOfFields() const {
DescriptorArray descriptors = instance_descriptors();
int Map::NumberOfFields(ConcurrencyMode cmode) const {
DescriptorArray descriptors = cmode == ConcurrencyMode::kConcurrent
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).location() == kField) result++;
......@@ -521,7 +528,8 @@ Map::FieldCounts Map::GetFieldCounts() const {
}
bool Map::HasOutOfObjectProperties() const {
return GetInObjectProperties() < NumberOfFields();
return GetInObjectProperties() <
NumberOfFields(ConcurrencyMode::kNotConcurrent);
}
void Map::DeprecateTransitionTree(Isolate* isolate) {
......@@ -670,11 +678,13 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
DisallowGarbageCollection no_gc;
const bool is_concurrent = cmode == ConcurrencyMode::kConcurrent;
int root_nof = NumberOfOwnDescriptors();
int old_nof = old_map.NumberOfOwnDescriptors();
const int root_nof = NumberOfOwnDescriptors();
const int old_nof = old_map.NumberOfOwnDescriptors();
// TODO(jgruber,chromium:1239009): The main thread should use non-atomic
// reads, but this currently leads to odd behavior (see the linked bug).
// Investigate and fix this properly. Also below and in called functions.
DescriptorArray old_descriptors =
is_concurrent ? old_map.instance_descriptors(isolate, kAcquireLoad)
: old_map.instance_descriptors(isolate);
old_map.instance_descriptors(isolate, kAcquireLoad);
Map new_map = *this;
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
......@@ -686,8 +696,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
if (transition.is_null()) return Map();
new_map = transition;
DescriptorArray new_descriptors =
is_concurrent ? new_map.instance_descriptors(isolate, kAcquireLoad)
: new_map.instance_descriptors(isolate);
new_map.instance_descriptors(isolate, kAcquireLoad);
PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
......@@ -859,7 +868,7 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
if (IsTransitionableFastElementsKind(kind)) {
// Check the state of the root map.
Map root_map = FindRootMap(isolate);
if (!EquivalentToForElementsKindTransition(root_map)) return Map();
if (!EquivalentToForElementsKindTransition(root_map, cmode)) return Map();
root_map = root_map.LookupElementsTransitionMap(isolate, kind, cmode);
DCHECK(!root_map.is_null());
// Starting from the next existing elements kind transition try to
......@@ -874,7 +883,7 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
Map current =
root_map.TryReplayPropertyTransitions(isolate, *this, cmode);
if (current.is_null()) continue;
if (InstancesNeedRewriting(current)) continue;
if (InstancesNeedRewriting(current, cmode)) continue;
const bool current_is_packed =
IsFastPackedElementsKind(current.elements_kind());
......@@ -1287,7 +1296,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
result->set_owns_descriptors(false);
result->UpdateDescriptors(isolate, descriptors, number_of_own_descriptors);
DCHECK_EQ(result->NumberOfFields(),
DCHECK_EQ(result->NumberOfFields(ConcurrencyMode::kNotConcurrent),
result->GetInObjectProperties() - result->UnusedPropertyFields());
}
......@@ -2043,7 +2052,8 @@ bool CheckEquivalent(const Map first, const Map second) {
} // namespace
bool Map::EquivalentToForTransition(const Map other) const {
bool Map::EquivalentToForTransition(const Map other,
ConcurrencyMode cmode) const {
CHECK_EQ(GetConstructor(), other.GetConstructor());
CHECK_EQ(instance_type(), other.instance_type());
......@@ -2055,19 +2065,28 @@ bool Map::EquivalentToForTransition(const Map other) const {
// not equivalent to strict function.
int nof =
std::min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
nof);
DescriptorArray this_descriptors = cmode == ConcurrencyMode::kConcurrent
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
DescriptorArray that_descriptors =
cmode == ConcurrencyMode::kConcurrent
? other.instance_descriptors(kAcquireLoad)
: other.instance_descriptors();
return this_descriptors.IsEqualUpTo(that_descriptors, nof);
}
return true;
}
bool Map::EquivalentToForElementsKindTransition(const Map other) const {
if (!EquivalentToForTransition(other)) return false;
bool Map::EquivalentToForElementsKindTransition(const Map other,
ConcurrencyMode cmode) const {
if (!EquivalentToForTransition(other, cmode)) return false;
#ifdef DEBUG
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
DescriptorArray descriptors = instance_descriptors();
DescriptorArray descriptors = cmode == ConcurrencyMode::kConcurrent
? instance_descriptors(kAcquireLoad)
: instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
......
......@@ -497,15 +497,16 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
};
FieldCounts GetFieldCounts() const;
int NumberOfFields() const;
int NumberOfFields(ConcurrencyMode cmode) const;
bool HasOutOfObjectProperties() const;
// TODO(ishell): candidate with JSObject::MigrateToMap().
bool InstancesNeedRewriting(Map target) const;
bool InstancesNeedRewriting(Map target, ConcurrencyMode cmode) const;
bool InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields) const;
int* old_number_of_fields,
ConcurrencyMode cmode) const;
// Returns true if the |field_type| is the most general one for
// given |representation|.
static inline bool IsMostGeneralFieldType(Representation representation,
......@@ -880,8 +881,9 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
Handle<Map> child, Handle<Name> name,
SimpleTransitionFlag flag);
bool EquivalentToForTransition(const Map other) const;
bool EquivalentToForElementsKindTransition(const Map other) const;
bool EquivalentToForTransition(const Map other, ConcurrencyMode cmode) const;
bool EquivalentToForElementsKindTransition(const Map other,
ConcurrencyMode cmode) const;
static Handle<Map> RawCopy(Isolate* isolate, Handle<Map> map,
int instance_size, int inobject_properties);
static Handle<Map> ShareDescriptor(Isolate* isolate, Handle<Map> map,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment