Commit 9bfd401e authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] RawFastPropertyAt without serialization

This is a step towards making JSObjectRef non-serialized.

Change JSObjectRef::RawFastPropertyAt to use a direct load with
relaxed semantics. Special handling of `uninitialized` sentinel values
is moved to the only use-site.

A new lock `boilerplate_migration_access` protects against concurrent
boilerplate migrations while we are iterating over properties.

Bug: v8:7790
Change-Id: Ic9de54ca16c1f3364d497a77058cfa33d48dd4a4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2928184
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75033}
parent 75d9e3b8
@@ -719,8 +719,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
     Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
   CHECK(name->IsUniqueName());
-  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
-      broker(), isolate()->map_updater_access());
+  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker());
   if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
     return Invalid();
...
@@ -802,12 +802,15 @@ class HeapNumberData : public HeapObjectData {
                  Handle<HeapNumber> object,
                  ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
       : HeapObjectData(broker, storage, object, kind),
-        value_(object->value()) {}
+        value_(object->value()),
+        value_as_bits_(object->value_as_bits()) {}

   double value() const { return value_; }
+  uint64_t value_as_bits() const { return value_as_bits_; }

  private:
   double const value_;
+  uint64_t const value_as_bits_;
 };

 class ContextData : public HeapObjectData {
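The cached bit pattern matters because the hole sentinel is a NaN with a specific payload, and NaN never compares equal to itself, so value() alone cannot detect it. A minimal standalone sketch of the idea (not V8 code; the constant below is a stand-in for V8's kHoleNanInt64):

#include <cstdint>
#include <cstring>
#include <iostream>

// Stand-in sentinel: a quiet NaN with a distinguishing payload.
constexpr uint64_t kHoleSentinelBits = 0x7FF8'0000'0000'0001ULL;

// Equivalent of HeapNumber::value_as_bits(): reinterpret the double's bytes.
uint64_t ValueAsBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits;
}

int main() {
  double sentinel;
  std::memcpy(&sentinel, &kHoleSentinelBits, sizeof sentinel);
  std::cout << (sentinel == sentinel) << '\n';  // 0: NaN != NaN, double compare fails
  std::cout << (ValueAsBits(sentinel) == kHoleSentinelBits) << '\n';  // 1: bits match
}
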
@@ -1412,8 +1415,7 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
   // while the lock is held the Map object may not be modified (except in
   // benign ways).
   // TODO(jgruber): Consider removing this lock by being smrt.
-  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
-      broker, broker->isolate()->map_updater_access());
+  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker);

   // When background serializing the map, we can perform a lite serialization
   // since some of the Map's fields can be read directly.
@@ -2466,17 +2468,6 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
       DCHECK_EQ(field_index.property_index(),
                 static_cast<int>(inobject_fields_.size()));
       Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
-      // In case of double fields we use a sentinel NaN value to mark
-      // uninitialized fields. A boilerplate value with such a field may migrate
-      // from its double to a tagged representation. The sentinel value carries
-      // no special meaning when it occurs in a heap number, so we would like to
-      // recover the uninitialized value. We check for the sentinel here,
-      // specifically, since migrations might have been triggered as part of
-      // boilerplate serialization.
-      if (!details.representation().IsDouble() && value->IsHeapNumber() &&
-          HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
-        value = isolate->factory()->uninitialized_value();
-      }
       ObjectData* value_data = broker->GetOrCreateData(value);
       if (value_data->IsJSObject() && !value_data->should_access_heap()) {
         value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
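The deleted special case does not disappear; it moves to the sole remaining use site in TryAllocateFastLiteral (see the js-create-lowering hunk below). As a hedged standalone sketch of that predicate, with stand-in types for V8's Representation and HeapNumber:

#include <cstdint>

constexpr uint64_t kHoleSentinelBits = 0x7FF8'0000'0000'0001ULL;  // stand-in

enum class Representation { kSmi, kDouble, kTagged };

struct FieldValue {
  bool is_heap_number = false;
  uint64_t heap_number_bits = 0;  // valid only if is_heap_number
};

// A non-double field that still holds the double-hole sentinel is really an
// uninitialized field left over from a representation change.
bool IsUninitializedSentinel(Representation rep, const FieldValue& value) {
  return rep != Representation::kDouble && value.is_heap_number &&
         value.heap_number_bits == kHoleSentinelBits;
}
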
@@ -3026,10 +3017,12 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
       data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
 }

-ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
+base::Optional<ObjectRef> JSObjectRef::RawFastPropertyAt(
+    FieldIndex index) const {
   CHECK(index.is_inobject());
-  if (data_->should_access_heap()) {
-    return MakeRef(broker(), object()->RawFastPropertyAt(index));
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+    return TryMakeRef(broker(),
+                      object()->RawFastPropertyAt(index, kRelaxedLoad));
   }
   JSObjectData* object_data = data()->AsJSObject();
   return ObjectRef(broker(),
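With TryMakeRef, ref creation becomes fallible: a background thread may encounter an object it cannot safely wrap, and the failure propagates as an empty optional instead of forcing serialization. A sketch of that calling convention (std::optional standing in for base::Optional; CanReadSafely is a hypothetical stand-in for the checks TryMakeRef performs):

#include <optional>

struct Object {};
struct ObjectRef { const Object* raw; };

// Hypothetical predicate standing in for TryMakeRef's internal checks.
bool CanReadSafely(const Object&) { return true; }

std::optional<ObjectRef> TryMakeRef(const Object& obj) {
  if (!CanReadSafely(obj)) return std::nullopt;
  return ObjectRef{&obj};
}

// Callers bail out rather than crash when the ref cannot be created.
std::optional<int> UseProperty(const Object& obj) {
  std::optional<ObjectRef> ref = TryMakeRef(obj);
  if (!ref.has_value()) return std::nullopt;
  return 42;  // ... use ref->raw ...
}
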
@@ -3306,6 +3299,7 @@ BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
 BIMODAL_ACCESSOR(HeapObject, Map, map)

 BIMODAL_ACCESSOR_C(HeapNumber, double, value)
+BIMODAL_ACCESSOR_C(HeapNumber, uint64_t, value_as_bits)

 // These JSBoundFunction fields are immutable after initialization. Moreover,
 // as long as JSObjects are still serialized on the main thread, all
@@ -4053,6 +4047,10 @@ base::Optional<ObjectRef> SourceTextModuleRef::import_meta() const {
       data()->AsSourceTextModule()->GetImportMeta(broker()));
 }

+base::Optional<MapRef> HeapObjectRef::map_direct_read() const {
+  return TryMakeRef(broker(), object()->map(kAcquireLoad), kAssumeMemoryFence);
+}
+
 namespace {

 OddballType GetOddballType(Isolate* isolate, Map map) {
...
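map_direct_read() intentionally re-reads the object's current map with acquire semantics instead of returning the cached map. In portable C++ terms the pattern is an acquire load paired with a release store by whoever installs a new map; a hedged analogue (not V8's actual mechanism):

#include <atomic>

struct Map { int instance_size; };

struct HeapObjectLike {
  std::atomic<const Map*> map{nullptr};
};

// Writer (e.g. a migration): publish the fully-initialized map last.
void InstallMap(HeapObjectLike& o, const Map* m) {
  o.map.store(m, std::memory_order_release);
}

// Reader (e.g. a background compiler thread): the acquire load guarantees that
// everything written before the matching release store is visible afterwards.
const Map* DirectReadMap(const HeapObjectLike& o) {
  return o.map.load(std::memory_order_acquire);
}
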
@@ -279,6 +279,10 @@ class HeapObjectRef : public ObjectRef {
   MapRef map() const;

+  // Only for use in special situations where we need to read the object's
+  // current map (instead of returning the cached map). Use with care.
+  base::Optional<MapRef> map_direct_read() const;
+
   // See the comment on the HeapObjectType class.
   HeapObjectType GetHeapObjectType() const;
 };
@@ -315,7 +319,11 @@ class JSObjectRef : public JSReceiverRef {
   Handle<JSObject> object() const;

-  ObjectRef RawFastPropertyAt(FieldIndex index) const;
+  // Usable only for in-object properties. Only use this if the underlying
+  // value can be an uninitialized-sentinel, or if HeapNumber construction must
+  // be avoided for some reason. Otherwise, use the higher-level
+  // GetOwnFastDataProperty.
+  base::Optional<ObjectRef> RawFastPropertyAt(FieldIndex index) const;

   // Return the element at key {index} if {index} is known to be an own data
   // property of the object that is non-writable and non-configurable.
@@ -439,6 +447,7 @@ class HeapNumberRef : public HeapObjectRef {
   Handle<HeapNumber> object() const;

   double value() const;
+  uint64_t value_as_bits() const;
 };

 class ContextRef : public HeapObjectRef {
...
@@ -1656,8 +1656,21 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
 base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
     Node* effect, Node* control, JSObjectRef boilerplate,
     AllocationType allocation) {
-  // Compute the in-object properties to store first (might have effects).
+  // Prevent concurrent migrations of boilerplate objects.
+  JSHeapBroker::BoilerplateMigrationGuardIfNeeded boilerplate_access_guard(
+      broker());
+
+  // Now that we hold the migration lock, get the current map.
   MapRef boilerplate_map = boilerplate.map();
+  base::Optional<MapRef> current_boilerplate_map =
+      boilerplate.map_direct_read();
+  if (!current_boilerplate_map.has_value() ||
+      !current_boilerplate_map.value().equals(boilerplate_map)) {
+    return {};
+  }
+
+  // Compute the in-object properties to store first (might have effects).
   ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
   inobject_fields.reserve(boilerplate_map.GetInObjectProperties());
   int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors();
@@ -1678,14 +1691,48 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
                               kFullWriteBarrier,
                               LoadSensitivity::kUnsafe,
                               const_field_info};
-    ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
-    bool is_uninitialized =
-        boilerplate_value.IsHeapObject() &&
-        boilerplate_value.AsHeapObject().map().oddball_type() ==
-            OddballType::kUninitialized;
-    if (is_uninitialized) {
+
+    // Note: the use of RawFastPropertyAt (vs. the higher-level
+    // GetOwnFastDataProperty) here is necessary, since the underlying value
+    // may be `uninitialized`, which the latter explicitly does not support.
+    base::Optional<ObjectRef> maybe_boilerplate_value =
+        boilerplate.RawFastPropertyAt(index);
+    if (!maybe_boilerplate_value.has_value()) return {};
+
+    // Note: We don't need to take a compilation dependency verifying the value
+    // of `boilerplate_value`, since boilerplate properties are constant after
+    // initialization modulo map migration. We protect against concurrent map
+    // migrations via the boilerplate_migration_access lock.
+    ObjectRef boilerplate_value = maybe_boilerplate_value.value();
+
+    // Uninitialized fields are marked through the `uninitialized_value`, or in
+    // the case of double representation through a HeapNumber containing a
+    // special sentinel value. The boilerplate value is not updated to keep up
+    // with the map, thus conversions may be necessary. Specifically:
+    //
+    // - Smi representation: uninitialized is converted to Smi 0.
+    // - Not double representation: the special heap number sentinel is
+    //   converted to `uninitialized_value`.
+    //
+    // Note that although we create nodes to write `uninitialized_value` into
+    // the object, the field should be overwritten immediately with a real
+    // value, and `uninitialized_value` should never be exposed to JS.
+    bool is_uninitialized = false;
+    if (boilerplate_value.IsHeapObject() &&
+        boilerplate_value.AsHeapObject().map().oddball_type() ==
+            OddballType::kUninitialized) {
+      is_uninitialized = true;
+      access.const_field_info = ConstFieldInfo::None();
+    } else if (!property_details.representation().IsDouble() &&
+               boilerplate_value.IsHeapNumber() &&
+               boilerplate_value.AsHeapNumber().value_as_bits() ==
+                   kHoleNanInt64) {
+      is_uninitialized = true;
+      boilerplate_value =
+          MakeRef<HeapObject>(broker(), factory()->uninitialized_value());
       access.const_field_info = ConstFieldInfo::None();
     }
+
     Node* value;
     if (boilerplate_value.IsJSObject()) {
       JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
...
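The shape of the new preamble and bail-out — take the shared lock, re-read the current map, and give up if a migration won the race — can be sketched with standard C++ (stand-in types throughout; not V8's actual classes):

#include <optional>
#include <shared_mutex>

struct Map {};
struct Boilerplate {
  const Map* map;  // rewritten only while the exclusive lock is held
};

std::shared_mutex boilerplate_migration_access;  // stand-in for the isolate's lock

std::optional<int> TryLowerFastLiteral(const Boilerplate& b,
                                       const Map* cached_map) {
  // Shared lock: many compiler threads may hold it concurrently, but a
  // migration (which takes it exclusively) is blocked while we iterate.
  std::shared_lock<std::shared_mutex> guard(boilerplate_migration_access);
  // If the map changed since we cached it, a migration already happened;
  // bail out of this literal instead of reading half-migrated fields.
  if (b.map != cached_map) return std::nullopt;
  // ... safe to iterate the boilerplate's in-object properties here ...
  return 1;
}
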
@@ -354,30 +354,44 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   // Locks {mutex} through the duration of this scope iff it is the first
   // occurrence. This is done to have a recursive shared lock on {mutex}.
-  class V8_NODISCARD MapUpdaterGuardIfNeeded final {
-   public:
-    explicit MapUpdaterGuardIfNeeded(JSHeapBroker* ptr,
-                                     base::SharedMutex* mutex)
-        : ptr_(ptr),
-          initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_),
-          shared_mutex(mutex, should_lock()) {
-      ptr_->map_updater_mutex_depth_++;
+  class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
+   protected:
+    RecursiveSharedMutexGuardIfNeeded(base::SharedMutex* mutex,
+                                      int* mutex_depth_address)
+        : mutex_depth_address_(mutex_depth_address),
+          initial_mutex_depth_(*mutex_depth_address_),
+          shared_mutex_guard_(mutex, initial_mutex_depth_ == 0) {
+      (*mutex_depth_address_)++;
     }

-    ~MapUpdaterGuardIfNeeded() {
-      ptr_->map_updater_mutex_depth_--;
-      DCHECK_EQ(initial_map_updater_mutex_depth_,
-                ptr_->map_updater_mutex_depth_);
+    ~RecursiveSharedMutexGuardIfNeeded() {
+      DCHECK_GE((*mutex_depth_address_), 1);
+      (*mutex_depth_address_)--;
+      DCHECK_EQ(initial_mutex_depth_, (*mutex_depth_address_));
     }

-    // Whether the MapUpdater mutex should be physically locked (if not, we
-    // already hold the lock).
-    bool should_lock() const { return initial_map_updater_mutex_depth_ == 0; }
-
    private:
-    JSHeapBroker* const ptr_;
-    const int initial_map_updater_mutex_depth_;
-    base::SharedMutexGuardIf<base::kShared> shared_mutex;
+    int* const mutex_depth_address_;
+    const int initial_mutex_depth_;
+    base::SharedMutexGuardIf<base::kShared> shared_mutex_guard_;
+  };
+
+  class MapUpdaterGuardIfNeeded final
+      : public RecursiveSharedMutexGuardIfNeeded {
+   public:
+    explicit MapUpdaterGuardIfNeeded(JSHeapBroker* broker)
+        : RecursiveSharedMutexGuardIfNeeded(
+              broker->isolate()->map_updater_access(),
+              &broker->map_updater_mutex_depth_) {}
+  };
+
+  class BoilerplateMigrationGuardIfNeeded final
+      : public RecursiveSharedMutexGuardIfNeeded {
+   public:
+    explicit BoilerplateMigrationGuardIfNeeded(JSHeapBroker* broker)
+        : RecursiveSharedMutexGuardIfNeeded(
+              broker->isolate()->boilerplate_migration_access(),
+              &broker->boilerplate_migration_mutex_depth_) {}
   };

  private:
@@ -502,6 +516,8 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   // holds the locking depth, i.e. how many times the mutex has been
   // recursively locked. Only the outermost locker actually locks underneath.
   int map_updater_mutex_depth_ = 0;
+  // Likewise for boilerplate migrations.
+  int boilerplate_migration_mutex_depth_ = 0;

   static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
   static constexpr uint32_t kMinimalRefsBucketCount = 8;
...
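The refactoring keeps the original trick: a per-broker depth counter decides whether a scope actually locks, so nested guards on the same thread are no-ops and the shared mutex is never recursively acquired. A minimal sketch with std::shared_mutex (the depth counter, like the broker's, is only ever touched by one thread):

#include <cassert>
#include <shared_mutex>

class RecursiveSharedGuard {
 public:
  RecursiveSharedGuard(std::shared_mutex* mutex, int* depth)
      : mutex_(mutex), depth_(depth), locked_(*depth == 0) {
    if (locked_) mutex_->lock_shared();  // only the outermost scope locks
    ++*depth_;
  }
  ~RecursiveSharedGuard() {
    --*depth_;
    assert(*depth_ >= 0);
    if (locked_) mutex_->unlock_shared();  // and only it unlocks
  }

 private:
  std::shared_mutex* const mutex_;
  int* const depth_;
  const bool locked_;
};

Deriving MapUpdaterGuardIfNeeded and BoilerplateMigrationGuardIfNeeded from one base, as the hunk above does, means each additional recursive lock only has to supply its mutex and its depth counter.
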
@@ -672,8 +672,21 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
     return &shared_function_info_access_;
   }

+  // Protects (most) map update operations, see also MapUpdater.
   base::SharedMutex* map_updater_access() { return &map_updater_access_; }

+  // Protects JSObject boilerplate migrations (i.e. calls to MigrateInstance on
+  // boilerplate objects; elements kind transitions are *not* protected).
+  // Note this lock interacts with `map_updater_access` as follows:
+  //
+  // - boilerplate migrations may trigger map updates.
+  // - if so, `boilerplate_migration_access` is locked before
+  //   `map_updater_access`.
+  // - background threads must use the same lock order to avoid deadlocks.
+  base::SharedMutex* boilerplate_migration_access() {
+    return &boilerplate_migration_access_;
+  }
+
   // The isolate's string table.
   StringTable* string_table() const { return string_table_.get(); }
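The lock-order rule in that comment is the standard deadlock-avoidance discipline: every thread that ever needs both mutexes must acquire them in the same global order. A toy illustration (names local to this sketch):

#include <mutex>
#include <shared_mutex>

std::shared_mutex boilerplate_migration_access;
std::shared_mutex map_updater_access;

// Correct: boilerplate_migration_access first, map_updater_access second.
// Because no thread takes them in the opposite order, no cycle of waiters
// can form.
void MigrateBoilerplate() {
  std::unique_lock<std::shared_mutex> migration(boilerplate_migration_access);
  // ... the migration may trigger a map update ...
  std::unique_lock<std::shared_mutex> update(map_updater_access);
  // ... update maps ...
}

A thread that locked map_updater_access first and then waited on boilerplate_migration_access could deadlock against MigrateBoilerplate; that is exactly the interleaving the comment forbids.
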
@@ -1936,6 +1949,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   base::SharedMutex full_transition_array_access_;
   base::SharedMutex shared_function_info_access_;
   base::SharedMutex map_updater_access_;
+  base::SharedMutex boilerplate_migration_access_;

   Logger* logger_ = nullptr;
   StubCache* load_stub_cache_ = nullptr;
   StubCache* store_stub_cache_ = nullptr;
...
@@ -342,6 +342,12 @@ Object JSObject::RawFastPropertyAt(PtrComprCageBase cage_base,
   }
 }

+Object JSObject::RawFastPropertyAt(FieldIndex index, RelaxedLoadTag tag) const {
+  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+  CHECK(index.is_inobject());
+  return TaggedField<Object>::Relaxed_Load(cage_base, *this, index.offset());
+}
+
 void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
                                             WriteBarrierMode mode) {
   DCHECK(index.is_inobject());
...
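A relaxed load returns some value that was actually stored in the slot, without tearing, but gives no ordering relative to other fields; that is sufficient here because callers revalidate what they read via the map and the migration lock. A portable analogue of the relaxed field read (stand-in types):

#include <atomic>
#include <cstdint>

struct ObjectSlots {
  // Stand-in for tagged in-object fields another thread may overwrite.
  std::atomic<std::uintptr_t> slots[4] = {0, 0, 0, 0};
};

// Relaxed: atomic (no torn reads), but unordered. Suitable only when the
// caller has an independent way to validate the value it read.
std::uintptr_t RawFastPropertyAtRelaxed(const ObjectSlots& o, int index) {
  return o.slots[index].load(std::memory_order_relaxed);
}
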
@@ -651,6 +651,9 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
   inline Object RawFastPropertyAt(FieldIndex index) const;
   inline Object RawFastPropertyAt(PtrComprCageBase cage_base,
                                   FieldIndex index) const;
+  // A specialized version of the above for use by TurboFan. Only supports
+  // in-object properties.
+  inline Object RawFastPropertyAt(FieldIndex index, RelaxedLoadTag) const;

   inline void FastPropertyAtPut(FieldIndex index, Object value,
                                 WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
...
@@ -84,6 +84,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
   }

   if (object->map(isolate).is_deprecated()) {
+    base::SharedMutexGuard<base::kExclusive> mutex_guard(
+        isolate->boilerplate_migration_access());
     JSObject::MigrateInstance(isolate, object);
   }
...
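This is the writer side of the new lock: the thread performing the migration takes boilerplate_migration_access exclusively, so no background compiler can be mid-iteration over the boilerplate's properties while fields move between representations. Completing the reader/writer pair from the TryAllocateFastLiteral sketch above (again with stand-in types):

#include <mutex>
#include <shared_mutex>

std::shared_mutex boilerplate_migration_access;  // stand-in for the isolate's lock

struct Boilerplate { /* ... */ };
void MigrateInstance(Boilerplate&) { /* moves fields, rewrites the map */ }

void MaybeMigrate(Boilerplate& b, bool is_deprecated) {
  if (is_deprecated) {
    // Exclusive: waits for and then excludes all shared holders
    // (the background compiler threads reading the boilerplate).
    std::unique_lock<std::shared_mutex> guard(boilerplate_migration_access);
    MigrateInstance(b);
  }
}
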