Commit 339bf314 authored by Santiago Aboy Solanes, committed by Commit Bot

[compiler] Access Map's bitfield methods concurrently

Make the Map's three bitfields use release/acquire semantics. This
ensures, for example, that when we set the number of own descriptors on
the map, we do so with a release store.
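
As an illustrative sketch only (plain C++, not V8 code; MapLike and
descriptors are made-up names): a plain write performed before a release
store is guaranteed visible to any thread that observes that store with
an acquire load, which is the property the bitfield accessors rely on.

  #include <atomic>

  struct MapLike {
    int descriptors = 0;                  // hypothetical plain field
    std::atomic<uint32_t> bit_field3{0};  // published with release semantics
  };

  void writer(MapLike& m) {
    m.descriptors = 42;  // ordinary write, sequenced before the store
    // Release store: all writes above become visible to readers that
    // acquire-load bit_field3 and observe this value.
    m.bit_field3.store(7, std::memory_order_release);
  }

  void reader(MapLike& m) {
    if (m.bit_field3.load(std::memory_order_acquire) == 7) {
      int d = m.descriptors;  // guaranteed to read 42
      (void)d;
    }
  }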

Also, start reading and writing these bitfields concurrently when
concurrent inlining is enabled. The new BIMODAL_ACCESSOR_WITH_FLAG_B
macro enables this; a rough expansion is sketched below.
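
For illustration, here is roughly what the new macro expands to for
MapRef::elements_kind (derived from the macro definition added in this
change; IF_ACCESS_FROM_HEAP_WITH_FLAG_C is the broker's existing shortcut
that reads directly from the heap when that is allowed):

  typename Map::Bits2::ElementsKindBits::FieldType MapRef::elements_kind()
      const {
    IF_ACCESS_FROM_HEAP_WITH_FLAG_C(elements_kind);
    return Map::Bits2::ElementsKindBits::decode(
        ObjectRef::data()->AsMap()->bit_field2());
  }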

Bug: v8:7790
Change-Id: I16561d8e065c50ce272b085d9606b98e37922633
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2536640
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71237}
parent 800307f6
@@ -124,6 +124,10 @@ inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
 }
 
+inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
 }
@@ -140,6 +144,10 @@ inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
   return __atomic_load_n(ptr, __ATOMIC_RELAXED);
 }
 
+inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
 }
@@ -114,6 +114,11 @@ inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
                              std::memory_order_relaxed);
 }
 
+inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
+  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+                             std::memory_order_release);
+}
+
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                              std::memory_order_release);
@@ -134,6 +139,11 @@ inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
                                    std::memory_order_relaxed);
 }
 
+inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
+  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+                                   std::memory_order_acquire);
+}
+
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                    std::memory_order_acquire);
@@ -593,7 +593,6 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
   int UnusedPropertyFields() const;
   ElementsKind elements_kind() const;
   bool is_stable() const;
-  bool is_extensible() const;
   bool is_constructor() const;
   bool has_prototype_slot() const;
   bool is_access_check_needed() const;
@@ -3381,6 +3381,11 @@ int BytecodeArrayRef::handler_table_size() const {
     IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name);                    \
     return ObjectRef::data()->As##holder()->name();           \
   }
 
+#define BIMODAL_ACCESSOR_WITH_FLAG_B(holder, field, name, BitField)    \
+  typename BitField::FieldType holder##Ref::name() const {             \
+    IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name);                             \
+    return BitField::decode(ObjectRef::data()->As##holder()->field()); \
+  }
 
 BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
 BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
@@ -3429,25 +3434,28 @@ BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
 BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
 BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
-BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::Bits2::ElementsKindBits)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map,
-                   Map::Bits3::IsDictionaryMapBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::Bits3::IsDeprecatedBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
-                   Map::Bits3::NumberOfOwnDescriptorsBits)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
-                   Map::Bits3::IsMigrationTargetBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::Bits3::IsExtensibleBit)
-BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot,
-                   Map::Bits1::HasPrototypeSlotBit)
-BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
-                   Map::Bits1::IsAccessCheckNeededBit)
-BIMODAL_ACCESSOR_B(Map, bit_field, is_callable, Map::Bits1::IsCallableBit)
-BIMODAL_ACCESSOR_B(Map, bit_field, has_indexed_interceptor,
-                   Map::Bits1::HasIndexedInterceptorBit)
-BIMODAL_ACCESSOR_B(Map, bit_field, is_constructor, Map::Bits1::IsConstructorBit)
-BIMODAL_ACCESSOR_B(Map, bit_field, is_undetectable,
-                   Map::Bits1::IsUndetectableBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
+                             Map::Bits2::ElementsKindBits)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
+                             Map::Bits3::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_deprecated,
+                             Map::Bits3::IsDeprecatedBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, NumberOfOwnDescriptors,
+                             Map::Bits3::NumberOfOwnDescriptorsBits)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_migration_target,
+                             Map::Bits3::IsMigrationTargetBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_prototype_slot,
+                             Map::Bits1::HasPrototypeSlotBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_access_check_needed,
+                             Map::Bits1::IsAccessCheckNeededBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_callable,
+                             Map::Bits1::IsCallableBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_indexed_interceptor,
+                             Map::Bits1::HasIndexedInterceptorBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_constructor,
+                             Map::Bits1::IsConstructorBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_undetectable,
+                             Map::Bits1::IsUndetectableBit)
 BIMODAL_ACCESSOR_C(Map, int, instance_size)
 BIMODAL_ACCESSOR_C(Map, int, NextFreePropertyIndex)
 BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
@@ -3644,7 +3652,7 @@ void* JSTypedArrayRef::data_ptr() const {
 }
 
 bool MapRef::IsInobjectSlackTrackingInProgress() const {
-  IF_ACCESS_FROM_HEAP_C(IsInobjectSlackTrackingInProgress);
+  IF_ACCESS_FROM_HEAP_WITH_FLAG_C(IsInobjectSlackTrackingInProgress);
   return Map::Bits3::ConstructionCounterBits::decode(
              data()->AsMap()->bit_field3()) != Map::kNoSlackTracking;
 }
@@ -1473,7 +1473,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
   // Must be called only after |instance_type|, |instance_size| and
   // |layout_descriptor| are set.
   map.set_visitor_id(Map::GetVisitorId(map));
-  map.set_relaxed_bit_field(0);
+  map.set_bit_field(0);
   map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
   int bit_field3 =
       Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -60,7 +60,7 @@ ACCESSORS_CHECKED(Map, prototype_info, Object,
 // is explicitly allowlisted here. The former is never modified after the map
 // is set up, but it is read by the concurrent marker when pointer compression
 // is enabled. The latter bit can be modified on live objects.
-BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_non_instance_prototype,
+BIT_FIELD_ACCESSORS(Map, bit_field, has_non_instance_prototype,
                     Map::Bits1::HasNonInstancePrototypeBit)
 BIT_FIELD_ACCESSORS(Map, bit_field, is_callable, Map::Bits1::IsCallableBit)
 BIT_FIELD_ACCESSORS(Map, bit_field, has_named_interceptor,
@@ -73,7 +73,7 @@ BIT_FIELD_ACCESSORS(Map, bit_field, is_access_check_needed,
                     Map::Bits1::IsAccessCheckNeededBit)
 BIT_FIELD_ACCESSORS(Map, bit_field, is_constructor,
                     Map::Bits1::IsConstructorBit)
-BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_prototype_slot,
+BIT_FIELD_ACCESSORS(Map, bit_field, has_prototype_slot,
                     Map::Bits1::HasPrototypeSlotBit)
 
 // |bit_field2| fields.
@@ -448,24 +448,20 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
   DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
 }
 
-byte Map::bit_field() const { return ReadField<byte>(kBitFieldOffset); }
-
-void Map::set_bit_field(byte value) {
-  WriteField<byte>(kBitFieldOffset, value);
+byte Map::bit_field() const {
+  return ACQUIRE_READ_BYTE_FIELD(*this, kBitFieldOffset);
 }
 
-byte Map::relaxed_bit_field() const {
-  return RELAXED_READ_BYTE_FIELD(*this, kBitFieldOffset);
+void Map::set_bit_field(byte value) {
+  RELEASE_WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
 }
 
-void Map::set_relaxed_bit_field(byte value) {
-  RELAXED_WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
+byte Map::bit_field2() const {
+  return ACQUIRE_READ_BYTE_FIELD(*this, kBitField2Offset);
 }
 
-byte Map::bit_field2() const { return ReadField<byte>(kBitField2Offset); }
-
 void Map::set_bit_field2(byte value) {
-  WriteField<byte>(kBitField2Offset, value);
+  RELEASE_WRITE_BYTE_FIELD(*this, kBitField2Offset, value);
 }
 
 bool Map::is_abandoned_prototype_map() const {
@@ -666,11 +662,11 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
 }
 
 void Map::set_bit_field3(uint32_t bits) {
-  RELAXED_WRITE_UINT32_FIELD(*this, kBitField3Offset, bits);
+  RELEASE_WRITE_UINT32_FIELD(*this, kBitField3Offset, bits);
 }
 
 uint32_t Map::bit_field3() const {
-  return RELAXED_READ_UINT32_FIELD(*this, kBitField3Offset);
+  return ACQUIRE_READ_UINT32_FIELD(*this, kBitField3Offset);
 }
 
 void Map::clear_padding() {
@@ -1449,7 +1449,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
   Handle<HeapObject> prototype(map->prototype(), isolate);
   Map::SetPrototype(isolate, result, prototype);
   result->set_constructor_or_backpointer(map->GetConstructor());
-  result->set_relaxed_bit_field(map->bit_field());
+  result->set_bit_field(map->bit_field());
   result->set_bit_field2(map->bit_field2());
   int new_bit_field3 = map->bit_field3();
   new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
@@ -249,8 +249,6 @@ class Map : public HeapObject {
   // Bit field.
   //
   DECL_PRIMITIVE_ACCESSORS(bit_field, byte)
-  // Atomic accessors, used for allowlisting legitimate concurrent accesses.
-  DECL_PRIMITIVE_ACCESSORS(relaxed_bit_field, byte)
 
   // Bit positions for |bit_field|.
   struct Bits1 {
@@ -487,11 +487,20 @@
   static_cast<uint32_t>(base::Relaxed_Load(                 \
       reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
 
+#define ACQUIRE_READ_UINT32_FIELD(p, offset) \
+  static_cast<uint32_t>(base::Acquire_Load(  \
+      reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
+
 #define RELAXED_WRITE_UINT32_FIELD(p, offset, value)            \
   base::Relaxed_Store(                                          \
       reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
       static_cast<base::Atomic32>(value));
+
+#define RELEASE_WRITE_UINT32_FIELD(p, offset, value)            \
+  base::Release_Store(                                          \
+      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
+      static_cast<base::Atomic32>(value));
+
 #define RELAXED_READ_INT32_FIELD(p, offset) \
   static_cast<int32_t>(base::Relaxed_Load(  \
       reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
@@ -510,10 +519,18 @@
   static_cast<byte>(base::Relaxed_Load(                   \
       reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
 
+#define ACQUIRE_READ_BYTE_FIELD(p, offset) \
+  static_cast<byte>(base::Acquire_Load(    \
+      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
+
 #define RELAXED_WRITE_BYTE_FIELD(p, offset, value)                             \
   base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                       static_cast<base::Atomic8>(value));
+
+#define RELEASE_WRITE_BYTE_FIELD(p, offset, value)                             \
+  base::Release_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
+                      static_cast<base::Atomic8>(value));
+
 #ifdef OBJECT_PRINT
 #define DECL_PRINTER(Name) void Name##Print(std::ostream& os);  // NOLINT
 #else