Commit 67044edf authored by Shu-yu Guo, committed by V8 LUCI CQ

[shared-struct] Support shared structs in Atomics.exchange

Bug: v8:12547
Change-Id: Ie27831b793f214368a003adac24b7c92f1a5fc11
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3518426
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79504}
parent e62f5568
@@ -111,6 +111,14 @@ class AsAtomicImpl {
                                      cast_helper<T>::to_storage_type(new_value));
   }
 
+  template <typename T>
+  static T SeqCst_Swap(T* addr,
+                       typename std::remove_reference<T>::type new_value) {
+    STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+    return base::SeqCst_AtomicExchange(
+        to_storage_addr(addr), cast_helper<T>::to_storage_type(new_value));
+  }
+
   template <typename T>
   static T Release_CompareAndSwap(
       T* addr, typename std::remove_reference<T>::type old_value,
......
@@ -130,6 +130,12 @@ inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                        std::memory_order_relaxed);
 }
 
+inline Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr,
+                                      Atomic32 new_value) {
+  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+                                       std::memory_order_seq_cst);
+}
+
 inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
   return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
@@ -267,6 +273,12 @@ inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                        std::memory_order_relaxed);
 }
 
+inline Atomic64 SeqCst_AtomicExchange(volatile Atomic64* ptr,
+                                      Atomic64 new_value) {
+  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+                                       std::memory_order_seq_cst);
+}
+
 inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
......
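For reference, both layers added above (the templated AsAtomicImpl::SeqCst_Swap wrapper and the per-width SeqCst_AtomicExchange functions) bottom out in std::atomic_exchange_explicit with std::memory_order_seq_cst. A minimal standalone sketch of that core operation; to_std_atomic here is a simplified stand-in for V8's internal helper of the same name:

#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// Reinterprets a raw cell as a std::atomic. Simplified: assumes the cell is
// only ever accessed through atomic operations.
static std::atomic<Atomic32>* to_std_atomic(volatile Atomic32* ptr) {
  return reinterpret_cast<std::atomic<Atomic32>*>(const_cast<Atomic32*>(ptr));
}

Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) {
  // Atomically stores new_value and returns the previous contents; seq_cst
  // additionally places the operation in the single global total order that
  // Atomics.exchange requires.
  return std::atomic_exchange_explicit(to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

int main() {
  Atomic32 cell = 1;
  Atomic32 old_value = SeqCst_AtomicExchange(&cell, 2);
  return (old_value == 1 && cell == 2) ? 0 : 1;
}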
@@ -892,8 +892,8 @@ namespace internal {
       kIndexOrFieldName)                                                      \
   TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArrayOrSharedStruct, \
       kIndexOrFieldName, kValue)                                              \
-  TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex,   \
-      kValue)                                                                 \
+  TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver,                   \
+      kArrayOrSharedStruct, kIndexOrFieldName, kValue)                        \
   TFJ(AtomicsCompareExchange, kJSArgcReceiverSlots + 4, kReceiver, kArray,    \
       kIndex, kOldValue, kNewValue)                                           \
   TFJ(AtomicsAdd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
......
@@ -385,11 +385,15 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
 // https://tc39.es/ecma262/#sec-atomics.exchange
 TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
-  auto maybe_array = Parameter<Object>(Descriptor::kArray);
-  auto index = Parameter<Object>(Descriptor::kIndex);
+  auto maybe_array_or_shared_struct =
+      Parameter<Object>(Descriptor::kArrayOrSharedStruct);
+  auto index_or_field_name = Parameter<Object>(Descriptor::kIndexOrFieldName);
   auto value = Parameter<Object>(Descriptor::kValue);
   auto context = Parameter<Context>(Descriptor::kContext);
 
+  Label shared_struct(this);
+  GotoIf(IsJSSharedStruct(maybe_array_or_shared_struct), &shared_struct);
+
   // Inlines AtomicReadModifyWrite
   // https://tc39.es/ecma262/#sec-atomicreadmodifywrite
@@ -397,12 +401,14 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   Label detached(this);
   TNode<Int32T> elements_kind;
   TNode<RawPtrT> backing_store;
-  TNode<JSArrayBuffer> array_buffer = ValidateIntegerTypedArray(
-      maybe_array, context, &elements_kind, &backing_store, &detached);
-  TNode<JSTypedArray> array = CAST(maybe_array);
+  TNode<JSArrayBuffer> array_buffer =
+      ValidateIntegerTypedArray(maybe_array_or_shared_struct, context,
+                                &elements_kind, &backing_store, &detached);
+  TNode<JSTypedArray> array = CAST(maybe_array_or_shared_struct);
 
   // 2. Let i be ? ValidateAtomicAccess(typedArray, index).
-  TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
+  TNode<UintPtrT> index_word =
+      ValidateAtomicAccess(array, index_or_field_name, context);
 
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
   USE(array_buffer);
@@ -513,6 +519,13 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
     ThrowTypeError(context, MessageTemplate::kDetachedOperation,
                    "Atomics.exchange");
   }
+
+  BIND(&shared_struct);
+  {
+    Return(CallRuntime(Runtime::kAtomicsExchangeSharedStructField, context,
+                       maybe_array_or_shared_struct, index_or_field_name,
+                       value));
+  }
 }
 
 // https://tc39.es/ecma262/#sec-atomics.compareexchange
......
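The builtin's control flow is a two-way dispatch: shared structs branch to the new Runtime_AtomicsExchangeSharedStructField slow path, while everything else continues down the pre-existing typed-array fast path. A hypothetical plain-C++ model of just that dispatch (the types are illustrative stand-ins, not V8's):

#include <variant>

struct TypedArrayRef {};    // stand-in for a validated integer TypedArray
struct SharedStructRef {};  // stand-in for a JSSharedStruct

enum class ExchangePath { kTypedArrayInline, kRuntimeSharedStructField };

// Mirrors GotoIf(IsJSSharedStruct(...), &shared_struct) followed by the
// CallRuntime(...) in the shared_struct label above.
ExchangePath DispatchAtomicsExchange(
    const std::variant<TypedArrayRef, SharedStructRef>& receiver) {
  if (std::holds_alternative<SharedStructRef>(receiver)) {
    return ExchangePath::kRuntimeSharedStructField;
  }
  // Otherwise: ValidateIntegerTypedArray, ValidateAtomicAccess, then the
  // inline machine-level exchange, unchanged from before this patch.
  return ExchangePath::kTypedArrayInline;
}

int main() {
  std::variant<TypedArrayRef, SharedStructRef> receiver = SharedStructRef{};
  return DispatchAtomicsExchange(receiver) ==
                 ExchangePath::kRuntimeSharedStructField
             ? 0
             : 1;
}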
@@ -477,6 +477,23 @@ void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
   }
 }
 
+Object JSObject::RawFastPropertyAtSwap(FieldIndex index, Object value,
+                                       SeqCstAccessTag tag) {
+  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+  return RawFastPropertyAtSwap(cage_base, index, value, tag);
+}
+
+Object JSObject::RawFastPropertyAtSwap(PtrComprCageBase cage_base,
+                                       FieldIndex index, Object value,
+                                       SeqCstAccessTag tag) {
+  if (index.is_inobject()) {
+    return TaggedField<Object>::SeqCst_Swap(cage_base, *this, index.offset(),
+                                            value);
+  }
+  return property_array().Swap(cage_base, index.outobject_array_index(), value,
+                               tag);
+}
+
 int JSObject::GetInObjectPropertyOffset(int index) {
   return map().GetInObjectPropertyOffset(index);
 }
......
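RawFastPropertyAtSwap routes the swap by where the field lives: in-object fields sit at a fixed offset inside the object itself, while overflow fields sit in the external PropertyArray backing store. A simplified standalone model of that routing (slot counts and types are assumptions for illustration, not V8's layout):

#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>

using TaggedValue = uintptr_t;  // stand-in for a tagged pointer word

constexpr std::size_t kInObjectSlots = 2;     // illustrative layout
constexpr std::size_t kOutOfObjectSlots = 4;  // illustrative layout

struct FastObjectModel {
  std::array<std::atomic<TaggedValue>, kInObjectSlots> inobject;
  std::array<std::atomic<TaggedValue>, kOutOfObjectSlots> property_array;

  TaggedValue SwapField(std::size_t field_index, TaggedValue value) {
    if (field_index < kInObjectSlots) {  // mirrors index.is_inobject()
      return inobject[field_index].exchange(value, std::memory_order_seq_cst);
    }
    // Mirrors property_array().Swap(..., index.outobject_array_index(), ...).
    return property_array[field_index - kInObjectSlots].exchange(
        value, std::memory_order_seq_cst);
  }
};

int main() {
  FastObjectModel o{};
  o.SwapField(0, 1);
  return o.SwapField(0, 2) == 1 ? 0 : 1;  // swap returns the previous value
}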
@@ -707,6 +707,12 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
   inline void WriteToField(InternalIndex descriptor, PropertyDetails details,
                            Object value);
 
+  inline Object RawFastPropertyAtSwap(FieldIndex index, Object value,
+                                      SeqCstAccessTag tag);
+  inline Object RawFastPropertyAtSwap(PtrComprCageBase cage_base,
+                                      FieldIndex index, Object value,
+                                      SeqCstAccessTag tag);
+
   // Access to in object properties.
   inline int GetInObjectPropertyOffset(int index);
   inline Object InObjectPropertyAt(int index);
......
@@ -1137,6 +1137,20 @@ void LookupIterator::WriteDataValue(Handle<Object> value, SeqCstAccessTag tag) {
   holder->FastPropertyAtPut(field_index, *value, tag);
 }
 
+Handle<Object> LookupIterator::SwapDataValue(Handle<Object> value,
+                                             SeqCstAccessTag tag) {
+  DCHECK_EQ(DATA, state_);
+  DCHECK_EQ(PropertyLocation::kField, property_details_.location());
+  DCHECK_EQ(PropertyKind::kData, property_details_.kind());
+  // Currently only shared structs support sequentially consistent access.
+  Handle<JSSharedStruct> holder = GetHolder<JSSharedStruct>();
+  DisallowGarbageCollection no_gc;
+  FieldIndex field_index =
+      FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+  return handle(holder->RawFastPropertyAtSwap(field_index, *value, tag),
+                isolate_);
+}
+
 #if V8_ENABLE_WEBASSEMBLY
 wasm::ValueType LookupIterator::wasm_value_type() const {
......
@@ -190,6 +190,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
   void WriteDataValue(Handle<Object> value, bool initializing_store);
   Handle<Object> GetDataValue(SeqCstAccessTag tag) const;
   void WriteDataValue(Handle<Object> value, SeqCstAccessTag tag);
+  Handle<Object> SwapDataValue(Handle<Object> value, SeqCstAccessTag tag);
 
   inline void UpdateProtector();
   static inline void UpdateProtector(Isolate* isolate, Handle<Object> receiver,
......
@@ -81,6 +81,25 @@ void PropertyArray::set(int index, Object value, SeqCstAccessTag tag) {
   // space, so the generational write barrier is also not needed.
 }
 
+Object PropertyArray::Swap(int index, Object value, SeqCstAccessTag tag) {
+  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+  return Swap(cage_base, index, value, tag);
+}
+
+Object PropertyArray::Swap(PtrComprCageBase cage_base, int index, Object value,
+                           SeqCstAccessTag tag) {
+  DCHECK(IsPropertyArray());
+  DCHECK_LT(static_cast<unsigned>(index),
+            static_cast<unsigned>(this->length(kAcquireLoad)));
+  DCHECK(value.IsShared());
+  return TaggedField<Object>::SeqCst_Swap(cage_base, *this,
+                                          OffsetOfElementAt(index), value);
+  // JSSharedStructs are allocated in the shared old space, which is currently
+  // collected by stopping the world, so the incremental write barrier is not
+  // needed. They can only store Smis and other HeapObjects in the shared old
+  // space, so the generational write barrier is also not needed.
+}
+
 ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
 
 int PropertyArray::length() const {
......
@@ -40,6 +40,10 @@ class PropertyArray
   // Setter with explicit barrier mode.
   inline void set(int index, Object value, WriteBarrierMode mode);
 
+  inline Object Swap(int index, Object value, SeqCstAccessTag tag);
+  inline Object Swap(PtrComprCageBase cage_base, int index, Object value,
+                     SeqCstAccessTag tag);
+
   // Signature must be in sync with FixedArray::CopyElements().
   inline void CopyElements(Isolate* isolate, int dst_index, PropertyArray src,
                            int src_index, int len, WriteBarrierMode mode);
......
@@ -236,6 +236,29 @@ void TaggedField<T, kFieldOffset>::SeqCst_Store(HeapObject host, int offset,
   AsAtomicTagged::SeqCst_Store(location(host, offset), full_to_tagged(ptr));
 }
 
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::SeqCst_Swap(HeapObject host, int offset,
+                                            T value) {
+  Address ptr = value.ptr();
+  DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+  AtomicTagged_t old_value =
+      AsAtomicTagged::SeqCst_Swap(location(host, offset), full_to_tagged(ptr));
+  return T(tagged_to_full(host.ptr(), old_value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::SeqCst_Swap(PtrComprCageBase cage_base,
+                                            HeapObject host, int offset,
+                                            T value) {
+  Address ptr = value.ptr();
+  DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+  AtomicTagged_t old_value =
+      AsAtomicTagged::SeqCst_Swap(location(host, offset), full_to_tagged(ptr));
+  return T(tagged_to_full(cage_base, old_value));
+}
+
 }  // namespace internal
 }  // namespace v8
......
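At the lowest level, TaggedField::SeqCst_Swap is a single atomic exchange on the raw (possibly pointer-compressed) word that holds the field, with the previous word rebuilt into a full tagged value afterwards. A freestanding sketch of that essential operation; Tagged_t and the decompression step are deliberately simplified assumptions (64-bit, 4GB-aligned cage base), not V8's actual scheme:

#include <atomic>
#include <cstdint>

using Tagged_t = uint32_t;  // compressed tagged word (assumed width)
using Address = uint64_t;   // full pointer width (sketch assumes 64-bit)

// Stand-in for tagged_to_full(): rebuild a full pointer from a 4GB-aligned
// cage base and a compressed word.
Address TaggedToFull(Address cage_base, Tagged_t tagged) {
  return (cage_base & ~Address{0xffffffff}) | tagged;
}

Address SeqCstSwapField(std::atomic<Tagged_t>* slot, Address cage_base,
                        Tagged_t new_value) {
  // The exchange itself is one atomic read-modify-write on the field's word.
  Tagged_t old_value = slot->exchange(new_value, std::memory_order_seq_cst);
  return TaggedToFull(cage_base, old_value);
}

int main() {
  std::atomic<Tagged_t> slot{0x1234};
  Address cage_base = Address{0x7f00} << 32;
  return (SeqCstSwapField(&slot, cage_base, 0x5678) & 0xffffffff) == 0x1234
             ? 0
             : 1;
}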
@@ -57,16 +57,20 @@ class TaggedField : public AllStatic {
   static inline T Acquire_Load(PtrComprCageBase cage_base, HeapObject host,
                                int offset = 0);
 
-  static inline void Release_Store(HeapObject host, T value);
-  static inline void Release_Store(HeapObject host, int offset, T value);
-
   static inline T SeqCst_Load(HeapObject host, int offset = 0);
   static inline T SeqCst_Load(PtrComprCageBase cage_base, HeapObject host,
                               int offset = 0);
 
+  static inline void Release_Store(HeapObject host, T value);
+  static inline void Release_Store(HeapObject host, int offset, T value);
+
   static inline void SeqCst_Store(HeapObject host, T value);
   static inline void SeqCst_Store(HeapObject host, int offset, T value);
 
+  static inline T SeqCst_Swap(HeapObject host, int offset, T value);
+  static inline T SeqCst_Swap(PtrComprCageBase cage_base, HeapObject host,
+                              int offset, T value);
+
   static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
                                                 T value);
......
@@ -640,9 +640,34 @@ RUNTIME_FUNCTION(Runtime_AtomicsStoreSharedStructField) {
   }
 
   // Shared structs are non-extensible. Instead of duplicating logic, call
   // Object::AddDataProperty to handle the error case.
-  CHECK(Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
-                                StoreOrigin::kMaybeKeyed)
-            .IsNothing());
+  Maybe<bool> result =
+      Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
+                              StoreOrigin::kMaybeKeyed);
+  DCHECK(result.IsNothing());
+  USE(result);
   return ReadOnlyRoots(isolate).exception();
 }
 
+RUNTIME_FUNCTION(Runtime_AtomicsExchangeSharedStructField) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
+  Handle<Name> field_name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
+                                     Object::ToName(isolate, args.at(1)));
+
+  Handle<Object> shared_value;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
+
+  // Shared structs are prototypeless.
+  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
+  if (it.IsFound()) return *it.SwapDataValue(shared_value, kSeqCstAccess);
+
+  // Shared structs are non-extensible. Instead of duplicating logic, call
+  // Object::AddDataProperty to handle the error case.
+  Maybe<bool> result =
+      Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
+                              StoreOrigin::kMaybeKeyed);
+  DCHECK(result.IsNothing());
+  USE(result);
+  return ReadOnlyRoots(isolate).exception();
+}
......
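The runtime path is name resolution plus one atomic swap: coerce the key to a Name, share the incoming value, look the field up on the struct itself (shared structs are prototypeless), and exchange it with sequential consistency; an unknown field is an error, since shared structs are non-extensible. A hypothetical model of that flow using plain C++ containers (illustrative only, not V8's representation):

#include <atomic>
#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

using TaggedValue = uintptr_t;

// Field layout is fixed at construction; the map only resolves names to
// slots, and the slots themselves are the atomics.
struct SharedStructModel {
  std::unordered_map<std::string, std::atomic<TaggedValue>> fields;
};

// Returns the old value, or nullopt for an unknown field (where the runtime
// function above raises the non-extensibility error via AddDataProperty).
std::optional<TaggedValue> AtomicsExchangeField(SharedStructModel& s,
                                                const std::string& field_name,
                                                TaggedValue shared_value) {
  auto it = s.fields.find(field_name);
  if (it == s.fields.end()) return std::nullopt;
  return it->second.exchange(shared_value, std::memory_order_seq_cst);
}

int main() {
  SharedStructModel s;
  s.fields["field"].store(1, std::memory_order_relaxed);
  auto old_value = AtomicsExchangeField(s, "field", 2);
  return (old_value && *old_value == 1) ? 0 : 1;
}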
@@ -67,7 +67,8 @@ namespace internal {
   F(AtomicsXor, 3, 1)                       \
   F(SetAllowAtomicsWait, 1, 1)              \
   F(AtomicsLoadSharedStructField, 2, 1)     \
-  F(AtomicsStoreSharedStructField, 3, 1)
+  F(AtomicsStoreSharedStructField, 3, 1)    \
+  F(AtomicsExchangeSharedStructField, 3, 1)
 
 #define FOR_EACH_INTRINSIC_BIGINT(F, I) \
   F(BigIntBinaryOp, 3, 1)               \
......
@@ -11,10 +11,20 @@ let S = new SharedStructType(['field']);
 (function TestPrimitivesUsingAtomics() {
   // All primitives can be stored in fields.
   let s = new S();
-  for (let prim of [42, -0, undefined, null, true, false, "foo"]) {
+  const prims = [42, -0, undefined, null, true, false, "foo"];
+
+  for (let prim of prims) {
     Atomics.store(s, 'field', prim);
     assertEquals(Atomics.load(s, 'field'), prim);
   }
+
+  for (let prim1 of prims) {
+    for (let prim2 of prims) {
+      s.field = prim1;
+      assertEquals(Atomics.exchange(s, 'field', prim2), prim1);
+      assertEquals(s.field, prim2);
+    }
+  }
 })();
 
 (function TestObjectsUsingAtomics() {
@@ -26,6 +36,10 @@ let S = new SharedStructType(['field']);
   let shared_rhs = new S();
   Atomics.store(s, 'field', shared_rhs);
   assertEquals(Atomics.load(s, 'field'), shared_rhs);
+
+  let shared_rhs2 = new S();
+  assertEquals(Atomics.exchange(s, 'field', shared_rhs2), shared_rhs);
+  assertEquals(s.field, shared_rhs2);
 })();
 
 (function TestNotExtensibleUsingAtomics() {
......