runtime-atomics.cc 22.7 KB
Newer Older
binji's avatar
binji committed
1 2 3 4 5 6
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
7
#include "src/execution/arguments-inl.h"
8
#include "src/heap/factory.h"
9
#include "src/logging/counters.h"
10
#include "src/numbers/conversions-inl.h"
11
#include "src/objects/js-array-buffer-inl.h"
12
#include "src/objects/js-struct-inl.h"
13
#include "src/runtime/runtime-utils.h"
binji's avatar
binji committed
14

15 16
// Implement Atomic accesses to ArrayBuffers and SharedArrayBuffers.
// https://tc39.es/ecma262/#sec-atomics
binji's avatar
binji committed
17 18 19 20

namespace v8 {
namespace internal {

21 22
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
Brice Dobry's avatar
Brice Dobry committed
23
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ||    \
24
    V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
25

26 27
namespace {

28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65
#if defined(V8_OS_STARBOARD)

// Starboard builds have no atomics implementation wired up here yet; every
// helper is a stub that crashes if reached.  LoadSeqCst/StoreSeqCst stubs are
// included as well so that this branch defines the same helper family as the
// GCC and MSVC branches — Load<T>/Store<T> below instantiate them for the
// 64-bit Atomics.load/store runtime entries.

template <typename T>
inline T LoadSeqCst(T* p) {
  UNIMPLEMENTED();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  UNIMPLEMENTED();
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}
#elif V8_CC_GNU
66

67 68
// GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms
// can be slow. Good to know, but we don't have a choice.
#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Watomic-alignment"
#endif  // V8_TARGET_ARCH_32_BIT

// Thin wrappers over the GCC/Clang __atomic builtins. Every access uses
// sequentially-consistent ordering, as the Atomics operations require.

template <typename T>
inline T LoadSeqCst(T* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

template <typename T>
inline void StoreSeqCst(T* ptr, T val) {
  __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* ptr, T val) {
  return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T CompareExchangeSeqCst(T* ptr, T expected, T desired) {
  // On failure the builtin writes the observed value into |expected|; on
  // success |expected| already holds the previous value.  Either way
  // |expected| ends up as the value seen in memory, which is exactly what
  // callers want returned, so the success flag can be discarded.
  (void)__atomic_compare_exchange_n(ptr, &expected, desired, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;
}

template <typename T>
inline T AddSeqCst(T* ptr, T val) {
  return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* ptr, T val) {
  return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* ptr, T val) {
  return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* ptr, T val) {
  return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* ptr, T val) {
  return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic pop
#endif  // V8_TARGET_ARCH_32_BIT
125

126 127 128
#elif V8_CC_MSVC

// Alias the MSVC Interlocked* intrinsics to names that carry an explicit bit
// width, so that ATOMIC_OPS below can form the intrinsic name by token-pasting
// its |suffix| argument (e.g. InterlockedExchange##32).
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr64 _InterlockedOr64
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

// NOTE(review): only ARM64 hosts alias the 8-bit exchange to the underscored
// intrinsic; presumably InterlockedExchange8 resolves on the other hosts
// without it — confirm against winnt.h.
#if defined(V8_HOST_ARCH_ARM64)
#define InterlockedExchange8 _InterlockedExchange8
#endif

// Defines the whole family of seq-cst helpers for one element type. |vctype|
// is the parameter type of the underlying intrinsic; values are bit_cast to
// it, so sign mismatches are harmless. Two subtleties:
//  - the MSVC CompareExchange intrinsics take (target, exchange, comparand),
//    i.e. the new value *before* the expected old value;
//  - there is no ExchangeSub intrinsic, so SubSeqCst adds the negated value.
#define ATOMIC_OPS(type, suffix, vctype)                                       \
  inline type ExchangeSeqCst(type* p, type value) {                            \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),           \
                                       base::bit_cast<vctype>(value));         \
  }                                                                            \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {       \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p),    \
                                              base::bit_cast<vctype>(newval),  \
                                              base::bit_cast<vctype>(oldval)); \
  }                                                                            \
  inline type AddSeqCst(type* p, type value) {                                 \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),        \
                                          base::bit_cast<vctype>(value));      \
  }                                                                            \
  inline type SubSeqCst(type* p, type value) {                                 \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),        \
                                          -base::bit_cast<vctype>(value));     \
  }                                                                            \
  inline type AndSeqCst(type* p, type value) {                                 \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),                \
                                  base::bit_cast<vctype>(value));              \
  }                                                                            \
  inline type OrSeqCst(type* p, type value) {                                  \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),                 \
                                 base::bit_cast<vctype>(value));               \
  }                                                                            \
  inline type XorSeqCst(type* p, type value) {                                 \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),                \
                                  base::bit_cast<vctype>(value));              \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */
ATOMIC_OPS(int64_t, 64, __int64)
ATOMIC_OPS(uint64_t, 64, __int64)

// Plain seq-cst load/store helpers are not expected to be reached on MSVC
// configurations; crash loudly if they ever are.
// NOTE(review): Load<T>/Store<T> below do instantiate these for the 64-bit
// runtime entries — confirm those entries are never taken on MSVC targets.
template <typename T>
inline T LoadSeqCst(T* p) {
  UNREACHABLE();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNREACHABLE();
}

#undef ATOMIC_OPS

#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
#undef InterlockedOr64
#undef InterlockedOr32
#undef InterlockedXor32

#if defined(V8_HOST_ARCH_ARM64)
#undef InterlockedExchange8
#endif

210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248
#else

#error Unsupported platform!

#endif

// Converts a JS value handle into the raw element type T. The integral
// specializations expect a Number (callers convert via Object::ToInteger
// first); the 64-bit specializations expect a BigInt (callers convert via
// BigInt::FromObject first).
template <typename T>
T FromObject(Handle<Object> number);

// Narrow element types: convert through 32 bits; the value is truncated to
// the return type's width by the implicit conversion on return.
template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

// 64-bit variants: the cast assumes the caller already produced a BigInt.
template <>
inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsUint64();
}

template <>
inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsInt64();
}
258

259
// Boxes a raw element value back into a JS object. 8- and 16-bit values
// always fit in a Smi; 32-bit values go through the factory, which may
// allocate a heap number; 64-bit values become BigInts.
inline Object ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, uint16_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, int64_t t) {
  return *BigInt::FromInt64(isolate, t);
}

inline Object ToObject(Isolate* isolate, uint64_t t) {
  return *BigInt::FromUint64(isolate, t);
}

283 284
// Seq-cst atomic load of element |index| from |buffer|, boxed as a JS object.
template <typename T>
struct Load {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index) {
    T result = LoadSeqCst(static_cast<T*>(buffer) + index);
    return ToObject(isolate, result);
  }
};

// Seq-cst atomic store of |obj| (already converted to a Number/BigInt by the
// caller) into element |index|.
template <typename T>
struct Store {
  static inline void Do(Isolate* isolate, void* buffer, size_t index,
                        Handle<Object> obj) {
    T value = FromObject<T>(obj);
    StoreSeqCst(static_cast<T*>(buffer) + index, value);
  }
};

// Atomic exchange: stores |obj|'s value into element |index| and returns the
// previous value, boxed as a JS object.
template <typename T>
struct Exchange {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

310
// Atomic compare-exchange on element |index|: writes |newobj|'s value only if
// the current element equals |oldobj|'s value. Always returns the value
// observed before the operation, boxed as a JS object (per the spec for
// Atomics.compareExchange).
template <typename T>
inline Object DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                Handle<Object> oldobj, Handle<Object> newobj) {
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}

320
// The read-modify-write operation functors below all follow the same shape:
// convert the JS operand, apply the seq-cst fetch-<op>, and return the value
// the element held *before* the operation, as the spec requires.

// Atomic fetch-add.
template <typename T>
struct Add {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

// Atomic fetch-subtract.
template <typename T>
struct Sub {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

// Atomic fetch-and (bitwise).
template <typename T>
struct And {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

// Atomic fetch-or (bitwise).
template <typename T>
struct Or {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

// Atomic fetch-xor (bitwise).
template <typename T>
struct Xor {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};
369

370 371 372
}  // anonymous namespace

// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type)
// Lists the non-BigInt integer element kinds Atomics operates on; used to
// stamp out one switch case per kind below.
#define INTEGER_TYPED_ARRAYS(V)       \
  V(Uint8, uint8, UINT8, uint8_t)     \
  V(Int8, int8, INT8, int8_t)         \
  V(Uint16, uint16, UINT16, uint16_t) \
  V(Int16, int16, INT16, int16_t)     \
  V(Uint32, uint32, UINT32, uint32_t) \
  V(Int32, int32, INT32, int32_t)

// Throws a TypeError (kDetachedOperation) and returns from the enclosing
// runtime function if |sta|'s buffer was detached, went out of bounds (see
// GetLengthOrOutOfBounds), or |index| no longer fits the current length.
// Intended to run *after* user-code-visible conversions, which can invalidate
// any earlier validation.
#define THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(               \
    isolate, sta, index, method_name)                                          \
  do {                                                                         \
    bool out_of_bounds = false;                                                \
    auto length = sta->GetLengthOrOutOfBounds(out_of_bounds);                  \
    if (V8_UNLIKELY(sta->WasDetached() || out_of_bounds || index >= length)) { \
      THROW_NEW_ERROR_RETURN_FAILURE(                                          \
          isolate, NewTypeError(MessageTemplate::kDetachedOperation,           \
                                isolate->factory()->NewStringFromAsciiChecked( \
                                    method_name)));                            \
    }                                                                          \
  } while (false)

395 396 397 398
// This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer
// but also includes the ToInteger/ToBigInt conversion that's part of
// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
//
// Shared driver for all read-modify-write Atomics runtime entries.
// Args: 0 = JSTypedArray, 1 = element index, 2 = operand value.
// |method_name| is only used in the error message when the buffer is gone.
template <template <typename> class Op>
Object GetModifySetValueInBuffer(RuntimeArguments args, Isolate* isolate,
                                 const char* method_name) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);
  Handle<Object> value_obj = args.at(2);

  // Raw address of element 0 of the array within its backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  // BigInt64/BigUint64 arrays take the BigInt conversion path; all other
  // integer element kinds go through ToInteger below.
  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                       BigInt::FromObject(isolate, value_obj));

    // The conversion above can run arbitrary JS (e.g. valueOf), which may
    // detach or shrink the buffer — so validate only after it.
    THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(isolate, sta, index,
                                                            method_name);

    CHECK_LT(index, sta->GetLength());
    if (sta->type() == kExternalBigInt64Array) {
      return Op<int64_t>::Do(isolate, source, index, bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return Op<uint64_t>::Do(isolate, source, index, bigint);
  }

  Handle<Object> value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value_obj));

  // As above: ToInteger can run user code, so re-validate afterwards.
  THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(isolate, sta, index,
                                                          method_name);

  CHECK_LT(index, sta->GetLength());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return Op<ctype>::Do(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

450 451 452
// Runtime slow path for 64-bit Atomics.load; only BigInt64/BigUint64 arrays
// reach here.  Unlike the store/RMW entries there is no user-visible
// conversion before the access, so detach/bounds are only asserted —
// presumably the caller already validated them (TODO confirm against the
// CSA builtin that calls this).
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);

  // Raw address of element 0 of the array within its backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);
  DCHECK(!sta->IsDetachedOrOutOfBounds());
  CHECK_LT(index, sta->GetLength());
  if (sta->type() == kExternalBigInt64Array) {
    return Load<int64_t>::Do(isolate, source, index);
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  return Load<uint64_t>::Do(isolate, source, index);
}

// Runtime slow path for 64-bit Atomics.store; the value is converted to a
// BigInt and, per spec, the converted value is what gets returned.
RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);
  Handle<Object> value_obj = args.at(2);

  // Raw address of element 0 of the array within its backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  Handle<BigInt> bigint;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                     BigInt::FromObject(isolate, value_obj));

  // BigInt::FromObject can run arbitrary JS, so the detach/bounds check must
  // come after the conversion.
  THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(isolate, sta, index,
                                                          "Atomics.store");

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);
  CHECK_LT(index, sta->GetLength());
  if (sta->type() == kExternalBigInt64Array) {
    Store<int64_t>::Do(isolate, source, index, bigint);
    return *bigint;
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  Store<uint64_t>::Do(isolate, source, index, bigint);
  return *bigint;
}

499
// ES #sec-atomics.exchange
// Atomics.exchange( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  return GetModifySetValueInBuffer<Exchange>(args, isolate, "Atomics.exchange");
}

503 504 505
// ES #sec-atomics.compareexchange
// Atomics.compareExchange( typedArray, index, expectedValue, newValue )
// Returns the element value observed before the operation.
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);
  Handle<Object> old_value_obj = args.at(2);
  Handle<Object> new_value_obj = args.at(3);

  // Raw address of element 0 of the array within its backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  // BigInt64/BigUint64 arrays convert both operands to BigInt; all other
  // integer element kinds go through ToInteger below.
  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> old_bigint;
    Handle<BigInt> new_bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));

    // The conversions above can run arbitrary JS, which may detach or shrink
    // the buffer — validate only after both of them.
    THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(
        isolate, sta, index, "Atomics.compareExchange");

    CHECK_LT(index, sta->GetLength());
    if (sta->type() == kExternalBigInt64Array) {
      return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
                                        new_bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
                                       new_bigint);
  }

  Handle<Object> old_value;
  Handle<Object> new_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
                                     Object::ToInteger(isolate, old_value_obj));
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
                                     Object::ToInteger(isolate, new_value_obj));

  // As above: ToInteger can run user code, so re-validate afterwards.
  THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(
      isolate, sta, index, "Atomics.compareExchange");

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype)                  \
  case kExternal##Type##Array:                                         \
    return DoCompareExchange<ctype>(isolate, source, index, old_value, \
                                    new_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}
560

561 562 563
// The five read-modify-write entries below all delegate to the shared
// GetModifySetValueInBuffer driver, parameterized by the operation functor.

// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  return GetModifySetValueInBuffer<Add>(args, isolate, "Atomics.add");
}

// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  return GetModifySetValueInBuffer<Sub>(args, isolate, "Atomics.sub");
}

// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  return GetModifySetValueInBuffer<And>(args, isolate, "Atomics.and");
}

// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  return GetModifySetValueInBuffer<Or>(args, isolate, "Atomics.or");
}

// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  return GetModifySetValueInBuffer<Xor>(args, isolate, "Atomics.xor");
}
590

591
#undef INTEGER_TYPED_ARRAYS
592

593
#else
594

595 596 597 598
// On all other architectures these operations are implemented entirely in
// CSA builtins (see the comment at the top of the file), so the runtime
// entries are stubbed out; reaching one of them is a bug.
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }

#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
615
        // || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647

// Seq-cst read of an own field of a JSSharedStruct (Atomics.load on shared
// structs). Returns undefined when the struct has no such field.
RUNTIME_FUNCTION(Runtime_AtomicsLoadSharedStructField) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
  Handle<Name> field_name;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
                                     Object::ToName(isolate, args.at(1)));
  // Shared structs are prototypeless, so an OWN lookup is sufficient.
  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
  if (it.IsFound()) return *it.GetDataValue(kSeqCstAccess);
  return ReadOnlyRoots(isolate).undefined_value();
}

// Seq-cst write of an own field of a JSSharedStruct (Atomics.store on shared
// structs). Returns the (shared) stored value on success.
RUNTIME_FUNCTION(Runtime_AtomicsStoreSharedStructField) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
  Handle<Name> field_name;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
                                     Object::ToName(isolate, args.at(1)));
  // The stored value must be shareable across threads; Object::Share throws
  // (kThrowOnError) when it is not.
  Handle<Object> shared_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
  // Shared structs are prototypeless.
  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
  if (it.IsFound()) {
    it.WriteDataValue(shared_value, kSeqCstAccess);
    return *shared_value;
  }
  // Shared structs are non-extensible. Instead of duplicating logic, call
  // Object::AddDataProperty to handle the error case.
  Maybe<bool> result =
      Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
                              StoreOrigin::kMaybeKeyed);
  // AddDataProperty must fail here (non-extensible receiver), leaving a
  // pending exception that we propagate.
  DCHECK(result.IsNothing());
  USE(result);
  return ReadOnlyRoots(isolate).exception();
}

// Seq-cst swap of an own field of a JSSharedStruct (Atomics.exchange on
// shared structs). Returns the previous field value on success.
RUNTIME_FUNCTION(Runtime_AtomicsExchangeSharedStructField) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
  Handle<Name> field_name;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
                                     Object::ToName(isolate, args.at(1)));
  // The new value must be shareable across threads; Object::Share throws
  // (kThrowOnError) when it is not.
  Handle<Object> shared_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
  // Shared structs are prototypeless.
  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
  if (it.IsFound()) return *it.SwapDataValue(shared_value, kSeqCstAccess);
  // Shared structs are non-extensible. Instead of duplicating logic, call
  // Object::AddDataProperty to handle the error case.
  Maybe<bool> result =
      Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
                              StoreOrigin::kMaybeKeyed);
  // AddDataProperty must fail here (non-extensible receiver), leaving a
  // pending exception that we propagate.
  DCHECK(result.IsNothing());
  USE(result);
  return ReadOnlyRoots(isolate).exception();
}

679 680
}  // namespace internal
}  // namespace v8