// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/objects/objects.h"

namespace v8 {
namespace internal {

using compiler::Node;

class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
 public:
  explicit SharedArrayBufferBuiltinsAssembler(
      compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

 protected:
22 23 24 25
  using AssemblerFunction = Node* (CodeAssembler::*)(MachineType type,
                                                     Node* base, Node* offset,
                                                     Node* value,
                                                     Node* value_high);
26
  void ValidateSharedTypedArray(Node* tagged, Node* context,
27
                                TNode<Int32T>* out_elements_kind,
28 29 30
                                Node** out_backing_store);
  Node* ConvertTaggedAtomicIndexToWord32(Node* tagged, Node* context,
                                         Node** number_index);
31 32 33 34 35
  void ValidateAtomicIndex(Node* array, Node* index_word, Node* context);
#if DEBUG
  void DebugSanityCheckAtomicIndex(Node* array, Node* index_word,
                                   Node* context);
#endif
36 37 38
  void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value,
                                Node* context, AssemblerFunction function,
                                Runtime::FunctionId runtime_function);
39 40 41 42 43

  // Create a BigInt from the result of a 64-bit atomic operation, using
  // projections on 32-bit platforms.
  TNode<BigInt> BigIntFromSigned64(Node* signed64);
  TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64);
44 45 46
};

void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
47
    Node* tagged, Node* context, TNode<Int32T>* out_elements_kind,
48 49 50 51 52 53 54
    Node** out_backing_store) {
  Label not_float_or_clamped(this), invalid(this);

  // Fail if it is not a heap object.
  GotoIf(TaggedIsSmi(tagged), &invalid);

  // Fail if the array's instance type is not JSTypedArray.
55
  TNode<Map> tagged_map = LoadMap(tagged);
56
  GotoIfNot(IsJSTypedArrayMap(tagged_map), &invalid);
57 58

  // Fail if the array's JSArrayBuffer is not shared.
59 60
  TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged));
  TNode<Uint32T> bitfield = LoadJSArrayBufferBitField(array_buffer);
61
  GotoIfNot(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &invalid);
62 63

  // Fail if the array's element type is float32, float64 or clamped.
64 65 66 67 68 69
  STATIC_ASSERT(INT8_ELEMENTS < FLOAT32_ELEMENTS);
  STATIC_ASSERT(INT16_ELEMENTS < FLOAT32_ELEMENTS);
  STATIC_ASSERT(INT32_ELEMENTS < FLOAT32_ELEMENTS);
  STATIC_ASSERT(UINT8_ELEMENTS < FLOAT32_ELEMENTS);
  STATIC_ASSERT(UINT16_ELEMENTS < FLOAT32_ELEMENTS);
  STATIC_ASSERT(UINT32_ELEMENTS < FLOAT32_ELEMENTS);
70
  TNode<Int32T> elements_kind = LoadMapElementsKind(tagged_map);
71
  GotoIf(Int32LessThan(elements_kind, Int32Constant(FLOAT32_ELEMENTS)),
72
         &not_float_or_clamped);
73 74 75
  STATIC_ASSERT(BIGINT64_ELEMENTS > UINT8_CLAMPED_ELEMENTS);
  STATIC_ASSERT(BIGUINT64_ELEMENTS > UINT8_CLAMPED_ELEMENTS);
  Branch(Int32GreaterThan(elements_kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)),
76 77
         &not_float_or_clamped, &invalid);

78
  BIND(&invalid);
79
  {
80 81
    ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray,
                   tagged);
82 83
  }

84
  BIND(&not_float_or_clamped);
85
  *out_elements_kind = elements_kind;
86

87 88 89
  TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer);
  TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged));
  *out_backing_store = IntPtrAdd(backing_store, byte_offset);
90 91
}

92
// https://tc39.github.io/ecma262/#sec-validateatomicaccess
93 94
Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
    Node* tagged, Node* context, Node** number_index) {
95
  VARIABLE(var_result, MachineRepresentation::kWord32);
96 97 98 99 100 101 102
  Label done(this), range_error(this);

  // Returns word32 since index cannot be longer than a TypedArray length,
  // which has a uint32 maximum.
  // The |number_index| output parameter is used only for architectures that
  // don't currently have a TF implementation and forward to runtime functions
  // instead; they expect the value has already been coerced to an integer.
103
  *number_index = ToSmiIndex(CAST(context), CAST(tagged), &range_error);
104
  var_result.Bind(SmiToInt32(*number_index));
105 106 107
  Goto(&done);

  BIND(&range_error);
108
  { ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); }
109

110
  BIND(&done);
111 112 113
  return var_result.value();
}

114
void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
115
                                                             Node* index,
116
                                                             Node* context) {
117 118
  // Check if the index is in bounds. If not, throw RangeError.
  Label check_passed(this);
119 120 121 122
  TNode<UintPtrT> array_length = LoadJSTypedArrayLength(CAST(array));
  // TODO(v8:4153): Use UintPtr for the {index} as well.
  GotoIf(UintPtrLessThan(ChangeUint32ToWord(index), array_length),
         &check_passed);
123

124
  ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
125

126
  BIND(&check_passed);
127 128
}

#if DEBUG
// Re-asserts that |index_word| is in bounds for |array|. Only compiled into
// Debug builds.
void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
    Node* array, Node* index_word, Node* context) {
  // In Debug mode, we re-validate the index as a sanity check because
  // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
  // detached and the TypedArray length can't change either, so skipping this
  // check in Release mode is safe.
  CSA_ASSERT(this, UintPtrLessThan(ChangeUint32ToWord(index_word),
                                   LoadJSTypedArrayLength(CAST(array))));
}
#endif

141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
    Node* signed64) {
  if (Is64()) {
    return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64));
  } else {
    TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64));
    TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64));
    return BigIntFromInt32Pair(low, high);
  }
}

// Builds a BigInt from an unsigned 64-bit atomic-op result. On 64-bit
// targets the result is a single word; on 32-bit targets it is a
// (low, high) projection pair.
TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
    Node* unsigned64) {
  if (Is64()) {
    return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64));
  } else {
    TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64));
    TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64));
    return BigIntFromUint32Pair(low, high);
  }
}

163
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
164 165 166
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* context = Parameter(Descriptor::kContext);
167

168
  TNode<Int32T> elements_kind;
169
  Node* backing_store;
170
  ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
171

172 173 174 175
  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);
176
  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);
177 178

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
179
      i64(this), u64(this), other(this);
180
  int32_t case_values[] = {
181 182
      INT8_ELEMENTS,  UINT8_ELEMENTS,  INT16_ELEMENTS,    UINT16_ELEMENTS,
      INT32_ELEMENTS, UINT32_ELEMENTS, BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS,
183
  };
184
  Label* case_labels[] = {&i8, &u8, &i16, &u16, &i32, &u32, &i64, &u64};
185
  Switch(elements_kind, &other, case_values, case_labels,
186 187
         arraysize(case_labels));

188
  BIND(&i8);
189 190
  Return(
      SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));
191

192
  BIND(&u8);
193
  Return(SmiFromInt32(
194 195
      AtomicLoad(MachineType::Uint8(), backing_store, index_word)));

196
  BIND(&i16);
197
  Return(SmiFromInt32(
198 199
      AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));

200
  BIND(&u16);
201 202
  Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
                                 WordShl(index_word, 1))));
203

204
  BIND(&i32);
205 206 207
  Return(ChangeInt32ToTagged(
      AtomicLoad(MachineType::Int32(), backing_store, WordShl(index_word, 2))));

208
  BIND(&u32);
209 210
  Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
                                         WordShl(index_word, 2))));
211
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
212 213
  BIND(&i64);
  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
214

215 216 217
  BIND(&u64);
  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
#else
218 219 220 221 222 223 224 225 226 227
  BIND(&i64);
  // This uses Uint64() intentionally: AtomicLoad is not implemented for
  // Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                       WordShl(index_word, 3))));

  BIND(&u64);
  Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                         WordShl(index_word, 3))));
228
#endif
229
  // This shouldn't happen, we've already validated the type.
230
  BIND(&other);
231 232 233 234
  Unreachable();
}

// Atomics.store(typedArray, index, value)
// Validates the array and index, coerces the value (ToInteger for small
// kinds, ToBigInt for 64-bit kinds), emits the atomic store, and returns the
// coerced value.
TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* value = Parameter(Descriptor::kValue);
  Node* context = Parameter(Descriptor::kContext);

  TNode<Int32T> elements_kind;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);
  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);

  // BigInt kinds need ToBigInt, not ToInteger; dispatch on kind first.
  Label u8(this), u16(this), u32(this), u64(this), other(this);
  STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
  STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
  GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64);

  TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
  TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);

#if DEBUG
  // ToInteger may have run JS; re-check the index (length cannot shrink).
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif

  // Signed and unsigned kinds store the same bits, so they share labels.
  int32_t case_values[] = {
      INT8_ELEMENTS,   UINT8_ELEMENTS, INT16_ELEMENTS,
      UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
  };
  Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
  Switch(elements_kind, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&u8);
  AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
              value_word32);
  Return(value_integer);

  BIND(&u16);
  AtomicStore(MachineRepresentation::kWord16, backing_store,
              WordShl(index_word, 1), value_word32);
  Return(value_integer);

  BIND(&u32);
  AtomicStore(MachineRepresentation::kWord32, backing_store,
              WordShl(index_word, 2), value_word32);
  Return(value_integer);

  BIND(&u64);
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
  // No 64-bit atomic store codegen on pre-R6 MIPS32; go through the runtime.
  Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer,
                     value));
#else
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  // On 32-bit platforms the upper word travels separately; on 64-bit it is
  // unused and passed as nullptr.
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  AtomicStore(MachineRepresentation::kWord64, backing_store,
              WordShl(index_word, 3), var_low.value(), high);
  Return(value_bigint);
#endif

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
}

// Atomics.exchange(typedArray, index, value)
// Validates the array and index, then atomically swaps in the coerced value
// and returns the previous element, boxed per elements kind.
TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* value = Parameter(Descriptor::kValue);
  Node* context = Parameter(Descriptor::kContext);

  TNode<Int32T> elements_kind;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // No TF codegen for atomic exchange on MIPS; go through the runtime.
  Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
                     value));
#else
  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);
  // BigInt kinds need ToBigInt, not ToInteger; dispatch on kind first.
  STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
  STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
  GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);

  TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
  // ToInteger may have run JS; re-check the index (length cannot shrink).
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);

  int32_t case_values[] = {
      INT8_ELEMENTS,   UINT8_ELEMENTS, INT16_ELEMENTS,
      UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(elements_kind, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
                                     index_word, value_word32)));

  BIND(&u8);
  Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
                                     index_word, value_word32)));

  BIND(&i16);
  Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
                                     WordShl(index_word, 1), value_word32)));

  BIND(&u16);
  Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
                                     WordShl(index_word, 1), value_word32)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
                                            WordShl(index_word, 2),
                                            value_word32)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(
      AtomicExchange(MachineType::Uint32(), backing_store,
                     WordShl(index_word, 2), value_word32)));

  BIND(&big);
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  // Upper word is only meaningful on 32-bit platforms.
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  GotoIf(Word32Equal(elements_kind, Int32Constant(BIGINT64_ELEMENTS)), &i64);
  GotoIf(Word32Equal(elements_kind, Int32Constant(BIGUINT64_ELEMENTS)), &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: AtomicExchange is not implemented for
  // Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store,
                                           WordShl(index_word, 3),
                                           var_low.value(), high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(
      AtomicExchange(MachineType::Uint64(), backing_store,
                     WordShl(index_word, 3), var_low.value(), high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
}

// Atomics.compareExchange(typedArray, index, expectedValue, replacementValue)
// Validates the array and index, then performs an atomic compare-exchange
// and returns the previous element, boxed per elements kind.
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* old_value = Parameter(Descriptor::kOldValue);
  Node* new_value = Parameter(Descriptor::kNewValue);
  Node* context = Parameter(Descriptor::kContext);

  TNode<Int32T> elements_kind;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
  // No TF codegen for compare-exchange on these targets; use the runtime.
  Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
                     index_integer, old_value, new_value));
#else
  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);
  // BigInt kinds need ToBigInt, not ToInteger; dispatch on kind first.
  STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
  STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
  GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);

  TNode<Number> old_value_integer =
      ToInteger_Inline(CAST(context), CAST(old_value));
  TNode<Number> new_value_integer =
      ToInteger_Inline(CAST(context), CAST(new_value));
#if DEBUG
  // ToInteger may have run JS; re-check the index (length cannot shrink).
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TNode<Word32T> old_value_word32 =
      TruncateTaggedToWord32(context, old_value_integer);
  TNode<Word32T> new_value_word32 =
      TruncateTaggedToWord32(context, new_value_integer);

  int32_t case_values[] = {
      INT8_ELEMENTS,   UINT8_ELEMENTS, INT16_ELEMENTS,
      UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(elements_kind, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32(AtomicCompareExchange(MachineType::Int8(), backing_store,
                                            index_word, old_value_word32,
                                            new_value_word32)));

  BIND(&u8);
  Return(SmiFromInt32(AtomicCompareExchange(MachineType::Uint8(), backing_store,
                                            index_word, old_value_word32,
                                            new_value_word32)));

  BIND(&i16);
  Return(SmiFromInt32(AtomicCompareExchange(
      MachineType::Int16(), backing_store, WordShl(index_word, 1),
      old_value_word32, new_value_word32)));

  BIND(&u16);
  Return(SmiFromInt32(AtomicCompareExchange(
      MachineType::Uint16(), backing_store, WordShl(index_word, 1),
      old_value_word32, new_value_word32)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(AtomicCompareExchange(
      MachineType::Int32(), backing_store, WordShl(index_word, 2),
      old_value_word32, new_value_word32)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(AtomicCompareExchange(
      MachineType::Uint32(), backing_store, WordShl(index_word, 2),
      old_value_word32, new_value_word32)));

  BIND(&big);
  TNode<BigInt> old_value_bigint = ToBigInt(CAST(context), CAST(old_value));
  TNode<BigInt> new_value_bigint = ToBigInt(CAST(context), CAST(new_value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_old_low);
  TVARIABLE(UintPtrT, var_old_high);
  TVARIABLE(UintPtrT, var_new_low);
  TVARIABLE(UintPtrT, var_new_high);
  BigIntToRawBytes(old_value_bigint, &var_old_low, &var_old_high);
  BigIntToRawBytes(new_value_bigint, &var_new_low, &var_new_high);
  // Upper words are only meaningful on 32-bit platforms.
  Node* old_high = Is64() ? nullptr : static_cast<Node*>(var_old_high.value());
  Node* new_high = Is64() ? nullptr : static_cast<Node*>(var_new_high.value());
  GotoIf(Word32Equal(elements_kind, Int32Constant(BIGINT64_ELEMENTS)), &i64);
  GotoIf(Word32Equal(elements_kind, Int32Constant(BIGUINT64_ELEMENTS)), &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: AtomicCompareExchange is not implemented
  // for Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicCompareExchange(
      MachineType::Uint64(), backing_store, WordShl(index_word, 3),
      var_old_low.value(), var_new_low.value(), old_high, new_high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(AtomicCompareExchange(
      MachineType::Uint64(), backing_store, WordShl(index_word, 3),
      var_old_low.value(), var_new_low.value(), old_high, new_high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
}

// Stamps out the Atomics.add/sub/and/or/xor builtins, which all share the
// same shape and delegate to AtomicBinopBuiltinCommon with the matching CSA
// atomic op and runtime fallback.
#define BINOP_BUILTIN(op)                                       \
  TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
    Node* array = Parameter(Descriptor::kArray);                \
    Node* index = Parameter(Descriptor::kIndex);                \
    Node* value = Parameter(Descriptor::kValue);                \
    Node* context = Parameter(Descriptor::kContext);            \
    AtomicBinopBuiltinCommon(array, index, value, context,      \
                             &CodeAssembler::Atomic##op,        \
                             Runtime::kAtomics##op);            \
  }
BINOP_BUILTIN(Add)
BINOP_BUILTIN(Sub)
BINOP_BUILTIN(And)
BINOP_BUILTIN(Or)
BINOP_BUILTIN(Xor)
#undef BINOP_BUILTIN

// Shared body of the Atomics binary-op builtins (add/sub/and/or/xor).
// Validates the array and index, coerces the operand, then emits |function|
// (a CSA atomic read-modify-write op) sized by the elements kind and returns
// the previous element; falls back to |runtime_function| on targets without
// TF codegen for these ops.
void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
    Node* array, Node* index, Node* value, Node* context,
    AssemblerFunction function, Runtime::FunctionId runtime_function) {
  TNode<Int32T> elements_kind;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
  Return(CallRuntime(runtime_function, context, array, index_integer, value));
#else
  TNode<UintPtrT> index_word = ChangeUint32ToWord(index_word32);

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);

  // BigInt kinds need ToBigInt, not ToInteger; dispatch on kind first.
  STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
  STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
  GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);

  TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
  // ToInteger may have run JS; re-check the index (length cannot shrink).
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);

  int32_t case_values[] = {
      INT8_ELEMENTS,   UINT8_ELEMENTS, INT16_ELEMENTS,
      UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(elements_kind, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
                                        index_word, value_word32, nullptr)));

  BIND(&u8);
  Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
                                        index_word, value_word32, nullptr)));

  BIND(&i16);
  Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
                                        WordShl(index_word, 1), value_word32,
                                        nullptr)));

  BIND(&u16);
  Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
                                        WordShl(index_word, 1), value_word32,
                                        nullptr)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(
      (this->*function)(MachineType::Int32(), backing_store,
                        WordShl(index_word, 2), value_word32, nullptr)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(
      (this->*function)(MachineType::Uint32(), backing_store,
                        WordShl(index_word, 2), value_word32, nullptr)));

  BIND(&big);
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  // Upper word is only meaningful on 32-bit platforms.
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  GotoIf(Word32Equal(elements_kind, Int32Constant(BIGINT64_ELEMENTS)), &i64);
  GotoIf(Word32Equal(elements_kind, Int32Constant(BIGUINT64_ELEMENTS)), &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: Atomic* ops are not implemented for
  // Int64(), which is fine because the machine instructions only care
  // about words.
  Return(BigIntFromSigned64(
      (this->*function)(MachineType::Uint64(), backing_store,
                        WordShl(index_word, 3), var_low.value(), high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(
      (this->*function)(MachineType::Uint64(), backing_store,
                        WordShl(index_word, 3), var_low.value(), high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
}

}  // namespace internal
}  // namespace v8