Commit b0329614 authored by Ross McIlroy, committed by Commit Bot

[csa][cleanup] TNodify AtomicLoad and thereby all uses of Projection.

Introduces an AtomicUint64 type and a separate AtomicLoad64 due to the
different types returned by loading 64-bit atomic values on 32-bit vs
64-bit architectures.

BUG=v8:6949,v8:11074

Change-Id: I95de994df9639847cd6b5fd56ea2a6585189ed3a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2529455
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Auto-Submit: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71110}
parent 3658a431
......@@ -43,8 +43,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
// Create a BigInt from the result of a 64-bit atomic operation, using
// projections on 32-bit platforms.
TNode<BigInt> BigIntFromSigned64(Node* signed64);
TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64);
TNode<BigInt> BigIntFromSigned64(SloppyTNode<AtomicInt64> signed64);
TNode<BigInt> BigIntFromUnsigned64(SloppyTNode<AtomicUint64> unsigned64);
};
// https://tc39.es/ecma262/#sec-validateintegertypedarray
......@@ -142,25 +142,25 @@ void SharedArrayBufferBuiltinsAssembler::DebugCheckAtomicIndex(
}
TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
Node* signed64) {
if (Is64()) {
return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64));
} else {
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64));
TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64));
SloppyTNode<AtomicInt64> signed64) {
#if defined(V8_HOST_ARCH_32_BIT)
TNode<IntPtrT> low = Projection<0>(signed64);
TNode<IntPtrT> high = Projection<1>(signed64);
return BigIntFromInt32Pair(low, high);
}
#else
return BigIntFromInt64(signed64);
#endif
}
TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
Node* unsigned64) {
if (Is64()) {
return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64));
} else {
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64));
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64));
SloppyTNode<AtomicUint64> unsigned64) {
#if defined(V8_HOST_ARCH_32_BIT)
TNode<UintPtrT> low = Projection<0>(unsigned64);
TNode<UintPtrT> high = Projection<1>(unsigned64);
return BigIntFromUint32Pair(low, high);
}
#else
return BigIntFromUint64(unsigned64);
#endif
}
// https://tc39.es/ecma262/#sec-atomicload
......@@ -201,28 +201,26 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
Return(
SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));
Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
BIND(&u8);
Return(SmiFromInt32(
AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
BIND(&i16);
Return(SmiFromInt32(
AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));
Return(
SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
BIND(&u16);
Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
WordShl(index_word, 1))));
Return(
SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
BIND(&i32);
Return(ChangeInt32ToTagged(
AtomicLoad(MachineType::Int32(), backing_store, WordShl(index_word, 2))));
AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
BIND(&u32);
Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
WordShl(index_word, 2))));
Return(ChangeUint32ToTagged(
AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
BIND(&i64);
Goto(&u64);
......@@ -234,15 +232,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
}
#else
BIND(&i64);
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store,
WordShl(index_word, 3))));
Return(BigIntFromSigned64(
AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
BIND(&u64);
Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
WordShl(index_word, 3))));
Return(BigIntFromUnsigned64(
AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
#endif
// This shouldn't happen, we've already validated the type.
......
......@@ -667,10 +667,22 @@ TNode<Object> CodeAssembler::LoadFullTagged(Node* base, Node* offset,
Load(MachineType::Pointer(), base, offset, needs_poisoning));
}
Node* CodeAssembler::AtomicLoad(MachineType type, Node* base, Node* offset) {
Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset) {
return raw_assembler()->AtomicLoad(type, base, offset);
}
template <class Type>
TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
TNode<WordT> offset) {
return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
}
template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
TNode<RawPtrT> base, TNode<WordT> offset);
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<HeapObject> object,
TNode<IntPtrT> offset) {
return raw_assembler()->LoadFromObject(type, object, offset);
......
......@@ -178,6 +178,20 @@ HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE)
#undef OBJECT_TYPE_STRUCT_CASE
#undef OBJECT_TYPE_TEMPLATE_CASE
#if defined(V8_HOST_ARCH_32_BIT)
#define BINT_IS_SMI
using BInt = Smi;
using AtomicInt64 = PairT<IntPtrT, IntPtrT>;
using AtomicUint64 = PairT<UintPtrT, UintPtrT>;
#elif defined(V8_HOST_ARCH_64_BIT)
#define BINT_IS_INTPTR
using BInt = IntPtrT;
using AtomicInt64 = IntPtrT;
using AtomicUint64 = UintPtrT;
#else
#error Unknown architecture.
#endif
// {raw_value} must be a tagged Object.
// {raw_type} must be a tagged Smi.
// {raw_location} must be a tagged String.
......@@ -731,7 +745,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return UncheckedCast<Type>(
Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
}
Node* AtomicLoad(MachineType type, Node* base, Node* offset);
template <class Type>
TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
return UncheckedCast<Type>(
AtomicLoad(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
TNode<Object> LoadFullTagged(
......@@ -992,8 +1012,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Int32T> TruncateFloat32ToInt32(SloppyTNode<Float32T> value);
// Projections
Node* Projection(int index, Node* value);
template <int index, class T1, class T2>
TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
Projection(TNode<PairT<T1, T2>> value) {
......@@ -1232,11 +1250,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, int input_count,
Node* const* inputs);
Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
TNode<Int32T> Signed(TNode<Int32T> x);
TNode<Uint32T> Unsigned(TNode<Uint32T> x);
Node* Projection(int index, Node* value);
RawMachineAssembler* raw_assembler() const;
JSGraph* jsgraph() const;
......@@ -1533,17 +1555,6 @@ class V8_EXPORT_PRIVATE ScopedExceptionHandler {
};
} // namespace compiler
#if defined(V8_HOST_ARCH_32_BIT)
#define BINT_IS_SMI
using BInt = Smi;
#elif defined(V8_HOST_ARCH_64_BIT)
#define BINT_IS_INTPTR
using BInt = IntPtrT;
#else
#error Unknown architecture.
#endif
} // namespace internal
} // namespace v8
......
......@@ -221,15 +221,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Atomic memory operations.
Node* AtomicLoad(MachineType type, Node* base, Node* index) {
if (type.representation() == MachineRepresentation::kWord64) {
DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicLoad(type), base, index);
}
Node* AtomicLoad64(Node* base, Node* index) {
if (machine()->Is64()) {
return AddNode(machine()->Word64AtomicLoad(type), base, index);
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
index);
} else {
return AddNode(machine()->Word32AtomicPairLoad(), base, index);
}
}
return AddNode(machine()->Word32AtomicLoad(type), base, index);
}
#if defined(V8_TARGET_BIG_ENDIAN)
#define VALUE_HALVES value_high, value
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment