Commit b0329614 authored by Ross McIlroy, committed by Commit Bot

[csa][cleanup] TNodify AtomicLoad and thereby all uses of Projection.

Introduces an AtomicUint64 type and a separate AtomicLoad64, since loading
64-bit atomic values returns different types on 32-bit and 64-bit
architectures.
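
For orientation, a minimal standalone C++ sketch (illustrative only; the helper
names are not V8 code) of the distinction the new types encode: a 64-bit atomic
load yields a single word-sized value on 64-bit hosts, but two word-sized
halves on 32-bit hosts that the caller must recombine, much as
BigIntFromInt32Pair does in the builtin changed below.

#include <atomic>
#include <cstdint>
#include <utility>

// Illustrative only: split a 64-bit atomic value into the low/high word-sized
// halves that a 32-bit host works with (the PairT<IntPtrT, IntPtrT> case).
std::pair<uint32_t, uint32_t> LoadAsHalves(const std::atomic<uint64_t>& slot) {
  uint64_t value = slot.load();
  return {static_cast<uint32_t>(value),         // low projection
          static_cast<uint32_t>(value >> 32)};  // high projection
}

// Recombine the halves, mirroring what BigIntFromInt32Pair conceptually does;
// on a 64-bit host the value is already a single word and no recombination is
// needed.
uint64_t CombineHalves(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}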

BUG=v8:6949,v8:11074

Change-Id: I95de994df9639847cd6b5fd56ea2a6585189ed3a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2529455
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Auto-Submit: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71110}
parent 3658a431
@@ -43,8 +43,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
   // Create a BigInt from the result of a 64-bit atomic operation, using
   // projections on 32-bit platforms.
-  TNode<BigInt> BigIntFromSigned64(Node* signed64);
-  TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64);
+  TNode<BigInt> BigIntFromSigned64(SloppyTNode<AtomicInt64> signed64);
+  TNode<BigInt> BigIntFromUnsigned64(SloppyTNode<AtomicUint64> unsigned64);
 };
 
 // https://tc39.es/ecma262/#sec-validateintegertypedarray
@@ -142,25 +142,25 @@ void SharedArrayBufferBuiltinsAssembler::DebugCheckAtomicIndex(
 }
 
 TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
-    Node* signed64) {
-  if (Is64()) {
-    return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64));
-  } else {
-    TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64));
-    TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64));
-    return BigIntFromInt32Pair(low, high);
-  }
+    SloppyTNode<AtomicInt64> signed64) {
+#if defined(V8_HOST_ARCH_32_BIT)
+  TNode<IntPtrT> low = Projection<0>(signed64);
+  TNode<IntPtrT> high = Projection<1>(signed64);
+  return BigIntFromInt32Pair(low, high);
+#else
+  return BigIntFromInt64(signed64);
+#endif
 }
 
 TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
-    Node* unsigned64) {
-  if (Is64()) {
-    return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64));
-  } else {
-    TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64));
-    TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64));
-    return BigIntFromUint32Pair(low, high);
-  }
+    SloppyTNode<AtomicUint64> unsigned64) {
+#if defined(V8_HOST_ARCH_32_BIT)
+  TNode<UintPtrT> low = Projection<0>(unsigned64);
+  TNode<UintPtrT> high = Projection<1>(unsigned64);
+  return BigIntFromUint32Pair(low, high);
+#else
+  return BigIntFromUint64(unsigned64);
+#endif
 }
 
 // https://tc39.es/ecma262/#sec-atomicload
@@ -201,28 +201,26 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
          arraysize(case_labels));
 
   BIND(&i8);
-  Return(
-      SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+  Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
 
   BIND(&u8);
-  Return(SmiFromInt32(
-      AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+  Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
 
   BIND(&i16);
-  Return(SmiFromInt32(
-      AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));
+  Return(
+      SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
 
   BIND(&u16);
-  Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
-                                 WordShl(index_word, 1))));
+  Return(
+      SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
 
   BIND(&i32);
   Return(ChangeInt32ToTagged(
-      AtomicLoad(MachineType::Int32(), backing_store, WordShl(index_word, 2))));
+      AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
 
   BIND(&u32);
-  Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
-                                         WordShl(index_word, 2))));
+  Return(ChangeUint32ToTagged(
+      AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
 
 #if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
   BIND(&i64);
   Goto(&u64);
@@ -234,15 +232,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   }
 #else
   BIND(&i64);
-  // This uses Uint64() intentionally: AtomicLoad is not implemented for
-  // Int64(), which is fine because the machine instruction only cares
-  // about words.
-  Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store,
-                                       WordShl(index_word, 3))));
+  Return(BigIntFromSigned64(
+      AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
 
   BIND(&u64);
-  Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
-                                         WordShl(index_word, 3))));
+  Return(BigIntFromUnsigned64(
+      AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
 #endif
 
   // This shouldn't happen, we've already validated the type.
......
@@ -667,10 +667,22 @@ TNode<Object> CodeAssembler::LoadFullTagged(Node* base, Node* offset,
       Load(MachineType::Pointer(), base, offset, needs_poisoning));
 }
 
-Node* CodeAssembler::AtomicLoad(MachineType type, Node* base, Node* offset) {
+Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
+                                TNode<WordT> offset) {
   return raw_assembler()->AtomicLoad(type, base, offset);
 }
 
+template <class Type>
+TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
+                                        TNode<WordT> offset) {
+  return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
+}
+
+template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
+    TNode<RawPtrT> base, TNode<WordT> offset);
+template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
+    TNode<RawPtrT> base, TNode<WordT> offset);
+
 Node* CodeAssembler::LoadFromObject(MachineType type, TNode<HeapObject> object,
                                     TNode<IntPtrT> offset) {
   return raw_assembler()->LoadFromObject(type, object, offset);
......
@@ -178,6 +178,20 @@ HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE)
 #undef OBJECT_TYPE_STRUCT_CASE
 #undef OBJECT_TYPE_TEMPLATE_CASE
 
+#if defined(V8_HOST_ARCH_32_BIT)
+#define BINT_IS_SMI
+using BInt = Smi;
+using AtomicInt64 = PairT<IntPtrT, IntPtrT>;
+using AtomicUint64 = PairT<UintPtrT, UintPtrT>;
+#elif defined(V8_HOST_ARCH_64_BIT)
+#define BINT_IS_INTPTR
+using BInt = IntPtrT;
+using AtomicInt64 = IntPtrT;
+using AtomicUint64 = UintPtrT;
+#else
+#error Unknown architecture.
+#endif
+
 // {raw_value} must be a tagged Object.
 // {raw_type} must be a tagged Smi.
 // {raw_location} must be a tagged String.
@@ -731,7 +745,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
     return UncheckedCast<Type>(
         Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
   }
-  Node* AtomicLoad(MachineType type, Node* base, Node* offset);
+  template <class Type>
+  TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
+    return UncheckedCast<Type>(
+        AtomicLoad(MachineTypeOf<Type>::value, base, offset));
+  }
+  template <class Type>
+  TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
 
   // Load uncompressed tagged value from (most likely off JS heap) memory
   // location.
   TNode<Object> LoadFullTagged(
@@ -992,8 +1012,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   TNode<Int32T> TruncateFloat32ToInt32(SloppyTNode<Float32T> value);
 
   // Projections
-  Node* Projection(int index, Node* value);
-
   template <int index, class T1, class T2>
   TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
   Projection(TNode<PairT<T1, T2>> value) {
@@ -1232,11 +1250,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
       const CallInterfaceDescriptor& descriptor, int input_count,
       Node* const* inputs);
 
+  Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+
   // These two don't have definitions and are here only for catching use cases
   // where the cast is not necessary.
   TNode<Int32T> Signed(TNode<Int32T> x);
   TNode<Uint32T> Unsigned(TNode<Uint32T> x);
 
+  Node* Projection(int index, Node* value);
+
   RawMachineAssembler* raw_assembler() const;
   JSGraph* jsgraph() const;
@@ -1533,17 +1555,6 @@ class V8_EXPORT_PRIVATE ScopedExceptionHandler {
 };
 
 }  // namespace compiler
 
-#if defined(V8_HOST_ARCH_32_BIT)
-#define BINT_IS_SMI
-using BInt = Smi;
-#elif defined(V8_HOST_ARCH_64_BIT)
-#define BINT_IS_INTPTR
-using BInt = IntPtrT;
-#else
-#error Unknown architecture.
-#endif
-
 }  // namespace internal
 }  // namespace v8
......
@@ -221,15 +221,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   // Atomic memory operations.
   Node* AtomicLoad(MachineType type, Node* base, Node* index) {
-    if (type.representation() == MachineRepresentation::kWord64) {
-      if (machine()->Is64()) {
-        return AddNode(machine()->Word64AtomicLoad(type), base, index);
-      } else {
-        return AddNode(machine()->Word32AtomicPairLoad(), base, index);
-      }
-    }
+    DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
     return AddNode(machine()->Word32AtomicLoad(type), base, index);
   }
 
+  Node* AtomicLoad64(Node* base, Node* index) {
+    if (machine()->Is64()) {
+      // This uses Uint64() intentionally: AtomicLoad is not implemented for
+      // Int64(), which is fine because the machine instruction only cares
+      // about words.
+      return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
+                     index);
+    } else {
+      return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+    }
+  }
+
 #if defined(V8_TARGET_BIG_ENDIAN)
 #define VALUE_HALVES value_high, value
......