Commit 01b06e99 authored by Shu-yu Guo, committed by V8 LUCI CQ

[compiler] Add TSAN seq cst store support for generated code

This CL does the following for x64:

- Adds seq cst TSAN helpers.

- Refactors codegen's handling of TSAN helpers to also support
  seq cst accesses.

- Performs stores only once instead of twice under TSAN, since
  duplicating stores is unsound. Previously this was "fine"
  because all duplicated stores were relaxed. SeqCst stores
  are used for synchronization, however, and duplicating them
  breaks the synchronization (see the sketch below).
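
As a minimal illustration of that last point, here is a stand-alone sketch in
plain C++ (hypothetical, not V8 code) of how a duplicated store can produce an
outcome the original program cannot:

  #include <atomic>
  #include <thread>

  std::atomic<int> flag{0};

  // Old scheme, sketched: the generated code stored once and the TSAN helper
  // stored the same value again so the race detector would see the access.
  void duplicated_seq_cst_store(int value) {
    flag.store(value, std::memory_order_seq_cst);  // the store the program asked for
    // <-- a thread that has already observed the value may store here
    flag.store(value, std::memory_order_seq_cst);  // duplicate emitted only for TSAN
  }

  int main() {
    std::thread producer([] { duplicated_seq_cst_store(1); });
    std::thread consumer([] {
      while (flag.load(std::memory_order_seq_cst) != 1) {
      }
      flag.store(2, std::memory_order_seq_cst);
      int again = flag.load(std::memory_order_seq_cst);
      // In the original program `again` is always 2: the 2 was written after
      // the 1 had been observed, so 1 precedes 2 in the modification order.
      // With the duplicated store, `again` can be 1.
      (void)again;
    });
    producer.join();
    consumer.join();
  }

With a single store, the consumer's final load can never return 1 after it has
written 2; the duplicate makes that possible, which is why SeqCst stores must
be performed exactly once.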

Bug: v8:7790, v8:11600, v8:11995
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Change-Id: I43071b0ed516cb0917a10f3b2b9861d74edca041
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3103308
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Adam Klein <adamk@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76612}
parent 3926d6cd
@@ -191,11 +191,31 @@ inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_release);
}
inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
@@ -279,6 +299,11 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::memory_order_release);
}
inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
......
@@ -41,20 +41,28 @@ namespace internal {
TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
\
/* TSAN support for stores in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANRelaxedStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64SaveFP, TSANStore) \
\
/* TSAN support for loads in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANRelaxedLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANLoad) \
\
/* Adaptor for CPP builtin */ \
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
......
@@ -439,10 +439,9 @@ class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
-     UncheckedParameter<IntPtrT>(TSANRelaxedStoreDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
- UncheckedParameter<Object>(TSANRelaxedStoreDescriptor::kValue));
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
@@ -483,6 +482,73 @@ TF_BUILTIN(TSANRelaxedStore64SaveFP, TSANRelaxedStoreCodeStubAssembler) {
GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}
class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
TNode<ExternalReference> GetExternalReference(int size) {
if (size == kInt8Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_8_bits());
} else if (size == kInt16Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_16_bits());
} else if (size == kInt32Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_32_bits());
} else {
CHECK_EQ(size, kInt64Size);
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_64_bits());
}
}
void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
std::make_pair(MachineType::IntPtr(), value));
Return(UndefinedConstant());
}
};
TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
}
TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
}
TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
}
TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
}
TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
}
TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
}
TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
}
TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
}
class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
@@ -501,8 +567,7 @@ class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
-     UncheckedParameter<IntPtrT>(TSANRelaxedLoadDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address));
......
@@ -378,24 +378,47 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#ifdef V8_IS_TSAN
// static
- Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
-   if (size == kInt8Size) {
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? Builtin::kTSANRelaxedStore8IgnoreFP
-                : Builtin::kTSANRelaxedStore8SaveFP;
-   } else if (size == kInt16Size) {
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? Builtin::kTSANRelaxedStore16IgnoreFP
-                : Builtin::kTSANRelaxedStore16SaveFP;
-   } else if (size == kInt32Size) {
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? Builtin::kTSANRelaxedStore32IgnoreFP
-                : Builtin::kTSANRelaxedStore32SaveFP;
-   } else {
-     CHECK_EQ(size, kInt64Size);
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? Builtin::kTSANRelaxedStore64IgnoreFP
-                : Builtin::kTSANRelaxedStore64SaveFP;
-   }
- }
+ Builtin CodeFactory::GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+                                       std::memory_order order) {
+   if (order == std::memory_order_relaxed) {
+     if (size == kInt8Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANRelaxedStore8IgnoreFP
+                  : Builtin::kTSANRelaxedStore8SaveFP;
+     } else if (size == kInt16Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANRelaxedStore16IgnoreFP
+                  : Builtin::kTSANRelaxedStore16SaveFP;
+     } else if (size == kInt32Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANRelaxedStore32IgnoreFP
+                  : Builtin::kTSANRelaxedStore32SaveFP;
+     } else {
+       CHECK_EQ(size, kInt64Size);
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANRelaxedStore64IgnoreFP
+                  : Builtin::kTSANRelaxedStore64SaveFP;
+     }
+   } else {
+     DCHECK_EQ(order, std::memory_order_seq_cst);
+     if (size == kInt8Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANSeqCstStore8IgnoreFP
+                  : Builtin::kTSANSeqCstStore8SaveFP;
+     } else if (size == kInt16Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANSeqCstStore16IgnoreFP
+                  : Builtin::kTSANSeqCstStore16SaveFP;
+     } else if (size == kInt32Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANSeqCstStore32IgnoreFP
+                  : Builtin::kTSANSeqCstStore32SaveFP;
+     } else {
+       CHECK_EQ(size, kInt64Size);
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? Builtin::kTSANSeqCstStore64IgnoreFP
+                  : Builtin::kTSANSeqCstStore64SaveFP;
+     }
+   }
+ }
......
@@ -90,7 +90,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
AllocationSiteOverrideMode override_mode);
#ifdef V8_IS_TSAN
- static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
+ static Builtin GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+                                 std::memory_order order);
static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
#endif  // V8_IS_TSAN
};
......
@@ -1182,7 +1182,7 @@ namespace {
// address, with the same value. This is done in order for TSAN to see these
// stores from generated code.
// Note that {value} is an int64_t irrespective of the store size. This is on
- // purpose to keep the function signatures the same accross stores. The
+ // purpose to keep the function signatures the same across stores. The
// static_cast inside the method will ignore the bits which will not be stored.
void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
@@ -1220,6 +1220,44 @@ void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
#endif  // V8_TARGET_ARCH_X64
}
// Same as above, for sequentially consistent stores.
void tsan_seq_cst_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic8*>(addr),
static_cast<base::Atomic8>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_16_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic16*>(addr),
static_cast<base::Atomic16>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic32*>(addr),
static_cast<base::Atomic32>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_64_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic64*>(addr),
static_cast<base::Atomic64>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
// Same as above, for relaxed loads.
base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
@@ -1247,6 +1285,14 @@ IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
tsan_relaxed_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
tsan_relaxed_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_8_bits,
tsan_seq_cst_store_8_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits,
tsan_seq_cst_store_16_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_32_bits,
tsan_seq_cst_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits,
tsan_seq_cst_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
tsan_relaxed_load_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
......
@@ -274,6 +274,14 @@ class StatsCounter;
"tsan_relaxed_store_function_32_bits") \
IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
"tsan_relaxed_store_function_64_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_8_bits, \
"tsan_seq_cst_store_function_8_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_16_bits, \
"tsan_seq_cst_store_function_16_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_32_bits, \
"tsan_seq_cst_store_function_32_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_64_bits, \
"tsan_seq_cst_store_function_64_bits") \
IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
"tsan_relaxed_load_function_32_bits") \
IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
......
@@ -111,8 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
- IF_TSAN(V, TSANRelaxedStore) \
- IF_TSAN(V, TSANRelaxedLoad) \
+ IF_TSAN(V, TSANStore) \
+ IF_TSAN(V, TSANLoad) \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
@@ -1053,26 +1053,26 @@ class WriteBarrierDescriptor final
};
#ifdef V8_IS_TSAN
- class TSANRelaxedStoreDescriptor final
-     : public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
+ class TSANStoreDescriptor final
+     : public StaticCallInterfaceDescriptor<TSANStoreDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(),    // kAddress
MachineType::AnyTagged())  // kValue
- DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
+ DECLARE_DESCRIPTOR(TSANStoreDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
- class TSANRelaxedLoadDescriptor final
-     : public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
+ class TSANLoadDescriptor final
+     : public StaticCallInterfaceDescriptor<TSANLoadDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
DEFINE_PARAMETER_TYPES(MachineType::Pointer())  // kAddress
- DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
+ DECLARE_DESCRIPTOR(TSANLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
......
@@ -43,12 +43,12 @@ constexpr auto WriteBarrierDescriptor::registers() {
#ifdef V8_IS_TSAN
// static
- constexpr auto TSANRelaxedStoreDescriptor::registers() {
+ constexpr auto TSANStoreDescriptor::registers() {
return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
}
// static
- constexpr auto TSANRelaxedLoadDescriptor::registers() {
+ constexpr auto TSANLoadDescriptor::registers() {
return RegisterArray(arg_reg_1, kReturnRegister0);
}
#endif  // V8_IS_TSAN
......
@@ -494,26 +494,27 @@ void TurboAssembler::CallRecordWriteStub(
}
#ifdef V8_IS_TSAN
- void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
-                                               SaveFPRegsMode fp_mode, int size,
-                                               StubCallMode mode) {
+ void TurboAssembler::CallTSANStoreStub(Register address, Register value,
+                                        SaveFPRegsMode fp_mode, int size,
+                                        StubCallMode mode,
+                                        std::memory_order order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value));
- TSANRelaxedStoreDescriptor descriptor;
+ TSANStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
Register value_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
- // Prepare argument registers for calling GetTSANRelaxedStoreStub.
+ // Prepare argument registers for calling GetTSANStoreStub.
MovePair(address_parameter, address, value_parameter, value);
if (isolate()) {
- Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
+ Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
@@ -531,7 +532,7 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
- auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
+ auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif  // V8_ENABLE_WEBASSEMBLY
@@ -542,13 +543,13 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
- TSANRelaxedLoadDescriptor descriptor;
+ TSANLoadDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
// Prepare argument registers for calling TSANRelaxedLoad.
Move(address_parameter, address);
......
@@ -526,9 +526,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
#ifdef V8_IS_TSAN
- void CallTSANRelaxedStoreStub(Register address, Register value,
-                               SaveFPRegsMode fp_mode, int size,
-                               StubCallMode mode);
+ void CallTSANStoreStub(Register address, Register value,
+                        SaveFPRegsMode fp_mode, int size, StubCallMode mode,
+                        std::memory_order order);
void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
int size, StubCallMode mode);
#endif  // V8_IS_TSAN
......
@@ -324,11 +324,124 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
template <std::memory_order order>
void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
MachineRepresentation rep) {
if (order == std::memory_order_relaxed) {
switch (rep) {
case MachineRepresentation::kWord8:
tasm->movb(operand, value);
break;
case MachineRepresentation::kWord16:
tasm->movw(operand, value);
break;
case MachineRepresentation::kWord32:
tasm->movl(operand, value);
break;
case MachineRepresentation::kWord64:
tasm->movq(operand, value);
break;
case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
}
return;
}
DCHECK_EQ(order, std::memory_order_seq_cst);
switch (rep) {
case MachineRepresentation::kWord8:
tasm->movq(kScratchRegister, value);
tasm->xchgb(kScratchRegister, operand);
break;
case MachineRepresentation::kWord16:
tasm->movq(kScratchRegister, value);
tasm->xchgw(kScratchRegister, operand);
break;
case MachineRepresentation::kWord32:
tasm->movq(kScratchRegister, value);
tasm->xchgl(kScratchRegister, operand);
break;
case MachineRepresentation::kWord64:
tasm->movq(kScratchRegister, value);
tasm->xchgq(kScratchRegister, operand);
break;
case MachineRepresentation::kTagged:
tasm->AtomicStoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
}
}
template <std::memory_order order>
void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
MachineRepresentation rep);
template <>
void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
Immediate value,
MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
tasm->movb(operand, value);
break;
case MachineRepresentation::kWord16:
tasm->movw(operand, value);
break;
case MachineRepresentation::kWord32:
tasm->movl(operand, value);
break;
case MachineRepresentation::kWord64:
tasm->movq(operand, value);
break;
case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
}
}
#ifdef V8_IS_TSAN
- class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
+ void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
+                                            Register scratch, Operand operand,
+                                            StubCallMode mode, int size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
// The wasm OOB trap handler needs to be able to look up the faulting
// instruction pointer to handle the SIGSEGV raised by an OOB access. It
// will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
// redundant load here to give the trap handler a chance to handle any
// OOB SIGSEGVs.
if (trap_handler::IsTrapHandlerEnabled() &&
mode == StubCallMode::kCallWasmRuntimeStub) {
switch (size) {
case kInt8Size:
tasm->movb(scratch, operand);
break;
case kInt16Size:
tasm->movw(scratch, operand);
break;
case kInt32Size:
tasm->movl(scratch, operand);
break;
case kInt64Size:
tasm->movq(scratch, operand);
break;
default:
UNREACHABLE();
}
}
#endif
}
class OutOfLineTSANStore : public OutOfLineCode {
public:
- OutOfLineTSANRelaxedStore(CodeGenerator* gen, Operand operand, Register value,
-                           Register scratch0, StubCallMode stub_mode, int size)
+ OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
+                    Register scratch0, StubCallMode stub_mode, int size,
+                    std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
@@ -337,6 +450,7 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
size_(size),
+ memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
@@ -352,14 +466,15 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
-                             StubCallMode::kCallWasmRuntimeStub);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+                           StubCallMode::kCallWasmRuntimeStub,
+                           memory_order_);
return;
}
#endif  // V8_ENABLE_WEBASSEMBLY
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
-                             StubCallMode::kCallBuiltinPointer);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+                           StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
@@ -370,42 +485,66 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
StubCallMode const stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
int size_;
+ const std::memory_order memory_order_;
Zone* zone_;
};
- void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
-                               TurboAssembler* tasm, Operand operand,
-                               Register value_reg, X64OperandConverter& i,
-                               StubCallMode mode, int size) {
+ void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
+                       Operand operand, Register value_reg,
+                       X64OperandConverter& i, StubCallMode mode, int size,
+                       std::memory_order order) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
- auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
-     codegen, operand, value_reg, scratch0, mode, size);
+ auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
+                                               scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit());
}

- void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
-                               TurboAssembler* tasm, Operand operand,
-                               Immediate value, X64OperandConverter& i,
-                               StubCallMode mode, int size) {
+ template <std::memory_order order>
+ Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
+                               X64OperandConverter& i) {
+   return value;
+ }
+
+ template <std::memory_order order>
+ Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
+                               X64OperandConverter& i);
+
+ template <>
+ Register GetTSANValueRegister<std::memory_order_relaxed>(
+     TurboAssembler* tasm, Immediate value, X64OperandConverter& i) {
+   Register value_reg = i.TempRegister(1);
+   tasm->movq(value_reg, value);
+   return value_reg;
+ }
+
+ template <std::memory_order order, typename ValueT>
+ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+                         TurboAssembler* tasm, Operand operand, ValueT value,
+                         X64OperandConverter& i, StubCallMode stub_call_mode,
+                         MachineRepresentation rep) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
- Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
- EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
-                          size);
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ if (codegen->code_kind() != CodeKind::FOR_TESTING) {
+   int size = ElementSizeInBytes(rep);
+   EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
+                                         stub_call_mode, size);
+   Register value_reg = GetTSANValueRegister<order>(tasm, value, i);
+   EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
+                    size, order);
+ } else {
+   EmitStore<order>(tasm, operand, value, rep);
+ }
}
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
@@ -453,10 +592,10 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
Zone* zone_;
};
- void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
-                              TurboAssembler* tasm, Operand operand,
-                              X64OperandConverter& i, StubCallMode mode,
-                              int size) {
+ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+                                     TurboAssembler* tasm, Operand operand,
+                                     X64OperandConverter& i, StubCallMode mode,
+                                     int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
@@ -472,20 +611,20 @@ void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#else
- void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
-                               TurboAssembler* tasm, Operand operand,
-                               Register value_reg, X64OperandConverter& i,
-                               StubCallMode mode, int size) {}
-
- void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
-                               TurboAssembler* tasm, Operand operand,
-                               Immediate value, X64OperandConverter& i,
-                               StubCallMode mode, int size) {}
+ template <std::memory_order order, typename ValueT>
+ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+                         TurboAssembler* tasm, Operand operand, ValueT value,
+                         X64OperandConverter& i, StubCallMode stub_call_mode,
+                         MachineRepresentation rep) {
+   DCHECK(order == std::memory_order_relaxed ||
+          order == std::memory_order_seq_cst);
+   EmitStore<order>(tasm, operand, value, rep);
+ }
- void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
-                              TurboAssembler* tasm, Operand operand,
-                              X64OperandConverter& i, StubCallMode mode,
-                              int size) {}
+ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+                                     TurboAssembler* tasm, Operand operand,
+                                     X64OperandConverter& i, StubCallMode mode,
+                                     int size) {}
#endif  // V8_IS_TSAN
#if V8_ENABLE_WEBASSEMBLY
@@ -881,6 +1020,15 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (false)
#define ASSEMBLE_SEQ_CST_STORE(rep) \
do { \
Register value = i.InputRegister(0); \
Operand operand = i.MemoryOperand(1); \
EmitTSANAwareStore<std::memory_order_seq_cst>( \
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
rep); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@@ -1306,10 +1454,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
if (arch_opcode == kArchStoreWithWriteBarrier) {
- __ StoreTaggedField(operand, value);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kTagged);
} else {
- DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
- __ AtomicStoreTaggedField(operand, value);
+ DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
+ EmitTSANAwareStore<std::memory_order_seq_cst>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kTagged);
}
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
@@ -1318,9 +1470,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
- // TODO(syg): Support non-relaxed memory orders in TSAN.
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MFence:
@@ -2092,14 +2241,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord8);
} else {
Register value(i.InputRegister(index));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord8);
}
break;
}
@@ -2128,14 +2277,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord16);
} else {
Register value(i.InputRegister(index));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord16);
}
break;
}
@@ -2145,8 +2294,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
__ movl(i.OutputRegister(), i.InputRegister(0));
@@ -2160,14 +2309,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord32);
} else {
Register value(i.InputRegister(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord32);
}
}
break;
@@ -2179,24 +2328,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqCompressTagged: {
@@ -2205,14 +2354,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kTagged);
} else {
Register value(i.InputRegister(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kTagged);
}
break;
}
@@ -2221,21 +2370,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->HasOutput()) {
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord64);
} else {
Register value(i.InputRegister(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
-                          DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+     zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+     MachineRepresentation::kWord64);
}
}
break;
@@ -3982,6 +4131,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
case kAtomicStoreWord8: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord8);
break;
}
case kAtomicStoreWord16: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord16);
break;
}
case kAtomicStoreWord32: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord32);
break;
}
case kX64Word64AtomicStoreWord64: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord64);
break;
}
case kAtomicExchangeInt8: {
DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
@@ -4140,9 +4305,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadInt16:
case kAtomicLoadUint16:
case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
UNREACHABLE();  // Won't be generated by instruction selector.
}
return kSuccess;
@@ -4168,6 +4330,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
+ #undef ASSEMBLE_SEQ_CST_STORE
namespace {
......
@@ -392,6 +392,7 @@ namespace compiler {
V(X64Word64AtomicAndUint64) \
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
......
@@ -418,6 +418,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
+ case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
case kX64Word64AtomicSubUint64:
case kX64Word64AtomicAndUint64:
......
@@ -344,22 +344,22 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
case MachineRepresentation::kWord8:
- return kAtomicExchangeUint8;
+ return kAtomicStoreWord8;
case MachineRepresentation::kWord16:
- return kAtomicExchangeUint16;
+ return kAtomicStoreWord16;
case MachineRepresentation::kWord32:
- return kAtomicExchangeWord32;
+ return kAtomicStoreWord32;
case MachineRepresentation::kWord64:
- return kX64Word64AtomicExchangeUint64;
+ return kX64Word64AtomicStoreWord64;
case MachineRepresentation::kTaggedSigned:   // Fall through.
case MachineRepresentation::kTaggedPointer:  // Fall through.
case MachineRepresentation::kTagged:
- if (COMPRESS_POINTERS_BOOL) return kAtomicExchangeWord32;
- return kX64Word64AtomicExchangeUint64;
+ if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
+ return kX64Word64AtomicStoreWord64;
case MachineRepresentation::kCompressedPointer:  // Fall through.
case MachineRepresentation::kCompressed:
CHECK(COMPRESS_POINTERS_BOOL);
- return kAtomicExchangeWord32;
+ return kAtomicStoreWord32;
default:
UNREACHABLE();
}
@@ -529,8 +529,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
void VisitStoreCommon(InstructionSelector* selector, Node* node,
StoreRepresentation store_rep,
- base::Optional<AtomicMemoryOrder> atomic_order,
- base::Optional<AtomicWidth> atomic_width) {
+ base::Optional<AtomicMemoryOrder> atomic_order) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -562,18 +561,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
arraysize(temps), temps);
- } else if (is_seqcst) {
-   VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(store_rep),
-                       *atomic_width);
} else {
- // Release and non-atomic stores emit MOV.
- // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
- if ((ElementSizeLog2Of(store_rep.representation()) <
-      kSystemPointerSizeLog2) &&
-     value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
-   value = value->InputAt(0);
- }
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
@@ -587,15 +575,40 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif  // V8_IS_TSAN
+ // Release and non-atomic stores emit MOV and sequentially consistent stores
+ // emit XCHG.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ ArchOpcode opcode;
+ AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
-     node, inputs, &input_count, reg_kind);
- InstructionOperand value_operand = g.CanBeImmediate(value)
-                                        ? g.UseImmediate(value)
-                                        : g.UseRegister(value, reg_kind);
- inputs[input_count++] = value_operand;
- ArchOpcode opcode = GetStoreOpcode(store_rep);
+ if (is_seqcst) {
+   // SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
+   // would for XCHG. XCHG can't encode the value as an immediate and has
+   // fewer addressing modes available.
+   inputs[input_count++] = g.UseUniqueRegister(value);
+   inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] =
g.GetEffectiveIndexOperand(index, &addressing_mode);
opcode = GetSeqCstStoreOpcode(store_rep);
} else {
if ((ElementSizeLog2Of(store_rep.representation()) <
kSystemPointerSizeLog2) &&
value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
value = value->InputAt(0);
}
addressing_mode = g.GetEffectiveAddressMemoryOperand(
node, inputs, &input_count, reg_kind);
InstructionOperand value_operand = g.CanBeImmediate(value)
? g.UseImmediate(value)
: g.UseRegister(value, reg_kind);
inputs[input_count++] = value_operand;
opcode = GetStoreOpcode(store_rep);
}
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
@@ -607,7 +620,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStore(Node* node) {
return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
- base::nullopt, base::nullopt);
+ base::nullopt);
}
void InstructionSelector::VisitProtectedStore(Node* node) {
@@ -2779,16 +2792,14 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 4);
- VisitStoreCommon(this, node, params.store_representation(), params.order(),
-                  AtomicWidth::kWord32);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 8);
- VisitStoreCommon(this, node, params.store_representation(), params.order(),
-                  AtomicWidth::kWord64);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
......
@@ -1059,6 +1059,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
case Builtin::kTSANSeqCstStore8IgnoreFP:
case Builtin::kTSANSeqCstStore8SaveFP:
case Builtin::kTSANSeqCstStore16IgnoreFP:
case Builtin::kTSANSeqCstStore16SaveFP:
case Builtin::kTSANSeqCstStore32IgnoreFP:
case Builtin::kTSANSeqCstStore32SaveFP:
case Builtin::kTSANSeqCstStore64IgnoreFP:
case Builtin::kTSANSeqCstStore64SaveFP:
case Builtin::kTSANRelaxedLoad32IgnoreFP:
case Builtin::kTSANRelaxedLoad32SaveFP:
case Builtin::kTSANRelaxedLoad64IgnoreFP:
......
@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore8SaveFP) \
IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore16SaveFP) \
IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore32SaveFP) \
IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
@@ -188,25 +196,47 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
- static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
-                                              int size) {
-   if (size == kInt8Size) {
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
-                : RuntimeStubId::kTSANRelaxedStore8SaveFP;
-   } else if (size == kInt16Size) {
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
-                : RuntimeStubId::kTSANRelaxedStore16SaveFP;
-   } else if (size == kInt32Size) {
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
-                : RuntimeStubId::kTSANRelaxedStore32SaveFP;
-   } else {
-     CHECK_EQ(size, kInt64Size);
-     return fp_mode == SaveFPRegsMode::kIgnore
-                ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
-                : RuntimeStubId::kTSANRelaxedStore64SaveFP;
-   }
- }
+ static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+                                       std::memory_order order) {
+   if (order == std::memory_order_relaxed) {
+     if (size == kInt8Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+                  : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+     } else if (size == kInt16Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+                  : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+     } else if (size == kInt32Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+                  : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+     } else {
+       CHECK_EQ(size, kInt64Size);
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+                  : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+     }
+   } else {
+     DCHECK_EQ(order, std::memory_order_seq_cst);
+     if (size == kInt8Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
+                  : RuntimeStubId::kTSANSeqCstStore8SaveFP;
+     } else if (size == kInt16Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
+                  : RuntimeStubId::kTSANSeqCstStore16SaveFP;
+     } else if (size == kInt32Size) {
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
+                  : RuntimeStubId::kTSANSeqCstStore32SaveFP;
+     } else {
+       CHECK_EQ(size, kInt64Size);
+       return fp_mode == SaveFPRegsMode::kIgnore
+                  ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
+                  : RuntimeStubId::kTSANSeqCstStore64SaveFP;
+     }
+   }
+ }
......