Commit 01b06e99 authored by Shu-yu Guo, committed by V8 LUCI CQ

[compiler] Add TSAN seq cst store support for generated code

This CL does the following for x64:

- Adds seq cst TSAN helpers.

- Refactors codegen's handling of TSAN helpers to also support
  seq cst accesses.

- Performs stores only once instead of twice under TSAN, since
  duplicating stores is unsound. Previously this was "fine"
  because all duplicated stores were relaxed. SeqCst stores,
  however, are used for synchronization, and duplicating them
  breaks the synchronization.

Bug: v8:7790, v8:11600, v8:11995
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Change-Id: I43071b0ed516cb0917a10f3b2b9861d74edca041
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3103308
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Adam Klein <adamk@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76612}
parent 3926d6cd
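To make the unsoundness argument in the message concrete, here is a minimal sketch (not part of this CL) of a hand-off protocol in which the producer's SeqCst store must happen exactly once. If codegen performed the store twice, once inside the TSAN helper and once as the real store, the second copy could land after the consumer's reset and re-publish the flag.

#include <atomic>

std::atomic<int> data{0};
std::atomic<bool> ready{false};

void Producer() {
  data.store(1, std::memory_order_relaxed);
  // The synchronizing store: must be performed exactly once.
  ready.store(true, std::memory_order_seq_cst);
}

void Consumer() {
  // Consume-and-reset. If the producer's store were duplicated, the second
  // copy could land after this exchange and make the hand-off fire twice.
  if (ready.exchange(false, std::memory_order_seq_cst)) {
    (void)data.load(std::memory_order_relaxed);
  }
}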
......@@ -191,11 +191,31 @@ inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_release);
}
inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
......@@ -279,6 +299,11 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::memory_order_release);
}
inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
......
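As a usage sketch (assuming the v8::base namespace and this file's include path; not part of the diff), the new helpers mirror the existing Release_Store family:

#include "src/base/atomicops.h"

void PublishFlag(v8::base::Atomic32* flag) {
  // Identical to std::atomic_store_explicit(..., std::memory_order_seq_cst)
  // on the corresponding std::atomic<int32_t>.
  v8::base::SeqCst_Store(flag, 1);
}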
......@@ -41,20 +41,28 @@ namespace internal {
TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
\
/* TSAN support for stores in generated code.*/ \
-  IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANRelaxedStore)   \
-  IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANRelaxedStore)     \
-  IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANRelaxedStore)  \
-  IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANRelaxedStore)    \
-  IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANRelaxedStore)  \
-  IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANRelaxedStore)    \
-  IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANRelaxedStore)  \
-  IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANRelaxedStore)    \
+  IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore)          \
+  IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANStore)            \
+  IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANStore)         \
+  IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANStore)           \
+  IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANStore)         \
+  IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANStore)           \
+  IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANStore)         \
+  IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANStore)           \
+  IF_TSAN(TFC, TSANSeqCstStore8IgnoreFP, TSANStore)           \
+  IF_TSAN(TFC, TSANSeqCstStore8SaveFP, TSANStore)             \
+  IF_TSAN(TFC, TSANSeqCstStore16IgnoreFP, TSANStore)          \
+  IF_TSAN(TFC, TSANSeqCstStore16SaveFP, TSANStore)            \
+  IF_TSAN(TFC, TSANSeqCstStore32IgnoreFP, TSANStore)          \
+  IF_TSAN(TFC, TSANSeqCstStore32SaveFP, TSANStore)            \
+  IF_TSAN(TFC, TSANSeqCstStore64IgnoreFP, TSANStore)          \
+  IF_TSAN(TFC, TSANSeqCstStore64SaveFP, TSANStore)            \
\
/* TSAN support for loads in generated code.*/ \
-  IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoad)    \
-  IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANRelaxedLoad)      \
-  IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoad)    \
-  IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANRelaxedLoad)      \
+  IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANLoad)           \
+  IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANLoad)             \
+  IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANLoad)           \
+  IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANLoad)             \
\
/* Adaptor for CPP builtin */ \
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
......
......@@ -439,10 +439,9 @@ class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
-  auto address =
-      UncheckedParameter<IntPtrT>(TSANRelaxedStoreDescriptor::kAddress);
+  auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
   TNode<IntPtrT> value = BitcastTaggedToWord(
-      UncheckedParameter<Object>(TSANRelaxedStoreDescriptor::kValue));
+      UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
......@@ -483,6 +482,73 @@ TF_BUILTIN(TSANRelaxedStore64SaveFP, TSANRelaxedStoreCodeStubAssembler) {
GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}
class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
TNode<ExternalReference> GetExternalReference(int size) {
if (size == kInt8Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_8_bits());
} else if (size == kInt16Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_16_bits());
} else if (size == kInt32Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_32_bits());
} else {
CHECK_EQ(size, kInt64Size);
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_64_bits());
}
}
void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
std::make_pair(MachineType::IntPtr(), value));
Return(UndefinedConstant());
}
};
TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
}
TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
}
TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
}
TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
}
TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
}
TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
}
TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
}
TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
}
class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
......@@ -501,8 +567,7 @@ class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
-  auto address =
-      UncheckedParameter<IntPtrT>(TSANRelaxedLoadDescriptor::kAddress);
+  auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address));
......
......@@ -378,24 +378,47 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#ifdef V8_IS_TSAN
// static
-Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
-  if (size == kInt8Size) {
-    return fp_mode == SaveFPRegsMode::kIgnore
-               ? Builtin::kTSANRelaxedStore8IgnoreFP
-               : Builtin::kTSANRelaxedStore8SaveFP;
-  } else if (size == kInt16Size) {
-    return fp_mode == SaveFPRegsMode::kIgnore
-               ? Builtin::kTSANRelaxedStore16IgnoreFP
-               : Builtin::kTSANRelaxedStore16SaveFP;
-  } else if (size == kInt32Size) {
-    return fp_mode == SaveFPRegsMode::kIgnore
-               ? Builtin::kTSANRelaxedStore32IgnoreFP
-               : Builtin::kTSANRelaxedStore32SaveFP;
+Builtin CodeFactory::GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+                                      std::memory_order order) {
+  if (order == std::memory_order_relaxed) {
+    if (size == kInt8Size) {
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANRelaxedStore8IgnoreFP
+                 : Builtin::kTSANRelaxedStore8SaveFP;
+    } else if (size == kInt16Size) {
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANRelaxedStore16IgnoreFP
+                 : Builtin::kTSANRelaxedStore16SaveFP;
+    } else if (size == kInt32Size) {
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANRelaxedStore32IgnoreFP
+                 : Builtin::kTSANRelaxedStore32SaveFP;
+    } else {
+      CHECK_EQ(size, kInt64Size);
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANRelaxedStore64IgnoreFP
+                 : Builtin::kTSANRelaxedStore64SaveFP;
+    }
   } else {
-    CHECK_EQ(size, kInt64Size);
-    return fp_mode == SaveFPRegsMode::kIgnore
-               ? Builtin::kTSANRelaxedStore64IgnoreFP
-               : Builtin::kTSANRelaxedStore64SaveFP;
+    DCHECK_EQ(order, std::memory_order_seq_cst);
+    if (size == kInt8Size) {
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANSeqCstStore8IgnoreFP
+                 : Builtin::kTSANSeqCstStore8SaveFP;
+    } else if (size == kInt16Size) {
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANSeqCstStore16IgnoreFP
+                 : Builtin::kTSANSeqCstStore16SaveFP;
+    } else if (size == kInt32Size) {
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANSeqCstStore32IgnoreFP
+                 : Builtin::kTSANSeqCstStore32SaveFP;
+    } else {
+      CHECK_EQ(size, kInt64Size);
+      return fp_mode == SaveFPRegsMode::kIgnore
+                 ? Builtin::kTSANSeqCstStore64IgnoreFP
+                 : Builtin::kTSANSeqCstStore64SaveFP;
+    }
   }
 }
......
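A hypothetical call site for the dispatch above (the surrounding context is a sketch; the names come from this CL):

// Selecting the builtin for a 64-bit sequentially consistent store without
// saving FP registers:
Builtin stub = CodeFactory::GetTSANStoreStub(SaveFPRegsMode::kIgnore,
                                             kInt64Size,
                                             std::memory_order_seq_cst);
// stub == Builtin::kTSANSeqCstStore64IgnoreFP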
......@@ -90,7 +90,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
AllocationSiteOverrideMode override_mode);
#ifdef V8_IS_TSAN
-  static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
+  static Builtin GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+                                  std::memory_order order);
static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
#endif // V8_IS_TSAN
};
......
......@@ -1182,7 +1182,7 @@ namespace {
// address, with the same value. This is done in order for TSAN to see these
// stores from generated code.
// Note that {value} is an int64_t irrespective of the store size. This is on
-// purpose to keep the function signatures the same accross stores. The
+// purpose to keep the function signatures the same across stores. The
// static_cast inside the method will ignore the bits which will not be stored.
void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
......@@ -1220,6 +1220,44 @@ void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
#endif // V8_TARGET_ARCH_X64
}
// Same as above, for sequentially consistent stores.
void tsan_seq_cst_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic8*>(addr),
static_cast<base::Atomic8>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_16_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic16*>(addr),
static_cast<base::Atomic16>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic32*>(addr),
static_cast<base::Atomic32>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_64_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic64*>(addr),
static_cast<base::Atomic64>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
// Same as above, for relaxed loads.
base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
......@@ -1247,6 +1285,14 @@ IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
tsan_relaxed_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
tsan_relaxed_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_8_bits,
tsan_seq_cst_store_8_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits,
tsan_seq_cst_store_16_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_32_bits,
tsan_seq_cst_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits,
tsan_seq_cst_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
tsan_relaxed_load_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
......
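The int64_t-everywhere signature described in the comment above works because the static_cast keeps only the low bits that will actually be stored. A self-contained sketch of that truncation (not from the CL):

#include <cstdint>

void TruncationDemo() {
  int64_t value = 0x1122334455667788;
  // Mirrors static_cast<base::Atomic8>(value) and friends in the helpers:
  uint8_t stored8 = static_cast<uint8_t>(value);     // 0x88
  uint16_t stored16 = static_cast<uint16_t>(value);  // 0x7788
  uint32_t stored32 = static_cast<uint32_t>(value);  // 0x55667788
  (void)stored8;
  (void)stored16;
  (void)stored32;
}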
......@@ -274,6 +274,14 @@ class StatsCounter;
"tsan_relaxed_store_function_32_bits") \
IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
"tsan_relaxed_store_function_64_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_8_bits, \
"tsan_seq_cst_store_function_8_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_16_bits, \
"tsan_seq_cst_store_function_16_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_32_bits, \
"tsan_seq_cst_store_function_32_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_64_bits, \
"tsan_seq_cst_store_function_64_bits") \
IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
"tsan_relaxed_load_function_32_bits") \
IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
......
......@@ -111,8 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
-  IF_TSAN(V, TSANRelaxedStore)                 \
-  IF_TSAN(V, TSANRelaxedLoad)                  \
+  IF_TSAN(V, TSANStore)                        \
+  IF_TSAN(V, TSANLoad)                         \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
......@@ -1053,26 +1053,26 @@ class WriteBarrierDescriptor final
};
#ifdef V8_IS_TSAN
-class TSANRelaxedStoreDescriptor final
-    : public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
+class TSANStoreDescriptor final
+    : public StaticCallInterfaceDescriptor<TSANStoreDescriptor> {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
  DEFINE_PARAMETER_TYPES(MachineType::Pointer(),    // kAddress
                         MachineType::AnyTagged())  // kValue
-  DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
+  DECLARE_DESCRIPTOR(TSANStoreDescriptor)
  static constexpr auto registers();
  static constexpr bool kRestrictAllocatableRegisters = true;
};

-class TSANRelaxedLoadDescriptor final
-    : public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
+class TSANLoadDescriptor final
+    : public StaticCallInterfaceDescriptor<TSANLoadDescriptor> {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
  DEFINE_PARAMETER_TYPES(MachineType::Pointer())  // kAddress
-  DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
+  DECLARE_DESCRIPTOR(TSANLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
......
......@@ -43,12 +43,12 @@ constexpr auto WriteBarrierDescriptor::registers() {
#ifdef V8_IS_TSAN
// static
-constexpr auto TSANRelaxedStoreDescriptor::registers() {
+constexpr auto TSANStoreDescriptor::registers() {
  return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
}

// static
-constexpr auto TSANRelaxedLoadDescriptor::registers() {
+constexpr auto TSANLoadDescriptor::registers() {
return RegisterArray(arg_reg_1, kReturnRegister0);
}
#endif // V8_IS_TSAN
......
......@@ -494,26 +494,27 @@ void TurboAssembler::CallRecordWriteStub(
}
#ifdef V8_IS_TSAN
-void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
-                                              SaveFPRegsMode fp_mode, int size,
-                                              StubCallMode mode) {
+void TurboAssembler::CallTSANStoreStub(Register address, Register value,
+                                       SaveFPRegsMode fp_mode, int size,
+                                       StubCallMode mode,
+                                       std::memory_order order) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(address, value));
-  TSANRelaxedStoreDescriptor descriptor;
+  TSANStoreDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();
  MaybeSaveRegisters(registers);
  Register address_parameter(
-      descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
+      descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
  Register value_parameter(
-      descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
+      descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
-  // Prepare argument registers for calling GetTSANRelaxedStoreStub.
+  // Prepare argument registers for calling GetTSANStoreStub.
  MovePair(address_parameter, address, value_parameter, value);
  if (isolate()) {
-    Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
+    Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
......@@ -531,7 +532,7 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
-    auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
+    auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif // V8_ENABLE_WEBASSEMBLY
......@@ -542,13 +543,13 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
-  TSANRelaxedLoadDescriptor descriptor;
+  TSANLoadDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();
  MaybeSaveRegisters(registers);
  Register address_parameter(
-      descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
+      descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
// Prepare argument registers for calling TSANRelaxedLoad.
Move(address_parameter, address);
......
......@@ -526,9 +526,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
#ifdef V8_IS_TSAN
-  void CallTSANRelaxedStoreStub(Register address, Register value,
-                                SaveFPRegsMode fp_mode, int size,
-                                StubCallMode mode);
+  void CallTSANStoreStub(Register address, Register value,
+                         SaveFPRegsMode fp_mode, int size, StubCallMode mode,
+                         std::memory_order order);
void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
int size, StubCallMode mode);
#endif // V8_IS_TSAN
......
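A hypothetical caller of the widened declaration above (the registers and call mode are invented for illustration; only the signature comes from this CL):

// Inside some TurboAssembler member emitting a TSAN-instrumented tagged
// store (sketch):
CallTSANStoreStub(scratch0, value_reg, SaveFPRegsMode::kIgnore, kTaggedSize,
                  StubCallMode::kCallBuiltinPointer,
                  std::memory_order_seq_cst);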
......@@ -392,6 +392,7 @@ namespace compiler {
V(X64Word64AtomicAndUint64) \
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint64) \
V(X64Word64AtomicStoreWord64) \
V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
......
......@@ -418,6 +418,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
case kX64Word64AtomicSubUint64:
case kX64Word64AtomicAndUint64:
......
......@@ -344,22 +344,22 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
    case MachineRepresentation::kWord8:
-      return kAtomicExchangeUint8;
+      return kAtomicStoreWord8;
    case MachineRepresentation::kWord16:
-      return kAtomicExchangeUint16;
+      return kAtomicStoreWord16;
    case MachineRepresentation::kWord32:
-      return kAtomicExchangeWord32;
+      return kAtomicStoreWord32;
    case MachineRepresentation::kWord64:
-      return kX64Word64AtomicExchangeUint64;
+      return kX64Word64AtomicStoreWord64;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
-      if (COMPRESS_POINTERS_BOOL) return kAtomicExchangeWord32;
-      return kX64Word64AtomicExchangeUint64;
+      if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
+      return kX64Word64AtomicStoreWord64;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:
      CHECK(COMPRESS_POINTERS_BOOL);
-      return kAtomicExchangeWord32;
+      return kAtomicStoreWord32;
default:
UNREACHABLE();
}
......@@ -529,8 +529,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
void VisitStoreCommon(InstructionSelector* selector, Node* node,
StoreRepresentation store_rep,
-                      base::Optional<AtomicMemoryOrder> atomic_order,
-                      base::Optional<AtomicWidth> atomic_width) {
+                      base::Optional<AtomicMemoryOrder> atomic_order) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
......@@ -562,18 +561,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
arraysize(temps), temps);
-  } else if (is_seqcst) {
-    VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(store_rep),
-                        *atomic_width);
  } else {
-    // Release and non-atomic stores emit MOV.
-    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
-    if ((ElementSizeLog2Of(store_rep.representation()) <
-         kSystemPointerSizeLog2) &&
-        value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
-      value = value->InputAt(0);
-    }
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
......@@ -587,15 +575,40 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
+  // Release and non-atomic stores emit MOV and sequentially consistent stores
+  // emit XCHG.
+  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+  ArchOpcode opcode;
+  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  size_t input_count = 0;
-  AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
-      node, inputs, &input_count, reg_kind);
-  InstructionOperand value_operand = g.CanBeImmediate(value)
-                                         ? g.UseImmediate(value)
-                                         : g.UseRegister(value, reg_kind);
-  inputs[input_count++] = value_operand;
-  ArchOpcode opcode = GetStoreOpcode(store_rep);
+  if (is_seqcst) {
+    // SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
+    // would for XCHG. XCHG can't encode the value as an immediate and has
+    // fewer addressing modes available.
+    inputs[input_count++] = g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(base);
+    inputs[input_count++] =
+        g.GetEffectiveIndexOperand(index, &addressing_mode);
+    opcode = GetSeqCstStoreOpcode(store_rep);
+  } else {
+    if ((ElementSizeLog2Of(store_rep.representation()) <
+         kSystemPointerSizeLog2) &&
+        value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+      value = value->InputAt(0);
+    }
+    addressing_mode = g.GetEffectiveAddressMemoryOperand(
+        node, inputs, &input_count, reg_kind);
+    InstructionOperand value_operand = g.CanBeImmediate(value)
+                                           ? g.UseImmediate(value)
+                                           : g.UseRegister(value, reg_kind);
+    inputs[input_count++] = value_operand;
+    opcode = GetStoreOpcode(store_rep);
+  }
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
......@@ -607,7 +620,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStore(Node* node) {
return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
-                          base::nullopt, base::nullopt);
+                          base::nullopt);
}
void InstructionSelector::VisitProtectedStore(Node* node) {
......@@ -2779,16 +2792,14 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 4);
-  VisitStoreCommon(this, node, params.store_representation(), params.order(),
-                   AtomicWidth::kWord32);
+  VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 8);
-  VisitStoreCommon(this, node, params.store_representation(), params.order(),
-                   AtomicWidth::kWord64);
+  VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
......
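The MOV-versus-XCHG distinction in VisitStoreCommon follows the standard C++-to-x86-64 mapping linked in the comment. A standalone sketch of what mainstream compilers typically emit for the two orders (not part of the CL):

#include <atomic>

void Stores(std::atomic<long>& x) {
  x.store(1, std::memory_order_release);  // typically a plain MOV
  x.store(1, std::memory_order_seq_cst);  // typically XCHG, which is
                                          // implicitly locked; this is why the
                                          // value cannot be an immediate and
                                          // fewer addressing modes apply.
}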
......@@ -1059,6 +1059,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
case Builtin::kTSANSeqCstStore8IgnoreFP:
case Builtin::kTSANSeqCstStore8SaveFP:
case Builtin::kTSANSeqCstStore16IgnoreFP:
case Builtin::kTSANSeqCstStore16SaveFP:
case Builtin::kTSANSeqCstStore32IgnoreFP:
case Builtin::kTSANSeqCstStore32SaveFP:
case Builtin::kTSANSeqCstStore64IgnoreFP:
case Builtin::kTSANSeqCstStore64SaveFP:
case Builtin::kTSANRelaxedLoad32IgnoreFP:
case Builtin::kTSANRelaxedLoad32SaveFP:
case Builtin::kTSANRelaxedLoad64IgnoreFP:
......
......@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore8SaveFP) \
IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore16SaveFP) \
IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore32SaveFP) \
IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
......@@ -188,25 +196,47 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
-  static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
-                                               int size) {
-    if (size == kInt8Size) {
-      return fp_mode == SaveFPRegsMode::kIgnore
-                 ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
-                 : RuntimeStubId::kTSANRelaxedStore8SaveFP;
-    } else if (size == kInt16Size) {
-      return fp_mode == SaveFPRegsMode::kIgnore
-                 ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
-                 : RuntimeStubId::kTSANRelaxedStore16SaveFP;
-    } else if (size == kInt32Size) {
-      return fp_mode == SaveFPRegsMode::kIgnore
-                 ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
-                 : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+  static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+                                        std::memory_order order) {
+    if (order == std::memory_order_relaxed) {
+      if (size == kInt8Size) {
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+                   : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+      } else if (size == kInt16Size) {
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+                   : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+      } else if (size == kInt32Size) {
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+                   : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+      } else {
+        CHECK_EQ(size, kInt64Size);
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+                   : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+      }
    } else {
-      CHECK_EQ(size, kInt64Size);
-      return fp_mode == SaveFPRegsMode::kIgnore
-                 ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
-                 : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+      DCHECK_EQ(order, std::memory_order_seq_cst);
+      if (size == kInt8Size) {
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
+                   : RuntimeStubId::kTSANSeqCstStore8SaveFP;
+      } else if (size == kInt16Size) {
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
+                   : RuntimeStubId::kTSANSeqCstStore16SaveFP;
+      } else if (size == kInt32Size) {
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
+                   : RuntimeStubId::kTSANSeqCstStore32SaveFP;
+      } else {
+        CHECK_EQ(size, kInt64Size);
+        return fp_mode == SaveFPRegsMode::kIgnore
+                   ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
+                   : RuntimeStubId::kTSANSeqCstStore64SaveFP;
+      }
    }
  }
......