Commit 01b06e99 authored by Shu-yu Guo, committed by V8 LUCI CQ

[compiler] Add TSAN seq cst store support for generated code

This CL does the following for x64:

- Add seq cst TSAN helpers.

- Refactor codegen's handling of TSAN helpers to also support
  seq cst accesses.

- Perform stores only once instead of twice under TSAN, since
  duplicating stores is unsound. Previously this was "fine"
  because all duplicated stores were relaxed. SeqCst stores
  are used for synchronization, however, and duplicating them
  breaks the synchronization (see the sketch below).
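
A minimal sketch of the problem (not part of this CL; the functions and
names below are hypothetical), assuming instrumentation re-executes the
store a second time:

  #include <atomic>

  std::atomic<int> flag{0};

  void writer_a() {
    flag.store(1, std::memory_order_seq_cst);  // the store in the source
    flag.store(1, std::memory_order_seq_cst);  // hypothetical duplicate
  }

  void writer_b() {
    flag.store(2, std::memory_order_seq_cst);
  }

If writer_b's store lands between the two stores of writer_a, the
duplicate overwrites writer_b's value: flag ends up 1 even though
writer_b stored last from the source program's point of view, and code
synchronizing on the value 2 may never observe it. Relaxed duplicates
were tolerated; seq cst stores carry synchronization, so they must be
performed exactly once.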

Bug: v8:7790, v8:11600, v8:11995
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Change-Id: I43071b0ed516cb0917a10f3b2b9861d74edca041
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3103308
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Adam Klein <adamk@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76612}
parent 3926d6cd
......@@ -191,11 +191,31 @@ inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_release);
}
inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
......@@ -279,6 +299,11 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::memory_order_release);
}
inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_seq_cst);
}
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
......
......@@ -41,20 +41,28 @@ namespace internal {
TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
\
/* TSAN support for stores in generated code.*/ \
IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANRelaxedStore) \
IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore8IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore8SaveFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore16IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore16SaveFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore32IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore32SaveFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore64IgnoreFP, TSANStore) \
IF_TSAN(TFC, TSANSeqCstStore64SaveFP, TSANStore) \
\
/* TSAN support for loads in generated code.*/ \
IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoad) \
IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANRelaxedLoad) \
IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoad) \
IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANRelaxedLoad) \
IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANLoad) \
IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANLoad) \
IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANLoad) \
IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANLoad) \
\
/* Adaptor for CPP builtin */ \
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
......
......@@ -439,10 +439,9 @@ class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
auto address =
UncheckedParameter<IntPtrT>(TSANRelaxedStoreDescriptor::kAddress);
auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
UncheckedParameter<Object>(TSANRelaxedStoreDescriptor::kValue));
UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
......@@ -483,6 +482,73 @@ TF_BUILTIN(TSANRelaxedStore64SaveFP, TSANRelaxedStoreCodeStubAssembler) {
GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}
class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
TNode<ExternalReference> GetExternalReference(int size) {
if (size == kInt8Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_8_bits());
} else if (size == kInt16Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_16_bits());
} else if (size == kInt32Size) {
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_32_bits());
} else {
CHECK_EQ(size, kInt64Size);
return ExternalConstant(
ExternalReference::tsan_seq_cst_store_function_64_bits());
}
}
void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
std::make_pair(MachineType::IntPtr(), value));
Return(UndefinedConstant());
}
};
TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
}
TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
}
TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
}
TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
}
TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
}
TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
}
TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
}
TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
}
class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
......@@ -501,8 +567,7 @@ class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
auto address =
UncheckedParameter<IntPtrT>(TSANRelaxedLoadDescriptor::kAddress);
auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address));
......
......@@ -378,7 +378,9 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#ifdef V8_IS_TSAN
// static
Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
Builtin CodeFactory::GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
std::memory_order order) {
if (order == std::memory_order_relaxed) {
if (size == kInt8Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? Builtin::kTSANRelaxedStore8IgnoreFP
......@@ -397,6 +399,27 @@ Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
? Builtin::kTSANRelaxedStore64IgnoreFP
: Builtin::kTSANRelaxedStore64SaveFP;
}
} else {
DCHECK_EQ(order, std::memory_order_seq_cst);
if (size == kInt8Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? Builtin::kTSANSeqCstStore8IgnoreFP
: Builtin::kTSANSeqCstStore8SaveFP;
} else if (size == kInt16Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? Builtin::kTSANSeqCstStore16IgnoreFP
: Builtin::kTSANSeqCstStore16SaveFP;
} else if (size == kInt32Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? Builtin::kTSANSeqCstStore32IgnoreFP
: Builtin::kTSANSeqCstStore32SaveFP;
} else {
CHECK_EQ(size, kInt64Size);
return fp_mode == SaveFPRegsMode::kIgnore
? Builtin::kTSANSeqCstStore64IgnoreFP
: Builtin::kTSANSeqCstStore64SaveFP;
}
}
}
// static
......
......@@ -90,7 +90,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
AllocationSiteOverrideMode override_mode);
#ifdef V8_IS_TSAN
static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
static Builtin GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
std::memory_order order);
static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
#endif // V8_IS_TSAN
};
......
......@@ -1182,7 +1182,7 @@ namespace {
// address, with the same value. This is done in order for TSAN to see these
// stores from generated code.
// Note that {value} is an int64_t irrespective of the store size. This is on
// purpose to keep the function signatures the same accross stores. The
// purpose to keep the function signatures the same across stores. The
// static_cast inside the method will ignore the bits which will not be stored.
void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
......@@ -1220,6 +1220,44 @@ void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
#endif // V8_TARGET_ARCH_X64
}
// Same as above, for sequentially consistent stores.
void tsan_seq_cst_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic8*>(addr),
static_cast<base::Atomic8>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_16_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic16*>(addr),
static_cast<base::Atomic16>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic32*>(addr),
static_cast<base::Atomic32>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
void tsan_seq_cst_store_64_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
base::SeqCst_Store(reinterpret_cast<base::Atomic64*>(addr),
static_cast<base::Atomic64>(value));
#else
UNREACHABLE();
#endif // V8_TARGET_ARCH_X64
}
// Same as above, for relaxed loads.
base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
......@@ -1247,6 +1285,14 @@ IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
tsan_relaxed_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
tsan_relaxed_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_8_bits,
tsan_seq_cst_store_8_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits,
tsan_seq_cst_store_16_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_32_bits,
tsan_seq_cst_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits,
tsan_seq_cst_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
tsan_relaxed_load_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
......
......@@ -274,6 +274,14 @@ class StatsCounter;
"tsan_relaxed_store_function_32_bits") \
IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
"tsan_relaxed_store_function_64_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_8_bits, \
"tsan_seq_cst_store_function_8_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_16_bits, \
"tsan_seq_cst_store_function_16_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_32_bits, \
"tsan_seq_cst_store_function_32_bits") \
IF_TSAN(V, tsan_seq_cst_store_function_64_bits, \
"tsan_seq_cst_store_function_64_bits") \
IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
"tsan_relaxed_load_function_32_bits") \
IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
......
......@@ -111,8 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
IF_TSAN(V, TSANRelaxedStore) \
IF_TSAN(V, TSANRelaxedLoad) \
IF_TSAN(V, TSANStore) \
IF_TSAN(V, TSANLoad) \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
......@@ -1053,26 +1053,26 @@ class WriteBarrierDescriptor final
};
#ifdef V8_IS_TSAN
class TSANRelaxedStoreDescriptor final
: public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
class TSANStoreDescriptor final
: public StaticCallInterfaceDescriptor<TSANStoreDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kAddress
MachineType::AnyTagged()) // kValue
DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
DECLARE_DESCRIPTOR(TSANStoreDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
class TSANRelaxedLoadDescriptor final
: public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
class TSANLoadDescriptor final
: public StaticCallInterfaceDescriptor<TSANLoadDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
DEFINE_PARAMETER_TYPES(MachineType::Pointer()) // kAddress
DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
DECLARE_DESCRIPTOR(TSANLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
......
......@@ -43,12 +43,12 @@ constexpr auto WriteBarrierDescriptor::registers() {
#ifdef V8_IS_TSAN
// static
constexpr auto TSANRelaxedStoreDescriptor::registers() {
constexpr auto TSANStoreDescriptor::registers() {
return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
}
// static
constexpr auto TSANRelaxedLoadDescriptor::registers() {
constexpr auto TSANLoadDescriptor::registers() {
return RegisterArray(arg_reg_1, kReturnRegister0);
}
#endif // V8_IS_TSAN
......
......@@ -494,26 +494,27 @@ void TurboAssembler::CallRecordWriteStub(
}
#ifdef V8_IS_TSAN
void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANStoreStub(Register address, Register value,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
StubCallMode mode,
std::memory_order order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value));
TSANRelaxedStoreDescriptor descriptor;
TSANStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
Register value_parameter(
descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
// Prepare argument registers for calling GetTSANRelaxedStoreStub.
// Prepare argument registers for calling GetTSANStoreStub.
MovePair(address_parameter, address, value_parameter, value);
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
......@@ -531,7 +532,7 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif // V8_ENABLE_WEBASSEMBLY
......@@ -542,13 +543,13 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
TSANRelaxedLoadDescriptor descriptor;
TSANLoadDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
// Prepare argument registers for calling TSANRelaxedLoad.
Move(address_parameter, address);
......
......@@ -526,9 +526,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
#ifdef V8_IS_TSAN
void CallTSANRelaxedStoreStub(Register address, Register value,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode);
void CallTSANStoreStub(Register address, Register value,
SaveFPRegsMode fp_mode, int size, StubCallMode mode,
std::memory_order order);
void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
int size, StubCallMode mode);
#endif // V8_IS_TSAN
......
......@@ -324,11 +324,124 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
template <std::memory_order order>
void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
MachineRepresentation rep) {
if (order == std::memory_order_relaxed) {
switch (rep) {
case MachineRepresentation::kWord8:
tasm->movb(operand, value);
break;
case MachineRepresentation::kWord16:
tasm->movw(operand, value);
break;
case MachineRepresentation::kWord32:
tasm->movl(operand, value);
break;
case MachineRepresentation::kWord64:
tasm->movq(operand, value);
break;
case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
}
return;
}
DCHECK_EQ(order, std::memory_order_seq_cst);
switch (rep) {
case MachineRepresentation::kWord8:
tasm->movq(kScratchRegister, value);
tasm->xchgb(kScratchRegister, operand);
break;
case MachineRepresentation::kWord16:
tasm->movq(kScratchRegister, value);
tasm->xchgw(kScratchRegister, operand);
break;
case MachineRepresentation::kWord32:
tasm->movq(kScratchRegister, value);
tasm->xchgl(kScratchRegister, operand);
break;
case MachineRepresentation::kWord64:
tasm->movq(kScratchRegister, value);
tasm->xchgq(kScratchRegister, operand);
break;
case MachineRepresentation::kTagged:
tasm->AtomicStoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
}
}
template <std::memory_order order>
void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
MachineRepresentation rep);
template <>
void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
Immediate value,
MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
tasm->movb(operand, value);
break;
case MachineRepresentation::kWord16:
tasm->movw(operand, value);
break;
case MachineRepresentation::kWord32:
tasm->movl(operand, value);
break;
case MachineRepresentation::kWord64:
tasm->movq(operand, value);
break;
case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
}
}
#ifdef V8_IS_TSAN
class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
Register scratch, Operand operand,
StubCallMode mode, int size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
// The wasm OOB trap handler needs to be able to look up the faulting
// instruction pointer to handle the SIGSEGV raised by an OOB access. It
// will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
// redundant load here to give the trap handler a chance to handle any
// OOB SIGSEGVs.
if (trap_handler::IsTrapHandlerEnabled() &&
mode == StubCallMode::kCallWasmRuntimeStub) {
switch (size) {
case kInt8Size:
tasm->movb(scratch, operand);
break;
case kInt16Size:
tasm->movw(scratch, operand);
break;
case kInt32Size:
tasm->movl(scratch, operand);
break;
case kInt64Size:
tasm->movq(scratch, operand);
break;
default:
UNREACHABLE();
}
}
#endif
}
class OutOfLineTSANStore : public OutOfLineCode {
public:
OutOfLineTSANRelaxedStore(CodeGenerator* gen, Operand operand, Register value,
Register scratch0, StubCallMode stub_mode, int size)
OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
Register scratch0, StubCallMode stub_mode, int size,
std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
......@@ -337,6 +450,7 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
stub_mode_(stub_mode),
#endif // V8_ENABLE_WEBASSEMBLY
size_(size),
memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
......@@ -352,14 +466,15 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallWasmRuntimeStub);
tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallWasmRuntimeStub,
memory_order_);
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
__ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallBuiltinPointer);
tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
......@@ -370,42 +485,66 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
StubCallMode const stub_mode_;
#endif // V8_ENABLE_WEBASSEMBLY
int size_;
const std::memory_order memory_order_;
Zone* zone_;
};
void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand,
Register value_reg, X64OperandConverter& i,
StubCallMode mode, int size) {
void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
Operand operand, Register value_reg,
X64OperandConverter& i, StubCallMode mode, int size,
std::memory_order order) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
// path. It is not crucial, but it would be nice to remove this if.
if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
// path. It is not crucial, but it would be nice to remove this restriction.
DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
codegen, operand, value_reg, scratch0, mode, size);
auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit());
}
void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand,
Immediate value, X64OperandConverter& i,
StubCallMode mode, int size) {
template <std::memory_order order>
Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
X64OperandConverter& i) {
return value;
}
template <std::memory_order order>
Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
X64OperandConverter& i);
template <>
Register GetTSANValueRegister<std::memory_order_relaxed>(
TurboAssembler* tasm, Immediate value, X64OperandConverter& i) {
Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value);
return value_reg;
}
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
// path. It is not crucial, but it would be nice to remove this if.
if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value);
EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
size);
// path. It is not crucial, but it would be nice to remove this restriction.
if (codegen->code_kind() != CodeKind::FOR_TESTING) {
int size = ElementSizeInBytes(rep);
EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
stub_call_mode, size);
Register value_reg = GetTSANValueRegister<order>(tasm, value, i);
EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
size, order);
} else {
EmitStore<order>(tasm, operand, value, rep);
}
}
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
......@@ -453,7 +592,7 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
Zone* zone_;
};
void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand,
X64OperandConverter& i, StubCallMode mode,
int size) {
......@@ -472,17 +611,17 @@ void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#else
void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand,
Register value_reg, X64OperandConverter& i,
StubCallMode mode, int size) {}
void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand,
Immediate value, X64OperandConverter& i,
StubCallMode mode, int size) {}
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep) {
DCHECK(order == std::memory_order_relaxed ||
order == std::memory_order_seq_cst);
EmitStore<order>(tasm, operand, value, rep);
}
void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand,
X64OperandConverter& i, StubCallMode mode,
int size) {}
......@@ -881,6 +1020,15 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (false)
#define ASSEMBLE_SEQ_CST_STORE(rep) \
do { \
Register value = i.InputRegister(0); \
Operand operand = i.MemoryOperand(1); \
EmitTSANAwareStore<std::memory_order_seq_cst>( \
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
rep); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
......@@ -1306,10 +1454,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
if (arch_opcode == kArchStoreWithWriteBarrier) {
__ StoreTaggedField(operand, value);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged);
} else {
DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
__ AtomicStoreTaggedField(operand, value);
DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
EmitTSANAwareStore<std::memory_order_seq_cst>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged);
}
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
......@@ -1318,9 +1470,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
// TODO(syg): Support non-relaxed memory orders in TSAN.
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MFence:
......@@ -2092,14 +2241,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
__ movb(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt8Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord8);
} else {
Register value(i.InputRegister(index));
__ movb(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt8Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord8);
}
break;
}
......@@ -2128,14 +2277,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
__ movw(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt16Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord16);
} else {
Register value(i.InputRegister(index));
__ movw(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt16Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord16);
}
break;
}
......@@ -2145,7 +2294,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
......@@ -2160,14 +2309,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
__ movl(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt32Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord32);
} else {
Register value(i.InputRegister(index));
__ movl(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt32Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord32);
}
}
break;
......@@ -2179,7 +2328,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
......@@ -2187,7 +2336,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
......@@ -2195,7 +2344,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
......@@ -2205,14 +2354,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
__ StoreTaggedField(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged);
} else {
Register value(i.InputRegister(index));
__ StoreTaggedField(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged);
}
break;
}
......@@ -2221,21 +2370,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->HasOutput()) {
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
__ movq(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt64Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord64);
} else {
Register value(i.InputRegister(index));
__ movq(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt64Size);
EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord64);
}
}
break;
......@@ -3982,6 +4131,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
case kAtomicStoreWord8: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord8);
break;
}
case kAtomicStoreWord16: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord16);
break;
}
case kAtomicStoreWord32: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord32);
break;
}
case kX64Word64AtomicStoreWord64: {
ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord64);
break;
}
case kAtomicExchangeInt8: {
DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
......@@ -4140,9 +4305,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadInt16:
case kAtomicLoadUint16:
case kAtomicLoadWord32:
case kAtomicStoreWord8:
case kAtomicStoreWord16:
case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
}
return kSuccess;
......@@ -4168,6 +4330,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
#undef ASSEMBLE_SEQ_CST_STORE
namespace {
......
......@@ -392,6 +392,7 @@ namespace compiler {
V(X64Word64AtomicAndUint64) \
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint64) \
V(X64Word64AtomicStoreWord64) \
V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
......
......@@ -418,6 +418,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
case kX64Word64AtomicSubUint64:
case kX64Word64AtomicAndUint64:
......
......@@ -344,22 +344,22 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
case MachineRepresentation::kWord8:
return kAtomicExchangeUint8;
return kAtomicStoreWord8;
case MachineRepresentation::kWord16:
return kAtomicExchangeUint16;
return kAtomicStoreWord16;
case MachineRepresentation::kWord32:
return kAtomicExchangeWord32;
return kAtomicStoreWord32;
case MachineRepresentation::kWord64:
return kX64Word64AtomicExchangeUint64;
return kX64Word64AtomicStoreWord64;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
if (COMPRESS_POINTERS_BOOL) return kAtomicExchangeWord32;
return kX64Word64AtomicExchangeUint64;
if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
return kX64Word64AtomicStoreWord64;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
CHECK(COMPRESS_POINTERS_BOOL);
return kAtomicExchangeWord32;
return kAtomicStoreWord32;
default:
UNREACHABLE();
}
......@@ -529,8 +529,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
void VisitStoreCommon(InstructionSelector* selector, Node* node,
StoreRepresentation store_rep,
base::Optional<AtomicMemoryOrder> atomic_order,
base::Optional<AtomicWidth> atomic_width) {
base::Optional<AtomicMemoryOrder> atomic_order) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
......@@ -562,18 +561,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
arraysize(temps), temps);
} else if (is_seqcst) {
VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(store_rep),
*atomic_width);
} else {
// Release and non-atomic stores emit MOV.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
if ((ElementSizeLog2Of(store_rep.representation()) <
kSystemPointerSizeLog2) &&
value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
value = value->InputAt(0);
}
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
......@@ -587,15 +575,40 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
// Release and non-atomic stores emit MOV and sequentially consistent stores
// emit XCHG.
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
ArchOpcode opcode;
AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
if (is_seqcst) {
// SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
// would for XCHG. XCHG can't encode the value as an immediate and has
// fewer addressing modes available.
inputs[input_count++] = g.UseUniqueRegister(value);
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] =
g.GetEffectiveIndexOperand(index, &addressing_mode);
opcode = GetSeqCstStoreOpcode(store_rep);
} else {
if ((ElementSizeLog2Of(store_rep.representation()) <
kSystemPointerSizeLog2) &&
value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
value = value->InputAt(0);
}
addressing_mode = g.GetEffectiveAddressMemoryOperand(
node, inputs, &input_count, reg_kind);
InstructionOperand value_operand = g.CanBeImmediate(value)
? g.UseImmediate(value)
: g.UseRegister(value, reg_kind);
inputs[input_count++] = value_operand;
ArchOpcode opcode = GetStoreOpcode(store_rep);
opcode = GetStoreOpcode(store_rep);
}
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
......@@ -607,7 +620,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitStore(Node* node) {
return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
base::nullopt, base::nullopt);
base::nullopt);
}
void InstructionSelector::VisitProtectedStore(Node* node) {
......@@ -2779,16 +2792,14 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 4);
VisitStoreCommon(this, node, params.store_representation(), params.order(),
AtomicWidth::kWord32);
VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 8);
VisitStoreCommon(this, node, params.store_representation(), params.order(),
AtomicWidth::kWord64);
VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
......
......@@ -1059,6 +1059,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
case Builtin::kTSANSeqCstStore8IgnoreFP:
case Builtin::kTSANSeqCstStore8SaveFP:
case Builtin::kTSANSeqCstStore16IgnoreFP:
case Builtin::kTSANSeqCstStore16SaveFP:
case Builtin::kTSANSeqCstStore32IgnoreFP:
case Builtin::kTSANSeqCstStore32SaveFP:
case Builtin::kTSANSeqCstStore64IgnoreFP:
case Builtin::kTSANSeqCstStore64SaveFP:
case Builtin::kTSANRelaxedLoad32IgnoreFP:
case Builtin::kTSANRelaxedLoad32SaveFP:
case Builtin::kTSANRelaxedLoad64IgnoreFP:
......
......@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore8SaveFP) \
IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore16SaveFP) \
IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore32SaveFP) \
IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
......@@ -188,8 +196,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
int size) {
static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
std::memory_order order) {
if (order == std::memory_order_relaxed) {
if (size == kInt8Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
......@@ -208,6 +217,27 @@ class V8_EXPORT_PRIVATE WasmCode final {
? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
: RuntimeStubId::kTSANRelaxedStore64SaveFP;
}
} else {
DCHECK_EQ(order, std::memory_order_seq_cst);
if (size == kInt8Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
: RuntimeStubId::kTSANSeqCstStore8SaveFP;
} else if (size == kInt16Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
: RuntimeStubId::kTSANSeqCstStore16SaveFP;
} else if (size == kInt32Size) {
return fp_mode == SaveFPRegsMode::kIgnore
? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
: RuntimeStubId::kTSANSeqCstStore32SaveFP;
} else {
CHECK_EQ(size, kInt64Size);
return fp_mode == SaveFPRegsMode::kIgnore
? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
: RuntimeStubId::kTSANSeqCstStore64SaveFP;
}
}
}
static RuntimeStubId GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode,
......