// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
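//
// Under TSan instrumentation, atomic operations are routed through the
// __tsan_* runtime entry points declared below so that the race detector can
// model their memory-ordering effects; raw inline-assembly implementations
// would be invisible to it.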

#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

// This struct is not part of the public API of this module; clients may not
// use it.  (However, clients implicitly do use it at link time by inlining
// these functions.)
// Features of this x86 CPU.  Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
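// Note: the empty asm statement with a "memory" clobber above is a
// compiler-only barrier; it prevents the compiler from reordering memory
// accesses across it but emits no hardware fence instruction.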

extern "C" {
typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif
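
// When __int128 is unavailable, __tsan_atomic128 is only a placeholder
// typedef; the 128-bit entry points below must not be used in that
// configuration (__TSAN_HAS_INT128 is 0).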

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;
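
// These memory orders mirror the C++11 std::memory_order enumerators
// (relaxed, consume, acquire, release, acq_rel, seq_cst) and carry the same
// semantics.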

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
}  // extern "C"

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
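
// The remainder of this header implements the V8 atomicops API on top of the
// __tsan_* interface declared above, so that TSan-instrumented builds see
// properly annotated atomic operations.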

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}
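
// Note: the CompareAndSwap wrappers in this file return the value observed
// in *ptr: old_value if the swap happened, otherwise the conflicting value.
// This works because __tsan_atomicN_compare_exchange_strong writes the
// observed value back through its second argument on failure.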

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}
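
// The AtomicIncrement helpers return the *new* value of *ptr: fetch_add
// returns the previous value, so the increment is added back in.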

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
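
// An "acquire store" has no direct C++11 equivalent, so it is approximated
// here by a relaxed store followed by a full (seq_cst) fence.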

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
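
// Illustrative usage only (not part of this header): a minimal spinlock
// built on the Atomic32 helpers above, assuming a zero-initialized lock word.
//
//   void SpinLock(volatile Atomic32* lock) {
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin until the observed value is 0 and the swap to 1 succeeds.
//     }
//   }
//
//   void SpinUnlock(volatile Atomic32* lock) {
//     Release_Store(lock, 0);
//   }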

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
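
// Like Acquire_Store above, a "release load" is approximated: a full fence
// followed by a relaxed load.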

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_TSAN_H_