// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The routines exported by this module are subtle.  If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain.  If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative.  You should assume only properties explicitly guaranteed by the
// specifications in this file.  You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break.  If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines.  The Relaxed versions
// are provided when no fences are needed:
//   Relaxed_Store()
//   Relaxed_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//
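// A minimal usage sketch (the variable {g_flag} is hypothetical, not part of
// this header): publish a value with Relaxed_Store and observe it with
// Relaxed_Load when no ordering with surrounding memory operations is
// required, e.g. for statistics counters or dirty flags.
//
//   static v8::base::Atomic32 g_flag = 0;
//   void Signal() { v8::base::Relaxed_Store(&g_flag, 1); }
//   bool Signaled() { return v8::base::Relaxed_Load(&g_flag) != 0; }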

#ifndef V8_BASE_ATOMICOPS_H_
#define V8_BASE_ATOMICOPS_H_

#include <stdint.h>

#include <atomic>

// Small C++ header which defines implementation-specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>

#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/macros.h"

#if defined(V8_OS_STARBOARD)
#include "starboard/atomic.h"
#endif  // V8_OS_STARBOARD

namespace v8 {
namespace base {

#ifdef V8_OS_STARBOARD
using Atomic8 = SbAtomic8;
using Atomic16 = int16_t;
using Atomic32 = SbAtomic32;
#if SB_IS_64_BIT
using Atomic64 = SbAtomic64;
#endif
#else
using Atomic8 = char;
using Atomic16 = int16_t;
using Atomic32 = int32_t;
#if defined(V8_HOST_ARCH_64_BIT)
// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
using Atomic64 = int64_t;
#else
using Atomic64 = intptr_t;
#endif  // defined(__ILP32__)
#endif  // defined(V8_HOST_ARCH_64_BIT)
#endif  // V8_OS_STARBOARD

// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(V8_OS_STARBOARD)
using AtomicWord = SbAtomicPtr;
#else
using AtomicWord = intptr_t;
#endif

namespace helper {
template <typename T>
volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
  return reinterpret_cast<volatile std::atomic<T>*>(ptr);
}
template <typename T>
volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
  return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
}
}  // namespace helper

inline void SeqCst_MemoryFence() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
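
// A sketch of when a standalone seq_cst fence is needed ({x} and {y} are
// hypothetical Atomic32 globals, both initially zero). In this classic
// store-buffering pattern, the fences forbid the store-load reordering that
// relaxed (or even acquire/release) accesses alone would permit:
//
//   // Thread 1:
//   Relaxed_Store(&x, 1);
//   SeqCst_MemoryFence();
//   Atomic32 r1 = Relaxed_Load(&y);
//
//   // Thread 2:
//   Relaxed_Store(&y, 1);
//   SeqCst_MemoryFence();
//   Atomic32 r2 = Relaxed_Load(&x);
//
//   // With both fences in place, r1 == 0 && r2 == 0 cannot be observed.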

// Atomically execute:
//   result = *ptr;
//   if (result == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|.
// Always return the value of |*ptr| before the operation.
// The Relaxed, Acquire, Release, and AcquireRelease variants correspond to
// the standard C++ memory orders of the same names.
inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
                                       Atomic16 old_value, Atomic16 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}
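
// A minimal sketch of a compare-and-swap retry loop built on the routine
// above (the function {Relaxed_SetBits} is hypothetical): the returned old
// value tells the caller whether the swap happened and, if not, what to
// retry with.
//
//   void Relaxed_SetBits(volatile Atomic32* ptr, Atomic32 mask) {
//     Atomic32 old_value = Relaxed_Load(ptr);
//     while (true) {
//       Atomic32 prev =
//           Relaxed_CompareAndSwap(ptr, old_value, old_value | mask);
//       if (prev == old_value) return;  // The swap took effect.
//       old_value = prev;  // Lost a race; retry with the observed value.
//     }
//   }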

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr,
                                      Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}
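
// Note that, unlike std::atomic_fetch_add, the routine above returns the
// *new* value. A usage sketch (the counter is hypothetical):
//
//   static Atomic32 g_live_objects = 0;
//   Atomic32 after_alloc = Relaxed_AtomicIncrement(&g_live_objects, 1);
//   Atomic32 after_free = Relaxed_AtomicIncrement(&g_live_objects, -1);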

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  USE(result);  // Avoid an unused-variable warning.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}
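
// A minimal sketch of release/acquire pairing (the variables are
// hypothetical): a writer fills a plain payload and then publishes it with
// Release_Store; a reader that observes the flag via Acquire_Load is
// guaranteed to also observe the payload write.
//
//   int g_payload = 0;
//   Atomic32 g_ready = 0;
//
//   // Writer thread:
//   g_payload = 42;
//   Release_Store(&g_ready, 1);
//
//   // Reader thread:
//   if (Acquire_Load(&g_ready) == 1) {
//     int value = g_payload;  // Guaranteed to read 42.
//   }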

inline Atomic8 SeqCst_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

inline Atomic32 SeqCst_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic64 SeqCst_AtomicExchange(volatile Atomic64* ptr,
                                      Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
                                              Atomic64 old_value,
                                              Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic64 SeqCst_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

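// Copies {bytes} bytes from {src} to {dst} using relaxed atomic accesses,
// word-at-a-time where alignment allows. As with memcpy, the ranges should
// not overlap; Relaxed_Memmove below handles overlapping ranges.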
inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
                           size_t bytes) {
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(dst++, Relaxed_Load(src++));
    --bytes;
  }
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
      dst += kAtomicWordSize;
      src += kAtomicWordSize;
      bytes -= kAtomicWordSize;
    }
  }
  while (bytes > 0) {
    Relaxed_Store(dst++, Relaxed_Load(src++));
    --bytes;
  }
}
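
// A usage sketch (buffer names hypothetical): copying out of memory that
// another thread may be mutating concurrently, where every access must be
// atomic at least at byte granularity but no ordering is required.
//
//   Relaxed_Memcpy(reinterpret_cast<volatile Atomic8*>(snapshot),
//                  reinterpret_cast<const volatile Atomic8*>(shared_buf),
//                  len);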

inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src,
                            size_t bytes) {
  // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there
  // is no overlap, or if {dst} lies before {src}. A single unsigned
  // comparison checks both: if {dst} is below {src}, the difference below
  // wraps around and is guaranteed to be at least {bytes}.
  if (reinterpret_cast<uintptr_t>(dst) - reinterpret_cast<uintptr_t>(src) >=
      bytes) {
    Relaxed_Memcpy(dst, src, bytes);
    return;
  }

  // Otherwise copy backwards.
  dst += bytes;
  src += bytes;
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      dst -= kAtomicWordSize;
      src -= kAtomicWordSize;
      bytes -= kAtomicWordSize;
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
    }
  }
  while (bytes > 0) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
}

namespace helper {
inline int MemcmpNotEqualFundamental(Atomic8 u1, Atomic8 u2) {
  DCHECK_NE(u1, u2);
  return u1 < u2 ? -1 : 1;
}
inline int MemcmpNotEqualFundamental(AtomicWord u1, AtomicWord u2) {
  DCHECK_NE(u1, u2);
#if defined(V8_TARGET_BIG_ENDIAN)
  return u1 < u2 ? -1 : 1;
#else
  for (size_t i = 0; i < sizeof(AtomicWord); ++i) {
    uint8_t byte1 = u1 & 0xFF;
    uint8_t byte2 = u2 & 0xFF;
    if (byte1 != byte2) return byte1 < byte2 ? -1 : 1;
    u1 >>= 8;
    u2 >>= 8;
  }
  UNREACHABLE();
#endif
}
}  // namespace helper

inline int Relaxed_Memcmp(volatile const Atomic8* s1,
                          volatile const Atomic8* s2, size_t len) {
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (len > 0 &&
         !(IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
           IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize))) {
    Atomic8 u1 = Relaxed_Load(s1++);
    Atomic8 u2 = Relaxed_Load(s2++);
    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
    --len;
  }

  if (IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize)) {
    while (len >= kAtomicWordSize) {
      AtomicWord u1 =
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s1));
      AtomicWord u2 =
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s2));
      if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
      s1 += kAtomicWordSize;
      s2 += kAtomicWordSize;
      len -= kAtomicWordSize;
    }
  }

  while (len > 0) {
    Atomic8 u1 = Relaxed_Load(s1++);
    Atomic8 u2 = Relaxed_Load(s2++);
    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
    --len;
  }

  return 0;
}
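
// The result is negative, zero, or positive, in the spirit of memcmp. A
// usage sketch (buffer names hypothetical):
//
//   int diff = Relaxed_Memcmp(
//       reinterpret_cast<const volatile Atomic8*>(a),
//       reinterpret_cast<const volatile Atomic8*>(b), len);
//   if (diff == 0) {
//     // The buffers matched, byte for byte, at the time of the loads.
//   }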

}  // namespace base
}  // namespace v8

// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(V8_OS_DARWIN) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif

#endif  // V8_BASE_ATOMICOPS_H_