// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_

namespace v8 {
namespace base {

inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
}
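
// For intuition: "dmb ish" is a full data memory barrier over the inner
// shareable domain; a sequentially consistent C++11 fence compiles to the
// same instruction on this architecture:
//
//   #include <atomic>
//   std::atomic_thread_fence(std::memory_order_seq_cst);  // emits "dmb ish"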

// NoBarrier versions of the operations include "memory" in the clobber list.
// This is not required for direct usage of the NoBarrier versions of the
// operations. However, it is required for correctness when they are used as
// part of the Acquire or Release versions, to ensure that nothing from outside
// the call is reordered between the operation and the memory barrier. This
// does not change the generated code, so it has little or no impact on the
// NoBarrier operations.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}
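
// Illustrative usage (hypothetical caller code, not part of this header):
// claim a free slot by installing a nonzero owner id only if the slot still
// reads 0. The function returns the value observed before the attempt, so a
// return of 0 means the swap took effect:
//
//   Atomic32 slot = 0;    // 0 means "free".
//   Atomic32 my_id = 42;  // Hypothetical owner id.
//   if (NoBarrier_CompareAndSwap(&slot, 0, my_id) == 0) {
//     // We won the race; slot now holds my_id.
//   }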

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}
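
// Illustrative usage (hypothetical names, not part of this header):
// atomically take a pending value, leaving 0 behind, and observe what was
// there before:
//
//   Atomic32 pending = 7;
//   Atomic32 taken = NoBarrier_AtomicExchange(&pending, 0);
//   // taken == 7, and pending == 0 for the next taker.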

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                       \n\t"
    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}
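
// Illustrative usage (hypothetical counter, not part of this header): note
// that the returned value is the incremented result, not the previous value:
//
//   Atomic32 counter = 0;
//   Atomic32 now = NoBarrier_AtomicIncrement(&counter, 1);  // now == 1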

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 result;

  MemoryBarrier();
  result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}
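
// Illustrative usage (hypothetical, not part of this header): the barriers
// on both sides order the increment against the caller's other memory
// accesses in both directions:
//
//   Atomic32 ready_count = 0;
//   // Writes made before this line are visible before the count changes.
//   Barrier_AtomicIncrement(&ready_count, 1);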

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;

  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}
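
// Illustrative usage (a minimal spinlock sketch, not part of this header):
// the barrier after a successful swap keeps the critical section from being
// reordered before the lock is acquired:
//
//   Atomic32 lock = 0;  // 0 = unlocked, 1 = held.
//   while (Acquire_CompareAndSwap(&lock, 0, 1) != 0) {
//     // Spin until the lock reads unlocked.
//   }
//   // ... critical section ...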

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;

  MemoryBarrier();
  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}
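
// Illustrative usage (continuing the spinlock sketch above): the barrier
// before the swap makes the critical section's writes visible before the
// lock can be observed as free again:
//
//   Release_CompareAndSwap(&lock, 1, 0);  // Unlock only if we still hold it.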

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %w[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}
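
// Illustrative usage (a hypothetical producer, not part of this header):
// stlr is a store-release, so earlier writes become visible no later than
// the flag:
//
//   Atomic32 data = 0;
//   Atomic32 ready = 0;
//   NoBarrier_Store(&data, 123);  // Payload first...
//   Release_Store(&ready, 1);     // ...then publish the flag.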

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %w[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}
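
// Illustrative usage (the consumer side of the producer sketch above): ldar
// is a load-acquire, so the payload read cannot be hoisted above the flag
// check:
//
//   if (Acquire_Load(&ready) == 1) {
//     Atomic32 payload = NoBarrier_Load(&data);  // Observes 123.
//   }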

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], %[ptr]                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], %[ptr]                 \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], %[ptr]       \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 result;

  MemoryBarrier();
  result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;

  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;

  MemoryBarrier();
  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %x[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %x[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_