// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.
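//
// A sketch of typical usage (hypothetical caller code; real callers go
// through the public wrappers declared in atomicops.h, not this header):
//
//   v8::base::Atomic32 ready = 0;
//   v8::base::Release_Store(&ready, 1);          // publish on one thread
//   if (v8::base::Acquire_Load(&ready)) { ... }  // observe on another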

#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_

#include <libkern/OSAtomic.h>

namespace v8 {
namespace base {

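// A compiler-only fence: the empty asm with a "memory" clobber keeps the
// compiler from reordering memory accesses across it, but emits no CPU
// barrier instruction.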
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

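// OSMemoryBarrier() from <libkern/OSAtomic.h> issues a full hardware
// memory barrier.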
inline void MemoryBarrier() { OSMemoryBarrier(); }

inline void AcquireMemoryBarrier() {
// On x86 processors, loads already have acquire semantics, so
// there is no need to put a full barrier here.
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
  ATOMICOPS_COMPILER_BARRIER();
#else
  MemoryBarrier();
#endif
}

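// OSAtomicCompareAndSwap32 reports only success or failure, while this
// interface must return the value observed at *ptr. On failure we reread
// *ptr; the loop retries in case another thread wrote old_value back
// between the failed swap and the reread.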
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

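// Exchange is emulated with a compare-and-swap retry loop, presumably
// because libkern offers no plain 32-bit atomic-exchange primitive.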
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

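// OSAtomicAdd32 atomically adds and returns the new (post-increment)
// value, matching the return-value contract of AtomicIncrement in
// atomicops.h.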
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
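  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.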
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

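// Acquire_Store performs the store and then a full barrier; Release_Store
// below issues the barrier first, so that all earlier writes are visible
// before the new value is published.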
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

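// Acquire_Load reads the value and then applies an acquire barrier, so
// that later memory operations cannot be reordered ahead of the load.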
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  AcquireMemoryBarrier();
  return value;
}

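// Release_Load issues a full barrier before the load, so that earlier
// memory operations complete before the value is read.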
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform
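// These mirror the 32-bit implementations above; the reinterpret_casts
// adapt Atomic64* to the int64_t* expected by the 64-bit libkern
// primitives.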

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  AcquireMemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

#undef ATOMICOPS_COMPILER_BARRIER
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_