Commit b83b8b9e authored by jarin@chromium.org

Atomic ops: Sync with Chromium and add unit test.

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/129813008

Patch from Cosmin Truta <ctruta@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19738 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c981914d
src/atomicops.h
@@ -51,6 +51,15 @@
#include "../include/v8.h"
#include "globals.h"
#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif
namespace v8 {
namespace internal {
@@ -58,9 +67,7 @@ typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__) || defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
@@ -69,11 +76,7 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(__OpenBSD__) && defined(__i386__)
typedef Atomic32 AtomicWord;
#else
typedef intptr_t AtomicWord;
#endif
// Atomically execute:
// result = *ptr;
@@ -155,18 +158,24 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
#elif defined(__APPLE__)
#include "atomicops_internals_mac.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_A64
#include "atomicops_internals_a64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(__APPLE__) || defined(__OpenBSD__)
#include "atomicops_internals_atomicword_compat.h"
#endif
#endif // V8_ATOMICOPS_H_
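For orientation, here is a minimal sketch of how client code drives this interface. The calls are the ones declared in this header; the wrapper function and the concrete values are hypothetical:

```cpp
#include "atomicops.h"

using namespace v8::internal;

// Hypothetical driver: the same calls compile on every supported platform,
// because atomicops.h selects the matching internals header at preprocessing
// time.
void AtomicOpsSketch() {
  Atomic32 counter = 0;
  NoBarrier_AtomicIncrement(&counter, 1);  // counter == 1
  Atomic32 prev = NoBarrier_CompareAndSwap(&counter, 1, 42);
  // prev == 1 and counter == 42: the swap succeeded because old_value matched.
  Release_Store(&counter, 7);              // publish with release semantics
  Atomic32 seen = Acquire_Load(&counter);  // observe with acquire semantics
  (void)prev;
  (void)seen;
}
```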
src/atomicops_internals_atomicword_compat.h (new file)
// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
#define V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
// which in turn means int. On some LP32 platforms, intptr_t is an int, but
// on others, it's a long. When AtomicWord and Atomic32 are based on different
// fundamental types, their pointers are incompatible.
//
// This file defines function overloads to allow both AtomicWord and Atomic32
// data to be used with this interface.
//
// On LP64 platforms, AtomicWord and Atomic64 are both always long,
// so this problem doesn't occur.
#if !defined(V8_HOST_ARCH_64_BIT)
namespace v8 {
namespace internal {
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile Atomic32*>(ptr), new_value);
}
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return NoBarrier_AtomicIncrement(
reinterpret_cast<volatile Atomic32*>(ptr), increment);
}
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return Barrier_AtomicIncrement(
reinterpret_cast<volatile Atomic32*>(ptr), increment);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Acquire_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Release_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Acquire_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Release_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return v8::internal::Acquire_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return v8::internal::Release_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
} } // namespace v8::internal
#endif // !defined(V8_HOST_ARCH_64_BIT)
#endif // V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
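To make the problem these shims solve concrete, a standalone sketch follows. The typedefs imitate an LP32 platform whose intptr_t is long, and all names here are illustrative rather than taken from the tree:

```cpp
#include <stdint.h>

typedef int32_t Atomic32;
typedef long AtomicWord;  // stand-in for intptr_t on an LP32 "long" platform

// Suppose only the Atomic32 overload exists, as in the base interface.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }

inline AtomicWord Demo(volatile AtomicWord* w) {
  // return NoBarrier_Load(w);  // error: no conversion from long* to int32_t*
  // The compat overloads above perform exactly this cast for the caller:
  return NoBarrier_Load(reinterpret_cast<volatile const Atomic32*>(w));
}
```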
src/atomicops_internals_x86_macosx.h → src/atomicops_internals_mac.h (renamed)
@@ -27,8 +27,8 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_
#define V8_ATOMICOPS_INTERNALS_MAC_H_
#include <libkern/OSAtomic.h>
@@ -65,7 +65,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
@@ -132,7 +132,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
@@ -146,18 +146,19 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
reinterpret_cast<volatile int64_t*>(ptr)));
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
return OSAtomicAdd64Barrier(increment,
reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
@@ -165,8 +166,8 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
if (OSAtomicCompareAndSwap64Barrier(
old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
@@ -213,89 +214,6 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
// on the Mac, even when they are the same size. We need to explicitly cast
// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return NoBarrier_AtomicIncrement(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return Barrier_AtomicIncrement(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Acquire_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Release_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Acquire_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Release_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return v8::internal::Acquire_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return v8::internal::Release_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
#undef AtomicWordCastType
} } // namespace v8::internal
#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#endif // V8_ATOMICOPS_INTERNALS_MAC_H_
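A note on the const_cast to reinterpret_cast changes above: they follow from the atomicops.h change earlier in this patch. With the __APPLE__ exception gone, Atomic64 is intptr_t (long) on LP64 Mac, while the OSAtomic*64 primitives take int64_t (long long) pointers; the two have the same width but are unrelated types. A small sketch of the distinction, assuming an LP64 target:

```cpp
#include <stdint.h>

// On LP64 Darwin both types are 64 bits wide...
static_assert(sizeof(long) == sizeof(long long), "assumes an LP64 target");

inline void CastDemo(volatile long* p) {  // long stands in for intptr_t
  // ...yet long* and long long* remain unrelated pointer types:
  // volatile long long* q = p;  // would not compile: no implicit conversion
  volatile long long* q = reinterpret_cast<volatile long long*>(p);  // OK
  (void)q;
}
```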
src/atomicops_internals_x86_msvc.h
@@ -33,6 +33,15 @@
#include "checks.h"
#include "win32-headers.h"
#if defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif
namespace v8 {
namespace internal {
@@ -70,8 +79,13 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
// See #undef and note at the top of this file.
__faststorefence();
#else
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
#endif
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
...
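The __faststorefence()/::MemoryBarrier() split above is MSVC-specific. For comparison only (not part of this patch), the same full fence can be written in portable C++11:

```cpp
#include <atomic>

// Sequentially consistent full fence, the portable counterpart of the
// MemoryBarrier() implementation above.
inline void PortableMemoryBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
```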
test/cctest/cctest.gyp
@@ -53,6 +53,7 @@
'test-alloc.cc',
'test-api.cc',
'test-ast.cc',
'test-atomicops.cc',
'test-bignum.cc',
'test-bignum-dtoa.cc',
'test-circular-queue.cc',
...

test/cctest/test-atomicops.cc (new file)
// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "cctest.h"
#include "atomicops.h"
using namespace v8::internal;
#define CHECK_EQU(v1, v2) \
CHECK_EQ(static_cast<int64_t>(v1), static_cast<int64_t>(v2))
#define NUM_BITS(T) (sizeof(T) * 8)
template <class AtomicType>
static void TestAtomicIncrement() {
// For now, we just test the single-threaded execution.
// Use a guard value to make sure that NoBarrier_AtomicIncrement doesn't
// go outside the expected address bounds. This is to test that the
// 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
// machines.
struct {
AtomicType prev_word;
AtomicType count;
AtomicType next_word;
} s;
AtomicType prev_word_value, next_word_value;
memset(&prev_word_value, 0xFF, sizeof(AtomicType));
memset(&next_word_value, 0xEE, sizeof(AtomicType));
s.prev_word = prev_word_value;
s.count = 0;
s.next_word = next_word_value;
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 1), 1);
CHECK_EQU(s.count, 1);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 2), 3);
CHECK_EQU(s.count, 3);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 3), 6);
CHECK_EQU(s.count, 6);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -3), 3);
CHECK_EQU(s.count, 3);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -2), 1);
CHECK_EQU(s.count, 1);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), 0);
CHECK_EQU(s.count, 0);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), -1);
CHECK_EQU(s.count, -1);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -4), -5);
CHECK_EQU(s.count, -5);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 5), 0);
CHECK_EQU(s.count, 0);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
}
template <class AtomicType>
static void TestCompareAndSwap() {
AtomicType value = 0;
AtomicType prev = NoBarrier_CompareAndSwap(&value, 0, 1);
CHECK_EQU(1, value);
CHECK_EQU(0, prev);
// Use a test value that has non-zero bits in both halves, for testing
// the 64-bit implementation on 32-bit platforms.
const AtomicType k_test_val =
(static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
value = k_test_val;
prev = NoBarrier_CompareAndSwap(&value, 0, 5);
CHECK_EQU(k_test_val, value);
CHECK_EQU(k_test_val, prev);
value = k_test_val;
prev = NoBarrier_CompareAndSwap(&value, k_test_val, 5);
CHECK_EQU(5, value);
CHECK_EQU(k_test_val, prev);
}
template <class AtomicType>
static void TestAtomicExchange() {
AtomicType value = 0;
AtomicType new_value = NoBarrier_AtomicExchange(&value, 1);
CHECK_EQU(1, value);
CHECK_EQU(0, new_value);
// Use a test value that has non-zero bits in both halves, for testing
// the 64-bit implementation on 32-bit platforms.
const AtomicType k_test_val =
(static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
value = k_test_val;
new_value = NoBarrier_AtomicExchange(&value, k_test_val);
CHECK_EQU(k_test_val, value);
CHECK_EQU(k_test_val, new_value);
value = k_test_val;
new_value = NoBarrier_AtomicExchange(&value, 5);
CHECK_EQU(5, value);
CHECK_EQU(k_test_val, new_value);
}
template <class AtomicType>
static void TestAtomicIncrementBounds() {
// Test at rollover boundary between int_max and int_min.
AtomicType test_val =
static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 1);
AtomicType value = -1 ^ test_val;
AtomicType new_value = NoBarrier_AtomicIncrement(&value, 1);
CHECK_EQU(test_val, value);
CHECK_EQU(value, new_value);
NoBarrier_AtomicIncrement(&value, -1);
CHECK_EQU(-1 ^ test_val, value);
// Test at 32-bit boundary for 64-bit atomic type.
test_val = static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) / 2);
value = test_val - 1;
new_value = NoBarrier_AtomicIncrement(&value, 1);
CHECK_EQU(test_val, value);
CHECK_EQU(value, new_value);
NoBarrier_AtomicIncrement(&value, -1);
CHECK_EQU(test_val - 1, value);
}
// Return an AtomicType with the value 0xa5a5a5..
template <class AtomicType>
static AtomicType TestFillValue() {
AtomicType val = 0;
memset(&val, 0xa5, sizeof(AtomicType));
return val;
}
// This is a simple sanity check to ensure that values are correct.
// Not testing atomicity.
template <class AtomicType>
static void TestStore() {
const AtomicType kVal1 = TestFillValue<AtomicType>();
const AtomicType kVal2 = static_cast<AtomicType>(-1);
AtomicType value;
NoBarrier_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
NoBarrier_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
Acquire_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
Acquire_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
Release_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
Release_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
}
// This is a simple sanity check to ensure that values are correct.
// Not testing atomicity.
template <class AtomicType>
static void TestLoad() {
const AtomicType kVal1 = TestFillValue<AtomicType>();
const AtomicType kVal2 = static_cast<AtomicType>(-1);
AtomicType value;
value = kVal1;
CHECK_EQU(kVal1, NoBarrier_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, NoBarrier_Load(&value));
value = kVal1;
CHECK_EQU(kVal1, Acquire_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, Acquire_Load(&value));
value = kVal1;
CHECK_EQU(kVal1, Release_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, Release_Load(&value));
}
TEST(AtomicIncrement) {
TestAtomicIncrement<Atomic32>();
TestAtomicIncrement<AtomicWord>();
}
TEST(CompareAndSwap) {
TestCompareAndSwap<Atomic32>();
TestCompareAndSwap<AtomicWord>();
}
TEST(AtomicExchange) {
TestAtomicExchange<Atomic32>();
TestAtomicExchange<AtomicWord>();
}
TEST(AtomicIncrementBounds) {
TestAtomicIncrementBounds<Atomic32>();
TestAtomicIncrementBounds<AtomicWord>();
}
TEST(Store) {
TestStore<Atomic32>();
TestStore<AtomicWord>();
}
TEST(Load) {
TestLoad<Atomic32>();
TestLoad<AtomicWord>();
}