Commit 182c89ab authored by Ulan Degenbaev, committed by Commit Bot

[base] Replace Windows specific atomic ops with std::atomic.

Bug: chromium:757175
Change-Id: I6c566475a730084e8ab35e6f8505a12c466644ff
Reviewed-on: https://chromium-review.googlesource.com/622430
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47461}
parent 1e08466b
......@@ -2402,7 +2402,7 @@ v8_component("v8_libbase") {
"src/base/atomicops.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_x86_msvc.h",
"src/base/atomicops_internals_std.h",
"src/base/base-export.h",
"src/base/bits.cc",
"src/base/bits.h",
......
......@@ -81,12 +81,13 @@ Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Fence" operations have both "Acquire" and "Release"
// semantics. A MemoryFence() has "Fence" semantics, but does no memory access.
// mutexes, and condition-variables. They combine CompareAndSwap(), a load,
// or a store with appropriate memory-ordering instructions. "Acquire"
// operations ensure that no later memory access can be reordered ahead of the
// operation. "Release" operations ensure that no previous memory access can
// be reordered after the operation. "Fence" operations have both "Acquire"
// and "Release" semantics. A SeqCst_MemoryFence() has "Fence" semantics, but
// does no memory access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
......@@ -94,7 +95,7 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
void MemoryFence();
void SeqCst_MemoryFence();
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
......@@ -127,10 +128,10 @@ Atomic64 Acquire_Load(volatile const Atomic64* ptr);
} // namespace v8
#if defined(V8_OS_WIN)
// TODO(hpayer): The MSVC header includes windows.h, which other files end up
// relying on. Fix this as part of crbug.com/559247.
#include "src/base/atomicops_internals_x86_msvc.h"
#include "src/base/atomicops_internals_std.h"
#else
// TODO(ulan): Switch to std version after performance regression with Wheezy
// sysroot is no longer relevant. Debian Wheezy LTS ends on 31st of May 2018.
#include "src/base/atomicops_internals_portable.h"
#endif
......
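A minimal sketch (not part of the commit) of the publish/consume pattern that the Acquire/Release comment in atomicops.h describes, expressed with the API declared in this hunk; the payload and ready names are hypothetical:

// Sketch only: illustrates the Acquire/Release contract from the comment in
// atomicops.h. "payload" and "ready" are made-up variables for illustration.
#include "src/base/atomicops.h"

namespace {
int payload = 0;
v8::base::Atomic32 ready = 0;
}  // namespace

void Producer() {
  payload = 42;                        // plain store of the data
  v8::base::Release_Store(&ready, 1);  // earlier stores cannot move below this
}

void Consumer() {
  // Later loads cannot move above this acquire load.
  if (v8::base::Acquire_Load(&ready) == 1) {
    int observed = payload;  // guaranteed to observe 42
    (void)observed;
  }
}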
......@@ -39,7 +39,7 @@ namespace base {
// This implementation is transitional and maintains the original API for
// atomicops.h.
inline void MemoryFence() {
inline void SeqCst_MemoryFence() {
#if defined(__GLIBCXX__)
// Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
// not defined, leading to the linker complaining about undefined references.
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_
#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_
#include <atomic>
#include "src/base/build_config.h"
namespace v8 {
namespace base {
namespace helper {
template <typename T>
volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
return reinterpret_cast<volatile std::atomic<T>*>(ptr);
}
template <typename T>
volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
}
} // namespace helper
inline void SeqCst_MemoryFence() {
std::atomic_thread_fence(std::memory_order_seq_cst);
}
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_relaxed, std::memory_order_relaxed);
return old_value;
}
inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
std::memory_order_relaxed);
}
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
increment,
std::memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
increment,
std::memory_order_seq_cst);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_acquire, std::memory_order_acquire);
return old_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_release, std::memory_order_relaxed);
return old_value;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
}
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
}
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_acquire);
}
#if defined(V8_HOST_ARCH_64_BIT)
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_relaxed, std::memory_order_relaxed);
return old_value;
}
inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
std::memory_order_relaxed);
}
inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
increment,
std::memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
increment,
std::memory_order_seq_cst);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_acquire, std::memory_order_acquire);
return old_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_release, std::memory_order_relaxed);
return old_value;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_acquire);
}
#endif // defined(V8_HOST_ARCH_64_BIT)
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_STD_H_
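The header comment in atomicops.h says these operations exist mainly for building higher-level primitives such as spinlocks. As a hedged illustration (not code from this commit), a trivial spinlock over the functions defined above could look like:

// Sketch only: a minimal test-and-set spinlock on top of the atomicops API.
#include "src/base/atomicops.h"

class SpinLock {
 public:
  void Lock() {
    // Acquire semantics: operations in the critical section cannot be
    // reordered before the successful compare-and-swap.
    while (v8::base::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the observed previous value is 0 (unlocked).
    }
  }

  void Unlock() {
    // Release semantics: critical-section writes cannot be reordered after
    // the store that frees the lock.
    v8::base::Release_Store(&state_, 0);
  }

 private:
  v8::base::Atomic32 state_ = 0;
};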
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use base/atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#include "src/base/macros.h"
#include "src/base/win32-headers.h"
namespace v8 {
namespace base {
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) +
increment;
}
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void MemoryFence() { MemoryBarrier(); }
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { return *ptr; }
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = InterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return InterlockedExchangeAdd64(
reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) + increment;
}
inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { return *ptr; }
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(_WIN64)
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
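For context on what is being removed: the deleted header relied on x86/x64 behavior (its own comments note that plain volatile stores "work w/o barrier for current Intel chips") plus the Interlocked* intrinsics, while the new header expresses the same intent portably through explicit memory orders. A rough correspondence, derived only from the two files shown in this diff:

// Old MSVC implementation             ->  New std::atomic implementation
// InterlockedCompareExchange          ->  compare_exchange_strong_explicit (relaxed/acquire/release variants)
// InterlockedExchange                 ->  atomic_exchange_explicit(memory_order_relaxed)
// InterlockedExchangeAdd              ->  atomic_fetch_add_explicit (seq_cst for Barrier_AtomicIncrement)
// MemoryBarrier() in MemoryFence()    ->  atomic_thread_fence(seq_cst) in SeqCst_MemoryFence()
// plain volatile store / load         ->  atomic_store_explicit(release) / atomic_load_explicit(acquire)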
......@@ -14,7 +14,7 @@ void Bitmap::Clear() {
}
// This fence prevents re-ordering of publishing stores with the mark-bit
// clearing stores.
base::MemoryFence();
base::SeqCst_MemoryFence();
}
void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
......@@ -40,7 +40,7 @@ void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
}
// This fence prevents re-ordering of publishing stores with the mark-
// bit setting stores.
base::MemoryFence();
base::SeqCst_MemoryFence();
}
void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
......@@ -68,7 +68,7 @@ void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
}
// This fence prevents re-ordering of publishing stores with the mark-
// bit clearing stores.
base::MemoryFence();
base::SeqCst_MemoryFence();
}
bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
......
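A minimal sketch (assumed names, not code from this commit) of the pattern the Bitmap hunks above rely on: the mark-bit cells are written with relaxed stores, and the full fence keeps those stores from being reordered past any later store that publishes the data to other threads.

// Sketch only. "cells" and "published" are hypothetical stand-ins for the
// bitmap cells and for whatever store later publishes the chunk.
#include <cstddef>
#include "src/base/atomicops.h"

void ClearAndPublish(v8::base::Atomic32* cells, size_t count,
                     v8::base::Atomic32* published) {
  for (size_t i = 0; i < count; i++) {
    v8::base::Relaxed_Store(&cells[i], 0);  // clear mark bits, no ordering
  }
  // Full fence: the relaxed clearing stores above cannot be reordered past
  // the publishing store below.
  v8::base::SeqCst_MemoryFence();
  v8::base::Release_Store(published, 1);
}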
......@@ -523,7 +523,7 @@ Heap* MemoryChunk::synchronized_heap() {
}
void MemoryChunk::InitializationMemoryFence() {
base::MemoryFence();
base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
// Since TSAN does not process memory fences, we use the following annotation
// to tell TSAN that there is no data race when emitting a
......
......@@ -24,7 +24,7 @@ SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::Peek() {
base::MemoryFence();
base::SeqCst_MemoryFence();
if (base::Acquire_Load(&dequeue_pos_->marker) == kFull) {
return &dequeue_pos_->record;
}
......@@ -41,7 +41,7 @@ void SamplingCircularQueue<T, L>::Remove() {
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::StartEnqueue() {
base::MemoryFence();
base::SeqCst_MemoryFence();
if (base::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
return &enqueue_pos_->record;
}
......
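As a rough illustration of the marker protocol visible in the queue hunk above, here is a simplified single-slot version with a hypothetical Slot type (not the actual SamplingCircularQueue; the real code additionally issues SeqCst_MemoryFence() before inspecting the marker):

// Sketch only: a single-slot version of the marker handoff used by
// SamplingCircularQueue. "Slot" and its fields are made up for illustration.
#include "src/base/atomicops.h"

struct Slot {
  v8::base::Atomic32 marker;  // kEmpty or kFull
  int record;                 // payload guarded by the marker
};

const v8::base::Atomic32 kEmpty = 0;
const v8::base::Atomic32 kFull = 1;

int* TryPeek(Slot* slot) {
  // Acquire-load the marker: if the producer release-stored kFull, the
  // record written before that store is visible here.
  if (v8::base::Acquire_Load(&slot->marker) == kFull) {
    return &slot->record;
  }
  return nullptr;
}

void Enqueue(Slot* slot, int value) {
  slot->record = value;
  // Publish the record; pairs with the Acquire_Load in TryPeek.
  v8::base::Release_Store(&slot->marker, kFull);
}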
......@@ -1848,7 +1848,7 @@
'base/atomicops.h',
'base/atomicops_internals_atomicword_compat.h',
'base/atomicops_internals_portable.h',
'base/atomicops_internals_x86_msvc.h',
'base/atomicops_internals_std.h',
'base/base-export.h',
'base/bits.cc',
'base/bits.h',
......