mirror of https://github.com/boostorg/atomic.git synced 2026-02-03 08:42:08 +00:00

Merge redesigned library implementation from develop.

This commit is contained in:
Andrey Semashev
2014-05-17 19:33:39 +04:00
67 changed files with 9253 additions and 14513 deletions

View File

@@ -90,7 +90,7 @@ public:
\param value Initial value
Creates and initializes an atomic variable.
*/
atomic(Type value);
explicit atomic(Type value);
/**
\brief Read the current value of the atomic variable
@@ -496,15 +496,14 @@ public:
*/
Type operator--(int);
private:
/** \brief Deleted copy constructor */
atomic(const atomic &);
atomic(const atomic &) = delete;
/** \brief Deleted copy assignment */
void operator=(const atomic &);
const atomic & operator=(const atomic &) = delete;
};
/**
\brief Insert explicit fence
\brief Insert explicit fence for thread synchronization
\param order Memory ordering constraint
Inserts an explicit fence. The exact semantic depends on the
@@ -523,4 +522,26 @@ private:
*/
void atomic_thread_fence(memory_order order);
/**
\brief Insert explicit fence for synchronization with a signal handler
\param order Memory ordering constraint
Inserts an explicit fence to synchronize with a signal handler called within
the context of the same thread. The fence ensures the corresponding operations
around it are complete and/or not started. The exact semantic depends on the
type of fence inserted:
- \c memory_order_relaxed: No operation
- \c memory_order_release: Ensures the operations before the fence are complete
- \c memory_order_acquire or \c memory_order_consume: Ensures the operations
after the fence are not started.
- \c memory_order_acq_rel or \c memory_order_seq_cst: Ensures the operations
around the fence do not cross it.
Note that this call does not affect visibility order of the memory operations
to other threads. It is functionally similar to \c atomic_thread_fence, only
it does not generate any instructions to synchronize hardware threads.
*/
void atomic_signal_fence(memory_order order);
}
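
A minimal sketch (illustrative names, error handling omitted) of how `atomic_signal_fence` orders accesses between a thread and a signal handler executing on that same thread:

    #include <csignal>
    #include <boost/atomic/fences.hpp>

    static int data = 0;                          // written by main, read by the handler
    static volatile std::sig_atomic_t ready = 0;  // flag checked by the handler

    void on_signal(int)
    {
        if (ready)
        {
            // Acquire fence: the read of data below is not started before
            // the flag has been observed.
            boost::atomic_signal_fence(boost::memory_order_acquire);
            // ... data can be read safely here ...
        }
    }

    int main()
    {
        std::signal(SIGINT, on_signal);
        data = 42;
        // Release fence: the compiler may not move the write to data below this point.
        boost::atomic_signal_fence(boost::memory_order_release);
        ready = 1;
        // ... the handler may run at any point after this ...
    }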

View File

@@ -1,5 +1,6 @@
[/
/ Copyright (c) 2009 Helge Bahmann
/ Copyright (c) 2014 Andrey Semashev
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
@@ -7,7 +8,7 @@
[library Boost.Atomic
[quickbook 1.4]
[authors [Bahmann, Helge]]
[authors [Bahmann, Helge][Semashev, Andrey]]
[copyright 2011 Helge Bahmann]
[copyright 2012 Tim Blechmann]
[copyright 2013 Andrey Semashev]
@@ -90,6 +91,7 @@ hierarchies may generally reorder memory references at will.
As a consequence a program such as:
[c++]
int x = 0, y = 0;
thread1:
@@ -339,8 +341,53 @@ on the same or different atomic variables, or use a "stand-alone"
[section:interface Programming interfaces]
[section:configuration Configuration and building]
The library contains header-only and compiled parts. The library is
header-only for lock-free cases but requires a separate binary to
implement the lock-based emulation. Users are able to detect whether
linking to the compiled part is required by checking the
[link atomic.interface.feature_macros feature macros].
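For example, if every macro corresponding to the atomic types a program instantiates reports `2`, the lock-based fallback (and thus the compiled part of the library) is not needed (a sketch):
[c++]

    #include <boost/atomic/capabilities.hpp>

    #if BOOST_ATOMIC_INT_LOCK_FREE == 2 && BOOST_ATOMIC_POINTER_LOCK_FREE == 2
    // atomic<int> and atomic<T*> are always lock-free here; header-only use suffices
    #endif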
The following macros affect library behavior:
[table
[[Macro] [Description]]
[[`BOOST_ATOMIC_NO_CMPXCHG16B`] [Affects 64-bit x86 MSVC builds. When defined,
the library assumes the target CPU does not support the `cmpxchg16b` instruction used
to support 128-bit atomic operations. This is the case with some early 64-bit AMD CPUs;
all Intel CPUs and current AMD CPUs support this instruction. The library does not
perform runtime detection of this instruction, so running the code that uses 128-bit
atomics on such CPUs will result in crashes, unless this macro is defined. Note that
the macro does not affect GCC and compatible compilers because the library infers
this information from the compiler-defined macros.]]
[[`BOOST_ATOMIC_FORCE_FALLBACK`] [When defined, all operations are implemented with locks.
This is mostly used for testing and should not be used in real world projects.]]
[[`BOOST_ATOMIC_DYN_LINK` and `BOOST_ALL_DYN_LINK`] [Control library linking. If defined,
the library assumes dynamic linking, otherwise static. The latter macro affects all Boost
libraries, not just [*Boost.Atomic].]]
[[`BOOST_ATOMIC_NO_LIB` and `BOOST_ALL_NO_LIB`] [Control library auto-linking on Windows.
When defined, disables auto-linking. The latter macro affects all Boost libraries,
not just [*Boost.Atomic].]]
]
Besides macros, it is important to specify the correct compiler options for the target CPU.
With GCC and compatible compilers this affects whether particular atomic operations are
lock-free or not.
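For example, a user program can be built for a specific CPU like this on a GCC-compatible compiler (an illustrative command line; the library name and the need for `BOOST_ATOMIC_DYN_LINK` depend on how Boost was built and linked):
[pre
g++ -O2 -march=core2 -DBOOST_ATOMIC_DYN_LINK my_program.cpp -lboost_atomic
]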
The Boost building process is described in the [@http://www.boost.org/doc/libs/release/more/getting_started/ Getting Started guide].
For example, you can build [*Boost.Atomic] with the following command line:
[pre
bjam --with-atomic variant=release instruction-set=core2 stage
]
[endsect]
[section:interface_memory_order Memory order]
#include <boost/memory_order.hpp>
The enumeration [^boost::memory_order] defines the following
values to represent memory ordering constraints:
@@ -352,7 +399,8 @@ values to represent memory ordering constraints:
operation. This constraint is suitable only when
either a) further operations do not depend on the outcome
of the atomic operation or b) ordering is enforced through
stand-alone `atomic_thread_fence` operations
stand-alone `atomic_thread_fence` operations. The operation on
the atomic value itself is still atomic though.
]]
[[`memory_order_release`] [
Perform `release` operation. Informally speaking,
@@ -365,8 +413,8 @@ values to represent memory ordering constraints:
before this point.
]]
[[`memory_order_consume`] [
Perform `consume` operation. More restrictive (and
usually more efficient) than `memory_order_acquire`
Perform `consume` operation. More relaxed (and
on some architectures more efficient) than `memory_order_acquire`
as it only affects succeeding operations that are
computationally-dependent on the value retrieved from
an atomic variable.
@@ -374,7 +422,7 @@ values to represent memory ordering constraints:
[[`memory_order_acq_rel`] [Perform both `release` and `acquire` operation]]
[[`memory_order_seq_cst`] [
Enforce sequential consistency. Implies `memory_order_acq_rel`, but
additional enforces total order for all operations such qualified.
additionally enforces total order for all operations such qualified.
]]
]
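As an illustration (a minimal sketch with hypothetical variables), a `release` store paired with an `acquire` load guarantees that data written before the store is visible once the load observes the stored value:
[c++]

    boost::atomic<bool> ready(false);
    int data = 0;

    // Thread 1
    data = 42;
    ready.store(true, boost::memory_order_release);

    // Thread 2
    while (!ready.load(boost::memory_order_acquire))
        ; // spin until the release store becomes visible
    assert(data == 42); // the write to data happened-before the load that returned true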
@@ -385,6 +433,8 @@ of the various ordering constraints.
[section:interface_atomic_object Atomic objects]
#include <boost/atomic/atomic.hpp>
[^boost::atomic<['T]>] provides methods for atomically accessing
variables of a suitable type [^['T]]. The type is suitable if
it satisfies one of the following constraints:
@@ -551,6 +601,8 @@ constraint.
[section:interface_fences Fences]
#include <boost/atomic/fences.hpp>
[table
[[Syntax] [Description]]
[
@@ -567,6 +619,8 @@ constraint.
[section:feature_macros Feature testing macros]
#include <boost/atomic/capabilities.hpp>
[*Boost.Atomic] defines a number of macros to allow compile-time
detection whether an atomic data type is implemented using
"true" atomic operations, or whether an internal "lock" is
@@ -617,10 +671,6 @@ sometimes require a lock, and to `2` if they are always lock-free:
[`BOOST_ATOMIC_LLONG_LOCK_FREE`]
[Indicate whether `atomic<long long>` (including signed/unsigned variants) is lock-free]
]
[
[`BOOST_ATOMIC_INT128_LOCK_FREE`]
[Indicate whether `atomic<int128_type>` (including signed/unsigned variants) is lock-free. This macro is a non-standard extension.]
]
[
[`BOOST_ATOMIC_ADDRESS_LOCK_FREE` or `BOOST_ATOMIC_POINTER_LOCK_FREE`]
[Indicate whether `atomic<T *>` is lock-free]
@@ -635,6 +685,36 @@ sometimes require a lock, and to `2` if they are always lock-free:
]
]
In addition to these standard macros, [*Boost.Atomic] also defines a number of extension macros,
which can also be useful. Like the standard ones, these macros are defined to values `0`, `1` and `2`
to indicate whether the corresponding operations are lock-free or not.
[table
[[Macro] [Description]]
[
[`BOOST_ATOMIC_INT8_LOCK_FREE`]
[Indicate whether `atomic<int8_type>` is lock-free.]
]
[
[`BOOST_ATOMIC_INT16_LOCK_FREE`]
[Indicate whether `atomic<int16_type>` is lock-free.]
]
[
[`BOOST_ATOMIC_INT32_LOCK_FREE`]
[Indicate whether `atomic<int32_type>` is lock-free.]
]
[
[`BOOST_ATOMIC_INT64_LOCK_FREE`]
[Indicate whether `atomic<int64_type>` is lock-free.]
]
[
[`BOOST_ATOMIC_INT128_LOCK_FREE`]
[Indicate whether `atomic<int128_type>` is lock-free.]
]
]
In the table above, `intN_type` is a type that fits storage of contiguous `N` bits, suitably aligned for atomic operations.
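For instance, a program can select a lock-free fast path at compile time (a sketch using one of the extension macros above):
[c++]

    #include <boost/atomic/capabilities.hpp>

    #if BOOST_ATOMIC_INT128_LOCK_FREE == 2
    // 128-bit atomics are always lock-free on this target
    #else
    // operations may fall back to locking; pick an alternative strategy if that matters
    #endif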
[endsect]
[endsect]
@@ -684,7 +764,7 @@ limitations that cannot be lifted without compiler support:
cases this may result in less efficient code than a C++11 compiler
could generate.
* [*No interprocess fallback]: using `atomic<T>` in shared memory only works
correctly, if `atomic<T>::is_lock_free() == true`
correctly, if `atomic<T>::is_lock_free() == true`.
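A defensive runtime check before placing an atomic into shared memory might look like this (a sketch; `shm_region` is a hypothetical pointer to a suitably sized and aligned shared memory mapping):
[c++]

    #include <new> // placement new

    typedef boost::atomic< unsigned int > shared_counter_t;
    shared_counter_t* counter = new (shm_region) shared_counter_t(0u);
    if (!counter->is_lock_free())
    {
        // The lock-based fallback uses a process-local lock pool and is not
        // safe across processes; use another IPC mechanism in this case.
    }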
[endsect]
@@ -729,17 +809,14 @@ implementation behaves as expected:
[*Boost.Atomic] has been tested on and is known to work on
the following compilers/platforms:
* gcc 4.x: i386, x86_64, ppc32, ppc64, armv5, armv6, alpha
* Visual Studio Express 2008/Windows XP, i386
If you have an unsupported platform, contact me and I will
work to add support for it.
* gcc 4.x: i386, x86_64, ppc32, ppc64, sparcv9, armv6, alpha
* Visual Studio Express 2008/Windows XP, x86, x64, ARM
[endsect]
[section:acknowledgements Acknowledgements]
* Adam Wulkiewicz created the logo used on the [@https://github.com/boostorg/log GitHub project page]. The logo was taken from his [@https://github.com/awulkiew/boost-logos collection] of Boost logos.
* Adam Wulkiewicz created the logo used on the [@https://github.com/boostorg/atomic GitHub project page]. The logo was taken from his [@https://github.com/awulkiew/boost-logos collection] of Boost logos.
[endsect]

View File

@@ -1,26 +1,26 @@
#ifndef BOOST_ATOMIC_ATOMIC_HPP
#define BOOST_ATOMIC_ATOMIC_HPP
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/atomic.hpp
*
* This header contains definition of \c atomic template and \c atomic_flag.
*/
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#include <boost/atomic/detail/type-classification.hpp>
#include <boost/type_traits/is_signed.hpp>
#if defined(BOOST_MSVC) && BOOST_MSVC < 1400
#include <boost/type_traits/is_integral.hpp>
#include <boost/mpl/and.hpp>
#endif
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/fences.hpp>
#include <boost/atomic/atomic_flag.hpp>
#include <boost/atomic/detail/atomic_template.hpp>
#include <boost/atomic/detail/operations.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -28,205 +28,66 @@
namespace boost {
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
#define BOOST_ATOMIC_CHAR_LOCK_FREE 0
#endif
using atomics::atomic;
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
#define BOOST_ATOMIC_INT_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
#define BOOST_ATOMIC_INT128_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 0
inline void atomic_thread_fence(memory_order)
{
}
#endif
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 0
inline void atomic_signal_fence(memory_order order)
{
atomic_thread_fence(order);
}
#endif
template<typename T>
class atomic :
public atomics::detail::base_atomic<
T,
typename atomics::detail::classify<T>::type,
atomics::detail::storage_size_of<T>::value,
#if !defined(BOOST_MSVC) || BOOST_MSVC >= 1400
boost::is_signed<T>::value
#else
// MSVC 2003 has problems instantiating is_signed on non-integral types
mpl::and_< boost::is_integral<T>, boost::is_signed<T> >::value
#endif
>
{
private:
typedef T value_type;
typedef atomics::detail::base_atomic<
T,
typename atomics::detail::classify<T>::type,
atomics::detail::storage_size_of<T>::value,
#if !defined(BOOST_MSVC) || BOOST_MSVC >= 1400
boost::is_signed<T>::value
#else
// MSVC 2003 has problems instantiating is_signed on non-integral types
mpl::and_< boost::is_integral<T>, boost::is_signed<T> >::value
#endif
> super;
typedef typename super::value_arg_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(atomic(void), BOOST_NOEXCEPT {})
// NOTE: The constructor is made explicit because gcc 4.7 complains that
// operator=(value_arg_type) is considered ambiguous with operator=(atomic const&)
// in assignment expressions, even though conversion to atomic<> is less preferred
// than conversion to value_arg_type.
explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : super(v) {}
value_type operator=(value_arg_type v) volatile BOOST_NOEXCEPT
{
this->store(v);
return v;
}
operator value_type(void) volatile const BOOST_NOEXCEPT
{
return this->load();
}
BOOST_DELETED_FUNCTION(atomic(atomic const&))
BOOST_DELETED_FUNCTION(atomic& operator=(atomic const&) volatile)
};
typedef atomic<char> atomic_char;
typedef atomic<unsigned char> atomic_uchar;
typedef atomic<signed char> atomic_schar;
typedef atomic<uint8_t> atomic_uint8_t;
typedef atomic<int8_t> atomic_int8_t;
typedef atomic<unsigned short> atomic_ushort;
typedef atomic<short> atomic_short;
typedef atomic<uint16_t> atomic_uint16_t;
typedef atomic<int16_t> atomic_int16_t;
typedef atomic<unsigned int> atomic_uint;
typedef atomic<int> atomic_int;
typedef atomic<uint32_t> atomic_uint32_t;
typedef atomic<int32_t> atomic_int32_t;
typedef atomic<unsigned long> atomic_ulong;
typedef atomic<long> atomic_long;
typedef atomic<uint64_t> atomic_uint64_t;
typedef atomic<int64_t> atomic_int64_t;
using atomics::atomic_char;
using atomics::atomic_uchar;
using atomics::atomic_schar;
using atomics::atomic_uint8_t;
using atomics::atomic_int8_t;
using atomics::atomic_ushort;
using atomics::atomic_short;
using atomics::atomic_uint16_t;
using atomics::atomic_int16_t;
using atomics::atomic_uint;
using atomics::atomic_int;
using atomics::atomic_uint32_t;
using atomics::atomic_int32_t;
using atomics::atomic_ulong;
using atomics::atomic_long;
using atomics::atomic_uint64_t;
using atomics::atomic_int64_t;
#ifdef BOOST_HAS_LONG_LONG
typedef atomic<boost::ulong_long_type> atomic_ullong;
typedef atomic<boost::long_long_type> atomic_llong;
using atomics::atomic_ullong;
using atomics::atomic_llong;
#endif
typedef atomic<void*> atomic_address;
typedef atomic<bool> atomic_bool;
typedef atomic<wchar_t> atomic_wchar_t;
using atomics::atomic_address;
using atomics::atomic_bool;
using atomics::atomic_wchar_t;
#if !defined(BOOST_NO_CXX11_CHAR16_T)
typedef atomic<char16_t> atomic_char16_t;
using atomics::atomic_char16_t;
#endif
#if !defined(BOOST_NO_CXX11_CHAR32_T)
typedef atomic<char32_t> atomic_char32_t;
using atomics::atomic_char32_t;
#endif
typedef atomic<int_least8_t> atomic_int_least8_t;
typedef atomic<uint_least8_t> atomic_uint_least8_t;
typedef atomic<int_least16_t> atomic_int_least16_t;
typedef atomic<uint_least16_t> atomic_uint_least16_t;
typedef atomic<int_least32_t> atomic_int_least32_t;
typedef atomic<uint_least32_t> atomic_uint_least32_t;
typedef atomic<int_least64_t> atomic_int_least64_t;
typedef atomic<uint_least64_t> atomic_uint_least64_t;
typedef atomic<int_fast8_t> atomic_int_fast8_t;
typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
typedef atomic<int_fast16_t> atomic_int_fast16_t;
typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
typedef atomic<int_fast32_t> atomic_int_fast32_t;
typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
typedef atomic<int_fast64_t> atomic_int_fast64_t;
typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
typedef atomic<intmax_t> atomic_intmax_t;
typedef atomic<uintmax_t> atomic_uintmax_t;
using atomics::atomic_int_least8_t;
using atomics::atomic_uint_least8_t;
using atomics::atomic_int_least16_t;
using atomics::atomic_uint_least16_t;
using atomics::atomic_int_least32_t;
using atomics::atomic_uint_least32_t;
using atomics::atomic_int_least64_t;
using atomics::atomic_uint_least64_t;
using atomics::atomic_int_fast8_t;
using atomics::atomic_uint_fast8_t;
using atomics::atomic_int_fast16_t;
using atomics::atomic_uint_fast16_t;
using atomics::atomic_int_fast32_t;
using atomics::atomic_uint_fast32_t;
using atomics::atomic_int_fast64_t;
using atomics::atomic_uint_fast64_t;
using atomics::atomic_intmax_t;
using atomics::atomic_uintmax_t;
typedef atomic<std::size_t> atomic_size_t;
typedef atomic<std::ptrdiff_t> atomic_ptrdiff_t;
using atomics::atomic_size_t;
using atomics::atomic_ptrdiff_t;
#if defined(BOOST_HAS_INTPTR_T)
typedef atomic<intptr_t> atomic_intptr_t;
typedef atomic<uintptr_t> atomic_uintptr_t;
using atomics::atomic_intptr_t;
using atomics::atomic_uintptr_t;
#endif
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
#define BOOST_ATOMIC_FLAG_LOCK_FREE 0
class atomic_flag
{
public:
BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(false) {}
} // namespace boost
bool
test_and_set(memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT
{
return v_.exchange(true, order);
}
void
clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
v_.store(false, order);
}
BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
BOOST_DELETED_FUNCTION(atomic_flag& operator=(atomic_flag const&))
private:
atomic<bool> v_;
};
#endif
}
#endif
#endif // BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_

View File

@@ -0,0 +1,33 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/atomic_flag.hpp
*
* This header contains definition of \c atomic_flag.
*/
#ifndef BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
#define BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/operations.hpp>
#include <boost/atomic/detail/atomic_flag.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
using atomics::atomic_flag;
} // namespace boost
#endif // BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_

View File

@@ -0,0 +1,160 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/capabilities.hpp
*
* This header defines feature capabilities macros.
*/
#ifndef BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
#define BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
#if !defined(BOOST_ATOMIC_EMULATED)
#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#ifndef BOOST_ATOMIC_INT8_LOCK_FREE
#define BOOST_ATOMIC_INT8_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT16_LOCK_FREE
#define BOOST_ATOMIC_INT16_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT32_LOCK_FREE
#define BOOST_ATOMIC_INT32_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT64_LOCK_FREE
#define BOOST_ATOMIC_INT64_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
#define BOOST_ATOMIC_INT128_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_INT_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#else
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
#endif
#endif
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_BOOL_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 0
#endif
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 0
#endif
#endif // BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_

View File

@@ -0,0 +1,70 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/atomic_flag.hpp
*
* This header contains interface definition of \c atomic_flag.
*/
#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/operations_lockfree.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
/*
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
* see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
*/
namespace boost {
namespace atomics {
#if defined(BOOST_NO_CXX11_CONSTEXPR) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX)
#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
#else
#define BOOST_ATOMIC_FLAG_INIT {}
#endif
struct atomic_flag
{
typedef atomics::detail::operations< 1u, false > operations;
typedef operations::storage_type storage_type;
storage_type m_storage;
BOOST_FORCEINLINE BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0)
{
}
BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return operations::test_and_set(m_storage, order);
}
BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::clear(m_storage, order);
}
BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&))
};
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_
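
A typical use of `atomic_flag` is a minimal spinlock; the following sketch relies only on the `test_and_set`/`clear` interface shown above (static-duration flags can alternatively be initialized with `BOOST_ATOMIC_FLAG_INIT` where available):

    #include <boost/atomic/atomic_flag.hpp>

    class spinlock
    {
        boost::atomic_flag m_flag; // default-constructed in the clear state

    public:
        void lock()
        {
            while (m_flag.test_and_set(boost::memory_order_acquire))
            {
                // spin until the previous owner clears the flag
            }
        }

        void unlock()
        {
            m_flag.clear(boost::memory_order_release);
        }
    };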

View File

@@ -0,0 +1,774 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/atomic_template.hpp
*
* This header contains interface definition of \c atomic template.
*/
#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
#include <boost/type_traits/is_signed.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/union_cast.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(BOOST_MSVC)
#pragma warning(push)
// 'boost::atomics::atomic<T>' : multiple assignment operators specified
#pragma warning(disable: 4522)
#endif
/*
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
* see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
*/
namespace boost {
namespace atomics {
namespace detail {
BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order order) BOOST_NOEXCEPT
{
return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? memory_order_relaxed : order);
}
BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return ((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume)
&& (failure_order & memory_order_consume) <= (success_order & memory_order_consume);
}
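// Illustration: the failure order deduced from a single combined order (used by
// the compare_exchange overloads below) is:
//   seq_cst -> seq_cst, acq_rel -> acquire, acquire -> acquire,
//   consume -> consume, release -> relaxed, relaxed -> relaxed.
// E.g. compare_exchange_weak(expected, desired, memory_order_acq_rel) behaves as
// compare_exchange_weak(expected, desired, memory_order_acq_rel, memory_order_acquire).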
template< typename T, bool IsInt = boost::is_integral< T >::value >
struct classify
{
typedef void type;
};
template< typename T >
struct classify< T, true > { typedef int type; };
template< typename T >
struct classify< T*, false > { typedef void* type; };
template< typename T, typename Kind >
class base_atomic;
//! Implementation for integers
template< typename T >
class base_atomic< T, int >
{
private:
typedef T value_type;
typedef T difference_type;
typedef atomics::detail::operations< storage_size_of< value_type >::value, boost::is_signed< T >::value > operations;
protected:
typedef value_type value_arg_type;
public:
typedef typename operations::storage_type storage_type;
protected:
storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::store(m_storage, static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return static_cast< value_type >(operations::load(m_storage, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = static_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = static_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
return operations::is_lock_free(m_storage);
}
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
{
return fetch_add(1);
}
BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
{
return fetch_add(1) + 1;
}
BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
{
return fetch_sub(1);
}
BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
{
return fetch_sub(1) - 1;
}
BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
{
return fetch_add(v) + v;
}
BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
{
return fetch_sub(v) - v;
}
BOOST_FORCEINLINE value_type operator&=(value_type v) volatile BOOST_NOEXCEPT
{
return fetch_and(v) & v;
}
BOOST_FORCEINLINE value_type operator|=(value_type v) volatile BOOST_NOEXCEPT
{
return fetch_or(v) | v;
}
BOOST_FORCEINLINE value_type operator^=(value_type v) volatile BOOST_NOEXCEPT
{
return fetch_xor(v) ^ v;
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
};
//! Implementation for bool
template< >
class base_atomic< bool, int >
{
private:
typedef bool value_type;
typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
protected:
typedef value_type value_arg_type;
public:
typedef operations::storage_type storage_type;
protected:
storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::store(m_storage, static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return !!operations::load(m_storage, order);
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return !!operations::exchange(m_storage, static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
return operations::is_lock_free(m_storage);
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
};
//! Implementation for user-defined types, such as structs and enums
template< typename T >
class base_atomic< T, void >
{
private:
typedef T value_type;
typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
protected:
typedef value_type const& value_arg_type;
public:
typedef typename operations::storage_type storage_type;
protected:
storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
{
}
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::union_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::union_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
return operations::is_lock_free(m_storage);
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
};
//! Implementation for pointers
template< typename T >
class base_atomic< T*, void* >
{
private:
typedef T* value_type;
typedef std::ptrdiff_t difference_type;
typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
protected:
typedef value_type value_arg_type;
public:
typedef typename operations::storage_type storage_type;
protected:
storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
{
}
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::union_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::union_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
return operations::is_lock_free(m_storage);
}
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
{
return fetch_add(1);
}
BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
{
return fetch_add(1) + 1;
}
BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
{
return fetch_sub(1);
}
BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
{
return fetch_sub(1) - 1;
}
BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
{
return fetch_add(v) + v;
}
BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
{
return fetch_sub(v) - v;
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
};
//! Implementation for void pointers
template< >
class base_atomic< void*, void* >
{
private:
typedef void* value_type;
typedef std::ptrdiff_t difference_type;
typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
protected:
typedef value_type value_arg_type;
public:
typedef operations::storage_type storage_type;
protected:
storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
{
}
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::union_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::union_cast< value_type >(old_value);
return res;
}
BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
return operations::is_lock_free(m_storage);
}
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
{
return fetch_add(1);
}
BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
{
return (char*)fetch_add(1) + 1;
}
BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
{
return fetch_sub(1);
}
BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
{
return (char*)fetch_sub(1) - 1;
}
BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
{
return (char*)fetch_add(v) + v;
}
BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
{
return (char*)fetch_sub(v) - v;
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
};
} // namespace detail
template< typename T >
class atomic :
public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type >
{
private:
typedef T value_type;
typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type > base_type;
typedef typename base_type::value_arg_type value_arg_type;
public:
typedef typename base_type::storage_type storage_type;
public:
BOOST_DEFAULTED_FUNCTION(atomic(), BOOST_NOEXCEPT {})
// NOTE: The constructor is made explicit because gcc 4.7 complains that
// operator=(value_arg_type) is considered ambiguous with operator=(atomic const&)
// in assignment expressions, even though conversion to atomic<> is less preferred
// than conversion to value_arg_type.
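// For example, with "atomic< int > a;" the assignment "a = 5;" must resolve to
// operator=(value_arg_type); making this constructor explicit removes the competing
// implicit conversion of 5 to atomic< int > that gcc 4.7 would otherwise consider.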
BOOST_FORCEINLINE explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {}
BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT
{
this->store(v);
return v;
}
BOOST_FORCEINLINE operator value_type() volatile const BOOST_NOEXCEPT
{
return this->load();
}
BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage; }
BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage; }
BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage; }
BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage; }
BOOST_DELETED_FUNCTION(atomic(atomic const&))
BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&))
BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile)
};
typedef atomic< char > atomic_char;
typedef atomic< unsigned char > atomic_uchar;
typedef atomic< signed char > atomic_schar;
typedef atomic< uint8_t > atomic_uint8_t;
typedef atomic< int8_t > atomic_int8_t;
typedef atomic< unsigned short > atomic_ushort;
typedef atomic< short > atomic_short;
typedef atomic< uint16_t > atomic_uint16_t;
typedef atomic< int16_t > atomic_int16_t;
typedef atomic< unsigned int > atomic_uint;
typedef atomic< int > atomic_int;
typedef atomic< uint32_t > atomic_uint32_t;
typedef atomic< int32_t > atomic_int32_t;
typedef atomic< unsigned long > atomic_ulong;
typedef atomic< long > atomic_long;
typedef atomic< uint64_t > atomic_uint64_t;
typedef atomic< int64_t > atomic_int64_t;
#ifdef BOOST_HAS_LONG_LONG
typedef atomic< boost::ulong_long_type > atomic_ullong;
typedef atomic< boost::long_long_type > atomic_llong;
#endif
typedef atomic< void* > atomic_address;
typedef atomic< bool > atomic_bool;
typedef atomic< wchar_t > atomic_wchar_t;
#if !defined(BOOST_NO_CXX11_CHAR16_T)
typedef atomic< char16_t > atomic_char16_t;
#endif
#if !defined(BOOST_NO_CXX11_CHAR32_T)
typedef atomic< char32_t > atomic_char32_t;
#endif
typedef atomic< int_least8_t > atomic_int_least8_t;
typedef atomic< uint_least8_t > atomic_uint_least8_t;
typedef atomic< int_least16_t > atomic_int_least16_t;
typedef atomic< uint_least16_t > atomic_uint_least16_t;
typedef atomic< int_least32_t > atomic_int_least32_t;
typedef atomic< uint_least32_t > atomic_uint_least32_t;
typedef atomic< int_least64_t > atomic_int_least64_t;
typedef atomic< uint_least64_t > atomic_uint_least64_t;
typedef atomic< int_fast8_t > atomic_int_fast8_t;
typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
typedef atomic< int_fast16_t > atomic_int_fast16_t;
typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
typedef atomic< int_fast32_t > atomic_int_fast32_t;
typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
typedef atomic< int_fast64_t > atomic_int_fast64_t;
typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
typedef atomic< intmax_t > atomic_intmax_t;
typedef atomic< uintmax_t > atomic_uintmax_t;
typedef atomic< std::size_t > atomic_size_t;
typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
#if defined(BOOST_HAS_INTPTR_T)
typedef atomic< intptr_t > atomic_intptr_t;
typedef atomic< uintptr_t > atomic_uintptr_t;
#endif
} // namespace atomics
} // namespace boost
#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif
#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_
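
A usage sketch of the template above (an illustrative helper, not part of the library): a typical compare-and-swap loop combining `compare_exchange_weak` with explicit success and failure orders.

    #include <boost/atomic.hpp>

    // Atomically raise 'a' to at least 'v'.
    inline void fetch_max(boost::atomic< int >& a, int v)
    {
        int expected = a.load(boost::memory_order_relaxed);
        while (expected < v &&
               !a.compare_exchange_weak(expected, v,
                                        boost::memory_order_release,   // on success: publish the new maximum
                                        boost::memory_order_relaxed))  // on failure: 'expected' is refreshed
        {
            // 'expected' now holds the current value; retry while it is still smaller
        }
    }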

View File

@@ -1,605 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_BASE_HPP
#define BOOST_ATOMIC_DETAIL_BASE_HPP
// Copyright (c) 2009 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Base class definition and fallback implementation.
// To be overridden (through partial specialization) by
// platform implementations.
#include <string.h>
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/lockpool.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
bool \
compare_exchange_strong( \
value_type & expected, \
value_type desired, \
memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT \
{ \
return compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); \
} \
\
bool \
compare_exchange_weak( \
value_type & expected, \
value_type desired, \
memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT \
{ \
return compare_exchange_weak(expected, desired, order, calculate_failure_order(order)); \
} \
#define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
value_type \
operator++(int) volatile BOOST_NOEXCEPT \
{ \
return fetch_add(1); \
} \
\
value_type \
operator++(void) volatile BOOST_NOEXCEPT \
{ \
return fetch_add(1) + 1; \
} \
\
value_type \
operator--(int) volatile BOOST_NOEXCEPT \
{ \
return fetch_sub(1); \
} \
\
value_type \
operator--(void) volatile BOOST_NOEXCEPT \
{ \
return fetch_sub(1) - 1; \
} \
\
value_type \
operator+=(difference_type v) volatile BOOST_NOEXCEPT \
{ \
return fetch_add(v) + v; \
} \
\
value_type \
operator-=(difference_type v) volatile BOOST_NOEXCEPT \
{ \
return fetch_sub(v) - v; \
} \
#define BOOST_ATOMIC_DECLARE_VOID_POINTER_ADDITIVE_OPERATORS \
value_type \
operator++(int) volatile BOOST_NOEXCEPT \
{ \
return fetch_add(1); \
} \
\
value_type \
operator++(void) volatile BOOST_NOEXCEPT \
{ \
return (char*)fetch_add(1) + 1; \
} \
\
value_type \
operator--(int) volatile BOOST_NOEXCEPT \
{ \
return fetch_sub(1); \
} \
\
value_type \
operator--(void) volatile BOOST_NOEXCEPT \
{ \
return (char*)fetch_sub(1) - 1; \
} \
\
value_type \
operator+=(difference_type v) volatile BOOST_NOEXCEPT \
{ \
return (char*)fetch_add(v) + v; \
} \
\
value_type \
operator-=(difference_type v) volatile BOOST_NOEXCEPT \
{ \
return (char*)fetch_sub(v) - v; \
} \
#define BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
value_type \
operator&=(difference_type v) volatile BOOST_NOEXCEPT \
{ \
return fetch_and(v) & v; \
} \
\
value_type \
operator|=(difference_type v) volatile BOOST_NOEXCEPT \
{ \
return fetch_or(v) | v; \
} \
\
value_type \
operator^=(difference_type v) volatile BOOST_NOEXCEPT\
{ \
return fetch_xor(v) ^ v; \
} \
#define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
#define BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_VOID_POINTER_ADDITIVE_OPERATORS \
#define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
namespace boost {
namespace atomics {
namespace detail {
inline memory_order
calculate_failure_order(memory_order order)
{
switch(order)
{
case memory_order_acq_rel:
return memory_order_acquire;
case memory_order_release:
return memory_order_relaxed;
default:
return order;
}
}
template<typename T, typename C, unsigned int Size, bool Sign>
class base_atomic
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef lockpool::scoped_lock guard_type;
protected:
typedef value_type const& value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(v)
{}
void
store(value_type const& v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
char * storage = storage_ptr();
guard_type guard(storage);
memcpy(storage, &v, sizeof(value_type));
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) volatile const BOOST_NOEXCEPT
{
char * storage = storage_ptr();
guard_type guard(storage);
value_type v;
memcpy(&v, storage, sizeof(value_type));
return v;
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile BOOST_NOEXCEPT
{
char * storage = storage_ptr();
guard_type guard(storage);
if (memcmp(storage, &expected, sizeof(value_type)) == 0) {
memcpy(storage, &desired, sizeof(value_type));
return true;
} else {
memcpy(&expected, storage, sizeof(value_type));
return false;
}
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
value_type
exchange(value_type const& v, memory_order /*order*/=memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
char * storage = storage_ptr();
guard_type guard(storage);
value_type tmp;
memcpy(&tmp, storage, sizeof(value_type));
memcpy(storage, &v, sizeof(value_type));
return tmp;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return false;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
char * storage_ptr() volatile const BOOST_NOEXCEPT
{
return const_cast<char *>(&reinterpret_cast<char const volatile &>(v_));
}
T v_;
};
template<typename T, unsigned int Size, bool Sign>
class base_atomic<T, int, Size, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef lockpool::scoped_lock guard_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
v_ = v;
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type v = const_cast<const volatile value_type &>(v_);
return v;
}
value_type
exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ = v;
return old;
}
bool
compare_exchange_strong(value_type & expected, value_type desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
if (v_ == expected) {
v_ = desired;
return true;
} else {
expected = v_;
return false;
}
}
bool
compare_exchange_weak(value_type & expected, value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
value_type
fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ += v;
return old;
}
value_type
fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ -= v;
return old;
}
value_type
fetch_and(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ &= v;
return old;
}
value_type
fetch_or(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ |= v;
return old;
}
value_type
fetch_xor(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ ^= v;
return old;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return false;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
template<typename T, unsigned int Size, bool Sign>
class base_atomic<T *, void *, Size, Sign>
{
private:
typedef base_atomic this_type;
typedef T * value_type;
typedef std::ptrdiff_t difference_type;
typedef lockpool::scoped_lock guard_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
v_ = v;
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type v = const_cast<const volatile value_type &>(v_);
return v;
}
value_type
exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ = v;
return old;
}
bool
compare_exchange_strong(value_type & expected, value_type desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
if (v_ == expected) {
v_ = desired;
return true;
} else {
expected = v_;
return false;
}
}
bool
compare_exchange_weak(value_type & expected, value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
value_type fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ += v;
return old;
}
value_type fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ -= v;
return old;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return false;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
template<unsigned int Size, bool Sign>
class base_atomic<void *, void *, Size, Sign>
{
private:
typedef base_atomic this_type;
typedef std::ptrdiff_t difference_type;
typedef void * value_type;
typedef lockpool::scoped_lock guard_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
v_ = v;
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type v = const_cast<const volatile value_type &>(v_);
return v;
}
value_type
exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ = v;
return old;
}
bool
compare_exchange_strong(value_type & expected, value_type desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
if (v_ == expected) {
v_ = desired;
return true;
} else {
expected = v_;
return false;
}
}
bool
compare_exchange_weak(value_type & expected, value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return false;
}
value_type fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
char * cv = reinterpret_cast<char*>(old);
cv += v;
v_ = cv;
return old;
}
value_type fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
char * cv = reinterpret_cast<char*>(old);
cv -= v;
v_ = cv;
return old;
}
BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
}
}
}
#endif
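
The lock-based fallback above routes every operation through a lock-pool lock and memcpy/memcmp, so any trivially copyable type works even without native atomic support, it just reports itself as not lock-free. A minimal usage sketch against the public boost::atomic<> interface; the color struct and the printed output are illustrative assumptions, not part of the library:

#include <iostream>
#include <boost/atomic.hpp>

// A small user-defined type with no native atomic support. boost::atomic<>
// only requires it to be trivially copyable; the fallback compares values
// with memcmp, as in the base_atomic template above.
struct color { unsigned char r, g, b; };

int main()
{
    boost::atomic< color > c;
    color black = { 0, 0, 0 }, white = { 255, 255, 255 };
    c.store(black);

    // Odd-sized structs typically end up on the lock-based fallback, so this
    // usually prints "false"; the operations remain correct, just not lock-free.
    std::cout << "lock-free: " << std::boolalpha << c.is_lock_free() << std::endl;

    color expected = black;
    if (c.compare_exchange_strong(expected, white))
        std::cout << "exchanged" << std::endl;

    return 0;
}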


@@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_alpha.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_


@@ -0,0 +1,53 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* ARM Code by Phil Endecott, based on other architectures.
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_arm.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__))
// ARMv7 and later have dmb instruction
#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
#endif
#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
// ARMv6K and ARMv7 have 8- and 16-bit ldrex/strex variants
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
#if !defined(__ARM_ARCH_7M__)
// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
#endif
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
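
A sketch of how the capability macros defined above might be consumed by user code; the event_counter typedef and on_event() function are assumptions for illustration, not part of Boost.Atomic:

#include <boost/atomic.hpp>
#include <boost/cstdint.hpp>

// BOOST_ATOMIC_INT64_LOCK_FREE is 2 only when the header above detected
// ldrexd/strexd (ARMv6K and ARMv7, except ARMv7-M); otherwise a 64-bit
// counter would go through the lock pool, so fall back to 32 bits.
#if defined(BOOST_ATOMIC_INT64_LOCK_FREE) && BOOST_ATOMIC_INT64_LOCK_FREE == 2
typedef boost::atomic< boost::uint64_t > event_counter;
#else
typedef boost::atomic< boost::uint32_t > event_counter;
#endif

static event_counter g_events(0);

void on_event()
{
    g_events.fetch_add(1, boost::memory_order_relaxed);
}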


@@ -0,0 +1,134 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_atomic.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_FLAG_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#else
#define BOOST_ATOMIC_CHAR_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#else
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#else
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#else
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#else
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_INT_LOCK_FREE == 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#else
#define BOOST_ATOMIC_INT_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_LONG_LOCK_FREE == 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#else
#define BOOST_ATOMIC_INT128_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#else
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
#endif
#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#else
#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_CHAR_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_INT16_LOCK_FREE 0
#endif
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_INT32_LOCK_FREE 0
#endif
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_INT64_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_


@@ -0,0 +1,36 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_ppc.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(__powerpc64__)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_


@@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2010 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_sparc.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_


@@ -0,0 +1,62 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_sync.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_


@@ -0,0 +1,52 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2013 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_x86.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__i386__) &&\
(\
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
defined(__i586__) || defined(__i686__) || defined(__pentium4__) || defined(__nocona__) || defined(__core2__) || defined(__corei7__) ||\
defined(__k6__) || defined(__athlon__) || defined(__k8__) || defined(__amdfam10__) || defined(__bdver1__) || defined(__bdver2__) || defined(__bdver3__) || defined(__btver1__) || defined(__btver2__)\
)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_


@@ -0,0 +1,35 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009, 2011 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* Linux-specific code by Phil Endecott
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_linux_arm.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_


@@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_msvc_arm.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_


@@ -0,0 +1,50 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_msvc_x86.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(_M_IX86) && _M_IX86 >= 500
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if _MSC_VER >= 1500 && defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_


@@ -0,0 +1,33 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_windows.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_


@@ -1,290 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS128STRONG_HPP
#define BOOST_ATOMIC_DETAIL_CAS128STRONG_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann, Andrey Semashev
// Build 128-bit atomic operation on integers/UDTs from platform_cmpxchg128_strong
// primitive. It is assumed that 128-bit loads/stores are not
// atomic, so they are implemented through platform_load128/platform_store128.
#include <string.h>
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* integral types */
template<typename T, bool Sign>
class base_atomic<T, int, 16, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
platform_store128(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
value_type v = platform_load128(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg128_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
/* generic types */
#if defined(BOOST_HAS_INT128)
typedef boost::uint128_type storage128_type;
#else // defined(BOOST_HAS_INT128)
struct BOOST_ALIGNMENT(16) storage128_type
{
uint64_t data[2];
};
inline bool operator== (storage128_type const& left, storage128_type const& right)
{
return left.data[0] == right.data[0] && left.data[1] == right.data[1];
}
inline bool operator!= (storage128_type const& left, storage128_type const& right)
{
return !(left == right);
}
#endif // defined(BOOST_HAS_INT128)
template<typename T, bool Sign>
class base_atomic<T, void, 16, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef storage128_type storage_type;
protected:
typedef value_type const& value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
explicit base_atomic(value_type const& v) BOOST_NOEXCEPT
{
memset(&v_, 0, sizeof(v_));
memcpy(&v_, &v, sizeof(value_type));
}
void
store(value_type const& value, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
storage_type value_s;
memset(&value_s, 0, sizeof(value_s));
memcpy(&value_s, &value, sizeof(value_type));
platform_fence_before_store(order);
platform_store128(value_s, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
storage_type value_s = platform_load128(&v_);
platform_fence_after_load(order);
value_type value;
memcpy(&value, &value_s, sizeof(value_type));
return value;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
storage_type expected_s, desired_s;
memset(&expected_s, 0, sizeof(expected_s));
memset(&desired_s, 0, sizeof(desired_s));
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg128_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
}
}
}
#endif
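
The fetch_xxx members above all follow the same pattern: take a snapshot of the value, then retry a weak CAS until no other thread has modified it in between. The same pattern can be applied against the public interface to build operations the library does not provide; fetch_multiply below is a hypothetical example, not a Boost.Atomic API:

#include <boost/atomic.hpp>
#include <boost/cstdint.hpp>

// Same retry loop as the fetch_xxx members above: reload on failure
// (compare_exchange_weak updates 'original') and retry until the CAS lands.
boost::uint64_t fetch_multiply(boost::atomic< boost::uint64_t >& a, boost::uint64_t factor)
{
    boost::uint64_t original = a.load(boost::memory_order_relaxed);
    do {
    } while (!a.compare_exchange_weak(original, original * factor,
        boost::memory_order_seq_cst, boost::memory_order_relaxed));
    return original;
}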


@@ -1,939 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP
#define BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
// Build 8-, 16- and 32-bit atomic operations from
// a platform_cmpxchg32_strong primitive.
#include <string.h>
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* integral types */
template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef uint32_t storage_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
storage_type expected_s = (storage_type) expected;
storage_type desired_s = (storage_type) desired;
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
expected = (value_type) expected_s;
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef uint32_t storage_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
storage_type expected_s = (storage_type) expected;
storage_type desired_s = (storage_type) desired;
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
expected = (value_type) expected_s;
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
/* pointer types */
template<bool Sign>
class base_atomic<void *, void *, 4, Sign>
{
private:
typedef base_atomic this_type;
typedef void * value_type;
typedef std::ptrdiff_t difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, (char*)original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, (char*)original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign>
{
private:
typedef base_atomic this_type;
typedef T * value_type;
typedef std::ptrdiff_t difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
/* generic types */
template<typename T, bool Sign>
class base_atomic<T, void, 1, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
protected:
typedef value_type const& value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
protected:
typedef value_type const& value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
protected:
typedef value_type const& value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
}
}
}
#endif
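
The 1- and 2-byte specializations above widen the object to a zero-padded 32-bit storage word with memcpy, so the single platform_cmpxchg32_strong primitive covers all three sizes. A standalone sketch of that round trip; pack_to_storage and unpack_from_storage are illustrative helpers, not library functions, and T is assumed to be trivially copyable and no larger than 4 bytes:

#include <cstring>
#include <boost/cstdint.hpp>

// The value is copied into a zero-initialized 32-bit storage word, the
// platform CAS operates on that word, and the bytes are copied back out
// before returning to the caller.
template< typename T >
boost::uint32_t pack_to_storage(T const& v)
{
    boost::uint32_t storage = 0;
    std::memcpy(&storage, &v, sizeof(T));
    return storage;
}

template< typename T >
T unpack_from_storage(boost::uint32_t storage)
{
    T v;
    std::memcpy(&v, &storage, sizeof(T));
    return v;
}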

File diff suppressed because it is too large.


@@ -1,247 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS64STRONG_PTR_HPP
#define BOOST_ATOMIC_DETAIL_CAS64STRONG_PTR_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
// Build 64-bit atomic operation on pointers from platform_cmpxchg64_strong
// primitive. It is assumed that 64-bit loads/stores are not
// atomic, so they are implemented through platform_load64/platform_store64.
//
// The reason for extracting pointer specializations to a separate header is
// that 64-bit CAS is available on some 32-bit platforms (notably, x86).
// On these platforms there is no need for 64-bit pointer specializations,
// since they will never be used.
#include <string.h>
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* pointer types */
template<bool Sign>
class base_atomic<void *, void *, 8, Sign>
{
private:
typedef base_atomic this_type;
typedef void * value_type;
typedef std::ptrdiff_t difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
platform_store64(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
value_type v = platform_load64(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, (char*)original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, (char*)original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return true;
}
BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 8, Sign>
{
private:
typedef base_atomic this_type;
typedef T * value_type;
typedef std::ptrdiff_t difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
platform_store64(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
value_type v = platform_load64(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
}
}
}
#endif
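
The pointer specializations above implement fetch_add and fetch_sub with the same retry loop; since void* has no arithmetic, the byte offset is applied through a char* cast. An illustrative stand-alone equivalent, with std::atomic used only as a stand-in for the platform primitives:

#include <atomic>
#include <cstddef>

void* fetch_add_bytes(std::atomic<void*>& a, std::ptrdiff_t n)
{
    void* original = a.load(std::memory_order_relaxed);
    // Apply the displacement in bytes, as the void* specialization above does.
    while (!a.compare_exchange_weak(original, static_cast<char*>(original) + n,
           std::memory_order_seq_cst, std::memory_order_relaxed))
    {
    }
    return original;
}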


@@ -1,264 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP
#define BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
// Build 64-bit atomic operation on integers/UDTs from platform_cmpxchg64_strong
// primitive. It is assumed that 64-bit loads/stores are not
// atomic, so they are implemented through platform_load64/platform_store64.
#include <string.h>
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* integral types */
template<typename T, bool Sign>
class base_atomic<T, int, 8, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
protected:
typedef value_type value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
platform_fence_before_store(order);
platform_store64(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
value_type v = platform_load64(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
value_type v_;
};
/* generic types */
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign>
{
private:
typedef base_atomic this_type;
typedef T value_type;
typedef uint64_t storage_type;
protected:
typedef value_type const& value_arg_type;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(void), {})
explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
void
store(value_type const& value, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
storage_type value_s = 0;
memcpy(&value_s, &value, sizeof(value_type));
platform_fence_before_store(order);
platform_store64(value_s, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
storage_type value_s = platform_load64(&v_);
platform_fence_after_load(order);
value_type value;
memcpy(&value, &value_s, sizeof(value_type));
return value;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile BOOST_NOEXCEPT
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
storage_type v_;
};
}
}
}
#endif
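
The generic (non-integral) specializations above handle arbitrary trivially-copyable types by copying the object representation into an integer storage word before the compare-exchange. A small sketch of that conversion, using hypothetical helper names and assuming the type fits into 64 bits:

#include <cstdint>
#include <cstring>

// Hypothetical helpers illustrating the memcpy-through-storage trick used above.
template<typename T>
std::uint64_t to_storage(const T& v)
{
    std::uint64_t s = 0;            // zero-initialize so padding bytes are deterministic
    std::memcpy(&s, &v, sizeof(T)); // reinterpret the object representation
    return s;
}

template<typename T>
T from_storage(std::uint64_t s)
{
    T v;
    std::memcpy(&v, &s, sizeof(T));
    return v;
}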


@@ -1,11 +1,19 @@
#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP
#define BOOST_ATOMIC_DETAIL_CONFIG_HPP
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2012 Hartmut Kaiser
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/config.hpp
*
 * This header defines configuration macros for Boost.Atomic
*/
// Copyright (c) 2012 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
#include <boost/config.hpp>
@@ -13,4 +21,4 @@
#pragma once
#endif
#endif
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_


@@ -1,368 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP
#define BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP
// Copyright (c) 2009 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/builder.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
/*
Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
(HP OpenVMS systems documentation) and the alpha reference manual.
*/
/*
NB: The most natural thing would be to write the increment/decrement
operators along the following lines:
__asm__ __volatile__(
"1: ldl_l %0,%1 \n"
"addl %0,1,%0 \n"
"stl_c %0,%1 \n"
"beq %0,1b\n"
: "=&b" (tmp)
: "m" (value)
: "cc"
);
However according to the comments on the HP website and matching
comments in the Linux kernel sources this defies branch prediction,
as the cpu assumes that backward branches are always taken; so
instead copy the trick from the Linux kernel, introduce a forward
branch and back again.
I have, however, had a hard time measuring the difference between
the two versions in microbenchmarks -- I am leaving it in nevertheless
as it apparently does not hurt either.
*/
namespace boost {
namespace atomics {
namespace detail {
inline void fence_before(memory_order order)
{
switch(order) {
case memory_order_consume:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("mb" ::: "memory");
default:;
}
}
inline void fence_after(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("mb" ::: "memory");
default:;
}
}
template<>
inline void platform_atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_consume:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("mb" ::: "memory");
default:;
}
}
template<typename T>
class atomic_alpha_32
{
public:
typedef T integral_type;
BOOST_CONSTEXPR atomic_alpha_32(T v) BOOST_NOEXCEPT: i(v) {}
atomic_alpha_32() {}
T load(memory_order order=memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
T v=*reinterpret_cast<volatile const int *>(&i);
fence_after(order);
return v;
}
void store(T v, memory_order order=memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
fence_before(order);
*reinterpret_cast<volatile int *>(&i)=(int)v;
}
bool compare_exchange_weak(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
fence_before(success_order);
int current, success;
__asm__ __volatile__(
"1: ldl_l %2, %4\n"
"cmpeq %2, %0, %3\n"
"mov %2, %0\n"
"beq %3, 3f\n"
"stl_c %1, %4\n"
"2:\n"
".subsection 2\n"
"3: mov %3, %1\n"
"br 2b\n"
".previous\n"
: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
: "m" (i)
:
);
if (desired) fence_after(success_order);
else fence_after(failure_order);
return desired;
}
bool is_lock_free(void) const volatile BOOST_NOEXCEPT {return true;}
protected:
inline T fetch_add_var(T c, memory_order order) volatile BOOST_NOEXCEPT
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i), "r" (c)
:
);
fence_after(order);
return original;
}
inline T fetch_inc(memory_order order) volatile BOOST_NOEXCEPT
{
fence_before(order);
int original, modified;
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"addl %0, 1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
inline T fetch_dec(memory_order order) volatile BOOST_NOEXCEPT
{
fence_before(order);
int original, modified;
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"subl %0, 1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
private:
T i;
};
template<typename T>
class atomic_alpha_64
{
public:
typedef T integral_type;
BOOST_CONSTEXPR atomic_alpha_64(T v) BOOST_NOEXCEPT: i(v) {}
atomic_alpha_64() {}
T load(memory_order order=memory_order_seq_cst) const volatile
{
T v=*reinterpret_cast<volatile const T *>(&i);
fence_after(order);
return v;
}
void store(T v, memory_order order=memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
fence_before(order);
*reinterpret_cast<volatile T *>(&i)=v;
}
bool compare_exchange_weak(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile BOOST_NOEXCEPT
{
fence_before(success_order);
int current, success;
__asm__ __volatile__(
"1: ldq_l %2, %4\n"
"cmpeq %2, %0, %3\n"
"mov %2, %0\n"
"beq %3, 3f\n"
"stq_c %1, %4\n"
"2:\n"
".subsection 2\n"
"3: mov %3, %1\n"
"br 2b\n"
".previous\n"
: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
: "m" (i)
:
);
if (desired) fence_after(success_order);
else fence_after(failure_order);
return desired;
}
bool is_lock_free(void) const volatile BOOST_NOEXCEPT {return true;}
protected:
inline T fetch_add_var(T c, memory_order order) volatile BOOST_NOEXCEPT
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldq_l %0, %2\n"
"addq %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i), "r" (c)
:
);
fence_after(order);
return original;
}
inline T fetch_inc(memory_order order) volatile BOOST_NOEXCEPT
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldq_l %0, %2\n"
"addq %0, 1, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
inline T fetch_dec(memory_order order) volatile BOOST_NOEXCEPT
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldq_l %0, %2\n"
"subq %0, 1, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
private:
T i;
};
template<typename T>
class platform_atomic_integral<T, 4> :
public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > >
{
public:
typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 8> :
public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > >
{
public:
typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 1> :
public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T>
{
public:
typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 2> :
public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T>
{
public:
typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {}
platform_atomic_integral(void) {}
};
}
}
}
#endif
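
On Alpha the atomic loads and stores above are plain aligned accesses bracketed by "mb" barriers selected from the memory order. The following is only an illustrative portable rendering of that barrier placement, with std::atomic_thread_fence standing in for the inline "mb"; it is a sketch, not the library's actual code path.

#include <atomic>

// fence_before(memory_order_release) emits "mb", then a plain aligned store follows.
inline void alpha_style_store_release(volatile int& mem, int v)
{
    std::atomic_thread_fence(std::memory_order_release);
    mem = v;
}

// A plain aligned load followed by fence_after(memory_order_acquire), i.e. "mb".
inline int alpha_style_load_acquire(const volatile int& mem)
{
    int v = mem;
    std::atomic_thread_fence(std::memory_order_acquire);
    return v;
}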


@@ -1,255 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP
#define BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2009 Helge Bahmann
// Copyright (c) 2009 Phil Endecott
// Copyright (c) 2013 Tim Blechmann
// ARM Code by Phil Endecott, based on other architectures.
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
// From the ARM Architecture Reference Manual for architecture v6:
//
// LDREX{<cond>} <Rd>, [<Rn>]
// <Rd> Specifies the destination register for the memory word addressed by <Rd>
// <Rn> Specifies the register containing the address.
//
// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
// <Rd> Specifies the destination register for the returned status value.
// 0 if the operation updates memory
// 1 if the operation fails to update memory
// <Rm> Specifies the register containing the word to be stored to memory.
// <Rn> Specifies the register containing the address.
// Rd must not be the same register as Rm or Rn.
//
// ARM v7 is like ARM v6 plus:
// There are half-word and byte versions of the LDREX and STREX instructions,
// LDREXH, LDREXB, STREXH and STREXB.
// There are also double-word versions, LDREXD and STREXD.
// (Actually it looks like these are available from version 6k onwards.)
// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
// I think you can supply an immediate offset to the address.
//
// A memory barrier is effected using a "co-processor 15" instruction,
// though a separate assembler mnemonic is available for it in v7.
namespace boost {
namespace atomics {
namespace detail {
// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
// doesn't include all instructions and in particular it doesn't include the co-processor
// instruction used for the memory barrier or the load-locked/store-conditional
// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
// asm blocks with code to temporarily change to ARM mode.
//
// You can only change between ARM and Thumb modes when branching using the bx instruction.
// bx takes an address specified in a register. The least significant bit of the address
// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
// A temporary register is needed for the address and is passed as an argument to these
// macros. It must be one of the "low" registers accessible to Thumb code, specified
// using the "l" attribute in the asm statement.
//
// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal
// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
// so they can always be present.
#if defined(__thumb__) && !defined(__thumb2__)
#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: "
#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: "
#else
// The tmpreg is wasted in this case, which is non-optimal.
#define BOOST_ATOMIC_ARM_ASM_START(TMPREG)
#define BOOST_ATOMIC_ARM_ASM_END(TMPREG)
#endif
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)
#define BOOST_ATOMIC_ARM_DMB "dmb\n"
#else
#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n"
#endif
inline void
arm_barrier(void) BOOST_NOEXCEPT
{
int brtmp;
__asm__ __volatile__
(
BOOST_ATOMIC_ARM_ASM_START(%0)
BOOST_ATOMIC_ARM_DMB
BOOST_ATOMIC_ARM_ASM_END(%0)
: "=&l" (brtmp) :: "memory"
);
}
inline void
platform_fence_before(memory_order order) BOOST_NOEXCEPT
{
switch(order)
{
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
case memory_order_consume:
default:;
}
}
inline void
platform_fence_after(memory_order order) BOOST_NOEXCEPT
{
switch(order)
{
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
default:;
}
}
inline void
platform_fence_before_store(memory_order order) BOOST_NOEXCEPT
{
platform_fence_before(order);
}
inline void
platform_fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
arm_barrier();
}
inline void
platform_fence_after_load(memory_order order) BOOST_NOEXCEPT
{
platform_fence_after(order);
}
template<typename T>
inline bool
platform_cmpxchg32(T & expected, T desired, volatile T * ptr) BOOST_NOEXCEPT
{
int success;
int tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_ARM_ASM_START(%2)
"mov %1, #0\n" // success = 0
"ldrex %0, %3\n" // expected' = *(&i)
"teq %0, %4\n" // flags = expected'==expected
"ittt eq\n"
"strexeq %2, %5, %3\n" // if (flags.equal) *(&i) = desired, tmp = !OK
"teqeq %2, #0\n" // if (flags.equal) flags = tmp==0
"moveq %1, #1\n" // if (flags.equal) success = 1
BOOST_ATOMIC_ARM_ASM_END(%2)
: "=&r" (expected), // %0
"=&r" (success), // %1
"=&l" (tmp), // %2
"+Q" (*ptr) // %3
: "r" (expected), // %4
"r" (desired) // %5
: "cc"
);
return success;
}
}
}
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order)
{
case memory_order_acquire:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
atomics::detail::arm_barrier();
default:;
}
}
#define BOOST_ATOMIC_SIGNAL_FENCE 2
inline void
atomic_signal_fence(memory_order)
{
__asm__ __volatile__ ("" ::: "memory");
}
class atomic_flag
{
private:
uint32_t v_;
public:
BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {}
void
clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
atomics::detail::platform_fence_before_store(order);
const_cast<volatile uint32_t &>(v_) = 0;
atomics::detail::platform_fence_after_store(order);
}
bool
test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
atomics::detail::platform_fence_before(order);
uint32_t expected = v_;
do {
if (expected == 1)
break;
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
atomics::detail::platform_fence_after(order);
return expected;
}
BOOST_DELETED_FUNCTION(atomic_flag(const atomic_flag &))
BOOST_DELETED_FUNCTION(atomic_flag& operator=(const atomic_flag &))
};
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
}
#undef BOOST_ATOMIC_ARM_ASM_START
#undef BOOST_ATOMIC_ARM_ASM_END
#include <boost/atomic/detail/base.hpp>
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#include <boost/atomic/detail/cas32weak.hpp>
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
#endif
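
The atomic_flag defined above (clear plus test_and_set built on platform_cmpxchg32) is exactly what a spinlock needs. A minimal usage sketch against the public boost::atomic_flag interface, for illustration only:

#include <boost/atomic.hpp>

// Minimal spinlock built on the atomic_flag interface shown above.
class spinlock
{
    boost::atomic_flag flag_;  // default-constructed in the clear state
public:
    void lock()
    {
        while (flag_.test_and_set(boost::memory_order_acquire))
        {
            // spin until the current holder clears the flag
        }
    }
    void unlock()
    {
        flag_.clear(boost::memory_order_release);
    }
};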

File diff suppressed because it is too large


@@ -1,163 +0,0 @@
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Use the gnu builtin __sync_val_compare_and_swap to build
// atomic operations for 32 bit and smaller.
#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order)
{
case memory_order_relaxed:
break;
case memory_order_release:
case memory_order_consume:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__sync_synchronize();
break;
}
}
namespace atomics {
namespace detail {
inline void
platform_fence_before(memory_order)
{
/* empty, as compare_and_swap is synchronizing already */
}
inline void
platform_fence_after(memory_order)
{
/* empty, as compare_and_swap is synchronizing already */
}
inline void
platform_fence_before_store(memory_order order)
{
switch(order)
{
case memory_order_relaxed:
case memory_order_acquire:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__sync_synchronize();
break;
}
}
inline void
platform_fence_after_store(memory_order order)
{
if (order == memory_order_seq_cst)
__sync_synchronize();
}
inline void
platform_fence_after_load(memory_order order)
{
switch(order)
{
case memory_order_relaxed:
case memory_order_release:
break;
case memory_order_consume:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__sync_synchronize();
break;
}
}
template<typename T>
inline bool
platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
{
T found = __sync_val_compare_and_swap(ptr, expected, desired);
bool success = (found == expected);
expected = found;
return success;
}
}
}
class atomic_flag
{
private:
uint32_t v_;
public:
BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {}
void
clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
atomics::detail::platform_fence_before_store(order);
const_cast<volatile uint32_t &>(v_) = 0;
atomics::detail::platform_fence_after_store(order);
}
bool
test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
atomics::detail::platform_fence_before(order);
uint32_t expected = v_;
do {
if (expected == 1)
break;
} while (!atomics::detail::platform_cmpxchg32_strong(expected, (uint32_t)1, &v_));
atomics::detail::platform_fence_after(order);
return expected;
}
BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&))
};
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
}
#include <boost/atomic/detail/base.hpp>
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE (__SIZEOF_LONG__ <= 4 ? 2 : 0)
#define BOOST_ATOMIC_LLONG_LOCK_FREE (__SIZEOF_LONG_LONG__ <= 4 ? 2 : 0)
#define BOOST_ATOMIC_POINTER_LOCK_FREE (__SIZEOF_POINTER__ <= 4 ? 2 : 0)
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#include <boost/atomic/detail/cas32strong.hpp>
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
#endif
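
In this fallback the fences are plain __sync_synchronize() full barriers chosen from the memory order, so a sequentially consistent store expands to barrier, plain store, barrier. A small sketch of that expansion, assuming GCC's __sync builtins:

// What platform_fence_before_store / platform_fence_after_store produce for memory_order_seq_cst.
inline void store_seq_cst(volatile int& mem, int v)
{
    __sync_synchronize();  // fence before the store
    mem = v;               // plain store of the new value
    __sync_synchronize();  // trailing fence, emitted only for seq_cst
}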

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,206 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/builder.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
/* fallback implementation for various compilation targets;
this is *not* efficient, particularly because all operations
are fully fenced (full memory barriers before and after
each operation) */
#if defined(__GNUC__)
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired)
{
return __sync_val_compare_and_swap_4(ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
#if defined(__amd64__) || defined(__i686__)
inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
return __sync_val_compare_and_swap_8(ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#endif
}}}
#elif defined(__ICL) || defined(_MSC_VER)
#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#endif
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired)
{
return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
#if defined(_WIN64)
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
return _InterlockedCompareExchange64(ptr, desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#endif
}}}
#elif (defined(__ICC) || defined(__ECC))
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
return _InterlockedCompareExchange((void*)ptr, desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
#if defined(__x86_64)
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
return cas64<int>(ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#elif defined(__ECC) //IA-64 version
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
return _InterlockedCompareExchange64((void*)ptr, desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#endif
}}}
#elif (defined(__SUNPRO_CC) && defined(__sparc))
#include <sys/atomic.h>
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
return atomic_cas_32((volatile unsigned int*)ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
/* FIXME: check for 64 bit mode */
inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
}}}
#endif
namespace boost {
namespace atomics {
namespace detail {
#ifdef BOOST_ATOMIC_HAVE_CAS32
template<typename T>
class atomic_generic_cas32
{
private:
typedef atomic_generic_cas32 this_type;
public:
explicit atomic_generic_cas32(T v) : i((int32_t)v) {}
atomic_generic_cas32() {}
T load(memory_order order=memory_order_seq_cst) const volatile
{
T expected=(T)i;
do { } while(!const_cast<this_type *>(this)->compare_exchange_weak(expected, expected, order, memory_order_relaxed));
return expected;
}
void store(T v, memory_order order=memory_order_seq_cst) volatile
{
exchange(v);
}
bool compare_exchange_strong(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile
{
T found;
found=(T)fenced_compare_exchange_strong_32(&i, (int32_t)expected, (int32_t)desired);
bool success=(found==expected);
expected=found;
return success;
}
bool compare_exchange_weak(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
T exchange(T r, memory_order order=memory_order_seq_cst) volatile
{
T expected=(T)i;
do { } while(!compare_exchange_weak(expected, r, order, memory_order_relaxed));
return expected;
}
bool is_lock_free(void) const volatile {return true;}
typedef T integral_type;
private:
mutable int32_t i;
};
template<typename T>
class platform_atomic_integral<T, 4> :
public build_atomic_from_exchange<atomic_generic_cas32<T> >
{
public:
typedef build_atomic_from_exchange<atomic_generic_cas32<T> > super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 1> :
public build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T>
{
public:
typedef build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 2> :
public build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T>
{
public:
typedef build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
#endif
} } }
#endif
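
One non-obvious trick in the fallback above is implementing load() as a compare-exchange of the value with itself: a successful CAS leaves memory unchanged, yet it reports the value that was observed. An illustrative stand-alone version using std::atomic:

#include <atomic>

template<typename T>
T load_via_cas(std::atomic<T>& a)
{
    T expected = T();
    // On failure the observed value is written back into `expected`; on success the
    // stored value is replaced with itself, so memory is left unchanged.
    while (!a.compare_exchange_weak(expected, expected,
           std::memory_order_seq_cst, std::memory_order_relaxed))
    {
    }
    return expected;
}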


@@ -0,0 +1,137 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/int_sizes.hpp
*
 * This header defines macros for testing builtin integer type sizes
*/
#ifndef BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
// GCC and compatible compilers define internal macros with builtin type traits
#if defined(__SIZEOF_SHORT__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT __SIZEOF_SHORT__
#endif
#if defined(__SIZEOF_INT__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT __SIZEOF_INT__
#endif
#if defined(__SIZEOF_LONG__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG __SIZEOF_LONG__
#endif
#if defined(__SIZEOF_LONG_LONG__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG __SIZEOF_LONG_LONG__
#endif
#if defined(__SIZEOF_WCHAR_T__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__
#endif
#if defined(__SIZEOF_POINTER__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER __SIZEOF_POINTER__
#elif defined(_MSC_VER)
#if defined(_M_AMD64) || defined(_M_IA64)
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8
#else
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4
#endif
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
// Try to deduce sizes from limits
#include <limits.h>
#include <boost/cstdint.hpp>
#if (USHRT_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 1
#elif (USHRT_MAX + 0) == 0xffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 2
#elif (USHRT_MAX + 0) == 0xffffffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 4
#elif (USHRT_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 8
#endif
#if (UINT_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 1
#elif (UINT_MAX + 0) == 0xffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 2
#elif (UINT_MAX + 0) == 0xffffffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 4
#elif (UINT_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 8
#endif
#if (ULONG_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 1
#elif (ULONG_MAX + 0) == 0xffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 2
#elif (ULONG_MAX + 0) == 0xffffffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 4
#elif (ULONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 8
#endif
#if defined(__hpux) // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
#else
// The list of the non-standard macros (the ones except ULLONG_MAX) is taken from cstdint.hpp
#if defined(ULLONG_MAX)
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULLONG_MAX
#elif defined(ULONG_LONG_MAX)
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONG_LONG_MAX
#elif defined(ULONGLONG_MAX)
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONGLONG_MAX
#elif defined(_LLONG_MAX) // strangely enough, this one seems to be holding the limit for the unsigned integer
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX _LLONG_MAX
#endif
#if (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 1
#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 2
#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffffffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 4
#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
#endif
#endif // defined(__hpux)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
#include <wchar.h>
#include <boost/cstdint.hpp>
#if (WCHAR_MAX + 0) == 0xff || (WCHAR_MAX + 0) == 0x7f
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 1
#elif (WCHAR_MAX + 0) == 0xffff || (WCHAR_MAX + 0) == 0x7fff
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
#elif (WCHAR_MAX + 0) == 0xffffffff || (WCHAR_MAX + 0) == 0x7fffffff
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 4
#elif (WCHAR_MAX + 0) == UINT64_C(0xffffffffffffffff) || (WCHAR_MAX + 0) == INT64_C(0x7fffffffffffffff)
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 8
#endif
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please, report to the developers.
#endif
#endif // BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
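
The macros defined above let the rest of the library dispatch on builtin integer widths at preprocessing time. A hypothetical usage sketch (the typedef name is illustrative, not part of the library):

#include <boost/atomic/detail/int_sizes.hpp>

// Pick a 64-bit builtin integer using the size macros deduced above.
#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
typedef long wide_int_t;        // LP64 targets
#else
typedef long long wide_int_t;   // LLP64 / 32-bit targets
#endif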


@@ -48,6 +48,11 @@
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#endif
#if _MSC_VER >= 1500 && defined(_M_AMD64)
#pragma intrinsic(_InterlockedCompareExchange128)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) _InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
#endif
#if _MSC_VER >= 1600
// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.
@@ -105,13 +110,219 @@
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset))
#else // defined(_M_AMD64) || defined(_M_IA64)
#elif defined(_M_IX86)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
#endif // defined(_M_AMD64) || defined(_M_IA64)
#endif
#if _MSC_VER >= 1700 && defined(_M_ARM)
#pragma intrinsic(_InterlockedExchangeAdd64)
#pragma intrinsic(_InterlockedExchange64)
#pragma intrinsic(_InterlockedAnd64)
#pragma intrinsic(_InterlockedOr64)
#pragma intrinsic(_InterlockedXor64)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
#pragma intrinsic(_InterlockedCompareExchange8_nf)
#pragma intrinsic(_InterlockedCompareExchange8_acq)
#pragma intrinsic(_InterlockedCompareExchange8_rel)
#pragma intrinsic(_InterlockedCompareExchange16_nf)
#pragma intrinsic(_InterlockedCompareExchange16_acq)
#pragma intrinsic(_InterlockedCompareExchange16_rel)
#pragma intrinsic(_InterlockedCompareExchange_nf)
#pragma intrinsic(_InterlockedCompareExchange_acq)
#pragma intrinsic(_InterlockedCompareExchange_rel)
#pragma intrinsic(_InterlockedCompareExchange64)
#pragma intrinsic(_InterlockedCompareExchange64_nf)
#pragma intrinsic(_InterlockedCompareExchange64_acq)
#pragma intrinsic(_InterlockedCompareExchange64_rel)
#pragma intrinsic(_InterlockedCompareExchangePointer)
#pragma intrinsic(_InterlockedCompareExchangePointer_nf)
#pragma intrinsic(_InterlockedCompareExchangePointer_acq)
#pragma intrinsic(_InterlockedCompareExchangePointer_rel)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(dest, exchange, compare) _InterlockedCompareExchange8_nf((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange8_acq((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(dest, exchange, compare) _InterlockedCompareExchange8_rel((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(dest, exchange, compare) _InterlockedCompareExchange16_nf((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange16_acq((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(dest, exchange, compare) _InterlockedCompareExchange16_rel((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(dest, exchange, compare) _InterlockedCompareExchange_nf((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange_acq((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(dest, exchange, compare) _InterlockedCompareExchange_rel((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(dest, exchange, compare) _InterlockedCompareExchange64_nf((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange64_acq((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(dest, exchange, compare) _InterlockedCompareExchange64_rel((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELAXED(dest, exchange, compare) _InterlockedCompareExchangePointer_nf((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchangePointer_acq((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELEASE(dest, exchange, compare) _InterlockedCompareExchangePointer_rel((void**)(dest), (void*)(exchange), (void*)(compare))
#pragma intrinsic(_InterlockedExchangeAdd8_nf)
#pragma intrinsic(_InterlockedExchangeAdd8_acq)
#pragma intrinsic(_InterlockedExchangeAdd8_rel)
#pragma intrinsic(_InterlockedExchangeAdd16_nf)
#pragma intrinsic(_InterlockedExchangeAdd16_acq)
#pragma intrinsic(_InterlockedExchangeAdd16_rel)
#pragma intrinsic(_InterlockedExchangeAdd_nf)
#pragma intrinsic(_InterlockedExchangeAdd_acq)
#pragma intrinsic(_InterlockedExchangeAdd_rel)
#pragma intrinsic(_InterlockedExchangeAdd64_nf)
#pragma intrinsic(_InterlockedExchangeAdd64_acq)
#pragma intrinsic(_InterlockedExchangeAdd64_rel)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(dest, addend) _InterlockedExchangeAdd8_nf((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(dest, addend) _InterlockedExchangeAdd8_acq((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(dest, addend) _InterlockedExchangeAdd8_rel((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(dest, addend) _InterlockedExchangeAdd16_nf((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(dest, addend) _InterlockedExchangeAdd16_acq((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(dest, addend) _InterlockedExchangeAdd16_rel((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(dest, addend) _InterlockedExchangeAdd_nf((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(dest, addend) _InterlockedExchangeAdd_acq((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(dest, addend) _InterlockedExchangeAdd_rel((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(dest, addend) _InterlockedExchangeAdd64_nf((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(dest, addend) _InterlockedExchangeAdd64_acq((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(dest, addend) _InterlockedExchangeAdd64_rel((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED((long*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE((long*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE((long*)(dest), byte_offset))
#pragma intrinsic(_InterlockedExchange8_nf)
#pragma intrinsic(_InterlockedExchange8_acq)
#pragma intrinsic(_InterlockedExchange16_nf)
#pragma intrinsic(_InterlockedExchange16_acq)
#pragma intrinsic(_InterlockedExchange_nf)
#pragma intrinsic(_InterlockedExchange_acq)
#pragma intrinsic(_InterlockedExchange64_nf)
#pragma intrinsic(_InterlockedExchange64_acq)
#pragma intrinsic(_InterlockedExchangePointer)
#pragma intrinsic(_InterlockedExchangePointer_nf)
#pragma intrinsic(_InterlockedExchangePointer_acq)
#if _MSC_VER >= 1800
#pragma intrinsic(_InterlockedExchange8_rel)
#pragma intrinsic(_InterlockedExchange16_rel)
#pragma intrinsic(_InterlockedExchange_rel)
#pragma intrinsic(_InterlockedExchange64_rel)
#pragma intrinsic(_InterlockedExchangePointer_rel)
#endif
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(dest, newval) _InterlockedExchange8_nf((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(dest, newval) _InterlockedExchange8_acq((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(dest, newval) _InterlockedExchange16_nf((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(dest, newval) _InterlockedExchange16_acq((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(dest, newval) _InterlockedExchange_nf((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(dest, newval) _InterlockedExchange_acq((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(dest, newval) _InterlockedExchange64_nf((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(dest, newval) _InterlockedExchange64_acq((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELAXED(dest, newval) _InterlockedExchangePointer_nf((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_ACQUIRE(dest, newval) _InterlockedExchangePointer_acq((void**)(dest), (void*)(newval))
#if _MSC_VER >= 1800
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) _InterlockedExchange8_rel((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) _InterlockedExchange16_rel((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) _InterlockedExchange_rel((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) _InterlockedExchange64_rel((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) _InterlockedExchangePointer_rel((void**)(dest), (void*)(newval))
#else
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
#endif
#pragma intrinsic(_InterlockedAnd8_nf)
#pragma intrinsic(_InterlockedAnd8_acq)
#pragma intrinsic(_InterlockedAnd8_rel)
#pragma intrinsic(_InterlockedAnd16_nf)
#pragma intrinsic(_InterlockedAnd16_acq)
#pragma intrinsic(_InterlockedAnd16_rel)
#pragma intrinsic(_InterlockedAnd_nf)
#pragma intrinsic(_InterlockedAnd_acq)
#pragma intrinsic(_InterlockedAnd_rel)
#pragma intrinsic(_InterlockedAnd64_nf)
#pragma intrinsic(_InterlockedAnd64_acq)
#pragma intrinsic(_InterlockedAnd64_rel)
#define BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(dest, arg) _InterlockedAnd8_nf((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(dest, arg) _InterlockedAnd8_acq((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(dest, arg) _InterlockedAnd8_rel((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(dest, arg) _InterlockedAnd16_nf((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(dest, arg) _InterlockedAnd16_acq((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(dest, arg) _InterlockedAnd16_rel((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(dest, arg) _InterlockedAnd_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(dest, arg) _InterlockedAnd_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(dest, arg) _InterlockedAnd_rel((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(dest, arg) _InterlockedAnd64_nf((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(dest, arg) _InterlockedAnd64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(dest, arg) _InterlockedAnd64_rel((__int64*)(dest), (__int64)(arg))
#pragma intrinsic(_InterlockedOr8_nf)
#pragma intrinsic(_InterlockedOr8_acq)
#pragma intrinsic(_InterlockedOr8_rel)
#pragma intrinsic(_InterlockedOr16_nf)
#pragma intrinsic(_InterlockedOr16_acq)
#pragma intrinsic(_InterlockedOr16_rel)
#pragma intrinsic(_InterlockedOr_nf)
#pragma intrinsic(_InterlockedOr_acq)
#pragma intrinsic(_InterlockedOr_rel)
#pragma intrinsic(_InterlockedOr64_nf)
#pragma intrinsic(_InterlockedOr64_acq)
#pragma intrinsic(_InterlockedOr64_rel)
#define BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(dest, arg) _InterlockedOr8_nf((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(dest, arg) _InterlockedOr8_acq((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(dest, arg) _InterlockedOr8_rel((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(dest, arg) _InterlockedOr16_nf((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(dest, arg) _InterlockedOr16_acq((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(dest, arg) _InterlockedOr16_rel((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(dest, arg) _InterlockedOr_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(dest, arg) _InterlockedOr_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(dest, arg) _InterlockedOr_rel((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(dest, arg) _InterlockedOr64_nf((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(dest, arg) _InterlockedOr64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(dest, arg) _InterlockedOr64_rel((__int64*)(dest), (__int64)(arg))
#pragma intrinsic(_InterlockedXor8_nf)
#pragma intrinsic(_InterlockedXor8_acq)
#pragma intrinsic(_InterlockedXor8_rel)
#pragma intrinsic(_InterlockedXor16_nf)
#pragma intrinsic(_InterlockedXor16_acq)
#pragma intrinsic(_InterlockedXor16_rel)
#pragma intrinsic(_InterlockedXor_nf)
#pragma intrinsic(_InterlockedXor_acq)
#pragma intrinsic(_InterlockedXor_rel)
#pragma intrinsic(_InterlockedXor64_nf)
#pragma intrinsic(_InterlockedXor64_acq)
#pragma intrinsic(_InterlockedXor64_rel)
#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(dest, arg) _InterlockedXor8_nf((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(dest, arg) _InterlockedXor8_acq((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(dest, arg) _InterlockedXor8_rel((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(dest, arg) _InterlockedXor16_nf((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(dest, arg) _InterlockedXor16_acq((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(dest, arg) _InterlockedXor16_rel((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(dest, arg) _InterlockedXor_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(dest, arg) _InterlockedXor_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(dest, arg) _InterlockedXor_rel((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(dest, arg) _InterlockedXor64_nf((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg))
#endif // _MSC_VER >= 1700 && defined(_M_ARM)
#else // defined(_MSC_VER) && _MSC_VER >= 1400

View File

@@ -1,11 +1,19 @@
#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP
#define BOOST_ATOMIC_DETAIL_LINK_HPP
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2012 Hartmut Kaiser
* Copyright (c) 2014 Andrey Semashev
*/
/*!
 * \file atomic/detail/link.hpp
 *
 * This header defines macros for linking with the compiled library of Boost.Atomic
*/
// Copyright (c) 2012 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>

View File

@@ -1,192 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP
#define BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2009, 2011 Helge Bahmann
// Copyright (c) 2009 Phil Endecott
// Copyright (c) 2013 Tim Blechmann
// Linux-specific code by Phil Endecott
// Different ARM processors have different atomic instructions. In particular,
// architecture versions before v6 (which are still in widespread use, e.g. the
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
// On Linux the kernel provides some support that lets us abstract away from
// these differences: it provides emulated CAS and barrier functions at special
// addresses that are guaranteed not to be interrupted by the kernel. Using
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
// return the found value on failure, and we have to manually load this value
// after the emulated CAS reports failure. This in turn introduces a race
// between the CAS failing (due to the "wrong" value being found) and subsequently
// loading (which might turn up the "right" value). From an application's
// point of view this looks like "spurious failure", and therefore the
// emulated CAS is only good enough to provide compare_exchange_weak
// semantics.
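//
// For illustration only (hypothetical helper, not part of this header): strong
// CAS semantics could be layered on top of the weak primitive below with a
// retry loop along these lines:
//
//     template<typename T>
//     inline bool platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
//     {
//         for (;;)
//         {
//             T previously_expected = expected;
//             if (platform_cmpxchg32(expected, desired, ptr))
//                 return true;   // exchange performed
//             if (expected != previously_expected)
//                 return false;  // a genuinely different value was observed
//             // the reloaded value still equals the expected one, so the
//             // failure was spurious - retry
//         }
//     }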
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
inline void
arm_barrier(void)
{
void (*kernel_dmb)(void) = (void (*)(void)) 0xffff0fa0;
kernel_dmb();
}
inline void
platform_fence_before(memory_order order)
{
switch(order) {
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
case memory_order_consume:
default:;
}
}
inline void
platform_fence_after(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
default:;
}
}
inline void
platform_fence_before_store(memory_order order)
{
platform_fence_before(order);
}
inline void
platform_fence_after_store(memory_order order)
{
if (order == memory_order_seq_cst)
arm_barrier();
}
inline void
platform_fence_after_load(memory_order order)
{
platform_fence_after(order);
}
template<typename T>
inline bool
platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
{
typedef T (*kernel_cmpxchg32_t)(T oldval, T newval, volatile T * ptr);
if (((kernel_cmpxchg32_t) 0xffff0fc0)(expected, desired, ptr) == 0) {
return true;
} else {
expected = *ptr;
return false;
}
}
}
}
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
atomics::detail::arm_barrier();
default:;
}
}
#define BOOST_ATOMIC_SIGNAL_FENCE 2
inline void
atomic_signal_fence(memory_order)
{
__asm__ __volatile__ ("" ::: "memory");
}
class atomic_flag
{
private:
uint32_t v_;
public:
BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {}
void
clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
atomics::detail::platform_fence_before_store(order);
const_cast<volatile uint32_t &>(v_) = 0;
atomics::detail::platform_fence_after_store(order);
}
bool
test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
atomics::detail::platform_fence_before(order);
uint32_t expected = v_;
do {
if (expected == 1)
break;
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
atomics::detail::platform_fence_after(order);
return expected;
}
BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&))
};
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
}
#include <boost/atomic/detail/base.hpp>
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#include <boost/atomic/detail/cas32weak.hpp>
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
#endif

View File

@@ -1,12 +1,19 @@
#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013-2014 Andrey Semashev
*/
/*!
* \file atomic/detail/lockpool.hpp
*
* This header contains declaration of the lockpool used to emulate atomic ops.
*/
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/link.hpp>
@@ -19,65 +26,26 @@ namespace boost {
namespace atomics {
namespace detail {
#if !defined(BOOST_ATOMIC_FLAG_LOCK_FREE) || BOOST_ATOMIC_FLAG_LOCK_FREE != 2
class lockpool
struct lockpool
{
public:
class scoped_lock
{
void* lock_;
void* m_lock;
public:
explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr);
BOOST_ATOMIC_DECL ~scoped_lock();
explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL ~scoped_lock() BOOST_NOEXCEPT;
BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
};
static BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
static BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
};
#else
} // namespace detail
} // namespace atomics
} // namespace boost
class lockpool
{
public:
typedef atomic_flag lock_type;
class scoped_lock
{
private:
lock_type& flag_;
public:
explicit
scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr))
{
while (flag_.test_and_set(memory_order_acquire))
{
#if defined(BOOST_ATOMIC_X86_PAUSE)
BOOST_ATOMIC_X86_PAUSE();
#endif
}
}
~scoped_lock(void)
{
flag_.clear(memory_order_release);
}
BOOST_DELETED_FUNCTION(scoped_lock(const scoped_lock &))
BOOST_DELETED_FUNCTION(scoped_lock& operator=(const scoped_lock &))
};
private:
static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
};
#endif
}
}
}
#endif
#endif // BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_

View File

@@ -0,0 +1,24 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/operations.hpp
*
* This header defines atomic operations, including the emulated version.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/operations_lockfree.hpp>
#include <boost/atomic/detail/ops_emulated.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_

View File

@@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/operations_fwd.hpp
*
* This header contains forward declaration of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< unsigned int Size, bool Signed >
struct operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_

View File

@@ -0,0 +1,30 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/operations_lockfree.hpp
*
* This header defines lockfree atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#if !defined(BOOST_ATOMIC_EMULATED)
#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_)
#else
#include <boost/atomic/detail/operations_fwd.hpp>
#endif
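// Note: BOOST_ATOMIC_DETAIL_HEADER is assumed to paste the detected
// BOOST_ATOMIC_DETAIL_PLATFORM value onto the given prefix, so that, for
// example, on a GCC/ARM target the include above would resolve to
// <boost/atomic/detail/ops_gcc_arm.hpp>.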
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_

View File

@@ -0,0 +1,91 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_cas_based.hpp
*
* This header contains CAS-based implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base >
struct cas_based_operations :
public Base
{
typedef typename Base::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
Base::store(storage, (storage_type)0, order);
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_

View File

@@ -0,0 +1,149 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_emulated.hpp
*
* This header contains lockpool-based implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename T >
struct emulated_operations
{
typedef T storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
lockpool::scoped_lock lock(&storage);
const_cast< storage_type& >(storage) = v;
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
lockpool::scoped_lock lock(&storage);
return const_cast< storage_type const& >(storage);
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
s += v;
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
s -= v;
return old_val;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
s = v;
return old_val;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
const bool res = old_val == expected;
if (res)
s = desired;
expected = old_val;
return res;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
s &= v;
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
s |= v;
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type& s = const_cast< storage_type& >(storage);
lockpool::scoped_lock lock(&storage);
storage_type old_val = s;
s ^= v;
return old_val;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return false;
}
};
template< unsigned int Size, bool Signed >
struct operations :
public emulated_operations< typename make_storage_type< Size, Signed >::type >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_

View File

@@ -0,0 +1,65 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_extending_cas_based.hpp
*
* This header contains a boilerplate of the \c operations template implementation that requires sign/zero extension in arithmetic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base, unsigned int Size, bool Signed >
struct extending_cas_based_operations :
public Base
{
typedef typename Base::storage_type storage_type;
typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
emulated_storage_type new_val;
do
{
new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
}
while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val = Base::load(storage, memory_order_relaxed);
emulated_storage_type new_val;
do
{
new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);
}
while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));
return old_val;
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_

View File

@@ -0,0 +1,874 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_alpha.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/*
Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
(HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual.
*/
/*
NB: The most natural thing would be to write the increment/decrement
operators along the following lines:
__asm__ __volatile__
(
"1: ldl_l %0,%1 \n"
"addl %0,1,%0 \n"
"stl_c %0,%1 \n"
"beq %0,1b\n"
: "=&b" (tmp)
: "m" (value)
: "cc"
);
However according to the comments on the HP website and matching
comments in the Linux kernel sources this defies branch prediction,
as the cpu assumes that backward branches are always taken; so
instead copy the trick from the Linux kernel, introduce a forward
branch and back again.
I have, however, had a hard time measuring the difference between
the two versions in microbenchmarks -- I am leaving it in nevertheless
as it apparently does not hurt either.
*/
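/*
For reference, an illustrative sketch of the forward-branch form used by the
operations below (it mirrors the fetch_add implementation further down):

    "1:\n"
    "ldl_l %0, %2\n"       // load-locked the current value
    "addl %0, %3, %1\n"    // compute the new value
    "stl_c %1, %2\n"       // store-conditional; %1 becomes 0 on failure
    "beq %1, 2f\n"         // branch forward on failure...
    ".subsection 2\n"
    "2: br 1b\n"           // ...to an out-of-line stub that retries
    ".previous\n"

This way the backward branch is reached only through the cold out-of-line
stub, so the statically predicted fall-through path is the common, successful
one.
*/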
struct gcc_alpha_operations_base
{
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if ((order & (memory_order_release | memory_order_consume)) != 0)
__asm__ __volatile__ ("mb" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_acquire) != 0)
__asm__ __volatile__ ("mb" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("mb" ::: "memory");
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public gcc_alpha_operations_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"mov %3, %1\n"
"ldl_l %0, %2\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (tmp) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
int success;
storage_type current;
__asm__ __volatile__
(
"1:\n"
"ldl_l %2, %4\n" // current = *(&storage)
"cmpeq %2, %0, %3\n" // success = current == expected
"mov %2, %0\n" // expected = current
"beq %3, 2f\n" // if (success == 0) goto end
"stl_c %1, %4\n" // storage = desired; desired = store succeeded
"mov %1, %3\n" // success = desired
"2:\n"
: "+&r" (expected), // %0
"+&r" (desired), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage) // %4
:
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
storage_type current, tmp;
fence_before(success_order);
__asm__ __volatile__
(
"1:\n"
"mov %5, %1\n" // tmp = desired
"ldl_l %2, %4\n" // current = *(&storage)
"cmpeq %2, %0, %3\n" // success = current == expected
"mov %2, %0\n" // expected = current
"beq %3, 2f\n" // if (success == 0) goto end
"stl_c %1, %4\n" // storage = tmp; tmp = store succeeded
"beq %1, 3f\n" // if (tmp == 0) goto retry
"mov %1, %3\n" // success = tmp
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous\n"
: "+&r" (expected), // %0
"=&r" (tmp), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage), // %4
"r" (desired) // %5
:
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"subl %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"and %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"bis %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"xor %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< >
struct operations< 1u, false > :
public operations< 4u, false >
{
typedef operations< 4u, false > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"zapnot %1, #1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"subl %0, %3, %1\n"
"zapnot %1, #1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
};
template< >
struct operations< 1u, true > :
public operations< 4u, true >
{
typedef operations< 4u, true > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"sextb %1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"subl %0, %3, %1\n"
"sextb %1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
};
template< >
struct operations< 2u, false > :
public operations< 4u, false >
{
typedef operations< 4u, false > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"zapnot %1, #3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"subl %0, %3, %1\n"
"zapnot %1, #3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
};
template< >
struct operations< 2u, true > :
public operations< 4u, true >
{
typedef operations< 4u, true > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"sextw %1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldl_l %0, %2\n"
"subl %0, %3, %1\n"
"sextw %1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
};
template< bool Signed >
struct operations< 8u, Signed > :
public gcc_alpha_operations_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"mov %3, %1\n"
"ldq_l %0, %2\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (tmp) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
int success;
storage_type current;
__asm__ __volatile__
(
"1:\n"
"ldq_l %2, %4\n" // current = *(&storage)
"cmpeq %2, %0, %3\n" // success = current == expected
"mov %2, %0\n" // expected = current
"beq %3, 2f\n" // if (success == 0) goto end
"stq_c %1, %4\n" // storage = desired; desired = store succeeded
"mov %1, %3\n" // success = desired
"2:\n"
: "+&r" (expected), // %0
"+&r" (desired), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage) // %4
:
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
storage_type current, tmp;
fence_before(success_order);
__asm__ __volatile__
(
"1:\n"
"mov %5, %1\n" // tmp = desired
"ldq_l %2, %4\n" // current = *(&storage)
"cmpeq %2, %0, %3\n" // success = current == expected
"mov %2, %0\n" // expected = current
"beq %3, 2f\n" // if (success == 0) goto end
"stq_c %1, %4\n" // storage = tmp; tmp = store succeeded
"beq %1, 3f\n" // if (tmp == 0) goto retry
"mov %1, %3\n" // success = tmp
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous\n"
: "+&r" (expected), // %0
"=&r" (tmp), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage), // %4
"r" (desired) // %5
:
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldq_l %0, %2\n"
"addq %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldq_l %0, %2\n"
"subq %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldq_l %0, %2\n"
"and %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldq_l %0, %2\n"
"bis %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldq_l %0, %2\n"
"xor %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
:
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("mb" ::: "memory");
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_

View File

@@ -0,0 +1,954 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_arm.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
// From the ARM Architecture Reference Manual for architecture v6:
//
// LDREX{<cond>} <Rd>, [<Rn>]
// <Rd> Specifies the destination register for the memory word addressed by <Rd>
// <Rn> Specifies the register containing the address.
//
// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
// <Rd> Specifies the destination register for the returned status value.
// 0 if the operation updates memory
// 1 if the operation fails to update memory
// <Rm> Specifies the register containing the word to be stored to memory.
// <Rn> Specifies the register containing the address.
// Rd must not be the same register as Rm or Rn.
//
// ARM v7 is like ARM v6 plus:
// There are half-word and byte versions of the LDREX and STREX instructions,
// LDREXH, LDREXB, STREXH and STREXB.
// There are also double-word versions, LDREXD and STREXD.
// (Actually it looks like these are available from version 6k onwards.)
// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
// I think you can supply an immediate offset to the address.
//
// A memory barrier is effected using a "co-processor 15" instruction,
// though a separate assembler mnemonic is available for it in v7.
//
// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
// doesn't include all instructions and in particular it doesn't include the co-processor
// instruction used for the memory barrier or the load-locked/store-conditional
// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
// asm blocks with code to temporarily change to ARM mode.
//
// You can only change between ARM and Thumb modes when branching using the bx instruction.
// bx takes an address specified in a register. The least significant bit of the address
// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
// A temporary register is needed for the address and is passed as an argument to these
// macros. It must be one of the "low" registers accessible to Thumb code, specified
// using the "l" attribute in the asm statement.
//
// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
// So in v7 we don't need to change to ARM mode; we can write "universal
// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
// so they can always be present.
#if defined(__thumb__) && !defined(__thumb2__)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8: "
#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9: "
#else
// The tmpreg may be wasted in this case, which is non-optimal.
#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
#endif
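// For illustration: in a "Thumb 1" build, with the temporary bound to a low
// register (say r3), the two macros above bracket an asm block roughly as
// follows (illustrative expansion; the register is chosen by the compiler):
//
//     adr   r3, 8f        @ address of the ARM-mode body
//     bx    r3            @ bit 0 clear -> switch to ARM mode
//     .arm
//     .align 4
// 8:  ...                 @ the ldrex/strex body executes in ARM mode
//     adr   r3, 9f + 1    @ address of the continuation, +1 sets the Thumb bit
//     bx    r3            @ switch back to Thumb mode
//     .thumb
//     .align 2
// 9:
//
// In ARM and Thumb-2 builds both macros expand to nothing and the body is
// emitted in place.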
struct gcc_arm_operations_base
{
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_release) != 0)
hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_acquire) != 0)
hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
hardware_full_fence();
}
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
__asm__ __volatile__
(
"dmb ish\n"
:
:
: "memory"
);
#else
int tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
"mcr\tp15, 0, r0, c7, c10, 5\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: "=&l" (tmp)
:
: "memory"
);
#endif
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public gcc_arm_operations_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original;
fence_before(order);
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // load the original value
"strex %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
"teq %[tmp], #0\n" // check if store succeeded
"bne 1b\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
: [value] "r" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
uint32_t success;
uint32_t tmp;
storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"mov %[success], #0\n" // success = 0
"ldrex %[original], %[storage]\n" // original = *(&storage)
"teq %[original], %[expected]\n" // flags = original==expected
"itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
"strexeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
"eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [expected] "r" (expected), // %4
[desired] "r" (desired) // %5
: "cc"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = original;
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
uint32_t success;
uint32_t tmp;
storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"mov %[success], #0\n" // success = 0
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"teq %[original], %[expected]\n" // flags = original==expected
"bne 2f\n" // if (!flags.equal) goto end
"strex %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
"eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
"beq 1b\n" // if (flags.equal) goto retry
"2:\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [expected] "r" (expected), // %4
[desired] "r" (desired) // %5
: "cc"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = original;
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"add %[result], %[original], %[value]\n" // result = original + value
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"sub %[result], %[original], %[value]\n" // result = original - value
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"and %[result], %[original], %[value]\n" // result = original & value
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"orr %[result], %[original], %[value]\n" // result = original | value
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"eor %[result], %[original], %[value]\n" // result = original ^ value
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< >
struct operations< 1u, false > :
public operations< 4u, false >
{
typedef operations< 4u, false > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"add %[result], %[original], %[value]\n" // result = original + value
"uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"sub %[result], %[original], %[value]\n" // result = original - value
"uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
};
template< >
struct operations< 1u, true > :
public operations< 4u, true >
{
typedef operations< 4u, true > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"add %[result], %[original], %[value]\n" // result = original + value
"sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"sub %[result], %[original], %[value]\n" // result = original - value
"sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
};
template< >
struct operations< 2u, false > :
public operations< 4u, false >
{
typedef operations< 4u, false > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"add %[result], %[original], %[value]\n" // result = original + value
"uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"sub %[result], %[original], %[value]\n" // result = original - value
"uxth %[result], %[result]\n" // zero extend result from 16 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
};
template< >
struct operations< 2u, true > :
public operations< 4u, true >
{
typedef operations< 4u, true > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"add %[result], %[original], %[value]\n" // result = original + value
"sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
"ldrex %[original], %[storage]\n" // original = *(&storage)
"sub %[result], %[original], %[value]\n" // result = original - value
"sxth %[result], %[result]\n" // sign extend result from 16 to 32 bits
"strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [original] "=&r" (original), // %0
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
: [value] "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
};
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
// Unlike 32-bit operations, for 64-bit loads and stores we must use ldrexd/strexd.
// Any other instructions result in a non-atomic sequence of 32-bit accesses.
// See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
// Section A3.5.3 "Atomicity in the ARM architecture".
// In the asm blocks below we have to use 32-bit register pairs to compose 64-bit values.
// In order to pass the 64-bit operands to/from asm blocks, we use an undocumented gcc feature:
// the lower half (Rt) of the operand is accessible normally, via the numbered placeholder (e.g. %0),
// and the upper half (Rt2) - via the same placeholder with an 'H' after the '%' sign (e.g. %H0).
// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/
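// As a minimal illustration of that pairing (with a hypothetical 64-bit variable "value"
// held in a register), an increment of the whole quantity could be written roughly as:
//   __asm__ ("adds %0, %0, #1\n"
//            "adc %H0, %H0, #0\n"
//            : "+r" (value) : : "cc");
// Here %0 names the register holding the low word (Rt) and %H0 the register holding the
// high word (Rt2). The asm blocks below rely on the same pairing for every 64-bit operand.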
template< bool Signed >
struct operations< 8u, Signed > :
public gcc_arm_operations_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
exchange(storage, v, order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type original;
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
"ldrexd %1, %H1, %2\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: "=&l" (tmp), // %0
"=&r" (original) // %1
: "Q" (storage) // %2
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original;
fence_before(order);
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
"1:\n"
"ldrexd %1, %H1, %2\n" // load the original value
"strexd %0, %3, %H3, %2\n" // store the replacement, tmp = store failed
"teq %0, #0\n" // check if store succeeded
"bne 1b\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: "=&l" (tmp), // %0
"=&r" (original), // %1
"+Q" (storage) // %2
: "r" (v) // %3
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
uint32_t success;
uint32_t tmp;
storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"eor %1, %0, %4\n" // The three instructions are just a fancy way of comparing 2 64-bit integers:
"eor %2, %H0, %H4\n" // success = original[lo] ^ expected[lo]; tmp = original[hi] ^ expected[hi];
"orrs %1, %1, %2\n" // success = success | tmp (i.e. 0 if original==expected); flags = original==expected
"itte eq\n" // [hint that the following 3 instructions are conditional on flags.equal]
"strexdeq %1, %5, %H5, %3\n" // if (flags.equal) *(&storage) = desired, success = store failed
"eoreq %1, %1, #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
"movne %1, #0\n" // if (!flags.equal) success = 0
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (success), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (expected), // %4
"r" (desired) // %5
: "cc"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = original;
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
uint32_t success;
uint32_t tmp;
storage_type original;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"1:\n"
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"eor %1, %0, %4\n" // The three instructions are just a fancy way of comparing 2 64-bit integers:
"eor %2, %H0, %H4\n" // success = original[lo] ^ expected[lo]; tmp = original[hi] ^ expected[hi];
"orrs %1, %1, %2\n" // success = success | tmp (i.e. 0 if original==expected); flags = original==expected
"itt ne\n" // [hint that the following 2 instructions are conditional on flags.equal]
"movne %1, #0\n" // if (!flags.equal) success = 0
"bne 2f\n" // if (!flags.equal) goto end
"strexd %1, %5, %H5, %3\n" // *(&storage) = desired, success = store failed
"eors %1, %1, #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
"beq 1b\n" // if (flags.equal) goto retry
"2:\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (success), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (expected), // %4
"r" (desired) // %5
: "cc"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = original;
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"1:\n"
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"adds %1, %0, %4\n" // result = original + value
"adc %H1, %H0, %H4\n"
"strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
"teq %2, #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (result), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"1:\n"
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"subs %1, %0, %4\n" // result = original - value
"sbc %H1, %H0, %H4\n"
"strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
"teq %2, #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (result), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"1:\n"
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"and %1, %0, %4\n" // result = original & value
"and %H1, %H0, %H4\n"
"strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
"teq %2, #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (result), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"1:\n"
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"orr %1, %0, %4\n" // result = original | value
"orr %H1, %H0, %H4\n"
"strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
"teq %2, #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (result), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
uint32_t tmp;
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
"1:\n"
"ldrexd %0, %H0, %3\n" // original = *(&storage)
"eor %1, %0, %4\n" // result = original ^ value
"eor %H1, %H0, %H4\n"
"strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
"teq %2, #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
: "=&r" (original), // %0
"=&r" (result), // %1
"=&l" (tmp), // %2
"+Q" (storage) // %3
: "r" (v) // %4
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & (memory_order_acquire | memory_order_release)) != 0)
gcc_arm_operations_base::hardware_full_fence();
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_

View File

@@ -0,0 +1,238 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_atomic.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__INTEL_COMPILER)
// This is used to suppress warning #32013 described below for Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif
namespace boost {
namespace atomics {
namespace detail {
/*!
* The function converts \c boost::memory_order values to the compiler-specific constants.
*
* NOTE: The intention is that the function is optimized away by the compiler, and the
* compiler-specific constants are passed to the intrinsics. I know constexpr doesn't
 * work in this case because the standard atomics interface requires memory ordering
* constants to be passed as function arguments, at which point they stop being constexpr.
* However it is crucial that the compiler sees constants and not runtime values,
* because otherwise it just ignores the ordering value and always uses seq_cst.
* This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
* gcc 4.8.2. Intel Compiler issues a warning in this case:
*
* warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
*
* while gcc acts silently.
*
* To mitigate the problem ALL functions, including the atomic<> members must be
* declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
 * all functions are called with constant orderings and call the intrinsics properly.
*
* Unfortunately, this still doesn't work in debug mode as the compiler doesn't
* inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic
 * operations will be executed with seq_cst semantics.
*/
BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
{
return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
(order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
(order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
}
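// As a rough illustration of the intended folding (with a hypothetical atomic variable "x"):
// a fully inlined call such as
//   __atomic_load_n(&x, convert_memory_order_to_gcc(memory_order_acquire))
// should reduce to __atomic_load_n(&x, __ATOMIC_ACQUIRE) at compile time, which is what
// allows the compiler to emit an acquire barrier instead of defaulting to seq_cst.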
template< typename T >
struct gcc_atomic_operations
{
typedef T storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
__atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return __atomic_compare_exchange_n
(
&storage, &expected, desired, false,
atomics::detail::convert_memory_order_to_gcc(success_order),
atomics::detail::convert_memory_order_to_gcc(failure_order)
);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return __atomic_compare_exchange_n
(
&storage, &expected, desired, true,
atomics::detail::convert_memory_order_to_gcc(success_order),
atomics::detail::convert_memory_order_to_gcc(failure_order)
);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
__atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile& storage) BOOST_NOEXCEPT
{
return __atomic_is_lock_free(sizeof(storage_type), &storage);
}
};
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
template< bool Signed >
struct operations< 1u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >
{
};
#endif
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
template< bool Signed >
struct operations< 2u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >
{
};
#endif
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
template< bool Signed >
struct operations< 4u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >
{
};
#endif
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
template< bool Signed >
struct operations< 8u, Signed > :
public cas_based_operations< gcc_dcas_x86< Signed > >
{
};
#else
template< bool Signed >
struct operations< 8u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >
{
};
#endif
#endif
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
template< bool Signed >
struct operations< 16u, Signed > :
public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
};
#else
template< bool Signed >
struct operations< 16u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >
{
};
#endif
#endif
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
__atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
__atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_

View File

@@ -0,0 +1,767 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_ppc.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/*
Refer to: Motorola: "Programming Environments Manual for 32-Bit
Implementations of the PowerPC Architecture", Appendix E:
"Synchronization Programming Examples" for an explanation of what is
going on here (can be found on the web at various places by the
name "MPCFPE32B.pdf", Google is your friend...)
Most of the atomic operations map to instructions in a relatively
straight-forward fashion, but "load"s may at first glance appear
a bit strange as they map to:
lwz %rX, addr
cmpw %rX, %rX
bne- 1f
1:
That is, the CPU is forced to perform a branch that "formally" depends
on the value retrieved from memory. This scheme has an overhead of
about 1-2 clock cycles per load, but it allows mapping "acquire" to
the "isync" instruction instead of "sync" uniformly and for all types
of atomic operations. Since "isync" has a cost of about 15 clock
cycles, while "sync" has a cost of about 50 clock cycles, the small
penalty to atomic loads more than compensates for this.
Byte- and halfword-sized atomic values are realized by encoding the
value to be represented into a word, performing sign/zero extension
as appropriate. This means that after add/sub operations the value
needs fixing up to accurately preserve the wrap-around semantic of
the smaller type. (Nothing special needs to be done for the bit-wise
and the "exchange type" operators as the compiler already sees to
it that values carried in registers are extended appropriately and
everything falls into place naturally).
The register constraint "b" instructs gcc to use any register
except r0; this is sometimes required because the encoding for
r0 is used to signify "constant zero" in a number of instructions,
making r0 unusable in this place. For simplicity this constraint
is used everywhere since I am too lazy to look this up on a
per-instruction basis, and ppc has enough registers for this not
to pose a problem.
*/
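// Putting the above together, an atomic load with memory_order_acquire is expected to
// expand to roughly the following sequence (illustrative only, 32-bit case):
//   lwz %rX, addr    ; load the value
//   cmpw %rX, %rX    ; compare the value with itself (always equal)
//   bne- 1f          ; never-taken branch, creates the formal data dependency
//   1:
//   isync            ; acquire fence emitted by fence_after()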
struct gcc_ppc_operations_base
{
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
#if defined(__powerpc64__)
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
else if ((order & memory_order_release) != 0)
__asm__ __volatile__ ("lwsync" ::: "memory");
#else
if ((order & memory_order_release) != 0)
__asm__ __volatile__ ("sync" ::: "memory");
#endif
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_acquire) != 0)
__asm__ __volatile__ ("isync" ::: "memory");
else if (order == memory_order_consume)
__asm__ __volatile__ ("" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public gcc_ppc_operations_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
__asm__ __volatile__
(
"stw %1, %0\n"
: "+m" (storage)
: "r" (v)
);
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v;
__asm__ __volatile__
(
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=&r" (v)
: "m" (storage)
: "cr0"
);
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
fence_before(success_order);
__asm__ __volatile__
(
"li %1, 0\n"
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 1f\n"
"stwcx. %4,%y2\n"
"bne- 1f\n"
"li %1, 1\n"
"1:"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
fence_before(success_order);
__asm__ __volatile__
(
"li %1, 0\n"
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 1f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"li %1, 1\n"
"1:"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"and %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"or %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"xor %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< >
struct operations< 1u, false > :
public operations< 4u, false >
{
typedef operations< 4u, false > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
};
template< >
struct operations< 1u, true > :
public operations< 4u, true >
{
typedef operations< 4u, true > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"extsb %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"extsb %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
};
template< >
struct operations< 2u, false > :
public operations< 4u, false >
{
typedef operations< 4u, false > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xffff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xffff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
};
template< >
struct operations< 2u, true > :
public operations< 4u, true >
{
typedef operations< 4u, true > base_type;
typedef base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"extsh %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"extsh %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
};
#if defined(__powerpc64__)
template< bool Signed >
struct operations< 8u, Signed > :
public gcc_ppc_operations_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
__asm__ __volatile__
(
"std %1, %0\n"
: "+m" (storage)
: "r" (v)
);
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v;
__asm__ __volatile__
(
"ld %0, %1\n"
"cmpd %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=&b" (v)
: "m" (storage)
: "cr0"
);
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldarx %0,%y1\n"
"stdcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
fence_before(success_order);
__asm__ __volatile__
(
"li %1, 0\n"
"ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 1f\n"
"stdcx. %4,%y2\n"
"bne- 1f\n"
"li %1, 1\n"
"1:"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
fence_before(success_order);
__asm__ __volatile__
(
"li %1, 0\n"
"0: ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 1f\n"
"stdcx. %4,%y2\n"
"bne- 0b\n"
"li %1, 1\n"
"1:"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldarx %0,%y2\n"
"add %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldarx %0,%y2\n"
"sub %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldarx %0,%y2\n"
"and %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldarx %0,%y2\n"
"or %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n"
"ldarx %0,%y2\n"
"xor %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: "cc"
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
#endif // defined(__powerpc64__)
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_acquire:
__asm__ __volatile__ ("isync" ::: "memory");
break;
case memory_order_release:
#if defined(__powerpc64__)
__asm__ __volatile__ ("lwsync" ::: "memory");
break;
#endif
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("sync" ::: "memory");
default:;
}
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_

View File

@@ -0,0 +1,245 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2010 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_sparc.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
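// A short reminder of the SPARC V9 membar masks used in this file: #LoadLoad orders
// earlier loads before later loads, #LoadStore orders earlier loads before later stores,
// #StoreStore orders earlier stores before later stores, and #Sync is a full barrier.
// Thus "#StoreStore | #LoadStore" serves as a release fence before a store, and
// "#LoadLoad | #LoadStore" as an acquire fence after a load.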
struct gcc_sparc_cas_base
{
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
else if ((order & memory_order_release) != 0)
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
else if ((order & memory_order_acquire) != 0)
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
}
};
template< bool Signed >
struct gcc_sparc_cas32 :
public gcc_sparc_cas_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
storage_type previous = expected;
__asm__ __volatile__
(
"cas [%1], %2, %0"
: "+r" (desired)
: "r" (&storage), "r" (previous)
: "memory"
);
const bool success = (desired == previous);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = desired;
return success;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public cas_based_operations< gcc_sparc_cas32< Signed > >
{
typedef cas_based_operations< gcc_sparc_cas32< Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm__ __volatile__
(
"swap [%1], %0"
: "+r" (v)
: "r" (&storage)
: "memory"
);
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
};
template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
{
};
template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
{
};
template< bool Signed >
struct gcc_sparc_cas64 :
public gcc_sparc_cas_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
storage_type previous = expected;
__asm__ __volatile__
(
"casx [%1], %2, %0"
: "+r" (desired)
: "r" (&storage), "r" (previous)
: "memory"
);
const bool success = (desired == previous);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = desired;
return success;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 8u, Signed > :
public cas_based_operations< gcc_sparc_cas64< Signed > >
{
};
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_release:
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
break;
case memory_order_acquire:
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
break;
case memory_order_acq_rel:
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
break;
case memory_order_seq_cst:
__asm__ __volatile__ ("membar #Sync" ::: "memory");
break;
case memory_order_consume:
case memory_order_relaxed:
default:
break;
}
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_

View File

@@ -0,0 +1,237 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_sync.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
struct gcc_sync_operations_base
{
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_release) != 0)
__sync_synchronize();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__sync_synchronize();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
if ((order & (memory_order_acquire | memory_order_consume)) != 0)
__sync_synchronize();
}
};
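// Note: the __sync_* builtins used below act as full barriers regardless of the requested
// ordering (except __sync_lock_test_and_set, which is only an acquire barrier, and
// __sync_lock_release, which is a release barrier). This is why the read-modify-write
// operations can ignore their memory_order argument, while plain loads, stores and the
// lock operations add explicit fences.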
template< typename T >
struct gcc_sync_operations :
public gcc_sync_operations_base
{
typedef T storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before_store(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_add(&storage, v);
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_sub(&storage, v);
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
// GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
// std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures turn out not to support this, we can always
// add a check here and fall back to a CAS loop.
if ((order & memory_order_release) != 0)
__sync_synchronize();
return __sync_lock_test_and_set(&storage, v);
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type expected2 = expected;
storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);
if (old_val == expected2)
{
return true;
}
else
{
expected = old_val;
return false;
}
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_and(&storage, v);
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_or(&storage, v);
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_xor(&storage, v);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_release) != 0)
__sync_synchronize();
return !!__sync_lock_test_and_set(&storage, 1);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
__sync_lock_release(&storage);
if (order == memory_order_seq_cst)
__sync_synchronize();
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
template< bool Signed >
struct operations< 1u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
public gcc_sync_operations< typename make_storage_type< 1u, Signed >::type >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
#else
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
template< bool Signed >
struct operations< 2u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
public gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
#else
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
template< bool Signed >
struct operations< 4u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
public gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
#else
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
template< bool Signed >
struct operations< 8u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >
#else
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
template< bool Signed >
struct operations< 16u, Signed > :
public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >
{
};
#endif
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__sync_synchronize();
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_

View File

@@ -0,0 +1,510 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_x86.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__x86_64__)
#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "rdx"
#else
#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "edx"
#endif
namespace boost {
namespace atomics {
namespace detail {
struct gcc_x86_operations_base
{
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_release) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_acquire) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
};
template< typename T, typename Derived >
struct gcc_x86_operations :
public gcc_x86_operations_base
{
typedef T storage_type;
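// Note on store() below: under the x86 memory model a plain mov combined with a compiler
// barrier is enough for relaxed and release stores, but a seq_cst store also needs a full
// hardware fence. Delegating that case to Derived::exchange() achieves this, because the
// xchg-based exchange is an implicitly locked instruction and therefore a full barrier,
// which avoids issuing a separate mfence.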
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
fence_before(order);
storage = v;
fence_after(order);
}
else
{
Derived::exchange(storage, v, order);
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return Derived::fetch_add(storage, -v, order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 1u, Signed > :
public gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
{
typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"lock; xaddb %0, %1"
: "+q" (v), "+m" (storage)
:
: "cc", "memory"
);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"xchgb %0, %1"
: "+q" (v), "+m" (storage)
:
: "memory"
);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
bool success;
__asm__ __volatile__
(
"lock; cmpxchgb %3, %1\n\t"
"sete %2"
: "+a" (previous), "+m" (storage), "=q" (success)
: "q" (desired)
: "cc", "memory"
);
expected = previous;
return success;
}
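// The macro below emulates fetch_and/fetch_or/fetch_xor with a compare-and-swap loop:
// al/ax/eax holds the current value (reloaded by cmpxchg on failure), dl/dx/edx receives
// the argument combined with the current value via "op", and the loop retries until
// cmpxchg succeeds. As an illustration only, the loop is equivalent to:
//   res = storage;
//   do { tmp = res op arg; } while (!compare_exchange(storage, res, tmp));
//   return res;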
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
__asm__ __volatile__\
(\
"xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
"1: movb %[arg], %%dl\n\t"\
op " %%al, %%dl\n\t"\
"lock; cmpxchgb %%dl, %[storage]\n\t"\
"jne 1b"\
: [res] "+a" (result), [storage] "+m" (storage)\
: [arg] "q" (argument)\
: "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
return res;
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
template< bool Signed >
struct operations< 2u, Signed > :
public gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
{
typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"lock; xaddw %0, %1"
: "+q" (v), "+m" (storage)
:
: "cc", "memory"
);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"xchgw %0, %1"
: "+q" (v), "+m" (storage)
:
: "memory"
);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
bool success;
__asm__ __volatile__
(
"lock; cmpxchgw %3, %1\n\t"
"sete %2"
: "+a" (previous), "+m" (storage), "=q" (success)
: "q" (desired)
: "cc", "memory"
);
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
__asm__ __volatile__\
(\
"xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
"1: movw %[arg], %%dx\n\t"\
op " %%ax, %%dx\n\t"\
"lock; cmpxchgw %%dx, %[storage]\n\t"\
"jne 1b"\
: [res] "+a" (result), [storage] "+m" (storage)\
: [arg] "q" (argument)\
: "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
return res;
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
template< bool Signed >
struct operations< 4u, Signed > :
public gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
{
typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"lock; xaddl %0, %1"
: "+r" (v), "+m" (storage)
:
: "cc", "memory"
);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"xchgl %0, %1"
: "+r" (v), "+m" (storage)
:
: "memory"
);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
bool success;
__asm__ __volatile__
(
"lock; cmpxchgl %3, %1\n\t"
"sete %2"
: "+a" (previous), "+m" (storage), "=q" (success)
: "r" (desired)
: "cc", "memory"
);
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
__asm__ __volatile__\
(\
"xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
"1: movl %[arg], %%edx\n\t"\
op " %%eax, %%edx\n\t"\
"lock; cmpxchgl %%edx, %[storage]\n\t"\
"jne 1b"\
: [res] "+a" (result), [storage] "+m" (storage)\
: [arg] "r" (argument)\
: "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
return res;
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
template< bool Signed >
struct operations< 8u, Signed > :
public cas_based_operations< gcc_dcas_x86< Signed > >
{
};
#elif defined(__x86_64__)
template< bool Signed >
struct operations< 8u, Signed > :
public gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
{
typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"lock; xaddq %0, %1"
: "+r" (v), "+m" (storage)
:
: "cc", "memory"
);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
(
"xchgq %0, %1"
: "+r" (v), "+m" (storage)
:
: "memory"
);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
bool success;
__asm__ __volatile__
(
"lock; cmpxchgq %3, %1\n\t"
"sete %2"
: "+a" (previous), "+m" (storage), "=q" (success)
: "r" (desired)
: "cc", "memory"
);
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
__asm__ __volatile__\
(\
"xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
"1: movq %[arg], %%rdx\n\t"\
op " %%rax, %%rdx\n\t"\
"lock; cmpxchgq %%rdx, %[storage]\n\t"\
"jne 1b"\
: [res] "+a" (result), [storage] "+m" (storage)\
: [arg] "r" (argument)\
: "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
return res;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type res = storage;
BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
return res;
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
template< bool Signed >
struct operations< 16u, Signed > :
public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
};
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
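// Full fence for thread_fence(memory_order_seq_cst): mfence where SSE2 is known to be
// available, otherwise a lock-prefixed read-modify-write on the stack, which also acts
// as a full barrier on x86 and is available on all IA-32 processors.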
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
{
__asm__ __volatile__
(
#if defined(__x86_64__) || defined(__SSE2__)
"mfence\n"
#else
"lock; addl $0, (%%esp)\n"
#endif
::: "memory"
);
}
else if ((order & (memory_order_acquire | memory_order_release)) != 0)
{
__asm__ __volatile__ ("" ::: "memory");
}
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#undef BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_

View File

@@ -0,0 +1,308 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_x86_dcas.hpp
*
* This header contains implementation of the double-width CAS primitive for x86.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
template< bool Signed >
struct gcc_dcas_x86
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
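// The store/load implementations below rely on aligned 8-byte SSE2 or x87 moves being
// performed atomically on CPUs that support cmpxchg8b, hence the runtime alignment check.
// For storage that is not 8-byte aligned the code falls back to a cmpxchg8b loop (store)
// or a single cmpxchg8b (load).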
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
if ((((uint32_t)&storage) & 0x00000007) == 0)
{
#if defined(__SSE2__)
__asm__ __volatile__
(
#if defined(__AVX__)
"vmovq %1, %%xmm4\n\t"
"vmovq %%xmm4, %0\n\t"
#else
"movq %1, %%xmm4\n\t"
"movq %%xmm4, %0\n\t"
#endif
: "=m" (storage)
: "m" (v)
: "memory", "xmm4"
);
#else
__asm__ __volatile__
(
"fildll %1\n\t"
"fistpll %0\n\t"
: "=m" (storage)
: "m" (v)
: "memory"
);
#endif
}
else
{
#if defined(__PIC__)
uint32_t scratch;
__asm__ __volatile__
(
"movl %%ebx, %[scratch]\n\t"
"movl %[value_lo], %%ebx\n\t"
"movl 0(%[dest]), %%eax\n\t"
"movl 4(%[dest]), %%edx\n\t"
".align 16\n\t"
"1: lock; cmpxchg8b 0(%[dest])\n\t"
"jne 1b\n\t"
"movl %[scratch], %%ebx"
: [scratch] "=m,m" (scratch)
: [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
: "cc", "edx", "memory"
);
#else
__asm__ __volatile__
(
"movl 0(%[dest]), %%eax\n\t"
"movl 4(%[dest]), %%edx\n\t"
".align 16\n\t"
"1: lock; cmpxchg8b 0(%[dest])\n\t"
"jne 1b\n\t"
:
: [value_lo] "b,b" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
: "cc", "eax", "edx", "memory"
);
#endif
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
storage_type value;
if ((((uint32_t)&storage) & 0x00000007) == 0)
{
#if defined(__SSE2__)
__asm__ __volatile__
(
#if defined(__AVX__)
"vmovq %1, %%xmm4\n\t"
"vmovq %%xmm4, %0\n\t"
#else
"movq %1, %%xmm4\n\t"
"movq %%xmm4, %0\n\t"
#endif
: "=m" (value)
: "m" (storage)
: "memory", "xmm4"
);
#else
__asm__ __volatile__
(
"fildll %1\n\t"
"fistpll %0\n\t"
: "=m" (value)
: "m" (storage)
: "memory"
);
#endif
}
else
{
#if defined(__clang__)
// Clang cannot allocate eax:edx register pairs but it has sync intrinsics
value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0);
#else
// We don't care about the comparison result here; the previous value will be stored into value anyway.
// Also we don't care about the ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.
__asm__ __volatile__
(
"movl %%ebx, %%eax\n\t"
"movl %%ecx, %%edx\n\t"
"lock; cmpxchg8b %[storage]"
: "=&A" (value)
: [storage] "m" (storage)
: "cc", "memory"
);
#endif
}
return value;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
#if defined(__clang__)
// Clang cannot allocate eax:edx register pairs but it has sync intrinsics
storage_type old_expected = expected;
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
#elif defined(__PIC__)
// Make sure ebx is saved and restored properly in case
// of position independent code. To make this work
// setup register constraints such that ebx can not be
// used by accident e.g. as base address for the variable
// to be modified. Accessing "scratch" should always be okay,
// as it can only be placed on the stack (and therefore
// accessed through ebp or esp only).
//
// In theory, we could push/pop ebx onto/off the stack, but moves
// to a prepared stack slot turn out to be faster.
uint32_t scratch;
bool success;
__asm__ __volatile__
(
"movl %%ebx, %[scratch]\n\t"
"movl %[desired_lo], %%ebx\n\t"
"lock; cmpxchg8b %[dest]\n\t"
"movl %[scratch], %%ebx\n\t"
"sete %[success]"
: "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
: [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32))
: "cc", "memory"
);
return success;
#else
bool success;
__asm__ __volatile__
(
"lock; cmpxchg8b %[dest]\n\t"
"sete %[success]"
: "+A,A" (expected), [dest] "+m,m" (storage), [scratch] "=m,m" (scratch), [success] "=q,m" (success)
: "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32))
: "cc", "memory"
);
return success;
#endif
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
template< bool Signed >
struct gcc_dcas_x86_64
{
typedef typename make_storage_type< 16u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
uint64_t const* p_value = (uint64_t const*)&v;
__asm__ __volatile__
(
"movq 0(%[dest]), %%rax\n\t"
"movq 8(%[dest]), %%rdx\n\t"
".align 16\n\t"
"1: lock; cmpxchg16b 0(%[dest])\n\t"
"jne 1b"
:
: "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage)
: "cc", "rax", "rdx", "memory"
);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
#if defined(__clang__)
// Clang cannot allocate rax:rdx register pairs but it has sync intrinsics
storage_type value = storage_type();
return __sync_val_compare_and_swap(&storage, value, value);
#else
storage_type value;
// We don't care about the comparison result here; the previous value will be stored into value anyway.
// Also we don't care about the rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.
__asm__ __volatile__
(
"movq %%rbx, %%rax\n\t"
"movq %%rcx, %%rdx\n\t"
"lock; cmpxchg16b %[storage]"
: "=&A" (value)
: [storage] "m" (storage)
: "cc", "memory"
);
return value;
#endif
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
#if defined(__clang__)
// Clang cannot allocate rax:rdx register pairs but it has sync intrinsics
storage_type old_expected = expected;
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
#else
uint64_t const* p_desired = (uint64_t const*)&desired;
bool success;
__asm__ __volatile__
(
"lock; cmpxchg16b %[dest]\n\t"
"sete %[success]"
: "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
: "b,b" (p_desired[0]), "c,c" (p_desired[1])
: "cc", "memory"
);
return success;
#endif
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_

View File

@@ -0,0 +1,177 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009, 2011 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* Linux-specific code by Phil Endecott
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_linux_arm.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
// Different ARM processors have different atomic instructions. In particular,
// architecture versions before v6 (which are still in widespread use, e.g. the
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
// On Linux the kernel provides some support that lets us abstract away from
// these differences: it provides emulated CAS and barrier functions at special
// addresses that are guaranteed not to be interrupted by the kernel. Using
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
// return the found value on failure, and we have to manually load this value
// after the emulated CAS reports failure. This in turn introduces a race
// between the CAS failing (due to the "wrong" value being found) and subsequently
// loading (which might turn up the "right" value). From an application's
// point of view this looks like "spurious failure", and therefore the
// emulated CAS is only good enough to provide compare_exchange_weak
// semantics.
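// The magic addresses used below are the fixed entry points of the Linux kernel's ARM
// "user helpers" page mapped at the top of the address space: 0xffff0fa0 is the memory
// barrier helper and 0xffff0fc0 is the cmpxchg helper (see the kernel's
// Documentation/arm/kernel_user_helpers.txt). The cmpxchg helper returns zero on success
// and non-zero on failure, which is what the calls below test for.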
struct linux_arm_cas_base
{
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_release) != 0)
hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
if ((order & memory_order_acquire) != 0)
hardware_full_fence();
}
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
typedef void (*kernel_dmb_t)(void);
((kernel_dmb_t)0xffff0fa0)();
}
};
template< bool Signed >
struct linux_arm_cas :
public linux_arm_cas_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before_store(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
while (true)
{
storage_type tmp = expected;
if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))
return true;
if (tmp != expected)
{
expected = tmp;
return false;
}
}
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr);
if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0)
{
return true;
}
else
{
expected = storage;
return false;
}
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
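// 8- and 16-bit atomics have no kernel helper of their own; they are emulated by widening
// the value to the 32-bit kernel-assisted CAS above (see extending_cas_based_operations).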
template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 1u, Signed >
{
};
template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 2u, Signed >
{
};
template< bool Signed >
struct operations< 4u, Signed > :
public cas_based_operations< linux_arm_cas< Signed > >
{
};
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & (memory_order_acquire | memory_order_release)) != 0)
linux_arm_cas_base::hardware_full_fence();
}
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if ((order & ~memory_order_consume) != 0)
__asm__ __volatile__ ("" ::: "memory");
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_

View File

@@ -0,0 +1,837 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_msvc_arm.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
#include <intrin.h>
#include <boost/memory_order.hpp>
#include <boost/type_traits/make_signed.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v))
#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v))
#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v))
#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v))
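// The __iso_volatile_* intrinsics perform plain volatile loads/stores without the
// acquire/release semantics MSVC would otherwise add under /volatile:ms on ARM;
// the required ordering is added explicitly with fences around them below.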
namespace boost {
namespace atomics {
namespace detail {
struct msvc_arm_operations_base
{
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
__dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
}
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if ((order & memory_order_release) != 0)
hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
hardware_full_fence();
}
static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
// Combine order flags together and transform memory_order_consume to memory_order_acquire
return static_cast< memory_order >(((failure_order | success_order) & ~memory_order_consume) | (((failure_order | success_order) & memory_order_consume) >> 3u));
}
};
template< typename T, typename Derived >
struct msvc_arm_operations :
public msvc_arm_operations_base
{
typedef T storage_type;
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
typedef typename make_signed< storage_type >::type signed_storage_type;
return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
Derived::store(storage, (storage_type)0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 1u, Signed > :
public msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
{
typedef msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v);
base_type::fence_after_store(order);
}
else
{
exchange(storage, v, order);
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
break;
}
return v;
}
};
template< bool Signed >
struct operations< 2u, Signed > :
public msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
{
typedef msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v);
base_type::fence_after_store(order);
}
else
{
exchange(storage, v, order);
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
break;
}
return v;
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
{
typedef msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v);
base_type::fence_after_store(order);
}
else
{
exchange(storage, v, order);
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
break;
}
return v;
}
};
template< bool Signed >
struct operations< 8u, Signed > :
public msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
{
typedef msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v);
base_type::fence_after_store(order);
}
else
{
exchange(storage, v, order);
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
break;
}
return v;
}
};
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
msvc_arm_operations_base::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
} // namespace detail
} // namespace atomics
} // namespace boost
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD32
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64
#undef BOOST_ATOMIC_DETAIL_ARM_STORE8
#undef BOOST_ATOMIC_DETAIL_ARM_STORE16
#undef BOOST_ATOMIC_DETAIL_ARM_STORE32
#undef BOOST_ATOMIC_DETAIL_ARM_STORE64
#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_

View File

@@ -0,0 +1,38 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_msvc_common.hpp
*
* This header contains common tools for MSVC implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
// Define compiler barriers
#if defined(__INTEL_COMPILER)
#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __memory_barrier()
#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
extern "C" void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadWriteBarrier)
#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() _ReadWriteBarrier()
#endif
#ifndef BOOST_ATOMIC_DETAIL_COMPILER_BARRIER
#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER()
#endif
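// Both __memory_barrier() and _ReadWriteBarrier() are compiler-only barriers: they prevent
// the compiler from reordering memory accesses across them but emit no fence instructions.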
#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_

View File

@@ -0,0 +1,820 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_msvc_x86.hpp
*
* This header contains implementation of the \c operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/type_traits/make_signed.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif
#include <boost/atomic/detail/ops_msvc_common.hpp>
#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
struct msvc_x86_operations_base
{
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
// Use mfence only if SSE2 is available
_mm_mfence();
#else
long tmp;
BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
#endif
}
static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
// On x86 and x86_64 there is no need for a hardware barrier,
// even if seq_cst memory order is requested, because all
// seq_cst writes are implemented with lock-prefixed operations
// or xchg which has implied lock prefix. Therefore normal loads
// are already ordered with seq_cst stores on these architectures.
}
};
template< typename T, typename Derived >
struct msvc_x86_operations :
public msvc_x86_operations_base
{
typedef T storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
fence_before(order);
storage = v;
fence_after(order);
}
else
{
Derived::exchange(storage, v, order);
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
typedef typename make_signed< storage_type >::type signed_storage_type;
return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
{
typedef msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
}
#else
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
return res;
}
#endif
#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
}
#else
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
return res;
}
#endif
#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
}
#else
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
return res;
}
#endif
};
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)
template< bool Signed >
struct operations< 1u, Signed > :
public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
{
typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
}
};
#elif defined(_M_IX86)
template< bool Signed >
struct operations< 1u, Signed > :
public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
{
typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
lock xadd byte ptr [edx], al
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
xchg byte ptr [edx], al
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
{
base_type::fence_before(success_order);
bool success;
__asm
{
mov esi, expected
mov edi, storage
movzx eax, byte ptr [esi]
movzx edx, desired
lock cmpxchg byte ptr [edi], dl
mov byte ptr [esi], al
sete success
};
// The success and failure fences are equivalent anyway
base_type::fence_after(success_order);
return success;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
xor edx, edx
mov edi, storage
movzx ebx, v
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
and dl, bl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
xor edx, edx
mov edi, storage
movzx ebx, v
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
or dl, bl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
xor edx, edx
mov edi, storage
movzx ebx, v
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
xor dl, bl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
};
base_type::fence_after(order);
return v;
}
};
#else
template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
{
};
#endif
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
template< bool Signed >
struct operations< 2u, Signed > :
public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
{
typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
}
};
#elif defined(_M_IX86)
template< bool Signed >
struct operations< 2u, Signed > :
public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
{
typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
lock xadd word ptr [edx], ax
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
xchg word ptr [edx], ax
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
{
base_type::fence_before(success_order);
bool success;
__asm
{
mov esi, expected
mov edi, storage
movzx eax, word ptr [esi]
movzx edx, desired
lock cmpxchg word ptr [edi], dx
mov word ptr [esi], ax
sete success
};
// The success and failure fences are equivalent anyway
base_type::fence_after(success_order);
return success;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
xor edx, edx
mov edi, storage
movzx ebx, v
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
and dx, bx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
xor edx, edx
mov edi, storage
movzx ebx, v
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
or dx, bx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
xor edx, edx
mov edi, storage
movzx ebx, v
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
xor dx, bx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
};
base_type::fence_after(order);
return v;
}
};
#else
template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
{
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
template< bool Signed >
struct msvc_dcas_x86
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
//
// The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:
// * Reading or writing a quadword aligned on a 64-bit boundary
//
// Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64-bit native types for storage and dynamic memory allocations
// have at least 8-byte alignment. The only unfortunate case is when the atomic is placed on the stack and is not 8-byte aligned (like on 32-bit Windows).
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type volatile* p = &storage;
if (((uint32_t)p & 0x00000007) == 0)
{
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
__asm
{
mov edx, p
vmovq xmm4, v
vmovq qword ptr [edx], xmm4
};
#else
__asm
{
mov edx, p
movq xmm4, v
movq qword ptr [edx], xmm4
};
#endif
#else
__asm
{
mov edx, p
fild v
fistp qword ptr [edx]
};
#endif
}
else
{
__asm
{
mov edi, p
mov ebx, dword ptr [v]
mov ecx, dword ptr [v + 4]
mov eax, dword ptr [edi]
mov edx, dword ptr [edi + 4]
align 16
again:
lock cmpxchg8b qword ptr [edi]
jne again
};
}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
storage_type const volatile* p = &storage;
storage_type value;
if (((uint32_t)p & 0x00000007) == 0)
{
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
__asm
{
mov edx, p
vmovq xmm4, qword ptr [edx]
vmovq value, xmm4
};
#else
__asm
{
mov edx, p
movq xmm4, qword ptr [edx]
movq value, xmm4
};
#endif
#else
__asm
{
mov edx, p
fild qword ptr [edx]
fistp value
};
#endif
}
else
{
// We don't care about the comparison result here; the previous value will be stored into value anyway.
// Also we don't care about the ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.
__asm
{
mov edi, p
mov eax, ebx
mov edx, ecx
lock cmpxchg8b qword ptr [edi]
mov dword ptr [value], eax
mov dword ptr [value + 4], edx
};
}
return value;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type volatile* p = &storage;
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
const bool result = (old_val == expected);
expected = old_val;
return result;
#else
bool result;
__asm
{
mov edi, p
mov esi, expected
mov ebx, dword ptr [desired]
mov ecx, dword ptr [desired + 4]
mov eax, dword ptr [esi]
mov edx, dword ptr [esi + 4]
lock cmpxchg8b qword ptr [edi]
mov dword ptr [esi], eax
mov dword ptr [esi + 4], edx
sete result
};
return result;
#endif
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 8u, Signed > :
public cas_based_operations< msvc_dcas_x86< Signed > >
{
};
#elif defined(_M_AMD64)
template< bool Signed >
struct operations< 8u, Signed > :
public msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
{
typedef msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
}
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
template< bool Signed >
struct msvc_dcas_x86_64
{
typedef typename make_storage_type< 16u, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type value = const_cast< storage_type& >(storage);
while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
storage_type value = storage_type();
BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);
return value;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 16u, Signed > :
public cas_based_operations< msvc_dcas_x86_64< Signed > >
{
};
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
msvc_x86_operations_base::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
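
The msvc_x86_operations base class above derives fetch_sub from fetch_add by adding the two's-complement negation of the operand, relying on unsigned wrap-around. A standalone sketch of that identity, purely for illustration (none of these names come from the library):

#include <cassert>
#include <cstdint>

int main()
{
    std::uint32_t storage = 10u;
    std::uint32_t v = 3u;

    // Subtracting v is the same as adding -(int)v reinterpreted as unsigned, modulo 2^32.
    std::uint32_t via_sub = storage - v;
    std::uint32_t via_add = storage + static_cast< std::uint32_t >(-static_cast< std::int32_t >(v));

    assert(via_sub == via_add); // both yield 7
    return 0;
}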

View File

@@ -0,0 +1,214 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_windows.hpp
*
 * This header contains an implementation of the \c operations template.
 *
 * This implementation is the most basic version for Windows. It should
 * work with any non-MSVC-like compiler as long as Interlocked WinAPI
 * functions are available. This version is also used for WinCE.
 *
 * Notably, this implementation is not as efficient as the other
 * versions based on compiler intrinsics.
*/
#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/type_traits/make_signed.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
struct windows_operations_base
{
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
long tmp;
BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
}
static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
};
template< typename T, typename Derived >
struct windows_operations :
public windows_operations_base
{
typedef T storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
Derived::exchange(storage, v, order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return Derived::fetch_add(storage, (storage_type)0, order);
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
typedef typename make_signed< storage_type >::type signed_storage_type;
return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
}
};
template< bool Signed >
struct operations< 4u, Signed > :
public windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
{
typedef windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
base_type::fence_before(success_order);
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
expected = old_val;
// The success and failure fences are the same anyway
base_type::fence_after(success_order);
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
base_type::fence_after(order);
return v;
#else
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
return res;
#endif
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
base_type::fence_after(order);
return v;
#else
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
return res;
#endif
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
base_type::fence_after(order);
return v;
#else
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
return res;
#endif
}
};
template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
{
};
template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
{
};
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
windows_operations_base::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
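
In windows_operations above, a plain load is expressed as a fetch_add of zero and a plain store as an exchange, so every access goes through an interlocked read-modify-write. A rough sketch of the same idea directly on the documented Win32 Interlocked API (illustrative only, not library code):

#include <windows.h>

// Emulate an atomic load/store of a LONG using only interlocked
// read-modify-write primitives, as the fallback backend does.
LONG emulated_load(volatile LONG* p)
{
    return InterlockedExchangeAdd(p, 0); // add 0, return the previous value
}

void emulated_store(volatile LONG* p, LONG v)
{
    InterlockedExchange(p, v); // discard the previous value
}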

View File

@@ -0,0 +1,43 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* (C) Copyright 2013 Tim Blechmann
* (C) Copyright 2013 Andrey Semashev
*/
#ifndef BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))
extern "C" void _mm_pause(void);
#pragma intrinsic(_mm_pause)
#endif
namespace boost {
namespace atomics {
namespace detail {
BOOST_FORCEINLINE void pause() BOOST_NOEXCEPT
{
#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))
_mm_pause();
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
__asm__ __volatile__("pause;");
#endif
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_
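
The pause() helper above is intended for the body of spin-wait loops: it maps to the x86 PAUSE instruction where available and is a no-op elsewhere. A minimal sketch of the intended usage pattern in a test-and-set spin lock (hypothetical names, built on std::atomic_flag for illustration):

#include <atomic>

#if defined(_MSC_VER)
#include <intrin.h>   // _mm_pause
#define SPIN_PAUSE() _mm_pause()
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define SPIN_PAUSE() __asm__ __volatile__("pause")
#else
#define SPIN_PAUSE() ((void)0)
#endif

// Pause between failed test_and_set attempts to reduce bus pressure while waiting.
void spin_lock(std::atomic_flag& flag)
{
    while (flag.test_and_set(std::memory_order_acquire))
        SPIN_PAUSE();
}

void spin_unlock(std::atomic_flag& flag)
{
    flag.clear(std::memory_order_release);
}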

View File

@@ -1,13 +1,19 @@
#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP
#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/platform.hpp
*
 * This header defines macros for target platform detection
*/
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Platform selection file
#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
@@ -15,56 +21,73 @@
#pragma once
#endif
// The Intel compiler does not support the __atomic* intrinsics properly, although it defines them (tested with 13.0.1 and 13.1.1 on Linux)
#if (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407) && !defined(BOOST_INTEL_CXX_VERSION))\
#if defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
#define BOOST_ATOMIC_EMULATED
#elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407))\
|| (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))
#include <boost/atomic/detail/gcc-atomic.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#include <boost/atomic/detail/gcc-x86.hpp>
#elif 0 && defined(__GNUC__) && defined(__alpha__) /* currently does not work correctly */
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/gcc-alpha.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
#include <boost/atomic/detail/gcc-ppc.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
// This list of ARM architecture versions comes from Apple's arm/arch.h header.
// I don't know how complete it is.
#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6K__) \
|| defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
|| defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__))
#elif defined(__GNUC__) &&\
(\
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\
defined(__ARM_ARCH_6ZK__) ||\
defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\
)
#include <boost/atomic/detail/gcc-armv6plus.hpp>
#elif defined(__linux__) && defined(__arm__)
#include <boost/atomic/detail/linux-arm.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_arm
#elif defined(__GNUC__) && defined(__sparc_v9__)
#include <boost/atomic/detail/gcc-sparcv9.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc
#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
#elif defined(__GNUC__) && defined(__alpha__)
#include <boost/atomic/detail/windows.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha
#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401)
#include <boost/atomic/detail/gcc-cas.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync
#elif defined(__linux__) && defined(__arm__)
#define BOOST_ATOMIC_DETAIL_PLATFORM linux_arm
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
#elif defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM)
#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm
#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
#define BOOST_ATOMIC_DETAIL_PLATFORM windows
#else
#include <boost/atomic/detail/base.hpp>
#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
#define BOOST_ATOMIC_EMULATED
#endif
#endif
#define BOOST_ATOMIC_DETAIL_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_PLATFORM).hpp>
#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
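
The header ends by defining BOOST_ATOMIC_DETAIL_HEADER, which pastes a caller-supplied prefix together with the detected BOOST_ATOMIC_DETAIL_PLATFORM token to form an include path. A hedged sketch of how such a dispatch macro would be consumed (the ops_ prefix and the including header are assumptions for illustration, not taken from this diff):

// Hypothetical dispatching header:
#include <boost/atomic/detail/platform.hpp>

// On an MSVC x86 build BOOST_ATOMIC_DETAIL_PLATFORM is msvc_x86, so the line
// below would expand to #include <boost/atomic/detail/ops_msvc_x86.hpp>.
#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_)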

View File

@@ -0,0 +1,168 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2013 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/storage_type.hpp
*
 * This header defines the underlying types used as storage
*/
#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
#include <cstring>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< unsigned int Size >
struct buffer_storage
{
unsigned char data[Size];
BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
{
bool result = true;
for (unsigned int i = 0; i < Size && result; ++i)
{
result &= data[i] == 0;
}
return result;
}
BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT
{
return std::memcmp(data, that.data, Size) == 0;
}
BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT
{
return std::memcmp(data, that.data, Size) != 0;
}
};
template< unsigned int Size, bool Signed >
struct make_storage_type
{
typedef buffer_storage< Size > type;
};
template< >
struct make_storage_type< 1u, false >
{
typedef boost::uint8_t type;
};
template< >
struct make_storage_type< 1u, true >
{
typedef boost::int8_t type;
};
template< >
struct make_storage_type< 2u, false >
{
typedef boost::uint16_t type;
};
template< >
struct make_storage_type< 2u, true >
{
typedef boost::int16_t type;
};
template< >
struct make_storage_type< 4u, false >
{
typedef boost::uint32_t type;
};
template< >
struct make_storage_type< 4u, true >
{
typedef boost::int32_t type;
};
template< >
struct make_storage_type< 8u, false >
{
typedef boost::uint64_t type;
};
template< >
struct make_storage_type< 8u, true >
{
typedef boost::int64_t type;
};
#if defined(BOOST_HAS_INT128)
template< >
struct make_storage_type< 16u, false >
{
typedef boost::uint128_type type;
};
template< >
struct make_storage_type< 16u, true >
{
typedef boost::int128_type type;
};
#elif !defined(BOOST_NO_ALIGNMENT)
struct BOOST_ALIGNMENT(16) storage128_t
{
boost::uint64_t data[2];
BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
{
return data[0] == 0 && data[1] == 0;
}
};
BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
{
return left.data[0] == right.data[0] && left.data[1] == right.data[1];
}
BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
{
return !(left == right);
}
template< bool Signed >
struct make_storage_type< 16u, Signed >
{
typedef storage128_t type;
};
#endif
template< typename T >
struct storage_size_of
{
enum _
{
size = sizeof(T),
value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 16 : size)))
};
};
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
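
storage_size_of rounds an arbitrary object size up to the next supported storage width (3 becomes 4, 5..7 become 8, 9..15 become 16), and make_storage_type then selects a matching integer or buffer type. A small compile-time sketch of that mapping (illustrative; assumes a C++11 compiler for static_assert and the usual <boost/atomic/detail/...> include path):

#include <boost/atomic/detail/storage_type.hpp>

namespace ad = boost::atomics::detail;

// A 3-byte payload gets padded up to a 4-byte storage unit.
struct rgb { unsigned char r, g, b; };

static_assert(ad::storage_size_of< rgb >::value == 4, "3 bytes round up to 4");
static_assert(ad::storage_size_of< double >::value == 8, "8 bytes stay 8");
static_assert(sizeof(ad::make_storage_type< 4u, false >::type) == 4, "32-bit unsigned storage");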

View File

@@ -1,45 +0,0 @@
#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/config.hpp>
#include <boost/type_traits/is_integral.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template<typename T, bool IsInt = boost::is_integral<T>::value>
struct classify
{
typedef void type;
};
template<typename T>
struct classify<T, true> {typedef int type;};
template<typename T>
struct classify<T*, false> {typedef void* type;};
template<typename T>
struct storage_size_of
{
enum _
{
size = sizeof(T),
value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 16 : size)))
};
};
}}}
#endif

View File

@@ -0,0 +1,46 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2013 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/union_cast.hpp
*
* This header defines \c union_cast used to convert between storage and value types
*/
#ifndef BOOST_ATOMIC_DETAIL_UNION_CAST_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_UNION_CAST_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename To, typename From >
BOOST_FORCEINLINE To union_cast(From const& from) BOOST_NOEXCEPT
{
union
{
To as_to;
From as_from;
}
caster = {};
caster.as_from = from;
return caster.as_to;
}
} // namespace detail
} // namespace atomics
} // namespace boost
#endif // BOOST_ATOMIC_DETAIL_UNION_CAST_HPP_INCLUDED_
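
union_cast reinterprets a value of one type as the bit pattern of another through a union, which is how the library shuttles user values in and out of the raw storage types above. A hedged usage sketch (detail-namespace utilities are not public API; this is for illustration only):

#include <cstdint>
#include <boost/atomic/detail/union_cast.hpp>

int main()
{
    // View the bit pattern of a float as a 32-bit storage word and back.
    float f = 1.5f;
    std::uint32_t bits = boost::atomics::detail::union_cast< std::uint32_t >(f);
    float back = boost::atomics::detail::union_cast< float >(bits);
    return back == f ? 0 : 1;
}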

File diff suppressed because it is too large

View File

@@ -0,0 +1,67 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/fences.hpp
*
* This header contains definition of \c atomic_thread_fence and \c atomic_signal_fence functions.
*/
#ifndef BOOST_ATOMIC_FENCES_HPP_INCLUDED_
#define BOOST_ATOMIC_FENCES_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/operations.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
/*
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
* see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
*/
namespace boost {
namespace atomics {
#if BOOST_ATOMIC_THREAD_FENCE > 0
BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) BOOST_NOEXCEPT
{
detail::thread_fence(order);
}
#else
BOOST_FORCEINLINE void atomic_thread_fence(memory_order) BOOST_NOEXCEPT
{
detail::lockpool::thread_fence();
}
#endif
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) BOOST_NOEXCEPT
{
detail::signal_fence(order);
}
#else
BOOST_FORCEINLINE void atomic_signal_fence(memory_order) BOOST_NOEXCEPT
{
detail::lockpool::signal_fence();
}
#endif
} // namespace atomics
using atomics::atomic_thread_fence;
using atomics::atomic_signal_fence;
} // namespace boost
#endif // BOOST_ATOMIC_FENCES_HPP_INCLUDED_
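
atomic_thread_fence lets relaxed atomic accesses be paired with standalone fences instead of per-operation ordering constraints. A minimal producer/consumer sketch using the interface declared above (illustrative; assumes <boost/atomic.hpp> pulls in the fence declarations):

#include <boost/atomic.hpp>

int data = 0;
boost::atomic< bool > ready(false);

void producer()
{
    data = 42;                                          // ordinary write
    boost::atomic_thread_fence(boost::memory_order_release);
    ready.store(true, boost::memory_order_relaxed);     // publish the flag
}

void consumer()
{
    while (!ready.load(boost::memory_order_relaxed)) {} // spin until published
    boost::atomic_thread_fence(boost::memory_order_acquire);
    (void)data;                                         // guaranteed to observe 42 here
}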

View File

@@ -1,22 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013-2014 Andrey Semashev
*/
/*!
* \file lockpool.cpp
*
 * This file contains the implementation of the lockpool used to emulate atomic operations.
*/
#include <cstddef>
#include <boost/config.hpp>
#include <boost/assert.hpp>
#include <boost/atomic.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>
#if !defined(BOOST_ATOMIC_FLAG_LOCK_FREE) || BOOST_ATOMIC_FLAG_LOCK_FREE != 2
#if !defined(BOOST_HAS_PTHREADS)
#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#include <boost/atomic/detail/operations_lockfree.hpp>
#elif !defined(BOOST_HAS_PTHREADS)
#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
#endif
#else
#include <pthread.h>
#define BOOST_ATOMIC_USE_PTHREAD
#endif
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2013 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/detail/pause.hpp>
namespace boost {
namespace atomics {
@@ -43,7 +55,8 @@ struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
#if defined(BOOST_ATOMIC_USE_PTHREAD)
typedef pthread_mutex_t lock_type;
#else
typedef lockpool::lock_type lock_type;
typedef atomics::detail::operations< 1u, false > operations;
typedef operations::storage_type lock_type;
#endif
lock_type lock;
@@ -54,7 +67,7 @@ struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
padding< padding_size > pad;
};
static padded_lock lock_pool_[41]
static padded_lock g_lock_pool[41]
#if defined(BOOST_ATOMIC_USE_PTHREAD)
=
{
@@ -77,27 +90,55 @@ static padded_lock lock_pool_[41]
#if !defined(BOOST_ATOMIC_USE_PTHREAD)
// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr)
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
std::size_t index = reinterpret_cast< std::size_t >(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
return lock_pool_[index].lock;
while (padded_lock::operations::test_and_set(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_acquire))
{
atomics::detail::pause();
}
}
BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
padded_lock::operations::clear(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_release);
}
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
#else // !defined(BOOST_ATOMIC_USE_PTHREAD)
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) :
lock_(&lock_pool_[reinterpret_cast< std::size_t >(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_))].lock)
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(lock_)) == 0);
BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}
BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock()
BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(lock_)) == 0);
BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}
#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
{
#if BOOST_ATOMIC_THREAD_FENCE > 0
atomics::detail::thread_fence(memory_order_seq_cst);
#else
// Emulate full fence by locking/unlocking a mutex
scoped_lock lock(0);
#endif
}
BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
{
// This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
atomics::detail::signal_fence(memory_order_seq_cst);
#endif
}
} // namespace detail
} // namespace atomics
} // namespace boost
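
The lock pool maps each atomic object's address onto one of 41 padded locks, so unrelated objects rarely contend and the emulation needs only a fixed amount of memory. A simplified standalone sketch of that address-to-lock mapping (illustrative only; it uses std::mutex rather than the pthread or spinlock variants above):

#include <cstddef>
#include <mutex>

// Fixed table of locks indexed by object address.
static std::mutex g_lock_pool[41];

static std::mutex& lock_for(const volatile void* addr)
{
    // Hash the address into the table; objects sharing a slot simply share a lock.
    std::size_t index = reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool));
    return g_lock_pool[index];
}

template< typename T >
T emulated_load(const volatile T* p)
{
    std::lock_guard< std::mutex > guard(lock_for(p));
    return *const_cast< const T* >(p);
}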

View File

@@ -15,7 +15,11 @@ execution */
static void
test_flag_api(void)
{
#ifndef BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
boost::atomic_flag f = BOOST_ATOMIC_FLAG_INIT;
#else
boost::atomic_flag f;
#endif
BOOST_CHECK( !f.test_and_set() );
BOOST_CHECK( f.test_and_set() );

View File

@@ -44,7 +44,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B)
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
@@ -60,7 +60,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 2
#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG16B) && defined(BOOST_HAS_INT128)
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
#define EXPECT_INT128_LOCK_FREE 2
#else
#define EXPECT_INT128_LOCK_FREE 0
@@ -141,7 +141,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(_WIN64) || defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64)
#if defined(_WIN64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64)
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0