From 55f3aaadaf8dcb4692961161cd28398266cee975 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sat, 15 Dec 2012 13:24:01 +0000 Subject: [PATCH] Code cleanup. Implemented optimized atomic operations for Windows. [SVN r81969] --- include/boost/atomic/atomic.hpp | 48 +- include/boost/atomic/detail/base.hpp | 32 +- include/boost/atomic/detail/cas32strong.hpp | 39 +- include/boost/atomic/detail/cas32weak.hpp | 38 +- include/boost/atomic/detail/cas64strong.hpp | 17 +- include/boost/atomic/detail/gcc-alpha.hpp | 4 +- include/boost/atomic/detail/gcc-armv6plus.hpp | 20 +- include/boost/atomic/detail/gcc-cas.hpp | 14 +- include/boost/atomic/detail/gcc-ppc.hpp | 74 +- include/boost/atomic/detail/gcc-sparcv9.hpp | 58 +- include/boost/atomic/detail/gcc-x86.hpp | 94 +- include/boost/atomic/detail/generic-cas.hpp | 22 +- include/boost/atomic/detail/interlocked.hpp | 295 +-- include/boost/atomic/detail/linux-arm.hpp | 20 +- include/boost/atomic/detail/lockpool.hpp | 51 +- include/boost/atomic/detail/platform.hpp | 4 +- .../atomic/detail/type-classification.hpp | 45 + .../boost/atomic/detail/type-classifier.hpp | 87 - include/boost/atomic/detail/windows.hpp | 1585 +++++++++++++++++ src/lockpool.cpp | 9 +- 20 files changed, 2078 insertions(+), 478 deletions(-) create mode 100644 include/boost/atomic/detail/type-classification.hpp delete mode 100644 include/boost/atomic/detail/type-classifier.hpp create mode 100644 include/boost/atomic/detail/windows.hpp diff --git a/include/boost/atomic/atomic.hpp b/include/boost/atomic/atomic.hpp index 7efa257..a07645e 100644 --- a/include/boost/atomic/atomic.hpp +++ b/include/boost/atomic/atomic.hpp @@ -14,7 +14,7 @@ #include #include -#include +#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE @@ -67,26 +67,26 @@ namespace boost { #ifndef BOOST_ATOMIC_THREAD_FENCE #define BOOST_ATOMIC_THREAD_FENCE 0 -static inline void -atomic_thread_fence(memory_order) +inline void atomic_thread_fence(memory_order) { } #endif #ifndef BOOST_ATOMIC_SIGNAL_FENCE #define BOOST_ATOMIC_SIGNAL_FENCE 0 -static inline void -atomic_signal_fence(memory_order order) +inline void atomic_signal_fence(memory_order order) { atomic_thread_fence(order); } #endif template -class atomic : public atomics::detail::base_atomic::test, sizeof(T), boost::is_signed::value > { +class atomic : + public atomics::detail::base_atomic::type, atomics::detail::storage_size_of::value, boost::is_signed::value > +{ private: typedef T value_type; - typedef atomics::detail::base_atomic::test, sizeof(T), boost::is_signed::value > super; + typedef atomics::detail::base_atomic::type, atomics::detail::storage_size_of::value, boost::is_signed::value > super; public: atomic(void) : super() {} explicit atomic(const value_type & v) : super(v) {} @@ -124,10 +124,18 @@ typedef atomic atomic_llong; #endif typedef atomic atomic_address; typedef atomic atomic_bool; +typedef atomic atomic_wchar_t; +#if !defined(BOOST_NO_CXX11_CHAR16_T) +typedef atomic atomic_char16_t; +#endif +#if !defined(BOOST_NO_CXX11_CHAR32_T) +typedef atomic atomic_char32_t; +#endif #ifndef BOOST_ATOMIC_FLAG_LOCK_FREE #define BOOST_ATOMIC_FLAG_LOCK_FREE 0 -class atomic_flag { +class atomic_flag +{ public: atomic_flag(void) : v_(false) {} @@ -149,30 +157,6 @@ private: }; #endif -typedef atomic atomic_char; -typedef atomic atomic_uchar; -typedef atomic atomic_schar; -typedef atomic atomic_uint8_t; -typedef atomic atomic_int8_t; -typedef atomic atomic_ushort; -typedef atomic atomic_short; -typedef atomic atomic_uint16_t; -typedef atomic atomic_int16_t; -typedef 
atomic atomic_uint; -typedef atomic atomic_int; -typedef atomic atomic_uint32_t; -typedef atomic atomic_int32_t; -typedef atomic atomic_ulong; -typedef atomic atomic_long; -typedef atomic atomic_uint64_t; -typedef atomic atomic_int64_t; -#ifdef BOOST_HAS_LONG_LONG -typedef atomic atomic_ullong; -typedef atomic atomic_llong; -#endif -typedef atomic atomic_address; -typedef atomic atomic_bool; - } #endif diff --git a/include/boost/atomic/detail/base.hpp b/include/boost/atomic/detail/base.hpp index 5e7001a..b035454 100644 --- a/include/boost/atomic/detail/base.hpp +++ b/include/boost/atomic/detail/base.hpp @@ -13,6 +13,8 @@ #include +#include +#include #include #include @@ -121,7 +123,7 @@ namespace boost { namespace atomics { namespace detail { -static inline memory_order +inline memory_order calculate_failure_order(memory_order order) { switch(order) { @@ -134,7 +136,7 @@ calculate_failure_order(memory_order order) } } -template +template class base_atomic { private: typedef base_atomic this_type; @@ -145,15 +147,15 @@ public: explicit base_atomic(const value_type & v) { - memcpy(&v_, &v, Size); + memcpy(&v_, &v, sizeof(value_type)); } void - store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile + store(value_type const& v, memory_order /*order*/ = memory_order_seq_cst) volatile { guard_type guard(const_cast(v_)); - memcpy(const_cast(v_), &v, Size); + memcpy(const_cast(v_), &v, sizeof(value_type)); } value_type @@ -162,24 +164,24 @@ public: guard_type guard(const_cast(v_)); value_type v; - memcpy(&v, const_cast(v_), Size); + memcpy(&v, const_cast(v_), sizeof(value_type)); return v; } bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order /*success_order*/, memory_order /*failure_order*/) volatile { guard_type guard(const_cast(v_)); - if (memcmp(const_cast(v_), &expected, Size) == 0) { - memcpy(const_cast(v_), &desired, Size); + if (memcmp(const_cast(v_), &expected, sizeof(value_type)) == 0) { + memcpy(const_cast(v_), &desired, sizeof(value_type)); return true; } else { - memcpy(&expected, const_cast(v_), Size); + memcpy(&expected, const_cast(v_), sizeof(value_type)); return false; } } @@ -187,7 +189,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -195,14 +197,14 @@ public: } value_type - exchange(value_type v, memory_order /*order*/=memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order /*order*/=memory_order_seq_cst) volatile { guard_type guard(const_cast(v_)); value_type tmp; - memcpy(&tmp, const_cast(v_), Size); + memcpy(&tmp, const_cast(v_), sizeof(value_type)); - memcpy(const_cast(v_), &v, Size); + memcpy(const_cast(v_), &v, sizeof(value_type)); return tmp; } @@ -217,7 +219,7 @@ private: base_atomic(const base_atomic &) /* = delete */ ; void operator=(const base_atomic &) /* = delete */ ; - char v_[Size]; + char v_[sizeof(value_type)]; }; template diff --git a/include/boost/atomic/detail/cas32strong.hpp b/include/boost/atomic/detail/cas32strong.hpp index 3bfa557..2cbfaab 100644 --- a/include/boost/atomic/detail/cas32strong.hpp +++ b/include/boost/atomic/detail/cas32strong.hpp @@ -10,6 +10,8 @@ // Build 8-, 16- and 32-bit atomic operations from // a platform_cmpxchg32_strong primitive. 
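The header comment above is the key to this file: every operation is synthesized from one strong 32-bit CAS. A minimal sketch of that construction, assuming the GCC __sync builtin that gcc-cas.hpp in this patch builds on; each real target supplies its own primitive:

    #include <cstdint>

    // Strong 32-bit CAS on top of the GCC builtin (a full fence is implied).
    // On failure, 'expected' is updated with the value actually observed.
    template<typename T>
    inline bool platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
    {
        T found = __sync_val_compare_and_swap(ptr, expected, desired);
        bool success = (found == expected);
        expected = found;
        return success;
    }

    // exchange() is then emulated by retrying the CAS until it succeeds,
    // the same loop shape the 8/16/32-bit specializations below use.
    inline std::uint32_t emulated_exchange(volatile std::uint32_t * ptr, std::uint32_t desired)
    {
        std::uint32_t original = *ptr;
        while (!platform_cmpxchg32_strong(original, desired, ptr)) {}
        return original;
    }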
+#include +#include #include #include #include @@ -600,14 +602,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -628,7 +630,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type original = load(memory_order_relaxed); do { @@ -639,7 +641,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -649,11 +651,10 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { - storage_type expected_s = 0, desired_s = 0; memcpy(&expected_s, &expected, sizeof(value_type)); memcpy(&desired_s, &desired, sizeof(value_type)); @@ -690,14 +691,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -718,7 +719,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type original = load(memory_order_relaxed); do { @@ -729,7 +730,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -739,7 +740,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -780,14 +781,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -808,7 +809,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type original = load(memory_order_relaxed); do { @@ -819,7 +820,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -829,7 +830,7 @@ public: bool 
compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { diff --git a/include/boost/atomic/detail/cas32weak.hpp b/include/boost/atomic/detail/cas32weak.hpp index a58cfde..f4d9f7f 100644 --- a/include/boost/atomic/detail/cas32weak.hpp +++ b/include/boost/atomic/detail/cas32weak.hpp @@ -7,6 +7,8 @@ // // Copyright (c) 2011 Helge Bahmann +#include +#include #include #include #include @@ -619,14 +621,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -647,7 +649,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type original = load(memory_order_relaxed); do { @@ -658,7 +660,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -683,7 +685,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -717,14 +719,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -745,7 +747,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type original = load(memory_order_relaxed); do { @@ -756,7 +758,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -781,7 +783,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -815,14 +817,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -843,7 +845,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) 
volatile { value_type original = load(memory_order_relaxed); do { @@ -854,7 +856,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -879,7 +881,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { diff --git a/include/boost/atomic/detail/cas64strong.hpp b/include/boost/atomic/detail/cas64strong.hpp index e5111b6..ca7f41f 100644 --- a/include/boost/atomic/detail/cas64strong.hpp +++ b/include/boost/atomic/detail/cas64strong.hpp @@ -11,6 +11,8 @@ // primitive. It is assumed that 64-bit loads/stores are not // atomic, so they are funnelled through cmpxchg as well. +#include +#include #include #include #include @@ -347,16 +349,16 @@ class base_atomic { typedef T value_type; typedef uint64_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type value, memory_order order = memory_order_seq_cst) volatile + store(value_type const& value, memory_order order = memory_order_seq_cst) volatile { - storage_type value_s; + storage_type value_s = 0; memcpy(&value_s, &value, sizeof(value_s)); platform_fence_before_store(order); platform_store64(value_s, &v_); @@ -374,7 +376,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type original = load(memory_order_relaxed); do { @@ -385,7 +387,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -395,11 +397,10 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { - storage_type expected_s = 0, desired_s = 0; memcpy(&expected_s, &expected, sizeof(value_type)); memcpy(&desired_s, &desired, sizeof(value_type)); diff --git a/include/boost/atomic/detail/gcc-alpha.hpp b/include/boost/atomic/detail/gcc-alpha.hpp index 1f465f9..0748fc3 100644 --- a/include/boost/atomic/detail/gcc-alpha.hpp +++ b/include/boost/atomic/detail/gcc-alpha.hpp @@ -49,7 +49,7 @@ namespace boost { namespace atomics { namespace detail { -static inline void fence_before(memory_order order) +inline void fence_before(memory_order order) { switch(order) { case memory_order_consume: @@ -61,7 +61,7 @@ static inline void fence_before(memory_order order) } } -static inline void fence_after(memory_order order) +inline void fence_after(memory_order order) { switch(order) { case memory_order_acquire: diff --git a/include/boost/atomic/detail/gcc-armv6plus.hpp b/include/boost/atomic/detail/gcc-armv6plus.hpp index 3c9f942..0cbbf10 100644 --- a/include/boost/atomic/detail/gcc-armv6plus.hpp +++ b/include/boost/atomic/detail/gcc-armv6plus.hpp @@ -9,6 +9,8 @@ // Copyright (c) 2009 Phil Endecott // ARM Code by Phil Endecott, based on other architectures. 
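A recurring fix in the cas32weak and cas64strong hunks above is zero-initializing the storage word (storage_type tmp = 0) before memcpy-ing a smaller value_type into it. A short sketch of why, using a hypothetical 3-byte type packed into a 32-bit word:

    #include <cstdint>
    #include <cstring>

    struct rgb { unsigned char r, g, b; };   // sizeof(rgb) == 3, held in a uint32_t

    inline std::uint32_t pack(rgb const& v)
    {
        std::uint32_t s = 0;                 // zero the unused fourth byte first...
        std::memcpy(&s, &v, sizeof(v));      // ...so equal values pack bit-identically
        return s;
    }

    // Without the '= 0', the padding byte of 's' would be indeterminate, and a
    // bitwise compare_exchange on the packed word could fail for two logically
    // equal values.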
+#include +#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE @@ -82,7 +84,7 @@ namespace detail { #define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n" #endif -static inline void +inline void arm_barrier(void) { int brtmp; @@ -94,7 +96,7 @@ arm_barrier(void) ); } -static inline void +inline void platform_fence_before(memory_order order) { switch(order) { @@ -107,7 +109,7 @@ platform_fence_before(memory_order order) } } -static inline void +inline void platform_fence_after(memory_order order) { switch(order) { @@ -119,27 +121,27 @@ platform_fence_after(memory_order order) } } -static inline void +inline void platform_fence_before_store(memory_order order) { platform_fence_before(order); } -static inline void +inline void platform_fence_after_store(memory_order order) { if (order == memory_order_seq_cst) arm_barrier(); } -static inline void +inline void platform_fence_after_load(memory_order order) { platform_fence_after(order); } template -bool +inline bool platform_cmpxchg32(T & expected, T desired, volatile T * ptr) { int success; @@ -169,7 +171,7 @@ platform_cmpxchg32(T & expected, T desired, volatile T * ptr) } #define BOOST_ATOMIC_THREAD_FENCE 2 -static inline void +inline void atomic_thread_fence(memory_order order) { switch(order) { @@ -183,7 +185,7 @@ atomic_thread_fence(memory_order order) } #define BOOST_ATOMIC_SIGNAL_FENCE 2 -static inline void +inline void atomic_signal_fence(memory_order) { __asm__ __volatile__ ("" ::: "memory"); diff --git a/include/boost/atomic/detail/gcc-cas.hpp b/include/boost/atomic/detail/gcc-cas.hpp index c220c53..781e935 100644 --- a/include/boost/atomic/detail/gcc-cas.hpp +++ b/include/boost/atomic/detail/gcc-cas.hpp @@ -10,6 +10,8 @@ #ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP #define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP +#include +#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE @@ -38,19 +40,19 @@ atomic_thread_fence(memory_order order) namespace atomics { namespace detail { -static inline void +inline void platform_fence_before(memory_order) { /* empty, as compare_and_swap is synchronizing already */ } -static inline void +inline void platform_fence_after(memory_order) { /* empty, as compare_and_swap is synchronizing already */ } -static inline void +inline void platform_fence_before_store(memory_order order) { switch(order) { @@ -66,14 +68,14 @@ platform_fence_before_store(memory_order order) } } -static inline void +inline void platform_fence_after_store(memory_order order) { if (order == memory_order_seq_cst) __sync_synchronize(); } -static inline void +inline void platform_fence_after_load(memory_order order) { switch(order) { @@ -90,7 +92,7 @@ platform_fence_after_load(memory_order order) } template -bool +inline bool platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr) { T found = __sync_val_compare_and_swap(ptr, expected, desired); diff --git a/include/boost/atomic/detail/gcc-ppc.hpp b/include/boost/atomic/detail/gcc-ppc.hpp index f5813ae..a26a894 100644 --- a/include/boost/atomic/detail/gcc-ppc.hpp +++ b/include/boost/atomic/detail/gcc-ppc.hpp @@ -7,6 +7,8 @@ // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) +#include +#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE @@ -59,7 +61,7 @@ namespace boost { namespace atomics { namespace detail { -static inline void +inline void ppc_fence_before(memory_order order) { switch(order) { @@ -75,7 +77,7 @@ ppc_fence_before(memory_order order) } } -static inline void +inline void ppc_fence_after(memory_order order) { 
switch(order) { @@ -89,7 +91,7 @@ ppc_fence_after(memory_order order) } } -static inline void +inline void ppc_fence_after_store(memory_order order) { switch(order) { @@ -2113,14 +2115,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -2154,7 +2156,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0, original; memcpy(&tmp, &v, sizeof(value_type)); @@ -2169,14 +2171,15 @@ public: : "cr0" ); ppc_fence_after(order); - memcpy(&v, &original, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &original, sizeof(value_type)); + return res; } bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2211,7 +2214,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2262,14 +2265,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -2303,7 +2306,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0, original; memcpy(&tmp, &v, sizeof(value_type)); @@ -2318,14 +2321,15 @@ public: : "cr0" ); ppc_fence_after(order); - memcpy(&v, &original, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &original, sizeof(value_type)); + return res; } bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2360,7 +2364,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2411,14 +2415,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) : v_(0) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } - base_atomic(void) : v_(0) {} + base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -2452,7 +2456,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type 
const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0, original; memcpy(&tmp, &v, sizeof(value_type)); @@ -2467,14 +2471,15 @@ public: : "cr0" ); ppc_fence_after(order); - memcpy(&v, &original, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &original, sizeof(value_type)); + return res; } bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2509,7 +2514,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2562,14 +2567,14 @@ class base_atomic { typedef T value_type; typedef uint64_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp; memcpy(&tmp, &v, sizeof(value_type)); @@ -2603,7 +2608,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0, original; memcpy(&tmp, &v, sizeof(value_type)); @@ -2618,14 +2623,15 @@ public: : "cr0" ); ppc_fence_after(order); - memcpy(&v, &original, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &original, sizeof(value_type)); + return res; } bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -2660,7 +2666,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { diff --git a/include/boost/atomic/detail/gcc-sparcv9.hpp b/include/boost/atomic/detail/gcc-sparcv9.hpp index 5c47ea9..ae256d1 100644 --- a/include/boost/atomic/detail/gcc-sparcv9.hpp +++ b/include/boost/atomic/detail/gcc-sparcv9.hpp @@ -7,6 +7,8 @@ // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) +#include +#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE @@ -17,7 +19,7 @@ namespace boost { namespace atomics { namespace detail { -static inline void +inline void platform_fence_before(memory_order order) { switch(order) { @@ -37,7 +39,7 @@ platform_fence_before(memory_order order) } } -static inline void +inline void platform_fence_after(memory_order order) { switch(order) { @@ -60,7 +62,7 @@ platform_fence_after(memory_order order) } } -static inline void +inline void platform_fence_after_store(memory_order order) { switch(order) { @@ -71,7 +73,7 @@ platform_fence_after_store(memory_order order) } -static inline void +inline void platform_fence_after_load(memory_order order) { platform_fence_after(order); @@ -134,7 +136,7 @@ public: namespace boost { #define BOOST_ATOMIC_THREAD_FENCE 2 -static inline void +inline void atomic_thread_fence(memory_order order) { switch(order) { @@ -159,7 +161,7 @@ atomic_thread_fence(memory_order order) } #define BOOST_ATOMIC_SIGNAL_FENCE 2 -static inline void +inline void atomic_signal_fence(memory_order) { __asm__ __volatile__ ("" ::: "memory"); @@ -178,7 +180,7 @@ class base_atomic { typedef int32_t storage_type; 
public: explicit base_atomic(value_type v) : v_(v) {} - base_atomic(void) : v_(0) {} + base_atomic(void) {} void store(value_type v, memory_order order = memory_order_seq_cst) volatile @@ -300,7 +302,7 @@ class base_atomic { typedef uint32_t storage_type; public: explicit base_atomic(value_type v) : v_(v) {} - base_atomic(void) : v_(0) {} + base_atomic(void) {} void store(value_type v, memory_order order = memory_order_seq_cst) volatile @@ -422,7 +424,7 @@ class base_atomic { typedef int32_t storage_type; public: explicit base_atomic(value_type v) : v_(v) {} - base_atomic(void) : v_(0) {} + base_atomic(void) {} void store(value_type v, memory_order order = memory_order_seq_cst) volatile @@ -544,7 +546,7 @@ class base_atomic { typedef uint32_t storage_type; public: explicit base_atomic(value_type v) : v_(v) {} - base_atomic(void) : v_(0) {} + base_atomic(void) {} void store(value_type v, memory_order order = memory_order_seq_cst) volatile @@ -957,15 +959,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) : v_(0) { - v_ = 0; memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -985,7 +986,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type tmp = load(memory_order_relaxed); do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); @@ -995,7 +996,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1021,7 +1022,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1047,15 +1048,14 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) : v_(0) { - v_ = 0; memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); @@ -1075,7 +1075,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type tmp = load(memory_order_relaxed); do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); @@ -1085,7 +1085,7 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1111,7 +1111,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1137,16 +1137,16 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& 
v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { - storage_type tmp; + storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); platform_fence_before(order); const_cast(v_) = tmp; @@ -1164,7 +1164,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { value_type tmp = load(memory_order_relaxed); do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); @@ -1174,11 +1174,11 @@ public: bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { - storage_type expected_s, desired_s; + storage_type expected_s = 0, desired_s = 0; memcpy(&expected_s, &expected, sizeof(value_type)); memcpy(&desired_s, &desired, sizeof(value_type)); platform_fence_before(success_order); @@ -1200,7 +1200,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { diff --git a/include/boost/atomic/detail/gcc-x86.hpp b/include/boost/atomic/detail/gcc-x86.hpp index 76080ec..bc33868 100644 --- a/include/boost/atomic/detail/gcc-x86.hpp +++ b/include/boost/atomic/detail/gcc-x86.hpp @@ -8,6 +8,8 @@ // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) +#include +#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE @@ -24,7 +26,7 @@ namespace detail { # define BOOST_ATOMIC_X86_FENCE_INSTR "lock ; addl $0, (%%esp)\n" #endif -static inline void +inline void platform_fence_before(memory_order order) { switch(order) { @@ -44,7 +46,7 @@ platform_fence_before(memory_order order) } } -static inline void +inline void platform_fence_after(memory_order order) { switch(order) { @@ -67,7 +69,7 @@ platform_fence_after(memory_order order) } } -static inline void +inline void platform_fence_after_load(memory_order order) { switch(order) { @@ -87,7 +89,7 @@ platform_fence_after_load(memory_order order) } } -static inline void +inline void platform_fence_before_store(memory_order order) { switch(order) { @@ -107,7 +109,7 @@ platform_fence_before_store(memory_order order) } } -static inline void +inline void platform_fence_after_store(memory_order order) { switch(order) { @@ -198,7 +200,7 @@ public: namespace boost { #define BOOST_ATOMIC_THREAD_FENCE 2 -static inline void +inline void atomic_thread_fence(memory_order order) { switch(order) { @@ -223,7 +225,7 @@ atomic_thread_fence(memory_order order) } #define BOOST_ATOMIC_SIGNAL_FENCE 2 -static inline void +inline void atomic_signal_fence(memory_order) { __asm__ __volatile__ ("" ::: "memory"); @@ -1125,14 +1127,14 @@ class base_atomic { typedef T value_type; typedef uint8_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) { memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { if (order != memory_order_seq_cst) { storage_type tmp; @@ -1155,7 +1157,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type 
const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp; memcpy(&tmp, &v, sizeof(value_type)); @@ -1165,14 +1167,15 @@ public: : "+q" (tmp), "+m" (v_) ); platform_fence_after(order); - memcpy(&v, &tmp, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; } bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1198,7 +1201,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1224,14 +1227,14 @@ class base_atomic { typedef T value_type; typedef uint16_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) { memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { if (order != memory_order_seq_cst) { storage_type tmp; @@ -1254,7 +1257,7 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { storage_type tmp; memcpy(&tmp, &v, sizeof(value_type)); @@ -1264,14 +1267,15 @@ public: : "+q" (tmp), "+m" (v_) ); platform_fence_after(order); - memcpy(&v, &tmp, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; } bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1297,7 +1301,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1323,17 +1327,17 @@ class base_atomic { typedef T value_type; typedef uint32_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { if (order != memory_order_seq_cst) { - storage_type tmp; + storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); platform_fence_before(order); const_cast(v_) = tmp; @@ -1353,9 +1357,9 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { - storage_type tmp; + storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); platform_fence_before(order); __asm__ ( @@ -1363,18 +1367,19 @@ public: : "+q" (tmp), "+m" (v_) ); platform_fence_after(order); - memcpy(&v, &tmp, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; } bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { - storage_type expected_s, desired_s; + storage_type expected_s = 0, desired_s = 0; memcpy(&expected_s, &expected, sizeof(value_type)); memcpy(&desired_s, &desired, sizeof(value_type)); storage_type previous_s = expected_s; @@ 
-1396,7 +1401,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1423,17 +1428,17 @@ class base_atomic { typedef T value_type; typedef uint64_t storage_type; public: - explicit base_atomic(value_type v) + explicit base_atomic(value_type const& v) : v_(0) { memcpy(&v_, &v, sizeof(value_type)); } base_atomic(void) {} void - store(value_type v, memory_order order = memory_order_seq_cst) volatile + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile { if (order != memory_order_seq_cst) { - storage_type tmp; + storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); platform_fence_before(order); const_cast(v_) = tmp; @@ -1453,9 +1458,9 @@ public: } value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile { - storage_type tmp; + storage_type tmp = 0; memcpy(&tmp, &v, sizeof(value_type)); platform_fence_before(order); __asm__ ( @@ -1463,18 +1468,19 @@ public: : "+q" (tmp), "+m" (v_) ); platform_fence_after(order); - memcpy(&v, &tmp, sizeof(value_type)); - return v; + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; } bool compare_exchange_strong( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { - storage_type expected_s, desired_s; + storage_type expected_s = 0, desired_s = 0; memcpy(&expected_s, &expected, sizeof(value_type)); memcpy(&desired_s, &desired, sizeof(value_type)); storage_type previous_s = expected_s; @@ -1496,7 +1502,7 @@ public: bool compare_exchange_weak( value_type & expected, - value_type desired, + value_type const& desired, memory_order success_order, memory_order failure_order) volatile { @@ -1520,7 +1526,7 @@ private: #if !defined(__x86_64__) && (defined(__i686__) || defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) template -bool +inline bool platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr) { #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 @@ -1560,7 +1566,7 @@ platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr) } template -void +inline void platform_store64(T value, volatile T * ptr) { T expected = *ptr; @@ -1569,7 +1575,7 @@ platform_store64(T value, volatile T * ptr) } template -T +inline T platform_load64(const volatile T * ptr) { T expected = *ptr; diff --git a/include/boost/atomic/detail/generic-cas.hpp b/include/boost/atomic/detail/generic-cas.hpp index f3ae701..d87c87d 100644 --- a/include/boost/atomic/detail/generic-cas.hpp +++ b/include/boost/atomic/detail/generic-cas.hpp @@ -7,8 +7,8 @@ // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) -#include - +#include +#include #include #include #include @@ -25,7 +25,7 @@ each operation) */ #if defined(__GNUC__) namespace boost { namespace atomics { namespace detail { - static inline int32_t + inline int32_t fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired) { return __sync_val_compare_and_swap_4(ptr, expected, desired); @@ -33,7 +33,7 @@ each operation) */ #define BOOST_ATOMIC_HAVE_CAS32 1 #if defined(__amd64__) || defined(__i686__) - static inline int64_t + inline int64_t fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired) { return __sync_val_compare_and_swap_8(ptr, expected, desired); @@ -50,14 
+50,14 @@ each operation) */ #endif namespace boost { namespace atomics { namespace detail { - static inline int32_t + inline int32_t fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired) { return _InterlockedCompareExchange(reinterpret_cast(ptr), desired, expected); } #define BOOST_ATOMIC_HAVE_CAS32 1 #if defined(_WIN64) - static inline int64_t + inline int64_t fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) { return _InterlockedCompareExchange64(ptr, desired, expected); @@ -68,21 +68,21 @@ each operation) */ #elif (defined(__ICC) || defined(__ECC)) namespace boost { namespace atomics { namespace detail { - static inline int32_t + inline int32_t fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired) { return _InterlockedCompareExchange((void*)ptr, desired, expected); } #define BOOST_ATOMIC_HAVE_CAS32 1 #if defined(__x86_64) - static inline int64_t + inline int64_t fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) { return cas64(ptr, expected, desired); } #define BOOST_ATOMIC_HAVE_CAS64 1 #elif defined(__ECC) //IA-64 version - static inline int64_t + inline int64_t fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) { return _InterlockedCompareExchange64((void*)ptr, desired, expected); @@ -94,7 +94,7 @@ each operation) */ #elif (defined(__SUNPRO_CC) && defined(__sparc)) #include namespace boost { namespace atomics { namespace detail { - static inline int32_t + inline int32_t fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired) { return atomic_cas_32((volatile unsigned int*)ptr, expected, desired); @@ -102,7 +102,7 @@ each operation) */ #define BOOST_ATOMIC_HAVE_CAS32 1 /* FIXME: check for 64 bit mode */ - static inline int64_t + inline int64_t fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired) { return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired); diff --git a/include/boost/atomic/detail/interlocked.hpp b/include/boost/atomic/detail/interlocked.hpp index a1eea09..5b2c111 100644 --- a/include/boost/atomic/detail/interlocked.hpp +++ b/include/boost/atomic/detail/interlocked.hpp @@ -2,162 +2,205 @@ #define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP // Copyright (c) 2009 Helge Bahmann +// Copyright (c) 2012 Andrey Semashev // // Distributed under the Boost Software License, Version 1.0. 
// See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) -#include #include #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE #pragma once #endif +#if defined(_WIN32_WCE) + +#include + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) BOOST_INTERLOCKED_EXCHANGE(dest, newval) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) BOOST_INTERLOCKED_EXCHANGE_ADD(dest, addend) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) BOOST_INTERLOCKED_EXCHANGE_POINTER(dest, newval) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset)) + +#elif defined(_MSC_VER) + +#include + +#pragma intrinsic(_InterlockedCompareExchange) +#pragma intrinsic(_InterlockedExchangeAdd) +#pragma intrinsic(_InterlockedExchange) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval)) + +#if _MSC_VER >= 1400 + +#pragma intrinsic(_InterlockedAnd) +#pragma intrinsic(_InterlockedOr) +#pragma intrinsic(_InterlockedXor) + +#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg)) + +#endif // _MSC_VER >= 1400 + +#if _MSC_VER >= 1600 + +// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers. +// Note that for each bit count these macros must be either all defined or all not defined. +// Otherwise atomic<> operations will be implemented inconsistently. 
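With these macros in place, the new windows.hpp can map each atomic operation directly onto an intrinsic. A hypothetical usage sketch (not a hunk from this patch) of the shape such a consumer takes on MSVC:

    #include <boost/atomic/detail/interlocked.hpp>

    // _InterlockedExchangeAdd returns the value held before the addition and
    // already carries full (sequentially consistent) ordering on x86/x64.
    inline long fetch_add_seq_cst(long volatile* dest, long addend)
    {
        return BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend);
    }

    // The 8- and 16-bit variants defined below follow the same shape, e.g.
    // BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) for a char-sized add.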
+ +#pragma intrinsic(_InterlockedCompareExchange8) +#pragma intrinsic(_InterlockedExchangeAdd8) +#pragma intrinsic(_InterlockedExchange8) +#pragma intrinsic(_InterlockedAnd8) +#pragma intrinsic(_InterlockedOr8) +#pragma intrinsic(_InterlockedXor8) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg)) + +#pragma intrinsic(_InterlockedCompareExchange16) +#pragma intrinsic(_InterlockedExchangeAdd16) +#pragma intrinsic(_InterlockedExchange16) +#pragma intrinsic(_InterlockedAnd16) +#pragma intrinsic(_InterlockedOr16) +#pragma intrinsic(_InterlockedXor16) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg)) + +#endif // _MSC_VER >= 1600 + +#if defined(_M_AMD64) || defined(_M_IA64) + +#pragma intrinsic(_InterlockedCompareExchange64) +#pragma intrinsic(_InterlockedExchangeAdd64) +#pragma intrinsic(_InterlockedExchange64) +#pragma intrinsic(_InterlockedAnd64) +#pragma intrinsic(_InterlockedOr64) +#pragma intrinsic(_InterlockedXor64) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg)) + +#pragma intrinsic(_InterlockedCompareExchangePointer) +#pragma intrinsic(_InterlockedExchangePointer) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset)) + +#else // defined(_M_AMD64) + +#define 
BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval))) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset)) + +#endif // defined(_M_AMD64) + +#else // defined(_MSC_VER) + +#if defined(BOOST_USE_WINDOWS_H) + +#include + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend)) + +#if defined(_WIN64) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend)) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset)) + +#else // defined(_WIN64) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset)) + +#endif // defined(_WIN64) + +#else // defined(BOOST_USE_WINDOWS_H) + +#if defined(__MINGW64__) +#define BOOST_ATOMIC_INTERLOCKED_IMPORT +#else +#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport) +#endif + namespace boost { namespace atomics { namespace detail { -static inline void -x86_full_fence(void) -{ - long tmp; - BOOST_INTERLOCKED_EXCHANGE(&tmp, 0); -} +extern "C" { -static inline void -platform_fence_before(memory_order) -{ -} +BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long); +BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long); +BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long); -static inline void -platform_fence_after(memory_order) -{ -} - -static inline void -platform_fence_before_store(memory_order) -{ -} - -static inline void -platform_fence_after_store(memory_order order) -{ - if (order == memory_order_seq_cst) - x86_full_fence(); -} - -static inline void -platform_fence_after_load(memory_order order) -{ - if (order == memory_order_seq_cst) { - x86_full_fence(); - } -} - -template -bool -platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr) -{ - T prev = expected; - expected = 
(T)BOOST_INTERLOCKED_COMPARE_EXCHANGE((long *)(ptr), (long)desired, (long)expected); - bool success = (prev==expected); - return success; -} +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend)) #if defined(_WIN64) -template -bool -platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr) -{ - T prev = expected; - expected = (T) _InterlockedCompareExchange64((long long *)(ptr), (long long)desired, (long long)expected); - bool success = (prev==expected); - return success; -} -template -void -platform_store64(T value, volatile T * ptr) -{ - *ptr = value; -} +BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64); +BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64); +BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64); -template -T -platform_load64(volatile T * ptr) -{ - return *ptr; -} -#endif +BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*); +BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*); -} -} +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend)) -#define BOOST_ATOMIC_THREAD_FENCE 2 -static inline void -atomic_thread_fence(memory_order order) -{ - if (order == memory_order_seq_cst) { - atomics::detail::x86_full_fence(); - } -} +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset)) -class atomic_flag { -private: - atomic_flag(const atomic_flag &) /* = delete */ ; - atomic_flag & operator=(const atomic_flag &) /* = delete */ ; - uint32_t v_; -public: - atomic_flag(void) : v_(false) {} +#else // defined(_WIN64) - void - clear(memory_order order = memory_order_seq_cst) volatile - { - atomics::detail::platform_fence_before_store(order); - const_cast(v_) = 0; - atomics::detail::platform_fence_after_store(order); - } +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) 
((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset)) - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile - { - atomics::detail::platform_fence_before(order); - uint32_t expected = v_; - do { - if (expected == 1) - break; - } while (!atomics::detail::platform_cmpxchg32_strong(expected, (uint32_t)1, &v_)); - atomics::detail::platform_fence_after(order); - return expected != 0; - } -}; +#endif // defined(_WIN64) -} +} // extern "C" -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 +} // namespace detail +} // namespace atomics +} // namespace boost -#include +#undef BOOST_ATOMIC_INTERLOCKED_IMPORT -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) +#endif // defined(BOOST_USE_WINDOWS_H) -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#if defined(_WIN64) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#else -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#endif -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -#include -#if defined(_WIN64) -#include -#endif - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ +#endif // defined(_MSC_VER) #endif diff --git a/include/boost/atomic/detail/linux-arm.hpp b/include/boost/atomic/detail/linux-arm.hpp index 80c3c7e..87061ee 100644 --- a/include/boost/atomic/detail/linux-arm.hpp +++ b/include/boost/atomic/detail/linux-arm.hpp @@ -30,6 +30,8 @@ // emulated CAS is only good enough to provide compare_exchange_weak // semantics. +#include +#include #include #include @@ -41,14 +43,14 @@ namespace boost { namespace atomics { namespace detail { -static inline void +inline void arm_barrier(void) { void (*kernel_dmb)(void) = (void (*)(void)) 0xffff0fa0; kernel_dmb(); } -static inline void +inline void platform_fence_before(memory_order order) { switch(order) { @@ -61,7 +63,7 @@ platform_fence_before(memory_order order) } } -static inline void +inline void platform_fence_after(memory_order order) { switch(order) { @@ -73,27 +75,27 @@ platform_fence_after(memory_order order) } } -static inline void +inline void platform_fence_before_store(memory_order order) { platform_fence_before(order); } -static inline void +inline void platform_fence_after_store(memory_order order) { if (order == memory_order_seq_cst) arm_barrier(); } -static inline void +inline void platform_fence_after_load(memory_order order) { platform_fence_after(order); } template -bool +inline bool platform_cmpxchg32(T & expected, T desired, volatile T * ptr) { typedef T (*kernel_cmpxchg32_t)(T oldval, T newval, volatile T * ptr); @@ -110,7 +112,7 @@ platform_cmpxchg32(T & expected, T desired, volatile T * ptr) } #define BOOST_ATOMIC_THREAD_FENCE 2 -static inline void +inline void atomic_thread_fence(memory_order order) { switch(order) { @@ -124,7 +126,7 @@ atomic_thread_fence(memory_order order) } #define BOOST_ATOMIC_SIGNAL_FENCE 2 -static inline void +inline void atomic_signal_fence(memory_order) { __asm__ __volatile__ ("" ::: "memory"); diff --git a/include/boost/atomic/detail/lockpool.hpp b/include/boost/atomic/detail/lockpool.hpp index a0699e2..c278fa4 100644 --- a/include/boost/atomic/detail/lockpool.hpp +++ b/include/boost/atomic/detail/lockpool.hpp @@ -7,7 +7,6 @@ // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) - #include #ifndef BOOST_ATOMIC_FLAG_LOCK_FREE #include @@ -23,45 +22,52 @@ namespace detail { #ifndef BOOST_ATOMIC_FLAG_LOCK_FREE -class lockpool { +class lockpool +{ public: typedef mutex 
lock_type; - class scoped_lock { + class scoped_lock + { private: - mutex::scoped_lock guard; + lock_type& mtx_; + + scoped_lock(scoped_lock const&) /* = delete */; + scoped_lock& operator=(scoped_lock const&) /* = delete */; + public: explicit - scoped_lock(const volatile void * addr) : guard( lock_for(addr) ) + scoped_lock(const volatile void * addr) : mtx_(get_lock_for(addr)) { + mtx_.lock(); + } + ~scoped_lock() + { + mtx_.unlock(); } }; -private: - static BOOST_ATOMIC_DECL mutex pool_[41]; - static mutex & - lock_for(const volatile void * addr) - { - std::size_t index = reinterpret_cast(addr) % 41; - return pool_[index]; - } +private: + static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr); }; #else -class lockpool { +class lockpool +{ public: typedef atomic_flag lock_type; - class scoped_lock { + class scoped_lock + { private: - atomic_flag & flag_; + atomic_flag& flag_; scoped_lock(const scoped_lock &) /* = delete */; - void operator=(const scoped_lock &) /* = delete */; + scoped_lock& operator=(const scoped_lock &) /* = delete */; public: explicit - scoped_lock(const volatile void * addr) : flag_( lock_for(addr) ) + scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr)) { do { } while (flag_.test_and_set(memory_order_acquire)); @@ -74,14 +80,7 @@ public: }; private: - static BOOST_ATOMIC_DECL atomic_flag pool_[41]; - - static lock_type & - lock_for(const volatile void * addr) - { - std::size_t index = reinterpret_cast(addr) % 41; - return pool_[index]; - } + static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr); }; #endif diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index 8063435..a31ecec 100644 --- a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -44,9 +44,9 @@ #include -#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) +#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE) - #include + #include #elif 0 && defined(__GNUC__) /* currently does not work correctly */ diff --git a/include/boost/atomic/detail/type-classification.hpp b/include/boost/atomic/detail/type-classification.hpp new file mode 100644 index 0000000..f7c2f8b --- /dev/null +++ b/include/boost/atomic/detail/type-classification.hpp @@ -0,0 +1,45 @@ +#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP +#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP + +// Copyright (c) 2011 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. +// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +template::value> +struct classify +{ + typedef void type; +}; + +template +struct classify {typedef int type;}; + +template +struct classify {typedef void* type;}; + +template +struct storage_size_of +{ + enum _ + { + size = sizeof(T), + value = (size == 3 ? 4 : (size == 5 || size == 6 || size == 7 ? 
8 : size)) + }; +}; + +}}} + +#endif diff --git a/include/boost/atomic/detail/type-classifier.hpp b/include/boost/atomic/detail/type-classifier.hpp deleted file mode 100644 index 4028dd0..0000000 --- a/include/boost/atomic/detail/type-classifier.hpp +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFIER_HPP -#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFIER_HPP - -// Copyright (c) 2011 Helge Bahmann -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include - -#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -template -struct type_classifier { - typedef void test; -}; - -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -template<> -struct type_classifier {typedef int test;}; -#ifdef BOOST_HAS_LONG_LONG -template<> struct type_classifier -{typedef int test;}; -template<> struct type_classifier -{typedef int test;}; -#endif - -template -struct type_classifier {typedef void * test;}; - -template -struct sign_trait { - typedef void test; -}; - -template<> -struct sign_trait {typedef int test;}; -template<> -struct sign_trait {typedef unsigned int test;}; -template<> -struct sign_trait {typedef int test;}; -template<> -struct sign_trait {typedef unsigned int test;}; -template<> -struct sign_trait {typedef int test;}; -template<> -struct sign_trait {typedef unsigned int test;}; -template<> -struct sign_trait {typedef int test;}; -template<> -struct sign_trait {typedef unsigned int test;}; -template<> -struct sign_trait {typedef int test;}; -#ifdef BOOST_HAS_LONG_LONG -template<> struct sign_trait -{typedef unsigned int test;}; -template<> struct sign_trait -{typedef int test;}; -#endif - - - -}}} - -#endif diff --git a/include/boost/atomic/detail/windows.hpp b/include/boost/atomic/detail/windows.hpp new file mode 100644 index 0000000..2c98963 --- /dev/null +++ b/include/boost/atomic/detail/windows.hpp @@ -0,0 +1,1585 @@ +#ifndef BOOST_ATOMIC_DETAIL_WINDOWS_HPP +#define BOOST_ATOMIC_DETAIL_WINDOWS_HPP + +// Copyright (c) 2009 Helge Bahmann +// Copyright (c) 2012 Andrey Semashev +// +// Distributed under the Boost Software License, Version 1.0. 
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+// 'order' : unreferenced formal parameter
+#pragma warning(disable: 4100)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// Define hardware barriers
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+extern "C" void _mm_mfence(void);
+#pragma intrinsic(_mm_mfence)
+#endif
+
+BOOST_FORCEINLINE void x86_full_fence(void)
+{
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+    // Use mfence only if SSE2 is available
+    _mm_mfence();
+#else
+    long tmp;
+    BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
+#endif
+}
+
+// Define compiler barriers
+#if defined(_MSC_VER) && _MSC_VER >= 1310
+
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+
+#define BOOST_ATOMIC_READ_WRITE_BARRIER() _ReadWriteBarrier()
+
+#if _MSC_VER >= 1400
+
+extern "C" void _ReadBarrier();
+#pragma intrinsic(_ReadBarrier)
+extern "C" void _WriteBarrier();
+#pragma intrinsic(_WriteBarrier)
+
+#define BOOST_ATOMIC_READ_BARRIER() _ReadBarrier()
+#define BOOST_ATOMIC_WRITE_BARRIER() _WriteBarrier()
+
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_READ_WRITE_BARRIER
+#define BOOST_ATOMIC_READ_WRITE_BARRIER()
+#endif
+#ifndef BOOST_ATOMIC_READ_BARRIER
+#define BOOST_ATOMIC_READ_BARRIER() BOOST_ATOMIC_READ_WRITE_BARRIER()
+#endif
+#ifndef BOOST_ATOMIC_WRITE_BARRIER
+#define BOOST_ATOMIC_WRITE_BARRIER() BOOST_ATOMIC_READ_WRITE_BARRIER()
+#endif
+
+// The MSVC optimizer (up to and including MSVC 2012) generates very poor code for switch-case in the fence functions.
+// Issuing unconditional compiler barriers generates better code. We may re-enable the main branch if the MSVC optimizer improves.
+#ifdef BOOST_MSVC +#define BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER +#endif + +BOOST_FORCEINLINE void +platform_fence_before(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_READ_WRITE_BARRIER(); + +#else + + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + break; + case memory_order_release: + case memory_order_acq_rel: + BOOST_ATOMIC_WRITE_BARRIER(); + /* release */ + break; + case memory_order_seq_cst: + BOOST_ATOMIC_READ_WRITE_BARRIER(); + /* seq */ + break; + } + +#endif +} + +BOOST_FORCEINLINE void +platform_fence_after(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_READ_WRITE_BARRIER(); + +#else + + switch(order) + { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_consume: + case memory_order_acquire: + case memory_order_acq_rel: + BOOST_ATOMIC_READ_BARRIER(); + break; + case memory_order_seq_cst: + BOOST_ATOMIC_READ_WRITE_BARRIER(); + /* seq */ + break; + } + +#endif +} + +BOOST_FORCEINLINE void +platform_fence_before_store(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_WRITE_BARRIER(); + +#else + + switch(order) + { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_acq_rel: + case memory_order_release: + case memory_order_seq_cst: + BOOST_ATOMIC_WRITE_BARRIER(); + break; + } + +#endif +} + +BOOST_FORCEINLINE void +platform_fence_after_store(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_WRITE_BARRIER(); + if (order == memory_order_seq_cst) + x86_full_fence(); + +#else + + switch(order) + { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_acq_rel: + case memory_order_release: + BOOST_ATOMIC_WRITE_BARRIER(); + break; + case memory_order_seq_cst: + x86_full_fence(); + break; + } + +#endif +} + +BOOST_FORCEINLINE void +platform_fence_after_load(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_READ_BARRIER(); + if (order == memory_order_seq_cst) + x86_full_fence(); + +#else + + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + case memory_order_acq_rel: + BOOST_ATOMIC_READ_BARRIER(); + break; + case memory_order_release: + break; + case memory_order_seq_cst: + x86_full_fence(); + break; + } + +#endif +} + +} // namespace detail +} // namespace atomics + +#define BOOST_ATOMIC_THREAD_FENCE 2 +BOOST_FORCEINLINE void +atomic_thread_fence(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_READ_WRITE_BARRIER(); + if (order == memory_order_seq_cst) + atomics::detail::x86_full_fence(); + +#else + + switch (order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_acquire: + BOOST_ATOMIC_READ_BARRIER(); + break; + case memory_order_release: + BOOST_ATOMIC_WRITE_BARRIER(); + break; + case memory_order_acq_rel: + BOOST_ATOMIC_READ_WRITE_BARRIER(); + break; + case memory_order_seq_cst: + atomics::detail::x86_full_fence(); + break; + } + +#endif +} + +#define BOOST_ATOMIC_SIGNAL_FENCE 2 +BOOST_FORCEINLINE void +atomic_signal_fence(memory_order order) +{ +#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER + + BOOST_ATOMIC_READ_WRITE_BARRIER(); + +#else + + switch (order) + { + case 
memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_acquire: + BOOST_ATOMIC_READ_BARRIER(); + break; + case memory_order_release: + BOOST_ATOMIC_WRITE_BARRIER(); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + BOOST_ATOMIC_READ_WRITE_BARRIER(); + break; + } + +#endif +} + +#undef BOOST_ATOMIC_READ_WRITE_BARRIER +#undef BOOST_ATOMIC_READ_BARRIER +#undef BOOST_ATOMIC_WRITE_BARRIER + +class atomic_flag +{ +private: + atomic_flag(const atomic_flag &) /* = delete */ ; + atomic_flag & operator=(const atomic_flag &) /* = delete */ ; +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 + char v_; +#else + long v_; +#endif +public: + atomic_flag(void) : v_(0) {} + + void + clear(memory_order order = memory_order_seq_cst) volatile + { + atomics::detail::platform_fence_before_store(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 + BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, 0); +#else + BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, 0); +#endif + atomics::detail::platform_fence_after_store(order); + } + + bool + test_and_set(memory_order order = memory_order_seq_cst) volatile + { + atomics::detail::platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 + const char old = BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, 1); +#else + const long old = BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, 1); +#endif + atomics::detail::platform_fence_after(order); + return old != 0; + } +}; + +} // namespace boost + +#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 + +#include + +#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) + +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64) +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#endif +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +namespace boost { +namespace atomics { +namespace detail { + +#if defined(_MSC_VER) +#pragma warning(push) +// 'char' : forcing value to bool 'true' or 'false' (performance warning) +#pragma warning(disable: 4800) +#endif + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 + typedef value_type storage_type; +#else + typedef uint32_t storage_type; +#endif + typedef T difference_type; +public: + explicit base_atomic(value_type v) : v_(v) {} + base_atomic(void) {} + + void + store(value_type v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + platform_fence_before(order); + v_ = static_cast< storage_type >(v); + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + value_type v = static_cast< value_type >(v_); + platform_fence_after_load(order); + return v; + } + + value_type + fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8 + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&v_, v)); +#else + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v)); +#endif + platform_fence_after(order); + return v; + } + + value_type + fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile + { + typedef typename make_signed< value_type >::type signed_value_type; + return fetch_add(static_cast< value_type >(-static_cast< 
signed_value_type >(v)), order); + } + + value_type + exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, v)); +#else + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v)); +#endif + platform_fence_after(order); + return v; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + value_type previous = expected; + platform_fence_before(success_order); +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 + value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&v_, desired, previous)); +#else + value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous)); +#endif + bool success = (previous == oldval); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + expected = oldval; + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + value_type + fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#ifdef BOOST_ATOMIC_INTERLOCKED_AND8 + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&v_, v)); + platform_fence_after(order); + return v; +#elif defined(BOOST_ATOMIC_INTERLOCKED_AND) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#ifdef BOOST_ATOMIC_INTERLOCKED_OR8 + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&v_, v)); + platform_fence_after(order); + return v; +#elif defined(BOOST_ATOMIC_INTERLOCKED_OR) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#ifdef BOOST_ATOMIC_INTERLOCKED_XOR8 + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&v_, v)); + platform_fence_after(order); + return v; +#elif defined(BOOST_ATOMIC_INTERLOCKED_XOR) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); + return tmp; +#endif + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + storage_type v_; +}; + +#if defined(_MSC_VER) 
+#pragma warning(pop) +#endif + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 + typedef value_type storage_type; +#else + typedef uint32_t storage_type; +#endif + typedef T difference_type; +public: + explicit base_atomic(value_type v) : v_(v) {} + base_atomic(void) {} + + void + store(value_type v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + platform_fence_before(order); + v_ = static_cast< storage_type >(v); + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + value_type v = static_cast< value_type >(v_); + platform_fence_after_load(order); + return v; + } + + value_type + fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16 + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&v_, v)); +#else + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v)); +#endif + platform_fence_after(order); + return v; + } + + value_type + fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile + { + typedef typename make_signed< value_type >::type signed_value_type; + return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); + } + + value_type + exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE16 + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&v_, v)); +#else + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v)); +#endif + platform_fence_after(order); + return v; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + value_type previous = expected; + platform_fence_before(success_order); +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 + value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&v_, desired, previous)); +#else + value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous)); +#endif + bool success = (previous == oldval); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + expected = oldval; + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + value_type + fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#ifdef BOOST_ATOMIC_INTERLOCKED_AND16 + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&v_, v)); + platform_fence_after(order); + return v; +#elif defined(BOOST_ATOMIC_INTERLOCKED_AND) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#ifdef 
BOOST_ATOMIC_INTERLOCKED_OR16 + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&v_, v)); + platform_fence_after(order); + return v; +#elif defined(BOOST_ATOMIC_INTERLOCKED_OR) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#ifdef BOOST_ATOMIC_INTERLOCKED_XOR16 + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&v_, v)); + platform_fence_after(order); + return v; +#elif defined(BOOST_ATOMIC_INTERLOCKED_XOR) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); + return tmp; +#endif + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + storage_type v_; +}; + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; + typedef value_type storage_type; + typedef T difference_type; +public: + explicit base_atomic(value_type v) : v_(v) {} + base_atomic(void) {} + + void + store(value_type v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + platform_fence_before(order); + v_ = static_cast< storage_type >(v); + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + value_type v = static_cast< value_type >(v_); + platform_fence_after_load(order); + return v; + } + + value_type + fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v)); + platform_fence_after(order); + return v; + } + + value_type + fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile + { + typedef typename make_signed< value_type >::type signed_value_type; + return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); + } + + value_type + exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v)); + platform_fence_after(order); + return v; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + value_type previous = expected; + platform_fence_before(success_order); + value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous)); + bool success = (previous == oldval); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + expected = oldval; + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return 
compare_exchange_strong(expected, desired, success_order, failure_order); + } + + value_type + fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#if defined(BOOST_ATOMIC_INTERLOCKED_AND) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#if defined(BOOST_ATOMIC_INTERLOCKED_OR) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#if defined(BOOST_ATOMIC_INTERLOCKED_XOR) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); + return tmp; +#endif + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + storage_type v_; +}; + +#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64) + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; + typedef value_type storage_type; + typedef T difference_type; +public: + explicit base_atomic(value_type v) : v_(v) {} + base_atomic(void) {} + + void + store(value_type v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + platform_fence_before(order); + v_ = static_cast< storage_type >(v); + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + value_type v = static_cast< value_type >(v_); + platform_fence_after_load(order); + return v; + } + + value_type + fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&v_, v)); + platform_fence_after(order); + return v; + } + + value_type + fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile + { + typedef typename make_signed< value_type >::type signed_value_type; + return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); + } + + value_type + exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&v_, v)); + platform_fence_after(order); + return v; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + value_type previous = expected; + platform_fence_before(success_order); + value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&v_, desired, previous)); + bool success = (previous == oldval); + if 
(success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + expected = oldval; + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + value_type + fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#if defined(BOOST_ATOMIC_INTERLOCKED_AND64) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#if defined(BOOST_ATOMIC_INTERLOCKED_OR64) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); + return tmp; +#endif + } + + value_type + fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile + { +#if defined(BOOST_ATOMIC_INTERLOCKED_XOR64) + platform_fence_before(order); + v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&v_, v)); + platform_fence_after(order); + return v; +#else + value_type tmp = load(memory_order_relaxed); + do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); + return tmp; +#endif + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + storage_type v_; +}; + +#endif // defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64) + +// MSVC 2012 fails to recognize sizeof(T) as a constant expression in template specializations +enum msvc_sizeof_pointer_workaround { sizeof_pointer = sizeof(void*) }; + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef void* value_type; +public: + explicit base_atomic(value_type v) : v_(v) {} + base_atomic(void) {} + + void + store(value_type v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + platform_fence_before(order); + const_cast(v_) = v; + } else { + exchange(v, order); + } + } + + value_type load(memory_order order = memory_order_seq_cst) const volatile + { + value_type v = const_cast(v_); + platform_fence_after_load(order); + return v; + } + + value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); + v = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(&v_, v); + platform_fence_after(order); + return v; + } + + bool compare_exchange_strong(value_type & expected, value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + value_type previous = expected; + platform_fence_before(success_order); + value_type oldval = (value_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&v_, desired, previous); + bool success = (previous == oldval); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + expected = oldval; + return success; + } + + bool 
compare_exchange_weak(value_type & expected, value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_BASE_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + value_type v_; +}; + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T* value_type; + typedef ptrdiff_t difference_type; +public: + explicit base_atomic(value_type v) : v_(v) {} + base_atomic(void) {} + + void + store(value_type v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + platform_fence_before(order); + const_cast(v_) = v; + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + value_type v = const_cast(v_); + platform_fence_after_load(order); + return v; + } + + value_type + exchange(value_type v, memory_order order = memory_order_seq_cst) volatile + { + platform_fence_before(order); + v = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(&v_, v); + platform_fence_after(order); + return v; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + value_type previous = expected; + platform_fence_before(success_order); + value_type oldval = (value_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&v_, desired, previous); + bool success = (previous == oldval); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + expected = oldval; + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + value_type + fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile + { + v = v * sizeof(*v_); + platform_fence_before(order); + value_type res = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(&v_, v); + platform_fence_after(order); + return res; + } + + value_type + fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile + { + return fetch_add(-v, order); + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_POINTER_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + value_type v_; +}; + + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 + typedef uint8_t storage_type; +#else + typedef uint32_t storage_type; +#endif +public: + explicit base_atomic(value_type const& v) : v_(0) + { + memcpy(&v_, &v, sizeof(value_type)); + } + base_atomic(void) {} + + void + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + storage_type tmp = 0; + memcpy(&tmp, &v, sizeof(value_type)); + platform_fence_before(order); + const_cast(v_) = tmp; + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + storage_type tmp = const_cast(v_); + 
platform_fence_after_load(order); + value_type v; + memcpy(&v, &tmp, sizeof(value_type)); + return v; + } + + value_type + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile + { + storage_type tmp = 0; + memcpy(&tmp, &v, sizeof(value_type)); + platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 + tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, tmp)); +#else + tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp)); +#endif + platform_fence_after(order); + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type const& desired, + memory_order success_order, + memory_order failure_order) volatile + { + storage_type expected_s = 0, desired_s = 0; + memcpy(&expected_s, &expected, sizeof(value_type)); + memcpy(&desired_s, &desired, sizeof(value_type)); + platform_fence_before(success_order); +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 + storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&v_, desired_s, expected_s)); +#else + storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s)); +#endif + bool success = (oldval == expected_s); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + memcpy(&expected, &oldval, sizeof(value_type)); + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type const& desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_BASE_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + storage_type v_; +}; + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 + typedef uint16_t storage_type; +#else + typedef uint32_t storage_type; +#endif +public: + explicit base_atomic(value_type const& v) : v_(0) + { + memcpy(&v_, &v, sizeof(value_type)); + } + base_atomic(void) {} + + void + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + storage_type tmp = 0; + memcpy(&tmp, &v, sizeof(value_type)); + platform_fence_before(order); + const_cast(v_) = tmp; + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + storage_type tmp = const_cast(v_); + platform_fence_after_load(order); + value_type v; + memcpy(&v, &tmp, sizeof(value_type)); + return v; + } + + value_type + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile + { + storage_type tmp = 0; + memcpy(&tmp, &v, sizeof(value_type)); + platform_fence_before(order); +#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE16 + tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&v_, tmp)); +#else + tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp)); +#endif + platform_fence_after(order); + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type const& desired, + memory_order success_order, + 
memory_order failure_order) volatile + { + storage_type expected_s = 0, desired_s = 0; + memcpy(&expected_s, &expected, sizeof(value_type)); + memcpy(&desired_s, &desired, sizeof(value_type)); + platform_fence_before(success_order); +#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 + storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&v_, desired_s, expected_s)); +#else + storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s)); +#endif + bool success = (oldval == expected_s); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + memcpy(&expected, &oldval, sizeof(value_type)); + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type const& desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_BASE_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void operator=(const base_atomic &) /* = delete */ ; + storage_type v_; +}; + +template +class base_atomic +{ + typedef base_atomic this_type; + typedef T value_type; + typedef uint32_t storage_type; +public: + explicit base_atomic(value_type const& v) : v_(0) + { + memcpy(&v_, &v, sizeof(value_type)); + } + base_atomic(void) {} + + void + store(value_type const& v, memory_order order = memory_order_seq_cst) volatile + { + if (order != memory_order_seq_cst) { + storage_type tmp = 0; + memcpy(&tmp, &v, sizeof(value_type)); + platform_fence_before(order); + const_cast(v_) = tmp; + } else { + exchange(v, order); + } + } + + value_type + load(memory_order order = memory_order_seq_cst) const volatile + { + storage_type tmp = const_cast(v_); + platform_fence_after_load(order); + value_type v; + memcpy(&v, &tmp, sizeof(value_type)); + return v; + } + + value_type + exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile + { + storage_type tmp = 0; + memcpy(&tmp, &v, sizeof(value_type)); + platform_fence_before(order); + tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp)); + platform_fence_after(order); + value_type res; + memcpy(&res, &tmp, sizeof(value_type)); + return res; + } + + bool + compare_exchange_strong( + value_type & expected, + value_type const& desired, + memory_order success_order, + memory_order failure_order) volatile + { + storage_type expected_s = 0, desired_s = 0; + memcpy(&expected_s, &expected, sizeof(value_type)); + memcpy(&desired_s, &desired, sizeof(value_type)); + platform_fence_before(success_order); + storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s)); + bool success = (oldval == expected_s); + if (success) + platform_fence_after(success_order); + else + platform_fence_after(failure_order); + memcpy(&expected, &oldval, sizeof(value_type)); + return success; + } + + bool + compare_exchange_weak( + value_type & expected, + value_type const& desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + + bool + is_lock_free(void) const volatile + { + return true; + } + + BOOST_ATOMIC_DECLARE_BASE_OPERATORS +private: + base_atomic(const base_atomic &) /* = delete */ ; + void 
operator=(const base_atomic &) /* = delete */ ;
+    storage_type v_;
+};
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+
+template
+class base_atomic
+{
+    typedef base_atomic this_type;
+    typedef T value_type;
+    typedef uint64_t storage_type;
+public:
+    explicit base_atomic(value_type const& v) : v_(0)
+    {
+        memcpy(&v_, &v, sizeof(value_type));
+    }
+    base_atomic(void) {}
+
+    void
+    store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+    {
+        if (order != memory_order_seq_cst) {
+            storage_type tmp = 0;
+            memcpy(&tmp, &v, sizeof(value_type));
+            platform_fence_before(order);
+            const_cast(v_) = tmp;
+        } else {
+            exchange(v, order);
+        }
+    }
+
+    value_type
+    load(memory_order order = memory_order_seq_cst) const volatile
+    {
+        storage_type tmp = const_cast(v_);
+        platform_fence_after_load(order);
+        value_type v;
+        memcpy(&v, &tmp, sizeof(value_type));
+        return v;
+    }
+
+    value_type
+    exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+    {
+        storage_type tmp = 0;
+        memcpy(&tmp, &v, sizeof(value_type));
+        platform_fence_before(order);
+        tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&v_, tmp));
+        platform_fence_after(order);
+        value_type res;
+        memcpy(&res, &tmp, sizeof(value_type));
+        return res;
+    }
+
+    bool
+    compare_exchange_strong(
+        value_type & expected,
+        value_type const& desired,
+        memory_order success_order,
+        memory_order failure_order) volatile
+    {
+        storage_type expected_s = 0, desired_s = 0;
+        memcpy(&expected_s, &expected, sizeof(value_type));
+        memcpy(&desired_s, &desired, sizeof(value_type));
+        platform_fence_before(success_order);
+        storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&v_, desired_s, expected_s));
+        bool success = (oldval == expected_s);
+        if (success)
+            platform_fence_after(success_order);
+        else
+            platform_fence_after(failure_order);
+        memcpy(&expected, &oldval, sizeof(value_type));
+        return success;
+    }
+
+    bool
+    compare_exchange_weak(
+        value_type & expected,
+        value_type const& desired,
+        memory_order success_order,
+        memory_order failure_order) volatile
+    {
+        return compare_exchange_strong(expected, desired, success_order, failure_order);
+    }
+
+    bool
+    is_lock_free(void) const volatile
+    {
+        return true;
+    }
+
+    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
+private:
+    base_atomic(const base_atomic &) /* = delete */ ;
+    void operator=(const base_atomic &) /* = delete */ ;
+    storage_type v_;
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/src/lockpool.cpp b/src/lockpool.cpp
index 44a20c5..da8e89a 100644
--- a/src/lockpool.cpp
+++ b/src/lockpool.cpp
@@ -10,7 +10,14 @@
 namespace boost {
 namespace atomics {
 namespace detail {
-lockpool::lock_type lockpool::pool_[41];
+static lockpool::lock_type lock_pool_[41];
+
+// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
+BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr)
+{
+    std::size_t index = reinterpret_cast(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
+    return lock_pool_[index];
+}
 }
 }
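The sketches below restate the key techniques in this patch as standalone C++ programs. std:: facilities stand in for the Boost internals, and every identifier ending in _sketch is illustrative rather than taken from the patch.

First, the size rounding performed by storage_size_of in the new type-classification.hpp: 3-byte types widen to 4 bytes, and 5-, 6- and 7-byte types widen to 8 bytes, so that padded objects fit the 32- and 64-bit interlocked primitives. A minimal restatement:

#include <iostream>

template< typename T >
struct storage_size_of_sketch
{
    enum _
    {
        size = sizeof(T),
        // 3 -> 4 and 5..7 -> 8; exact power-of-two sizes pass through unchanged
        value = (size == 3 ? 4 : (size == 5 || size == 6 || size == 7 ? 8 : size))
    };
};

struct three_bytes { char c[3]; };
struct seven_bytes { char c[7]; };

int main()
{
    std::cout << storage_size_of_sketch< three_bytes >::value << "\n"; // 4
    std::cout << storage_size_of_sketch< seven_bytes >::value << "\n"; // 8
}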
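atomic_thread_fence in windows.hpp issues only a compiler barrier unless seq_cst is requested, in which case x86_full_fence adds the hardware fence; on x86, ordinary loads and stores already have acquire/release semantics at the hardware level. Roughly the same dispatch in portable terms, assuming std::atomic_signal_fence as a stand-in for the _ReadWriteBarrier family:

#include <atomic>

// std::atomic_signal_fence compiles to a pure compiler barrier, much like
// _ReadWriteBarrier(); std::atomic_thread_fence(seq_cst) emits a full
// hardware fence on x86, much like x86_full_fence() in the patch.
void thread_fence_sketch(std::memory_order order)
{
    if (order == std::memory_order_seq_cst)
        std::atomic_thread_fence(std::memory_order_seq_cst); // hardware fence
    else if (order != std::memory_order_relaxed)
        std::atomic_signal_fence(order); // compiler-only barrier
}

int main()
{
    thread_fence_sketch(std::memory_order_acquire);
    thread_fence_sketch(std::memory_order_seq_cst);
}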
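The store members of the new base_atomic specializations share one policy: a non-seq_cst store is a plain store bracketed by compiler barriers, while a seq_cst store is routed through exchange so the locked instruction provides the full fence. A sketch of that dispatch, with std::atomic standing in for the patch's storage:

#include <atomic>

void store_sketch(std::atomic< long >& v, long x, std::memory_order order)
{
    if (order != std::memory_order_seq_cst)
        v.store(x, order); // plain store; the patch brackets it with compiler barriers
    else
        v.exchange(x);     // the interlocked exchange doubles as the full fence
}

int main()
{
    std::atomic< long > v(0);
    store_sketch(v, 42, std::memory_order_release);
    store_sketch(v, 43, std::memory_order_seq_cst);
}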
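atomic_flag::test_and_set in windows.hpp is a single interlocked exchange of 1; the returned old value says whether the flag was already set, and clear() exchanges (or plainly stores) 0. The same shape, with std::atomic standing in for BOOST_ATOMIC_INTERLOCKED_EXCHANGE:

#include <atomic>
#include <iostream>

int main()
{
    std::atomic< long > flag(0); // the patch's member is a long (or char with EXCHANGE8)

    // test_and_set: exchange in 1, report whether the flag was already set
    bool was_set = flag.exchange(1) != 0;
    std::cout << was_set << "\n"; // 0: the flag was clear

    was_set = flag.exchange(1) != 0;
    std::cout << was_set << "\n"; // 1: already set

    // clear: exchange (or store) 0
    flag.exchange(0);
}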
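fetch_sub in the integral specializations is fetch_add of the negated operand, where the negation goes through make_signed so that an unsigned value_type is negated as its signed counterpart and unsigned wraparound realizes the subtraction. The arithmetic in isolation, using std::make_signed in place of boost::make_signed:

#include <cstdint>
#include <iostream>
#include <type_traits>

int main()
{
    typedef std::uint32_t value_type;
    typedef std::make_signed< value_type >::type signed_value_type; // int32_t

    value_type x = 10, v = 3;
    // the body of the patch's fetch_sub, minus the atomics:
    value_type r = x + static_cast< value_type >(-static_cast< signed_value_type >(v));
    std::cout << r << "\n"; // 7
}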
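Where no direct interlocked primitive exists (for example fetch_and without BOOST_ATOMIC_INTERLOCKED_AND), the patch falls back to a compare_exchange_weak loop with an empty body: on failure, the CAS itself reloads the expected value. The same loop against std::atomic:

#include <atomic>
#include <cstdint>
#include <iostream>

std::uint32_t fetch_and_sketch(std::atomic< std::uint32_t >& a, std::uint32_t v)
{
    std::uint32_t tmp = a.load(std::memory_order_relaxed);
    // on failure, compare_exchange_weak writes the current value back into tmp,
    // so the loop needs no body of its own
    do {} while (!a.compare_exchange_weak(tmp, tmp & v,
        std::memory_order_seq_cst, std::memory_order_relaxed));
    return tmp; // the value observed before the AND, as fetch_and requires
}

int main()
{
    std::atomic< std::uint32_t > a(0xFF);
    std::cout << std::hex << fetch_and_sketch(a, 0x0F) << " " << a.load() << "\n"; // ff f
}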
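The pointer specialization's fetch_add multiplies the element offset by sizeof(*v_) before calling BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER, because that macro advances the pointer by a byte offset. The scaling on its own, with plain pointers:

#include <cstddef>
#include <iostream>

int main()
{
    long arr[4] = { 10, 20, 30, 40 };
    long* p = arr;

    std::ptrdiff_t v = 2;                        // fetch_add(2), counted in elements
    std::ptrdiff_t byte_offset = v * sizeof(*p); // what the patch hands to the macro

    long* q = reinterpret_cast< long* >(
        reinterpret_cast< char* >(p) + byte_offset);
    std::cout << *q << "\n"; // 30, i.e. p + 2
}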
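The generic (non-integral, non-pointer) specializations keep the value in an unsigned integer of the rounded-up size, zero that storage first, and memcpy the object in and out around each interlocked call, so padding bits compare deterministically in compare_exchange. The packing step single-threaded, for a hypothetical 2-byte user type:

#include <cstdint>
#include <cstring>
#include <iostream>

struct two_bytes { std::uint8_t a, b; };

int main()
{
    // without BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 the patch widens
    // 2-byte types to a uint32_t storage_type
    typedef std::uint32_t storage_type;

    two_bytes v = { 1, 2 };
    storage_type s = 0;             // zeroed so the unused bytes are deterministic
    std::memcpy(&s, &v, sizeof(v)); // pack before the (elided) interlocked op

    two_bytes r;
    std::memcpy(&r, &s, sizeof(r)); // unpack the value that comes back
    std::cout << int(r.a) << " " << int(r.b) << "\n"; // 1 2
}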
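Finally, the reworked lock pool in src/lockpool.cpp hides the pool array behind an out-of-line get_lock_for that hashes the object address into a fixed, prime-sized array of locks. A sketch with std::mutex standing in for lockpool::lock_type, pool size 41 as in the patch:

#include <cstddef>
#include <mutex>

static std::mutex lock_pool_sketch[41];

std::mutex& get_lock_for_sketch(const volatile void* addr)
{
    // same hash as the patch: object address modulo the pool size
    std::size_t index = reinterpret_cast< std::size_t >(addr)
        % (sizeof(lock_pool_sketch) / sizeof(*lock_pool_sketch));
    return lock_pool_sketch[index];
}

int main()
{
    int x = 0;
    std::lock_guard< std::mutex > guard(get_lock_for_sketch(&x));
    // operations on x are serialized against anything else hashing to this lock
}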