From 7d768ca2e1bf8f008969ecd8d79c8f71860e0ded Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 26 Jan 2014 19:39:04 +0400 Subject: [PATCH 01/23] Extracted all capability macros to separate headers. --- include/boost/atomic/capabilities.hpp | 82 ++++++++++++++++ .../boost/atomic/detail/atomic_traits_fwd.hpp | 34 +++++++ include/boost/atomic/detail/caps_gcc_arm.hpp | 53 +++++++++++ .../boost/atomic/detail/caps_gcc_atomic.hpp | 71 ++++++++++++++ include/boost/atomic/detail/caps_gcc_ppc.hpp | 58 ++++++++++++ .../boost/atomic/detail/caps_gcc_sparcv9.hpp | 51 ++++++++++ include/boost/atomic/detail/caps_gcc_sync.hpp | 60 ++++++++++++ include/boost/atomic/detail/caps_gcc_x86.hpp | 74 +++++++++++++++ .../boost/atomic/detail/caps_linux_arm.hpp | 62 ++++++++++++ include/boost/atomic/detail/caps_msvc_arm.hpp | 41 ++++++++ include/boost/atomic/detail/caps_msvc_x86.hpp | 55 +++++++++++ include/boost/atomic/detail/caps_windows.hpp | 41 ++++++++ include/boost/atomic/detail/config.hpp | 24 +++-- include/boost/atomic/detail/link.hpp | 22 +++-- include/boost/atomic/detail/platform.hpp | 94 +++++++++++-------- 15 files changed, 770 insertions(+), 52 deletions(-) create mode 100644 include/boost/atomic/capabilities.hpp create mode 100644 include/boost/atomic/detail/atomic_traits_fwd.hpp create mode 100644 include/boost/atomic/detail/caps_gcc_arm.hpp create mode 100644 include/boost/atomic/detail/caps_gcc_atomic.hpp create mode 100644 include/boost/atomic/detail/caps_gcc_ppc.hpp create mode 100644 include/boost/atomic/detail/caps_gcc_sparcv9.hpp create mode 100644 include/boost/atomic/detail/caps_gcc_sync.hpp create mode 100644 include/boost/atomic/detail/caps_gcc_x86.hpp create mode 100644 include/boost/atomic/detail/caps_linux_arm.hpp create mode 100644 include/boost/atomic/detail/caps_msvc_arm.hpp create mode 100644 include/boost/atomic/detail/caps_msvc_x86.hpp create mode 100644 include/boost/atomic/detail/caps_windows.hpp diff --git a/include/boost/atomic/capabilities.hpp b/include/boost/atomic/capabilities.hpp new file mode 100644 index 0000000..a5d1b3b --- /dev/null +++ b/include/boost/atomic/capabilities.hpp @@ -0,0 +1,82 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/capabilities.hpp + * + * This header defines feature capabilities macros. 
+ */ + +#ifndef BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_ +#define BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_ + +#include +#include + +#if !defined(BOOST_ATOMIC_EMULATED) +#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_) +#endif + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE +#define BOOST_ATOMIC_CHAR_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE +#define BOOST_ATOMIC_SHORT_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_INT_LOCK_FREE +#define BOOST_ATOMIC_INT_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_LONG_LOCK_FREE +#define BOOST_ATOMIC_LONG_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE +#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_INT128_LOCK_FREE +#define BOOST_ATOMIC_INT128_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE +#define BOOST_ATOMIC_POINTER_LOCK_FREE 0 +#endif + +#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE + +#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE +#define BOOST_ATOMIC_BOOL_LOCK_FREE 0 +#endif + +#ifndef BOOST_ATOMIC_THREAD_FENCE +#define BOOST_ATOMIC_THREAD_FENCE 0 +#endif + +#ifndef BOOST_ATOMIC_SIGNAL_FENCE +#define BOOST_ATOMIC_SIGNAL_FENCE 0 +#endif + +#endif // BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/atomic_traits_fwd.hpp b/include/boost/atomic/detail/atomic_traits_fwd.hpp new file mode 100644 index 0000000..198d87a --- /dev/null +++ b/include/boost/atomic/detail/atomic_traits_fwd.hpp @@ -0,0 +1,34 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/atomic_traits_fwd.hpp + * + * This header contains forward declaration of the \c atomic_traits template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TRAITS_FWD_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_ATOMIC_TRAITS_FWD_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +template< unsigned int Size > +struct atomic_traits; + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TRAITS_FWD_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_arm.hpp b/include/boost/atomic/detail/caps_gcc_arm.hpp new file mode 100644 index 0000000..df04375 --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_arm.hpp @@ -0,0 +1,53 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2009 Phil Endecott + * Copyright (c) 2013 Tim Blechmann + * ARM Code by Phil Endecott, based on other architectures. + * Copyright (c) 2014 Andrey Semashev + */ +/*! 
+ * \file atomic/detail/caps_gcc_arm.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 + +#if defined(__SIZEOF_LONG__) +#if __SIZEOF_LONG__ == 4 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#else +#include +#if ULONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#endif + +#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_atomic.hpp b/include/boost/atomic/detail/caps_gcc_atomic.hpp new file mode 100644 index 0000000..e5ec7ad --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_atomic.hpp @@ -0,0 +1,71 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/caps_gcc_atomic.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) +#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1 +#endif + +#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) +#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1 +#endif + +#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2 +#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2 +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_INT_LOCK_FREE == 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_LONG_LOCK_FREE == 2 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2 +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) +#define BOOST_ATOMIC_INT128_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2 +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#endif +#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 +#endif + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_ppc.hpp b/include/boost/atomic/detail/caps_gcc_ppc.hpp new file mode 100644 index 0000000..01ce0a6 --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_ppc.hpp @@ -0,0 +1,58 @@ +/* + * 
Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/caps_gcc_ppc.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 + +#if defined(__SIZEOF_LONG__) +#if __SIZEOF_LONG__ == 4 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#elif __SIZEOF_LONG__ == 8 && defined(__powerpc64__) +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#else +#include +#if ULONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#elif ULONG_MAX == 0xffffffffffffffff && defined(__powerpc64__) +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#endif + +#if defined(__powerpc64__) +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif + +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp new file mode 100644 index 0000000..4096118 --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp @@ -0,0 +1,51 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2010 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/caps_gcc_sparcv9.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARCV9_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARCV9_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 + +#if defined(__SIZEOF_LONG__) +#if __SIZEOF_LONG__ == 4 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#else +#include +#if ULONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#endif + +#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARCV9_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_sync.hpp b/include/boost/atomic/detail/caps_gcc_sync.hpp new file mode 100644 index 0000000..9bc846e --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_sync.hpp @@ -0,0 +1,60 @@ +/* + * Distributed under the Boost Software License, Version 1.0. 
+ * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/caps_gcc_sync.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 + +#if defined(__SIZEOF_LONG__) +#if __SIZEOF_LONG__ == 4 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#else +#include +#if ULONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#endif + +#if defined(__SIZEOF_LONG_LONG__) +#if __SIZEOF_LONG_LONG__ == 4 +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif +#else +#include +#if defined(ULLONG_MAX) && ULLONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif +#endif + +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_x86.hpp b/include/boost/atomic/detail/caps_gcc_x86.hpp new file mode 100644 index 0000000..ce31b22 --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_x86.hpp @@ -0,0 +1,74 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2013 - 2014 Andrey Semashev + */ +/*! 
+ * \file atomic/detail/caps_gcc_x86.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#if defined(__i386__) &&\ + (\ + defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\ + defined(__i586__) || defined(__i686__) || defined(__pentium4__) || defined(__nocona__) || defined(__core2__) || defined(__corei7__) ||\ + defined(__k6__) || defined(__athlon__) || defined(__k8__) || defined(__amdfam10__) || defined(__bdver1__) || defined(__bdver2__) || defined(__bdver3__) || defined(__btver1__) || defined(__btver2__)\ + ) +#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1 +#endif + +#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) +#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1 +#endif + +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 + +#if defined(__SIZEOF_LONG__) +#if __SIZEOF_LONG__ == 4 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#elif __SIZEOF_LONG__ == 8 && (defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)) +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#else +#include +#if ULONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#elif ULONG_MAX == 0xffffffffffffffff && (defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)) +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#endif + +#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif + +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) +#define BOOST_ATOMIC_INT128_LOCK_FREE 2 +#endif + +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_linux_arm.hpp b/include/boost/atomic/detail/caps_linux_arm.hpp new file mode 100644 index 0000000..37323e2 --- /dev/null +++ b/include/boost/atomic/detail/caps_linux_arm.hpp @@ -0,0 +1,62 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009, 2011 Helge Bahmann + * Copyright (c) 2009 Phil Endecott + * Copyright (c) 2013 Tim Blechmann + * Linux-specific code by Phil Endecott + * Copyright (c) 2014 Andrey Semashev + */ +/*! 
+ * \file atomic/detail/caps_linux_arm.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 + +#if defined(__SIZEOF_LONG__) +#if __SIZEOF_LONG__ == 4 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#else +#include +#if ULONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#endif +#endif + +#if defined(__SIZEOF_LONG_LONG__) +#if __SIZEOF_LONG_LONG__ == 4 +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif +#else +#include +#if defined(ULLONG_MAX) && ULLONG_MAX == 0xffffffff +#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#endif +#endif + +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_msvc_arm.hpp b/include/boost/atomic/detail/caps_msvc_arm.hpp new file mode 100644 index 0000000..bdd81bf --- /dev/null +++ b/include/boost/atomic/detail/caps_msvc_arm.hpp @@ -0,0 +1,41 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2012 - 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/caps_msvc_arm.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#define BOOST_ATOMIC_INT_LOCK_FREE 2 +#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#define BOOST_ATOMIC_INT128_LOCK_FREE 0 + +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_ARM_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_msvc_x86.hpp b/include/boost/atomic/detail/caps_msvc_x86.hpp new file mode 100644 index 0000000..c306388 --- /dev/null +++ b/include/boost/atomic/detail/caps_msvc_x86.hpp @@ -0,0 +1,55 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2012 - 2014 Andrey Semashev + */ +/*! 
* \file atomic/detail/caps_msvc_x86.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
+
+#include
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(_M_IX86) && _M_IX86 >= 500
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
+#define BOOST_ATOMIC_INT_LOCK_FREE 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE 2
+
+#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
+#define BOOST_ATOMIC_INT128_LOCK_FREE 2
+#endif
+
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_MSVC_X86_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/caps_windows.hpp b/include/boost/atomic/detail/caps_windows.hpp
new file mode 100644
index 0000000..858f280
--- /dev/null
+++ b/include/boost/atomic/detail/caps_windows.hpp
@@ -0,0 +1,41 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2012 - 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_windows.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
+
+#include
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
+#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
+#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
+#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
+#define BOOST_ATOMIC_INT_LOCK_FREE 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE 2
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#define BOOST_ATOMIC_INT128_LOCK_FREE 0
+
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/config.hpp b/include/boost/atomic/detail/config.hpp
index b1984e5..d03ec6a 100644
--- a/include/boost/atomic/detail/config.hpp
+++ b/include/boost/atomic/detail/config.hpp
@@ -1,11 +1,19 @@
-#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP
-#define BOOST_ATOMIC_DETAIL_CONFIG_HPP
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2012 Hartmut Kaiser
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/config.hpp
+ *
+ * This header defines configuration macros for Boost.Atomic
+ */

-// Copyright (c) 2012 Hartmut Kaiser
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
+#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_

 #include

@@ -13,4 +21,4 @@
 #pragma once
 #endif

-#endif
+#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/link.hpp b/include/boost/atomic/detail/link.hpp
index 25a4caf..4f522ac 100644
--- a/include/boost/atomic/detail/link.hpp
+++ b/include/boost/atomic/detail/link.hpp
@@ -1,11 +1,19 @@
-#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP
-#define BOOST_ATOMIC_DETAIL_LINK_HPP
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2012 Hartmut Kaiser
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/link.hpp
+ *
+ * This header defines macros for linking with the compiled library of Boost.Atomic
+ */

-// Copyright (c) 2012 Hartmut Kaiser
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
+#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_

 #include

diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp
index 4a172d6..746c6d4 100644
--- a/include/boost/atomic/detail/platform.hpp
+++ b/include/boost/atomic/detail/platform.hpp
@@ -1,13 +1,19 @@
-#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP
-#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/platform.hpp
+ *
+ * This header defines macros for the target platform detection
+ */

-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Platform selection file
+#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_

 #include

@@ -15,56 +21,70 @@
 #pragma once
 #endif

+#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
+#define BOOST_ATOMIC_EMULATED
+
 // Intel compiler does not support __atomic* intrinsics properly, although defines them (tested with 13.0.1 and 13.1.1 on Linux)
-#if (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407) && !defined(BOOST_INTEL_CXX_VERSION))\
+#elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407) && !defined(BOOST_INTEL_CXX_VERSION))\
 || (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))

- #include
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic

 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

- #include
-
-#elif 0 && defined(__GNUC__) && defined(__alpha__) /* currently does not work correctly */
-
- #include
- #include
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86

 #elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))

- #include
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc

 // This list of ARM architecture versions comes from Apple's arm/arch.h header.
 // I don't know how complete it is.
-#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
 || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
 || defined(__ARM_ARCH_6K__) \
 || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
 || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
 || defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__))
+#elif defined(__GNUC__) &&\
+ (\
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
+ defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) ||\
+ defined(__ARM_ARCH_6K__) ||\
+ defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
+ defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\
+ )

- #include
-
-#elif defined(__linux__) && defined(__arm__)
-
- #include
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_arm

 #elif defined(__GNUC__) && defined(__sparc_v9__)

- #include
-
-#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
-
- #include
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparcv9

 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401)

- #include
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync
+
+#elif defined(__linux__) && defined(__arm__)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM linux_arm
+
+#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
+
+#elif defined(_MSC_VER) && defined(_M_ARM)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm
+
+#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM windows

 #else

-#include
+#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
+#define BOOST_ATOMIC_EMULATED

 #endif

+#define BOOST_ATOMIC_DETAIL_HEADER(prefix)
+
+#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_

From 4efb93ba4d48292b1d02c39785e4c96cae717d80 Mon Sep 17 00:00:00 2001
From: Andrey Semashev
Date: Sun, 9 Feb 2014 13:38:52 +0400
Subject: [PATCH 02/23] Renamed header.

---
 .../atomic/detail/{atomic_traits_fwd.hpp => operations_fwd.hpp} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename include/boost/atomic/detail/{atomic_traits_fwd.hpp => operations_fwd.hpp} (100%)

diff --git a/include/boost/atomic/detail/atomic_traits_fwd.hpp b/include/boost/atomic/detail/operations_fwd.hpp
similarity index 100%
rename from include/boost/atomic/detail/atomic_traits_fwd.hpp
rename to include/boost/atomic/detail/operations_fwd.hpp

From 7b13e9cd24028e15c64d142f9ad984f3ac4d80b7 Mon Sep 17 00:00:00 2001
From: Andrey Semashev
Date: Sun, 9 Feb 2014 16:12:00 +0400
Subject: [PATCH 03/23] Reworked capabilities definition.

For all platforms, added BOOST_ATOMIC_INTn_LOCK_FREE macros that define
the support for fixed-size integers. Other macros are deduced from the
new ones, according to the native type sizes.
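
As an illustration of the new scheme (a sketch, not part of the patch): the
macros keep the usual 0/1/2 lock-free levels of the C++11 ATOMIC_xxx_LOCK_FREE
convention, and each per-type macro is expected to collapse onto the fixed-size
macro matching that type's width. Assuming a target where int is 32 bits wide:

    // Illustration only: a compile-time check of the deduction rule.
    // BOOST_ATOMIC_DETAIL_SIZEOF_INT is provided by the new int_sizes.hpp.
    #include <boost/static_assert.hpp>
    #include <boost/atomic/capabilities.hpp>

    #if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
    // int is 32 bits here, so its lock-free level must equal the fixed-size one
    BOOST_STATIC_ASSERT(BOOST_ATOMIC_INT_LOCK_FREE == BOOST_ATOMIC_INT32_LOCK_FREE);
    #endif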
--- include/boost/atomic/capabilities.hpp | 119 ++++++++++++---- include/boost/atomic/detail/caps_gcc_arm.hpp | 24 +--- .../boost/atomic/detail/caps_gcc_atomic.hpp | 63 +++++++++ include/boost/atomic/detail/caps_gcc_ppc.hpp | 30 +--- .../boost/atomic/detail/caps_gcc_sparcv9.hpp | 24 +--- include/boost/atomic/detail/caps_gcc_sync.hpp | 35 +---- include/boost/atomic/detail/caps_gcc_x86.hpp | 30 +--- .../boost/atomic/detail/caps_linux_arm.hpp | 33 +---- include/boost/atomic/detail/caps_msvc_arm.hpp | 14 +- include/boost/atomic/detail/caps_msvc_x86.hpp | 13 +- include/boost/atomic/detail/caps_windows.hpp | 14 +- include/boost/atomic/detail/int_sizes.hpp | 128 ++++++++++++++++++ .../boost/atomic/detail/operations_fwd.hpp | 12 +- 13 files changed, 325 insertions(+), 214 deletions(-) create mode 100644 include/boost/atomic/detail/int_sizes.hpp diff --git a/include/boost/atomic/capabilities.hpp b/include/boost/atomic/capabilities.hpp index a5d1b3b..f8fb542 100644 --- a/include/boost/atomic/capabilities.hpp +++ b/include/boost/atomic/capabilities.hpp @@ -16,6 +16,7 @@ #include #include +#include #if !defined(BOOST_ATOMIC_EMULATED) #include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_) @@ -25,42 +26,106 @@ #pragma once #endif -#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE -#define BOOST_ATOMIC_CHAR_LOCK_FREE 0 +#ifndef BOOST_ATOMIC_INT8_LOCK_FREE +#define BOOST_ATOMIC_INT8_LOCK_FREE 0 #endif -#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0 +#ifndef BOOST_ATOMIC_INT16_LOCK_FREE +#define BOOST_ATOMIC_INT16_LOCK_FREE 0 #endif -#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0 +#ifndef BOOST_ATOMIC_INT32_LOCK_FREE +#define BOOST_ATOMIC_INT32_LOCK_FREE 0 #endif -#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE -#define BOOST_ATOMIC_SHORT_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_INT_LOCK_FREE -#define BOOST_ATOMIC_INT_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_LONG_LOCK_FREE -#define BOOST_ATOMIC_LONG_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#ifndef BOOST_ATOMIC_INT64_LOCK_FREE +#define BOOST_ATOMIC_INT64_LOCK_FREE 0 #endif #ifndef BOOST_ATOMIC_INT128_LOCK_FREE #define BOOST_ATOMIC_INT128_LOCK_FREE 0 #endif + +#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE +#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE +#endif + +#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE +#endif + +#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE +#endif + +#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE +#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE +#else +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0 +#endif +#endif + +#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE +#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 +#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 +#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE +#elif 
BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
+#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_INT_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
+#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_INT_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
+#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LONG_LOCK_FREE 0
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
+#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
+#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
+#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
+#else
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#endif
+#endif
+
 #ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
 #define BOOST_ATOMIC_POINTER_LOCK_FREE 0
 #endif
@@ -68,7 +133,11 @@
 #define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE

 #ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
-#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
+#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
+#endif
+
+#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
+#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_BOOL_LOCK_FREE
 #endif

 #ifndef BOOST_ATOMIC_THREAD_FENCE
diff --git a/include/boost/atomic/detail/caps_gcc_arm.hpp b/include/boost/atomic/detail/caps_gcc_arm.hpp
index df04375..5cadf09 100644
--- a/include/boost/atomic/detail/caps_gcc_arm.hpp
+++ b/include/boost/atomic/detail/caps_gcc_arm.hpp
@@ -24,28 +24,10 @@
 #pragma once
 #endif

-#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
-#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
-#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
-#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
-#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
-#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
-#define BOOST_ATOMIC_INT_LOCK_FREE 2
-
-#if defined(__SIZEOF_LONG__)
-#if __SIZEOF_LONG__ == 4
-#define BOOST_ATOMIC_LONG_LOCK_FREE 2
-#endif
-#else
-#include
-#if ULONG_MAX == 0xffffffff
-#define BOOST_ATOMIC_LONG_LOCK_FREE 2
-#endif
-#endif
-
-#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2
-#define BOOST_ATOMIC_BOOL_LOCK_FREE 2

 #define BOOST_ATOMIC_THREAD_FENCE 2
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
diff --git a/include/boost/atomic/detail/caps_gcc_atomic.hpp b/include/boost/atomic/detail/caps_gcc_atomic.hpp
index 
e5ec7ad..8299ad0 100644 --- a/include/boost/atomic/detail/caps_gcc_atomic.hpp +++ b/include/boost/atomic/detail/caps_gcc_atomic.hpp @@ -15,6 +15,7 @@ #define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_ #include +#include #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -30,39 +31,101 @@ #if __GCC_ATOMIC_BOOL_LOCK_FREE == 2 #define BOOST_ATOMIC_FLAG_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_FLAG_LOCK_FREE 0 #endif #if __GCC_ATOMIC_CHAR_LOCK_FREE == 2 #define BOOST_ATOMIC_CHAR_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_CHAR_LOCK_FREE 0 #endif #if __GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2 #define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0 #endif #if __GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2 #define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0 #endif #if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2 #define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0 #endif #if __GCC_ATOMIC_SHORT_LOCK_FREE == 2 #define BOOST_ATOMIC_SHORT_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_SHORT_LOCK_FREE 0 #endif #if __GCC_ATOMIC_INT_LOCK_FREE == 2 #define BOOST_ATOMIC_INT_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_INT_LOCK_FREE 0 #endif #if __GCC_ATOMIC_LONG_LOCK_FREE == 2 #define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_LONG_LOCK_FREE 0 #endif #if __GCC_ATOMIC_LLONG_LOCK_FREE == 2 #define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 #endif #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) #define BOOST_ATOMIC_INT128_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_INT128_LOCK_FREE 0 #endif #if __GCC_ATOMIC_POINTER_LOCK_FREE == 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_POINTER_LOCK_FREE 0 #endif #if __GCC_ATOMIC_BOOL_LOCK_FREE == 2 #define BOOST_ATOMIC_BOOL_LOCK_FREE 2 +#else +#define BOOST_ATOMIC_BOOL_LOCK_FREE 0 +#endif + +#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_CHAR_LOCK_FREE + +#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE +#else +#define BOOST_ATOMIC_INT16_LOCK_FREE 0 +#endif + +#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 +#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 +#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 +#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 +#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE +#else +#define BOOST_ATOMIC_INT32_LOCK_FREE 0 +#endif + +#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 +#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_SHORT_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 +#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 +#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LONG_LOCK_FREE +#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 +#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE +#else +#define BOOST_ATOMIC_INT64_LOCK_FREE 0 #endif #define BOOST_ATOMIC_THREAD_FENCE 2 diff 
--git a/include/boost/atomic/detail/caps_gcc_ppc.hpp b/include/boost/atomic/detail/caps_gcc_ppc.hpp index 01ce0a6..6dbdde8 100644 --- a/include/boost/atomic/detail/caps_gcc_ppc.hpp +++ b/include/boost/atomic/detail/caps_gcc_ppc.hpp @@ -22,35 +22,13 @@ #pragma once #endif -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 - -#if defined(__SIZEOF_LONG__) -#if __SIZEOF_LONG__ == 4 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#elif __SIZEOF_LONG__ == 8 && defined(__powerpc64__) -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#else -#include -#if ULONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#elif ULONG_MAX == 0xffffffffffffffff && defined(__powerpc64__) -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#endif - +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #if defined(__powerpc64__) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 #endif - #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp index 4096118..2c645e0 100644 --- a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp +++ b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp @@ -22,28 +22,10 @@ #pragma once #endif -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 - -#if defined(__SIZEOF_LONG__) -#if __SIZEOF_LONG__ == 4 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#else -#include -#if ULONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#endif - -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_gcc_sync.hpp b/include/boost/atomic/detail/caps_gcc_sync.hpp index 9bc846e..37f5923 100644 --- a/include/boost/atomic/detail/caps_gcc_sync.hpp +++ b/include/boost/atomic/detail/caps_gcc_sync.hpp @@ -22,37 +22,16 @@ #pragma once #endif -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 - -#if defined(__SIZEOF_LONG__) -#if __SIZEOF_LONG__ == 4 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 #endif -#else -#include -#if ULONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) +#define BOOST_ATOMIC_INT128_LOCK_FREE 2 #endif -#endif - -#if defined(__SIZEOF_LONG_LONG__) -#if __SIZEOF_LONG_LONG__ == 4 -#define 
BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#endif -#else -#include -#if defined(ULLONG_MAX) && ULLONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#endif -#endif - #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_gcc_x86.hpp b/include/boost/atomic/detail/caps_gcc_x86.hpp index ce31b22..0696bf1 100644 --- a/include/boost/atomic/detail/caps_gcc_x86.hpp +++ b/include/boost/atomic/detail/caps_gcc_x86.hpp @@ -35,38 +35,16 @@ #define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1 #endif -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 - -#if defined(__SIZEOF_LONG__) -#if __SIZEOF_LONG__ == 4 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#elif __SIZEOF_LONG__ == 8 && (defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)) -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#else -#include -#if ULONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#elif ULONG_MAX == 0xffffffffffffffff && (defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)) -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#endif - +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 #endif - #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) #define BOOST_ATOMIC_INT128_LOCK_FREE 2 #endif - #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_linux_arm.hpp b/include/boost/atomic/detail/caps_linux_arm.hpp index 37323e2..abe6fb8 100644 --- a/include/boost/atomic/detail/caps_linux_arm.hpp +++ b/include/boost/atomic/detail/caps_linux_arm.hpp @@ -24,37 +24,10 @@ #pragma once #endif -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 - -#if defined(__SIZEOF_LONG__) -#if __SIZEOF_LONG__ == 4 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#else -#include -#if ULONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#endif - -#if defined(__SIZEOF_LONG_LONG__) -#if __SIZEOF_LONG_LONG__ == 4 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#endif -#else -#include -#if defined(ULLONG_MAX) && ULLONG_MAX == 0xffffffff -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#endif -#endif - +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_msvc_arm.hpp b/include/boost/atomic/detail/caps_msvc_arm.hpp index bdd81bf..2c31521 100644 --- a/include/boost/atomic/detail/caps_msvc_arm.hpp +++ b/include/boost/atomic/detail/caps_msvc_arm.hpp @@ -22,18 +22,10 @@ #pragma once #endif -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 
-#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#define BOOST_ATOMIC_INT128_LOCK_FREE 0 - +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_msvc_x86.hpp b/include/boost/atomic/detail/caps_msvc_x86.hpp index c306388..1b82b49 100644 --- a/include/boost/atomic/detail/caps_msvc_x86.hpp +++ b/include/boost/atomic/detail/caps_msvc_x86.hpp @@ -30,16 +30,12 @@ #define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1 #endif -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 #endif #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) @@ -47,7 +43,6 @@ #endif #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/caps_windows.hpp b/include/boost/atomic/detail/caps_windows.hpp index 858f280..1cc0ded 100644 --- a/include/boost/atomic/detail/caps_windows.hpp +++ b/include/boost/atomic/detail/caps_windows.hpp @@ -22,18 +22,10 @@ #pragma once #endif -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#define BOOST_ATOMIC_INT128_LOCK_FREE 0 - +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/int_sizes.hpp b/include/boost/atomic/detail/int_sizes.hpp new file mode 100644 index 0000000..5545ca1 --- /dev/null +++ b/include/boost/atomic/detail/int_sizes.hpp @@ -0,0 +1,128 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! 
* \file atomic/detail/int_sizes.hpp
+ *
+ * This header defines macros for testing builtin integer type sizes
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
+
+#include
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// GCC and compatible compilers define internal macros with builtin type traits
+#if defined(__SIZEOF_SHORT__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT __SIZEOF_SHORT__
+#endif
+#if defined(__SIZEOF_INT__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT __SIZEOF_INT__
+#endif
+#if defined(__SIZEOF_LONG__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG __SIZEOF_LONG__
+#endif
+#if defined(__SIZEOF_LONG_LONG__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG __SIZEOF_LONG_LONG__
+#endif
+#if defined(__SIZEOF_WCHAR_T__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
+
+// Try to deduce sizes from limits
+#include
+#include
+
+#if (USHRT_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 1
+#elif (USHRT_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 2
+#elif (USHRT_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 4
+#elif (USHRT_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 8
+#endif
+
+#if (UINT_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 1
+#elif (UINT_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 2
+#elif (UINT_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 4
+#elif (UINT_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 8
+#endif
+
+#if (ULONG_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 1
+#elif (ULONG_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 2
+#elif (ULONG_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 4
+#elif (ULONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 8
+#endif
+
+#if defined(__hpux) // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
+#else
+
+// The list of the non-standard macros (the ones except ULLONG_MAX) is taken from cstdint.hpp
+#if defined(ULLONG_MAX)
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULLONG_MAX
+#elif defined(ULONG_LONG_MAX)
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONG_LONG_MAX
+#elif defined(ULONGLONG_MAX)
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONGLONG_MAX
+#elif defined(_LLONG_MAX) // strangely enough, this one seems to be holding the limit for the unsigned integer
+#define BOOST_ATOMIC_DETAIL_ULLONG_MAX _LLONG_MAX
+#endif
+
+#if (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 1
+#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 2
+#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == 0xffffffff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 4
+#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX + 0) == UINT64_C(0xffffffffffffffff)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
+#endif
+
+#endif // defined(__hpux)
+
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
+
+#include
+#include
+
+#if (WCHAR_MAX + 0) == 0xff || (WCHAR_MAX + 0) == 0x7f
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 1
+#elif (WCHAR_MAX + 0) == 0xffff || (WCHAR_MAX + 0) == 0x7fff
+#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
+#elif (WCHAR_MAX + 0) 
== 0xffffffff || (WCHAR_MAX + 0) == 0x7fffffff +#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 4 +#elif (WCHAR_MAX + 0) == UINT64_C(0xffffffffffffffff) || (WCHAR_MAX + 0) == INT64_C(0x7fffffffffffffff) +#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 8 +#endif +#endif + +#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\ + !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\ + !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T) +#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please, report to the developers. +#endif + +#endif // BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/operations_fwd.hpp b/include/boost/atomic/detail/operations_fwd.hpp index 198d87a..3a26281 100644 --- a/include/boost/atomic/detail/operations_fwd.hpp +++ b/include/boost/atomic/detail/operations_fwd.hpp @@ -6,13 +6,13 @@ * Copyright (c) 2014 Andrey Semashev */ /*! - * \file atomic/detail/atomic_traits_fwd.hpp + * \file atomic/detail/operations_fwd.hpp * - * This header contains forward declaration of the \c atomic_traits template. + * This header contains forward declaration of the \c operations template. */ -#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TRAITS_FWD_HPP_INCLUDED_ -#define BOOST_ATOMIC_DETAIL_ATOMIC_TRAITS_FWD_HPP_INCLUDED_ +#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_ #include @@ -25,10 +25,10 @@ namespace atomics { namespace detail { template< unsigned int Size > -struct atomic_traits; +struct operations; } // namespace detail } // namespace atomics } // namespace boost -#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TRAITS_FWD_HPP_INCLUDED_ +#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_ From f3d59ec1c17c26e4e2e9ad67d01ee17cd893bff0 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sat, 19 Apr 2014 22:25:02 +0400 Subject: [PATCH 04/23] Working on the new library design. Added implementation of atomic ops with gcc __atomic intrinsics. Implemented the unified atomic interface. Extracted atomic_flag to a separate header. atomic_flag now follows standard requirements for static initialization, if possible. 
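
A usage sketch of the statically initializable flag (illustration only; it
assumes boost::atomic_flag and BOOST_ATOMIC_FLAG_INIT are exposed through
boost/atomic.hpp, as in the public interface):

    #include <boost/atomic.hpp>

    // Static initialization: no constructor code runs, so the flag is safe
    // to use even from other objects' dynamic initializers.
    static boost::atomic_flag g_lock = BOOST_ATOMIC_FLAG_INIT;

    void locked_work()
    {
        while (g_lock.test_and_set(boost::memory_order_acquire))
        {
            // spin until the current holder calls clear()
        }
        // ... critical section ...
        g_lock.clear(boost::memory_order_release);
    }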
--- include/boost/atomic/capabilities.hpp | 5 + include/boost/atomic/detail/atomic_flag.hpp | 79 +++ .../boost/atomic/detail/atomic_template.hpp | 561 ++++++++++++++++++ include/boost/atomic/detail/caps_gcc_sync.hpp | 19 +- include/boost/atomic/detail/int_sizes.hpp | 9 + .../atomic/detail/operations_lockfree.hpp | 28 + .../boost/atomic/detail/ops_gcc_atomic.hpp | 175 ++++++ include/boost/atomic/detail/storage_types.hpp | 84 +++ .../atomic/detail/type-classification.hpp | 45 -- include/boost/atomic/detail/union_cast.hpp | 46 ++ 10 files changed, 1004 insertions(+), 47 deletions(-) create mode 100644 include/boost/atomic/detail/atomic_flag.hpp create mode 100644 include/boost/atomic/detail/atomic_template.hpp create mode 100644 include/boost/atomic/detail/operations_lockfree.hpp create mode 100644 include/boost/atomic/detail/ops_gcc_atomic.hpp create mode 100644 include/boost/atomic/detail/storage_types.hpp delete mode 100644 include/boost/atomic/detail/type-classification.hpp create mode 100644 include/boost/atomic/detail/union_cast.hpp diff --git a/include/boost/atomic/capabilities.hpp b/include/boost/atomic/capabilities.hpp index f8fb542..34bef48 100644 --- a/include/boost/atomic/capabilities.hpp +++ b/include/boost/atomic/capabilities.hpp @@ -127,6 +127,11 @@ #endif #ifndef BOOST_ATOMIC_POINTER_LOCK_FREE +#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8 +#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE +#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4 +#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE +#else #define BOOST_ATOMIC_POINTER_LOCK_FREE 0 #endif diff --git a/include/boost/atomic/detail/atomic_flag.hpp b/include/boost/atomic/detail/atomic_flag.hpp new file mode 100644 index 0000000..fcab30e --- /dev/null +++ b/include/boost/atomic/detail/atomic_flag.hpp @@ -0,0 +1,79 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/atomic_flag.hpp + * + * This header contains interface definition of \c atomic_flag. 
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ + +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { + +#if defined(BOOST_NO_CXX11_CONSTEXPR) +#define BOOST_ATOMIC_NO_STATIC_INIT_ATOMIC_FLAG +#endif + +#if !defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX) && !defined(BOOST_ATOMIC_DEFAULT_INITIALIZE_ATOMIC_FLAG) +#define BOOST_ATOMIC_FLAG_INIT { 0 } +#else +namespace detail { +struct default_initializer {}; +BOOST_CONSTEXPR_OR_CONST default_initializer default_init = {}; +} // namespace detail +#define BOOST_ATOMIC_FLAG_INIT ::boost::atomics::detail::default_init +#endif + +struct atomic_flag +{ + typedef atomics::detail::operations< 1u > operations; + typedef operations::storage_type storage_type; + + storage_type m_storage; + +#if !defined(BOOST_ATOMIC_DEFAULT_INITIALIZE_ATOMIC_FLAG) +#if !defined(BOOST_NO_CXX11_DEFAULTED_FUNCTIONS) + BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT = default; +#else + BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT {} +#endif +#if defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX) + BOOST_CONSTEXPR atomic_flag(atomics::detail::default_initializer) BOOST_NOEXCEPT : m_storage(0) {} +#endif +#else + BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0) {} + BOOST_CONSTEXPR atomic_flag(atomics::detail::default_initializer) BOOST_NOEXCEPT : m_storage(0) {} +#endif + + bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return operations::test_and_set(m_storage, order); + } + + void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + operations::clear(m_storage, order); + } + + BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) + BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) +}; + +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp new file mode 100644 index 0000000..b901b18 --- /dev/null +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -0,0 +1,561 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/atomic_template.hpp + * + * This header contains interface definition of \c atomic template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_ + +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order order) BOOST_NOEXCEPT +{ + return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? memory_order_relaxed : order); +} + +template< typename T, bool IsInt = boost::is_integral< T >::value > +struct classify +{ + typedef void type; +}; + +template< typename T > +struct classify< T, true > { typedef int type; }; + +template< typename T > +struct classify< T*, false > { typedef void* type; }; + +template< typename T, typename Kind > +class base_atomic; + +//! 
Implementation for integers +template< typename T > +class base_atomic< T, int > +{ +private: + typedef base_atomic this_type; + typedef T value_type; + typedef T difference_type; + typedef operations< storage_size_of< value_type >::value > operations; + +protected: + typedef value_type value_arg_type; + +public: + typedef typename operations::storage_type storage_type; + +protected: + storage_type m_storage; + +public: + BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) + BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {} + + void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + operations::store(m_storage, static_cast< storage_type >(v), order); + } + + value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + { + return static_cast< value_type >(operations::load(m_storage, order)); + } + + value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order)); + } + + value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order)); + } + + value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order)); + } + + bool compare_exchange_strong( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = static_cast< storage_type >(expected); + const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); + expected = static_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool compare_exchange_weak( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = static_cast< storage_type >(expected); + const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); + expected = static_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order)); + } + + value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order)); + } + + value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return static_cast< 
value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order)); + } + + bool is_lock_free() const volatile BOOST_NOEXCEPT + { + return operations::is_lock_free(m_storage); + } + + value_type operator++(int) volatile BOOST_NOEXCEPT + { + return fetch_add(1); + } + + value_type operator++() volatile BOOST_NOEXCEPT + { + return fetch_add(1) + 1; + } + + value_type operator--(int) volatile BOOST_NOEXCEPT + { + return fetch_sub(1); + } + + value_type operator--() volatile BOOST_NOEXCEPT + { + return fetch_sub(1) - 1; + } + + value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT + { + return fetch_add(v) + v; + } + + value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT + { + return fetch_sub(v) - v; + } + + value_type operator&=(value_type v) volatile BOOST_NOEXCEPT + { + return fetch_and(v) & v; + } + + value_type operator|=(value_type v) volatile BOOST_NOEXCEPT + { + return fetch_or(v) | v; + } + + value_type operator^=(value_type v) volatile BOOST_NOEXCEPT + { + return fetch_xor(v) ^ v; + } + + BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) + BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) +}; + + +//! Implementation for user-defined types, such as structs and enums +template< typename T > +class base_atomic< T, void > +{ +private: + typedef base_atomic this_type; + typedef T value_type; + typedef operations< storage_size_of< value_type >::value > operations; + +protected: + typedef value_type const& value_arg_type; + +public: + typedef typename operations::storage_type storage_type; + +protected: + storage_type m_storage; + +public: + BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) + explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) + { + } + + void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); + } + + value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); + } + + value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); + } + + bool compare_exchange_strong( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); + const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); + expected = atomics::detail::union_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool compare_exchange_weak( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); + const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), 
success_order, failure_order); + expected = atomics::detail::union_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool is_lock_free() const volatile BOOST_NOEXCEPT + { + return operations::is_lock_free(m_storage); + } + + BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) + BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) +}; + + +//! Implementation for pointers +template +class base_atomic< T*, void* > +{ +private: + typedef base_atomic this_type; + typedef T* value_type; + typedef std::ptrdiff_t difference_type; + typedef operations< storage_size_of< value_type >::value > operations; + +protected: + typedef value_type value_arg_type; + +public: + typedef typename operations::storage_type storage_type; + +protected: + storage_type m_storage; + +public: + BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) + explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) + { + } + + void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); + } + + value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); + } + + value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); + } + + value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); + } + + value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); + } + + bool compare_exchange_strong( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); + const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); + expected = atomics::detail::union_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool compare_exchange_weak( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); + const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); + expected = 
atomics::detail::union_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool is_lock_free() const volatile BOOST_NOEXCEPT + { + return operations::is_lock_free(m_storage); + } + + value_type operator++(int) volatile BOOST_NOEXCEPT + { + return fetch_add(1); + } + + value_type operator++() volatile BOOST_NOEXCEPT + { + return fetch_add(1) + 1; + } + + value_type operator--(int) volatile BOOST_NOEXCEPT + { + return fetch_sub(1); + } + + value_type operator--() volatile BOOST_NOEXCEPT + { + return fetch_sub(1) - 1; + } + + value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT + { + return fetch_add(v) + v; + } + + value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT + { + return fetch_sub(v) - v; + } + + BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) + BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) +}; + + +//! Implementation for void pointers +template< > +class base_atomic< void*, void* > +{ +private: + typedef base_atomic this_type; + typedef void* value_type; + typedef std::ptrdiff_t difference_type; + typedef operations< storage_size_of< value_type >::value > operations; + +protected: + typedef value_type value_arg_type; + +public: + typedef typename operations::storage_type storage_type; + +protected: + storage_type m_storage; + +public: + BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) + explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) + { + } + + void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); + } + + value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); + } + + value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); + } + + value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); + } + + value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); + } + + bool compare_exchange_strong( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); + const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); + expected = atomics::detail::union_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return 
compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool compare_exchange_weak( + value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); + const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); + expected = atomics::detail::union_cast< value_type >(old_value); + return res; + } + + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool is_lock_free() const volatile BOOST_NOEXCEPT + { + return operations::is_lock_free(m_storage); + } + + value_type operator++(int) volatile BOOST_NOEXCEPT + { + return fetch_add(1); + } + + value_type operator++() volatile BOOST_NOEXCEPT + { + return (char*)fetch_add(1) + 1; + } + + value_type operator--(int) volatile BOOST_NOEXCEPT + { + return fetch_sub(1); + } + + value_type operator--() volatile BOOST_NOEXCEPT + { + return (char*)fetch_sub(1) - 1; + } + + value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT + { + return (char*)fetch_add(v) + v; + } + + value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT + { + return (char*)fetch_sub(v) - v; + } + + BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) + BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) +}; + +} // namespace detail + +template< typename T > +class atomic : + public atomics::detail::base_atomic< + T, + typename atomics::detail::classify< T >::type + > +{ +private: + typedef T value_type; + typedef atomics::detail::base_atomic< + T, + typename atomics::detail::classify< T >::type + > base_type; + typedef typename base_type::value_arg_type value_arg_type; + +public: + typedef typename base_type::storage_type storage_type; + +public: + BOOST_DEFAULTED_FUNCTION(atomic(), BOOST_NOEXCEPT {}) + + // NOTE: The constructor is made explicit because gcc 4.7 complains that + // operator=(value_arg_type) is considered ambiguous with operator=(atomic const&) + // in assignment expressions, even though conversion to atomic<> is less preferred + // than conversion to value_arg_type. 
+ explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {} + + BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT + { + this->store(v); + return v; + } + + BOOST_FORCEINLINE operator value_type() volatile const BOOST_NOEXCEPT + { + return this->load(); + } + + BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage; } + BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage; } + BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage; } + BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage; } + + BOOST_DELETED_FUNCTION(atomic(atomic const&)) + BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile) +}; + +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_gcc_sync.hpp b/include/boost/atomic/detail/caps_gcc_sync.hpp index 37f5923..43065fe 100644 --- a/include/boost/atomic/detail/caps_gcc_sync.hpp +++ b/include/boost/atomic/detail/caps_gcc_sync.hpp @@ -22,16 +22,31 @@ #pragma once #endif +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) #define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#endif +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) #define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#endif +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) #define BOOST_ATOMIC_INT32_LOCK_FREE 2 -#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) +#endif +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\ + || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) #define BOOST_ATOMIC_INT64_LOCK_FREE 2 #endif #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) #define BOOST_ATOMIC_INT128_LOCK_FREE 2 #endif -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 #define BOOST_ATOMIC_SIGNAL_FENCE 2 diff --git a/include/boost/atomic/detail/int_sizes.hpp b/include/boost/atomic/detail/int_sizes.hpp index 5545ca1..3d6f0cf 100644 --- a/include/boost/atomic/detail/int_sizes.hpp +++ b/include/boost/atomic/detail/int_sizes.hpp @@ -36,6 +36,15 @@ #if defined(__SIZEOF_WCHAR_T__) #define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__ #endif +#if defined(__SIZEOF_POINTER__) +#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER __SIZEOF_POINTER__ +#elif defined(_MSC_VER) +#if defined(_M_AMD64) || defined(_M_IA64) +#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8 +#else +#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4 +#endif +#endif #if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) diff --git a/include/boost/atomic/detail/operations_lockfree.hpp b/include/boost/atomic/detail/operations_lockfree.hpp new file mode 100644 index 0000000..544c04b --- /dev/null +++ b/include/boost/atomic/detail/operations_lockfree.hpp @@ -0,0 +1,28 @@ +/* + * Distributed under the Boost Software License, Version 1.0. 
+ * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/operations_lockfree.hpp + * + * This header defines lockfree atomic operations. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_ + +#include +#include + +#if !defined(BOOST_ATOMIC_EMULATED) +#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_) +#endif + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_LOCKFREE_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp new file mode 100644 index 0000000..1895b81 --- /dev/null +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -0,0 +1,175 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_gcc_atomic.hpp + * + * This header contains implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ + +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT +{ + return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME : + (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE : + (order == memory_order_acq_rel ? 
__ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST))))); +} + +template< typename T > +struct gcc_atomic_operations +{ + typedef T storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return __atomic_compare_exchange_n + ( + &storage, &expected, desired, false, + atomics::detail::convert_memory_order_to_gcc(success_order), + atomics::detail::convert_memory_order_to_gcc(failure_order) + ); + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return __atomic_compare_exchange_n + ( + &storage, &expected, desired, true, + atomics::detail::convert_memory_order_to_gcc(success_order), + atomics::detail::convert_memory_order_to_gcc(failure_order) + ); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order)); + } + + static BOOST_FORCEINLINE bool 
is_lock_free(storage_type const volatile& storage) BOOST_NOEXCEPT + { + return __atomic_is_lock_free(sizeof(storage_type), &storage); + } +}; + +#if BOOST_ATOMIC_INT8_LOCK_FREE > 0 +template< > +struct operations< 1u > : + public gcc_atomic_operations< storage8_t > +{ +}; +#endif + +#if BOOST_ATOMIC_INT16_LOCK_FREE > 0 +template< > +struct operations< 2u > : + public gcc_atomic_operations< storage16_t > +{ +}; +#endif + +#if BOOST_ATOMIC_INT32_LOCK_FREE > 0 +template< > +struct operations< 4u > : + public gcc_atomic_operations< storage32_t > +{ +}; +#endif + +#if BOOST_ATOMIC_INT64_LOCK_FREE > 0 +template< > +struct operations< 8u > : + public gcc_atomic_operations< storage64_t > +{ +}; +#endif + +#if BOOST_ATOMIC_INT128_LOCK_FREE > 0 +template< > +struct operations< 16u > : + public gcc_atomic_operations< storage128_t > +{ +}; +#endif + +} // namespace detail + +BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) +{ + __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order)); +} + +BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) +{ + __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order)); +} + +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/storage_types.hpp b/include/boost/atomic/detail/storage_types.hpp new file mode 100644 index 0000000..9738500 --- /dev/null +++ b/include/boost/atomic/detail/storage_types.hpp @@ -0,0 +1,84 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2013 - 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/storage_types.hpp + * + * This header defines underlying types used as storage + */ + +#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ + +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +typedef boost::uint8_t storage8_t; +typedef boost::uint16_t storage16_t; +typedef boost::uint32_t storage32_t; + +#if !defined(BOOST_NO_INT64_T) +typedef boost::uint64_t storage64_t; +#else +struct BOOST_ALIGNMENT(8) storage64_t +{ + boost::uint32_t data[2]; +}; + +BOOST_FORCEINLINE bool operator== (storage64_t const& left, storage64_t const& right) BOOST_NOEXCEPT +{ + return left.data[0] == right.data[0] && left.data[1] == right.data[1]; +} +BOOST_FORCEINLINE bool operator!= (storage64_t const& left, storage64_t const& right) BOOST_NOEXCEPT +{ + return !(left == right); +} +#endif + +#if !defined(BOOST_HAS_INT128) +typedef boost::uint128_type storage128_t; +#else +struct BOOST_ALIGNMENT(16) storage128_t +{ + storage64_t data[2]; +}; + +BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT +{ + return left.data[0] == right.data[0] && left.data[1] == right.data[1]; +} +BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT +{ + return !(left == right); +} +#endif + +template< typename T > +struct storage_size_of +{ + enum _ + { + size = sizeof(T), + value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 
16 : size))) + }; +}; + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/type-classification.hpp b/include/boost/atomic/detail/type-classification.hpp deleted file mode 100644 index 98bc311..0000000 --- a/include/boost/atomic/detail/type-classification.hpp +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP -#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP - -// Copyright (c) 2011 Helge Bahmann -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -template::value> -struct classify -{ - typedef void type; -}; - -template -struct classify {typedef int type;}; - -template -struct classify {typedef void* type;}; - -template -struct storage_size_of -{ - enum _ - { - size = sizeof(T), - value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 16 : size))) - }; -}; - -}}} - -#endif diff --git a/include/boost/atomic/detail/union_cast.hpp b/include/boost/atomic/detail/union_cast.hpp new file mode 100644 index 0000000..19fa3a4 --- /dev/null +++ b/include/boost/atomic/detail/union_cast.hpp @@ -0,0 +1,46 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2013 - 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/union_cast.hpp + * + * This header defines \c union_cast used to convert between storage and value types + */ + +#ifndef BOOST_ATOMIC_DETAIL_UNION_CAST_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_UNION_CAST_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +template< typename To, typename From > +BOOST_FORCEINLINE To union_cast(From const& from) BOOST_NOEXCEPT +{ + union + { + To as_to; + From as_from; + } + caster = {}; + caster.as_from = from; + return caster.as_to; +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_UNION_CAST_HPP_INCLUDED_ From 625c3c3a2edd172d74a0e8ee6b44afe1ec1186b5 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 20 Apr 2014 17:26:42 +0400 Subject: [PATCH 05/23] Compilation fixes. Added public headers for atomic_flag and atomic. 
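One note on the union_cast helper introduced above: it converts between the value type and the opaque storage type by writing one member of a local union and reading the other, zero-filling the union first so that bytes not covered by the source object are deterministic. Below is a standalone sketch of the technique; the demo struct and function name are hypothetical, and reading the inactive union member is formally undefined behaviour in ISO C++, so, like the library itself, this relies on compilers defining it:

    #include <iostream>
    #include <boost/cstdint.hpp>

    struct point
    {
        boost::int16_t x, y; // 4 bytes total, same size as 32-bit storage
    };

    template< typename To, typename From >
    To union_cast_demo(From const& from)
    {
        union
        {
            To as_to;
            From as_from;
        }
        caster = {}; // zero-fill first so padding bytes are reproducible
        caster.as_from = from;
        return caster.as_to;
    }

    int main()
    {
        point p = { 1, 2 };
        boost::uint32_t s = union_cast_demo< boost::uint32_t >(p); // value -> storage
        point q = union_cast_demo< point >(s);                     // storage -> value
        std::cout << q.x << ", " << q.y << std::endl;              // prints "1, 2"
        return 0;
    }

A memcpy-based cast would sidestep the aliasing question entirely, and later revisions of the library moved in that direction; the zero-fill-then-copy shape is what makes compare_exchange on user-defined types with padding behave predictably.
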
--- include/boost/atomic/atomic.hpp | 278 +++++------------- include/boost/atomic/atomic_flag.hpp | 33 +++ include/boost/atomic/capabilities.hpp | 4 + include/boost/atomic/detail/atomic_flag.hpp | 33 +-- .../boost/atomic/detail/atomic_template.hpp | 84 +++++- include/boost/atomic/detail/lockpool.hpp | 77 ++--- include/boost/atomic/detail/operations.hpp | 24 ++ .../atomic/detail/operations_lockfree.hpp | 2 + include/boost/atomic/detail/ops_emulated.hpp | 222 ++++++++++++++ include/boost/atomic/detail/pause.hpp | 43 +++ include/boost/atomic/detail/platform.hpp | 4 +- include/boost/atomic/detail/storage_types.hpp | 2 +- src/lockpool.cpp | 57 ++-- test/api_test_helpers.hpp | 4 + test/lockfree.cpp | 6 +- 15 files changed, 545 insertions(+), 328 deletions(-) create mode 100644 include/boost/atomic/atomic_flag.hpp create mode 100644 include/boost/atomic/detail/operations.hpp create mode 100644 include/boost/atomic/detail/ops_emulated.hpp create mode 100644 include/boost/atomic/detail/pause.hpp diff --git a/include/boost/atomic/atomic.hpp b/include/boost/atomic/atomic.hpp index dd3c014..cce8e8d 100644 --- a/include/boost/atomic/atomic.hpp +++ b/include/boost/atomic/atomic.hpp @@ -1,26 +1,25 @@ -#ifndef BOOST_ATOMIC_ATOMIC_HPP -#define BOOST_ATOMIC_ATOMIC_HPP +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/atomic.hpp + * + * This header contains definition of \c atomic template and \c atomic_flag. + */ -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann -// -// Distributed under the Boost Software License, Version 1.0. 
-// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) +#ifndef BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_ +#define BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_ -#include -#include - -#include - -#include -#include -#include -#include -#if defined(BOOST_MSVC) && BOOST_MSVC < 1400 -#include -#include -#endif +#include +#include +#include +#include #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -28,205 +27,66 @@ namespace boost { -#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE -#define BOOST_ATOMIC_CHAR_LOCK_FREE 0 -#endif +using atomics::atomic; -#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE -#define BOOST_ATOMIC_SHORT_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_INT_LOCK_FREE -#define BOOST_ATOMIC_INT_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_LONG_LOCK_FREE -#define BOOST_ATOMIC_LONG_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_INT128_LOCK_FREE -#define BOOST_ATOMIC_INT128_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE -#define BOOST_ATOMIC_POINTER_LOCK_FREE 0 -#endif - -#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE - -#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE -#define BOOST_ATOMIC_BOOL_LOCK_FREE 0 -#endif - -#ifndef BOOST_ATOMIC_THREAD_FENCE -#define BOOST_ATOMIC_THREAD_FENCE 0 -inline void atomic_thread_fence(memory_order) -{ -} -#endif - -#ifndef BOOST_ATOMIC_SIGNAL_FENCE -#define BOOST_ATOMIC_SIGNAL_FENCE 0 -inline void atomic_signal_fence(memory_order order) -{ - atomic_thread_fence(order); -} -#endif - -template -class atomic : - public atomics::detail::base_atomic< - T, - typename atomics::detail::classify::type, - atomics::detail::storage_size_of::value, -#if !defined(BOOST_MSVC) || BOOST_MSVC >= 1400 - boost::is_signed::value -#else - // MSVC 2003 has problems instantiating is_signed on non-integral types - mpl::and_< boost::is_integral, boost::is_signed >::value -#endif - > -{ -private: - typedef T value_type; - typedef atomics::detail::base_atomic< - T, - typename atomics::detail::classify::type, - atomics::detail::storage_size_of::value, -#if !defined(BOOST_MSVC) || BOOST_MSVC >= 1400 - boost::is_signed::value -#else - // MSVC 2003 has problems instantiating is_signed on non-itegral types - mpl::and_< boost::is_integral, boost::is_signed >::value -#endif - > super; - typedef typename super::value_arg_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(atomic(void), BOOST_NOEXCEPT {}) - - // NOTE: The constructor is made explicit because gcc 4.7 complains that - // operator=(value_arg_type) is considered ambiguous with operator=(atomic const&) - // in assignment expressions, even though conversion to atomic<> is less preferred - // than conversion to value_arg_type. 
- explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : super(v) {} - - value_type operator=(value_arg_type v) volatile BOOST_NOEXCEPT - { - this->store(v); - return v; - } - - operator value_type(void) volatile const BOOST_NOEXCEPT - { - return this->load(); - } - - BOOST_DELETED_FUNCTION(atomic(atomic const&)) - BOOST_DELETED_FUNCTION(atomic& operator=(atomic const&) volatile) -}; - -typedef atomic atomic_char; -typedef atomic atomic_uchar; -typedef atomic atomic_schar; -typedef atomic atomic_uint8_t; -typedef atomic atomic_int8_t; -typedef atomic atomic_ushort; -typedef atomic atomic_short; -typedef atomic atomic_uint16_t; -typedef atomic atomic_int16_t; -typedef atomic atomic_uint; -typedef atomic atomic_int; -typedef atomic atomic_uint32_t; -typedef atomic atomic_int32_t; -typedef atomic atomic_ulong; -typedef atomic atomic_long; -typedef atomic atomic_uint64_t; -typedef atomic atomic_int64_t; +using atomics::atomic_char; +using atomics::atomic_uchar; +using atomics::atomic_schar; +using atomics::atomic_uint8_t; +using atomics::atomic_int8_t; +using atomics::atomic_ushort; +using atomics::atomic_short; +using atomics::atomic_uint16_t; +using atomics::atomic_int16_t; +using atomics::atomic_uint; +using atomics::atomic_int; +using atomics::atomic_uint32_t; +using atomics::atomic_int32_t; +using atomics::atomic_ulong; +using atomics::atomic_long; +using atomics::atomic_uint64_t; +using atomics::atomic_int64_t; #ifdef BOOST_HAS_LONG_LONG -typedef atomic atomic_ullong; -typedef atomic atomic_llong; +using atomics::atomic_ullong; +using atomics::atomic_llong; #endif -typedef atomic atomic_address; -typedef atomic atomic_bool; -typedef atomic atomic_wchar_t; +using atomics::atomic_address; +using atomics::atomic_bool; +using atomics::atomic_wchar_t; #if !defined(BOOST_NO_CXX11_CHAR16_T) -typedef atomic atomic_char16_t; +using atomics::atomic_char16_t; #endif #if !defined(BOOST_NO_CXX11_CHAR32_T) -typedef atomic atomic_char32_t; +using atomics::atomic_char32_t; #endif -typedef atomic atomic_int_least8_t; -typedef atomic atomic_uint_least8_t; -typedef atomic atomic_int_least16_t; -typedef atomic atomic_uint_least16_t; -typedef atomic atomic_int_least32_t; -typedef atomic atomic_uint_least32_t; -typedef atomic atomic_int_least64_t; -typedef atomic atomic_uint_least64_t; -typedef atomic atomic_int_fast8_t; -typedef atomic atomic_uint_fast8_t; -typedef atomic atomic_int_fast16_t; -typedef atomic atomic_uint_fast16_t; -typedef atomic atomic_int_fast32_t; -typedef atomic atomic_uint_fast32_t; -typedef atomic atomic_int_fast64_t; -typedef atomic atomic_uint_fast64_t; -typedef atomic atomic_intmax_t; -typedef atomic atomic_uintmax_t; +using atomics::atomic_int_least8_t; +using atomics::atomic_uint_least8_t; +using atomics::atomic_int_least16_t; +using atomics::atomic_uint_least16_t; +using atomics::atomic_int_least32_t; +using atomics::atomic_uint_least32_t; +using atomics::atomic_int_least64_t; +using atomics::atomic_uint_least64_t; +using atomics::atomic_int_fast8_t; +using atomics::atomic_uint_fast8_t; +using atomics::atomic_int_fast16_t; +using atomics::atomic_uint_fast16_t; +using atomics::atomic_int_fast32_t; +using atomics::atomic_uint_fast32_t; +using atomics::atomic_int_fast64_t; +using atomics::atomic_uint_fast64_t; +using atomics::atomic_intmax_t; +using atomics::atomic_uintmax_t; -typedef atomic atomic_size_t; -typedef atomic atomic_ptrdiff_t; +using atomics::atomic_size_t; +using atomics::atomic_ptrdiff_t; #if defined(BOOST_HAS_INTPTR_T) -typedef atomic atomic_intptr_t; 
-typedef atomic atomic_uintptr_t; +using atomics::atomic_intptr_t; +using atomics::atomic_uintptr_t; #endif -#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE -#define BOOST_ATOMIC_FLAG_LOCK_FREE 0 -class atomic_flag -{ -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(false) {} +} // namespace boost - bool - test_and_set(memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT - { - return v_.exchange(true, order); - } - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v_.store(false, order); - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator=(atomic_flag const&)) - -private: - atomic v_; -}; -#endif - -} - -#endif +#endif // BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_ diff --git a/include/boost/atomic/atomic_flag.hpp b/include/boost/atomic/atomic_flag.hpp new file mode 100644 index 0000000..ac296bc --- /dev/null +++ b/include/boost/atomic/atomic_flag.hpp @@ -0,0 +1,33 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/atomic_flag.hpp + * + * This header contains definition of \c atomic_flag. + */ + +#ifndef BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_ +#define BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_ + +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { + +using atomics::atomic_flag; + +} // namespace boost + +#endif // BOOST_ATOMIC_ATOMIC_FLAG_HPP_INCLUDED_ diff --git a/include/boost/atomic/capabilities.hpp b/include/boost/atomic/capabilities.hpp index 34bef48..658dd22 100644 --- a/include/boost/atomic/capabilities.hpp +++ b/include/boost/atomic/capabilities.hpp @@ -99,6 +99,7 @@ #else #define BOOST_ATOMIC_INT_LOCK_FREE 0 #endif +#endif #ifndef BOOST_ATOMIC_LONG_LOCK_FREE #if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 @@ -112,6 +113,7 @@ #else #define BOOST_ATOMIC_LONG_LOCK_FREE 0 #endif +#endif #ifndef BOOST_ATOMIC_LLONG_LOCK_FREE #if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 @@ -125,6 +127,7 @@ #else #define BOOST_ATOMIC_LLONG_LOCK_FREE 0 #endif +#endif #ifndef BOOST_ATOMIC_POINTER_LOCK_FREE #if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8 @@ -134,6 +137,7 @@ #else #define BOOST_ATOMIC_POINTER_LOCK_FREE 0 #endif +#endif #define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE diff --git a/include/boost/atomic/detail/atomic_flag.hpp b/include/boost/atomic/detail/atomic_flag.hpp index fcab30e..0fb441f 100644 --- a/include/boost/atomic/detail/atomic_flag.hpp +++ b/include/boost/atomic/detail/atomic_flag.hpp @@ -14,8 +14,9 @@ #ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ +#include #include -#include +#include #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -24,18 +25,10 @@ namespace boost { namespace atomics { -#if defined(BOOST_NO_CXX11_CONSTEXPR) -#define BOOST_ATOMIC_NO_STATIC_INIT_ATOMIC_FLAG -#endif - -#if !defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX) && !defined(BOOST_ATOMIC_DEFAULT_INITIALIZE_ATOMIC_FLAG) -#define BOOST_ATOMIC_FLAG_INIT { 0 } +#if defined(BOOST_NO_CXX11_CONSTEXPR) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX) +#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT #else -namespace detail { -struct default_initializer {}; -BOOST_CONSTEXPR_OR_CONST default_initializer default_init = {}; -} // namespace detail 
-#define BOOST_ATOMIC_FLAG_INIT ::boost::atomics::detail::default_init +#define BOOST_ATOMIC_FLAG_INIT {} #endif struct atomic_flag @@ -45,19 +38,9 @@ struct atomic_flag storage_type m_storage; -#if !defined(BOOST_ATOMIC_DEFAULT_INITIALIZE_ATOMIC_FLAG) -#if !defined(BOOST_NO_CXX11_DEFAULTED_FUNCTIONS) - BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT = default; -#else - BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT {} -#endif -#if defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX) - BOOST_CONSTEXPR atomic_flag(atomics::detail::default_initializer) BOOST_NOEXCEPT : m_storage(0) {} -#endif -#else - BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0) {} - BOOST_CONSTEXPR atomic_flag(atomics::detail::default_initializer) BOOST_NOEXCEPT : m_storage(0) {} -#endif + BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0) + { + } bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp index b901b18..dd836c0 100644 --- a/include/boost/atomic/detail/atomic_template.hpp +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -3,6 +3,8 @@ * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann * Copyright (c) 2014 Andrey Semashev */ /*! @@ -15,6 +17,7 @@ #define BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_ #include +#include #include #include #include @@ -56,7 +59,7 @@ private: typedef base_atomic this_type; typedef T value_type; typedef T difference_type; - typedef operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; protected: typedef value_type value_arg_type; @@ -201,7 +204,7 @@ class base_atomic< T, void > private: typedef base_atomic this_type; typedef T value_type; - typedef operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; protected: typedef value_type const& value_arg_type; @@ -279,7 +282,7 @@ private: typedef base_atomic this_type; typedef T* value_type; typedef std::ptrdiff_t difference_type; - typedef operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; protected: typedef value_type value_arg_type; @@ -397,7 +400,7 @@ private: typedef base_atomic this_type; typedef void* value_type; typedef std::ptrdiff_t difference_type; - typedef operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; protected: typedef value_type value_arg_type; @@ -426,12 +429,12 @@ public: value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); + return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order)); } value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); + return atomics::detail::union_cast< value_type 
>(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order)); } value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT @@ -510,17 +513,11 @@ public: template< typename T > class atomic : - public atomics::detail::base_atomic< - T, - typename atomics::detail::classify< T >::type - > + public atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type > { private: typedef T value_type; - typedef atomics::detail::base_atomic< - T, - typename atomics::detail::classify< T >::type - > base_type; + typedef atomics::detail::base_atomic< T, typename atomics::detail::classify< T >::type > base_type; typedef typename base_type::value_arg_type value_arg_type; public: @@ -552,9 +549,68 @@ public: BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage; } BOOST_DELETED_FUNCTION(atomic(atomic const&)) + BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&)) BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile) }; +typedef atomic atomic_char; +typedef atomic atomic_uchar; +typedef atomic atomic_schar; +typedef atomic atomic_uint8_t; +typedef atomic atomic_int8_t; +typedef atomic atomic_ushort; +typedef atomic atomic_short; +typedef atomic atomic_uint16_t; +typedef atomic atomic_int16_t; +typedef atomic atomic_uint; +typedef atomic atomic_int; +typedef atomic atomic_uint32_t; +typedef atomic atomic_int32_t; +typedef atomic atomic_ulong; +typedef atomic atomic_long; +typedef atomic atomic_uint64_t; +typedef atomic atomic_int64_t; +#ifdef BOOST_HAS_LONG_LONG +typedef atomic atomic_ullong; +typedef atomic atomic_llong; +#endif +typedef atomic atomic_address; +typedef atomic atomic_bool; +typedef atomic atomic_wchar_t; +#if !defined(BOOST_NO_CXX11_CHAR16_T) +typedef atomic atomic_char16_t; +#endif +#if !defined(BOOST_NO_CXX11_CHAR32_T) +typedef atomic atomic_char32_t; +#endif + +typedef atomic atomic_int_least8_t; +typedef atomic atomic_uint_least8_t; +typedef atomic atomic_int_least16_t; +typedef atomic atomic_uint_least16_t; +typedef atomic atomic_int_least32_t; +typedef atomic atomic_uint_least32_t; +typedef atomic atomic_int_least64_t; +typedef atomic atomic_uint_least64_t; +typedef atomic atomic_int_fast8_t; +typedef atomic atomic_uint_fast8_t; +typedef atomic atomic_int_fast16_t; +typedef atomic atomic_uint_fast16_t; +typedef atomic atomic_int_fast32_t; +typedef atomic atomic_uint_fast32_t; +typedef atomic atomic_int_fast64_t; +typedef atomic atomic_uint_fast64_t; +typedef atomic atomic_intmax_t; +typedef atomic atomic_uintmax_t; + +typedef atomic atomic_size_t; +typedef atomic atomic_ptrdiff_t; + +#if defined(BOOST_HAS_INTPTR_T) +typedef atomic atomic_intptr_t; +typedef atomic atomic_uintptr_t; +#endif + } // namespace atomics } // namespace boost diff --git a/include/boost/atomic/detail/lockpool.hpp b/include/boost/atomic/detail/lockpool.hpp index 6af9479..10b9b85 100644 --- a/include/boost/atomic/detail/lockpool.hpp +++ b/include/boost/atomic/detail/lockpool.hpp @@ -1,12 +1,19 @@ -#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP -#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013-2014 Andrey Semashev + */ +/*! + * \file atomic/detail/lockpool.hpp + * + * This header contains declaration of the lockpool used to emulate atomic ops. 
+ */ -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Andrey Semashev -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) +#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_ #include #include @@ -19,14 +26,11 @@ namespace boost { namespace atomics { namespace detail { -#if !defined(BOOST_ATOMIC_FLAG_LOCK_FREE) || BOOST_ATOMIC_FLAG_LOCK_FREE != 2 - -class lockpool +struct lockpool { -public: class scoped_lock { - void* lock_; + void* m_lock; public: explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr); @@ -37,47 +41,8 @@ public: }; }; -#else +} // namespace detail +} // namespace atomics +} // namespace boost -class lockpool -{ -public: - typedef atomic_flag lock_type; - - class scoped_lock - { - private: - lock_type& flag_; - - public: - explicit - scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr)) - { - while (flag_.test_and_set(memory_order_acquire)) - { -#if defined(BOOST_ATOMIC_X86_PAUSE) - BOOST_ATOMIC_X86_PAUSE(); -#endif - } - } - - ~scoped_lock(void) - { - flag_.clear(memory_order_release); - } - - BOOST_DELETED_FUNCTION(scoped_lock(const scoped_lock &)) - BOOST_DELETED_FUNCTION(scoped_lock& operator=(const scoped_lock &)) - }; - -private: - static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr); -}; - -#endif - -} -} -} - -#endif +#endif // BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/operations.hpp b/include/boost/atomic/detail/operations.hpp new file mode 100644 index 0000000..d81399a --- /dev/null +++ b/include/boost/atomic/detail/operations.hpp @@ -0,0 +1,24 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/operations.hpp + * + * This header defines atomic operations, including the emulated version. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_ + +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#endif // BOOST_ATOMIC_DETAIL_OPERATIONS_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/operations_lockfree.hpp b/include/boost/atomic/detail/operations_lockfree.hpp index 544c04b..b465403 100644 --- a/include/boost/atomic/detail/operations_lockfree.hpp +++ b/include/boost/atomic/detail/operations_lockfree.hpp @@ -19,6 +19,8 @@ #if !defined(BOOST_ATOMIC_EMULATED) #include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_) +#else +#include #endif #ifdef BOOST_HAS_PRAGMA_ONCE diff --git a/include/boost/atomic/detail/ops_emulated.hpp b/include/boost/atomic/detail/ops_emulated.hpp new file mode 100644 index 0000000..367534f --- /dev/null +++ b/include/boost/atomic/detail/ops_emulated.hpp @@ -0,0 +1,222 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_emulated.hpp + * + * This header contains lockpool-based implementation of the \c operations template. 
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +template< typename T > +struct emulated_operations +{ + typedef T storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + lockpool::scoped_lock lock(&storage); + const_cast< storage_type& >(storage) = v; + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + lockpool::scoped_lock lock(&storage); + return const_cast< storage_type const& >(storage); + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + s += v; + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + s -= v; + return old_val; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + s = v; + return old_val; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + const bool res = old_val == expected; + if (res) + s = desired; + expected = old_val; + + return res; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + s &= v; + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + s |= v; + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type& s = const_cast< storage_type& >(storage); + lockpool::scoped_lock lock(&storage); + storage_type old_val = s; + s ^= v; + return old_val; + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, 
memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return exchange(storage, (storage_type)1, order) != (storage_type)0; + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + store(storage, (storage_type)0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return false; + } +}; + +template< unsigned int Size > +struct storage_t +{ + unsigned char data[Size]; + + bool operator== (storage_t const& that) const + { + return std::memcmp(data, that.data, Size) == 0; + } + bool operator!= (storage_t const& that) const + { + return std::memcmp(data, that.data, Size) != 0; + } +}; + +template< unsigned int Size > +struct default_storage_type +{ + typedef storage_t< Size > type; +}; + +template< > +struct default_storage_type< 1u > +{ + typedef storage8_t type; +}; + +template< > +struct default_storage_type< 2u > +{ + typedef storage16_t type; +}; + +template< > +struct default_storage_type< 4u > +{ + typedef storage32_t type; +}; + +template< > +struct default_storage_type< 8u > +{ + typedef storage64_t type; +}; + +template< > +struct default_storage_type< 16u > +{ + typedef storage128_t type; +}; + +template< unsigned int Size > +struct operations : + public emulated_operations< typename default_storage_type< Size >::type > +{ +}; + +} // namespace detail + +#if BOOST_ATOMIC_THREAD_FENCE == 0 +BOOST_FORCEINLINE void atomic_thread_fence(memory_order) +{ + // Emulate full fence by locking/unlocking a mutex + detail::lockpool::scoped_lock lock(0); +} +#endif + +#if BOOST_ATOMIC_SIGNAL_FENCE == 0 +BOOST_FORCEINLINE void atomic_signal_fence(memory_order) +{ + // We can't use pthread functions in signal handlers, so only use lock pool if it is based on atomic_flags. + // However, any reasonable backend with a lockfree atomic_flag should provide fence primitives already. + // So this condition is more for completeness sake. +#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2 + detail::lockpool::scoped_lock lock(0); +#endif +} +#endif + +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/pause.hpp b/include/boost/atomic/detail/pause.hpp new file mode 100644 index 0000000..15d7a02 --- /dev/null +++ b/include/boost/atomic/detail/pause.hpp @@ -0,0 +1,43 @@ +/* + * Distributed under the Boost Software License, Version 1.0. 
+ * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * (C) Copyright 2013 Tim Blechmann + * (C) Copyright 2013 Andrey Semashev + */ + +#ifndef BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86)) +extern "C" void _mm_pause(void); +#pragma intrinsic(_mm_pause) +#endif + +namespace boost { +namespace atomics { +namespace detail { + +BOOST_FORCEINLINE void pause() BOOST_NOEXCEPT +{ +#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86)) + _mm_pause(); + +#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + __asm__ __volatile__("pause;"); + +#endif +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_PAUSE_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index 746c6d4..87ddab5 100644 --- a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -21,7 +21,7 @@ #pragma once #endif -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) +#if defined(BOOST_ATOMIC_FORCE_FALLBACK) #define BOOST_ATOMIC_DETAIL_PLATFORM emulated #define BOOST_ATOMIC_EMULATED @@ -85,6 +85,6 @@ #endif -#define BOOST_ATOMIC_DETAIL_HEADER(prefix) +#define BOOST_ATOMIC_DETAIL_HEADER(prefix) #endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/storage_types.hpp b/include/boost/atomic/detail/storage_types.hpp index 9738500..178a409 100644 --- a/include/boost/atomic/detail/storage_types.hpp +++ b/include/boost/atomic/detail/storage_types.hpp @@ -49,7 +49,7 @@ BOOST_FORCEINLINE bool operator!= (storage64_t const& left, storage64_t const& r } #endif -#if !defined(BOOST_HAS_INT128) +#if defined(BOOST_HAS_INT128) typedef boost::uint128_type storage128_t; #else struct BOOST_ALIGNMENT(16) storage128_t diff --git a/src/lockpool.cpp b/src/lockpool.cpp index 31c7d82..9f9dbf9 100644 --- a/src/lockpool.cpp +++ b/src/lockpool.cpp @@ -1,22 +1,34 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013-2014 Andrey Semashev + */ +/*! + * \file lockpool.cpp + * + * This file contains implementation of the lockpool used to emulate atomic ops. + */ + #include #include #include -#include +#include +#include -#if !defined(BOOST_ATOMIC_FLAG_LOCK_FREE) || BOOST_ATOMIC_FLAG_LOCK_FREE != 2 -#if !defined(BOOST_HAS_PTHREADS) +#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2 +#include +#elif !defined(BOOST_HAS_PTHREADS) #error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available -#endif +#else #include #define BOOST_ATOMIC_USE_PTHREAD #endif -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Andrey Semashev -// -// Distributed under the Boost Software License, Version 1.0. 
-// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) +#include +#include namespace boost { namespace atomics { @@ -43,7 +55,8 @@ struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock #if defined(BOOST_ATOMIC_USE_PTHREAD) typedef pthread_mutex_t lock_type; #else - typedef lockpool::lock_type lock_type; + typedef atomics::detail::operations< 1u > operations; + typedef operations::storage_type lock_type; #endif lock_type lock; @@ -54,7 +67,7 @@ struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock padding< padding_size > pad; }; -static padded_lock lock_pool_[41] +static padded_lock g_lock_pool[41] #if defined(BOOST_ATOMIC_USE_PTHREAD) = { @@ -77,23 +90,31 @@ static padded_lock lock_pool_[41] #if !defined(BOOST_ATOMIC_USE_PTHREAD) // NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes. -BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr) +BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) : + m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock) { - std::size_t index = reinterpret_cast< std::size_t >(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_)); - return lock_pool_[index].lock; + while (padded_lock::operations::test_and_set(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_acquire)) + { + atomics::detail::pause(); + } +} + +BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() +{ + padded_lock::operations::clear(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_release); } #else // !defined(BOOST_ATOMIC_USE_PTHREAD) BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) : - lock_(&lock_pool_[reinterpret_cast< std::size_t >(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_))].lock) + m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock) { - BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(lock_)) == 0); + BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0); } BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() { - BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(lock_)) == 0); + BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0); } #endif // !defined(BOOST_ATOMIC_USE_PTHREAD) diff --git a/test/api_test_helpers.hpp b/test/api_test_helpers.hpp index 1d71547..dfd0163 100644 --- a/test/api_test_helpers.hpp +++ b/test/api_test_helpers.hpp @@ -15,7 +15,11 @@ execution */ static void test_flag_api(void) { +#ifndef BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT + boost::atomic_flag f = BOOST_ATOMIC_FLAG_INIT; +#else boost::atomic_flag f; +#endif BOOST_CHECK( !f.test_and_set() ); BOOST_CHECK( f.test_and_set() ); diff --git a/test/lockfree.cpp b/test/lockfree.cpp index 30031c2..523f784 100644 --- a/test/lockfree.cpp +++ b/test/lockfree.cpp @@ -44,7 +44,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_ #define EXPECT_SHORT_LOCK_FREE 2 #define EXPECT_INT_LOCK_FREE 2 #define EXPECT_LONG_LOCK_FREE 2 -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) #define EXPECT_LLONG_LOCK_FREE 2 #else #define EXPECT_LLONG_LOCK_FREE 0 @@ -60,7 +60,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_ #define EXPECT_INT_LOCK_FREE 2 #define 
EXPECT_LONG_LOCK_FREE 2 #define EXPECT_LLONG_LOCK_FREE 2 -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG16B) && defined(BOOST_HAS_INT128) +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && defined(BOOST_HAS_INT128) #define EXPECT_INT128_LOCK_FREE 2 #else #define EXPECT_INT128_LOCK_FREE 0 @@ -141,7 +141,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_ #define EXPECT_SHORT_LOCK_FREE 2 #define EXPECT_INT_LOCK_FREE 2 #define EXPECT_LONG_LOCK_FREE 2 -#if defined(_WIN64) || defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64) +#if defined(_WIN64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64) #define EXPECT_LLONG_LOCK_FREE 2 #else #define EXPECT_LLONG_LOCK_FREE 0 From 0e5b641e0ab55887949715c7237176e27a4acbc0 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 20 Apr 2014 19:03:56 +0400 Subject: [PATCH 06/23] Added workarounds for clang. --- include/boost/atomic/detail/ops_cas_based.hpp | 91 +++++++++ .../boost/atomic/detail/ops_gcc_atomic.hpp | 190 ++++++++++++++++++ include/boost/atomic/detail/storage_types.hpp | 17 -- 3 files changed, 281 insertions(+), 17 deletions(-) create mode 100644 include/boost/atomic/detail/ops_cas_based.hpp diff --git a/include/boost/atomic/detail/ops_cas_based.hpp b/include/boost/atomic/detail/ops_cas_based.hpp new file mode 100644 index 0000000..6619396 --- /dev/null +++ b/include/boost/atomic/detail/ops_cas_based.hpp @@ -0,0 +1,91 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_cas_based.hpp + * + * This header contains CAS-based implementation of the \c operations template. 
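+ *
+ * Each read-modify-write operation below is expressed as a loop: load the
+ * current value, compute the new one, and retry compare_exchange_weak until
+ * no other thread has modified the storage in between.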
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_ + +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +template< typename Base > +struct cas_based_operations : + public Base +{ + typedef typename Base::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {} + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {} + return old_val; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {} + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {} + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {} + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {} + return old_val; + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + return exchange(storage, (storage_type)1, order) != (storage_type)0; + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + Base::store(storage, (storage_type)0, order); + } +}; + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_CAS_BASED_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp index 1895b81..3ed68dd 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -14,10 +14,14 @@ #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ +#include #include #include #include #include +#if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || 
defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)) +#include +#endif #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -142,19 +146,205 @@ struct operations< 4u > : #endif #if BOOST_ATOMIC_INT64_LOCK_FREE > 0 +#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) + +// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355 +struct clang_dcas_x86 +{ + typedef storage64_t storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + if ((((uint32_t)&storage) & 0x00000007) == 0) + { +#if defined(__SSE2__) + __asm__ __volatile__ + ( +#if defined(__AVX__) + "vmovq %1, %%xmm4\n\t" + "vmovq %%xmm4, %0\n\t" +#else + "movq %1, %%xmm4\n\t" + "movq %%xmm4, %0\n\t" +#endif + : "=m" (storage) + : "m" (v) + : "memory", "xmm4" + ); +#else + __asm__ __volatile__ + ( + "fildll %1\n\t" + "fistpll %0\n\t" + : "=m" (storage) + : "m" (v) + : "memory" + ); +#endif + } + else + { + uint32_t scratch; + __asm__ __volatile__ + ( + "movl %%ebx, %[scratch]\n\t" + "movl %[value_lo], %%ebx\n\t" + "movl 0(%[dest]), %%eax\n\t" + "movl 4(%[dest]), %%edx\n\t" + ".align 16\n\t" + "1: lock; cmpxchg8b 0(%[dest])\n\t" + "jne 1b\n\t" + "movl %[scratch], %%ebx" + : [scratch] "=m,m" (scratch) + : [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) + : "memory", "cc", "edx" + ); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type value; + + if ((((uint32_t)&storage) & 0x00000007) == 0) + { +#if defined(__SSE2__) + __asm__ __volatile__ + ( +#if defined(__AVX__) + "vmovq %1, %%xmm4\n\t" + "vmovq %%xmm4, %0\n\t" +#else + "movq %1, %%xmm4\n\t" + "movq %%xmm4, %0\n\t" +#endif + : "=m" (value) + : "m" (storage) + : "memory", "xmm4" + ); +#else + __asm__ __volatile__ + ( + "fildll %1\n\t" + "fistpll %0\n\t" + : "=m" (value) + : "m" (storage) + : "memory" + ); +#endif + } + else + { + // We don't care for comparison result here; the previous value will be stored into value anyway. 
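+            // A CAS with a zero comparand either fails and leaves the storage intact, or stores 0 over an existing 0;
+            // in both cases the storage is unchanged and the intrinsic returns the current value as one atomic 64-bit read.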
+ value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0); + } + + return value; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + storage_type old_expected = expected; + expected = __sync_val_compare_and_swap(&storage, old_expected, desired); + return expected == old_expected; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 8u > : + public cas_based_operations< clang_dcas_x86 > +{ +}; + +#else + template< > struct operations< 8u > : public gcc_atomic_operations< storage64_t > { }; + +#endif #endif #if BOOST_ATOMIC_INT128_LOCK_FREE > 0 +#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) + +// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149 +// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 +struct clang_dcas_x86_64 +{ + typedef storage128_t storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + uint64_t const* p_value = (uint64_t const*)&value; + __asm__ __volatile__ + ( + "movq 0(%[dest]), %%rax\n\t" + "movq 8(%[dest]), %%rdx\n\t" + ".align 16\n\t" + "1: lock; cmpxchg16b 0(%[dest])\n\t" + "jne 1b" + : + : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (ptr) + : "memory", "cc", "rax", "rdx" + ); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + { + storage_type value = storage_type(); + return __sync_val_compare_and_swap(&storage, value, value); + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + storage_type old_expected = expected; + expected = __sync_val_compare_and_swap(&storage, old_expected, desired); + return expected == old_expected; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 16u > : + public cas_based_operations< clang_dcas_x86_64 > +{ +}; + +#else + template< > struct operations< 16u > : public gcc_atomic_operations< storage128_t > { }; + +#endif #endif } // namespace detail diff --git a/include/boost/atomic/detail/storage_types.hpp b/include/boost/atomic/detail/storage_types.hpp index 178a409..1fda63b 100644 --- a/include/boost/atomic/detail/storage_types.hpp +++ b/include/boost/atomic/detail/storage_types.hpp @@ -30,24 +30,7 @@ namespace detail { typedef boost::uint8_t storage8_t; typedef boost::uint16_t storage16_t; typedef 
boost::uint32_t storage32_t; - -#if !defined(BOOST_NO_INT64_T) typedef boost::uint64_t storage64_t; -#else -struct BOOST_ALIGNMENT(8) storage64_t -{ - boost::uint32_t data[2]; -}; - -BOOST_FORCEINLINE bool operator== (storage64_t const& left, storage64_t const& right) BOOST_NOEXCEPT -{ - return left.data[0] == right.data[0] && left.data[1] == right.data[1]; -} -BOOST_FORCEINLINE bool operator!= (storage64_t const& left, storage64_t const& right) BOOST_NOEXCEPT -{ - return !(left == right); -} -#endif #if defined(BOOST_HAS_INT128) typedef boost::uint128_type storage128_t; From 17cf91d8c960b9a8b7857f50dc4b878069d45659 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 4 May 2014 00:14:24 +0400 Subject: [PATCH 07/23] Working on the library rewrite. Implemented gcc-x86 and gcc-sync backends. --- include/boost/atomic/atomic.hpp | 1 + include/boost/atomic/detail/atomic_flag.hpp | 3 + .../boost/atomic/detail/atomic_template.hpp | 185 ++-- include/boost/atomic/detail/caps_gcc_sync.hpp | 8 + include/boost/atomic/detail/lockpool.hpp | 7 +- include/boost/atomic/detail/ops_cas_based.hpp | 16 +- include/boost/atomic/detail/ops_emulated.hpp | 41 +- .../boost/atomic/detail/ops_gcc_atomic.hpp | 41 +- include/boost/atomic/detail/ops_gcc_sync.hpp | 284 ++++++ include/boost/atomic/detail/ops_gcc_x86.hpp | 815 ++++++++++++++++++ include/boost/atomic/fences.hpp | 62 ++ src/lockpool.cpp | 28 +- test/lockfree.cpp | 2 +- 13 files changed, 1365 insertions(+), 128 deletions(-) create mode 100644 include/boost/atomic/detail/ops_gcc_sync.hpp create mode 100644 include/boost/atomic/detail/ops_gcc_x86.hpp create mode 100644 include/boost/atomic/fences.hpp diff --git a/include/boost/atomic/atomic.hpp b/include/boost/atomic/atomic.hpp index cce8e8d..8b0bdd1 100644 --- a/include/boost/atomic/atomic.hpp +++ b/include/boost/atomic/atomic.hpp @@ -17,6 +17,7 @@ #define BOOST_ATOMIC_ATOMIC_HPP_INCLUDED_ #include +#include #include #include #include diff --git a/include/boost/atomic/detail/atomic_flag.hpp b/include/boost/atomic/detail/atomic_flag.hpp index 0fb441f..3c274a0 100644 --- a/include/boost/atomic/detail/atomic_flag.hpp +++ b/include/boost/atomic/detail/atomic_flag.hpp @@ -14,6 +14,7 @@ #ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_HPP_INCLUDED_ +#include #include #include #include @@ -49,6 +50,8 @@ struct atomic_flag void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_acquire); + BOOST_ASSERT(order != memory_order_acq_rel); operations::clear(m_storage, order); } diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp index dd836c0..6153f26 100644 --- a/include/boost/atomic/detail/atomic_template.hpp +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -76,11 +77,18 @@ public: void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_consume); + BOOST_ASSERT(order != memory_order_acquire); + BOOST_ASSERT(order != memory_order_acq_rel); + operations::store(m_storage, static_cast< storage_type >(v), order); } value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_release); + BOOST_ASSERT(order != memory_order_acq_rel); + return static_cast< value_type >(operations::load(m_storage, order)); } @@ -99,9 +107,13 @@ public: return 
static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order)); } - bool compare_exchange_strong( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = static_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); expected = static_cast< value_type >(old_value); @@ -113,9 +125,13 @@ public: return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = static_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); expected = static_cast< value_type >(old_value); @@ -223,11 +239,18 @@ public: void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_consume); + BOOST_ASSERT(order != memory_order_acquire); + BOOST_ASSERT(order != memory_order_acq_rel); + operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); } value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_release); + BOOST_ASSERT(order != memory_order_acq_rel); + return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); } @@ -236,9 +259,13 @@ public: return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); } - bool compare_exchange_strong( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= 
(success_order & memory_order_consume)); + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); expected = atomics::detail::union_cast< value_type >(old_value); @@ -250,9 +277,13 @@ public: return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); expected = atomics::detail::union_cast< value_type >(old_value); @@ -301,11 +332,18 @@ public: void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_consume); + BOOST_ASSERT(order != memory_order_acquire); + BOOST_ASSERT(order != memory_order_acq_rel); + operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); } value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_release); + BOOST_ASSERT(order != memory_order_acq_rel); + return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); } @@ -324,9 +362,13 @@ public: return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); } - bool compare_exchange_strong( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); expected = atomics::detail::union_cast< value_type >(old_value); @@ -338,9 +380,13 @@ public: return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool 
compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); expected = atomics::detail::union_cast< value_type >(old_value); @@ -419,11 +465,18 @@ public: void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_consume); + BOOST_ASSERT(order != memory_order_acquire); + BOOST_ASSERT(order != memory_order_acq_rel); + operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); } value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { + BOOST_ASSERT(order != memory_order_release); + BOOST_ASSERT(order != memory_order_acq_rel); + return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); } @@ -442,9 +495,13 @@ public: return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); } - bool compare_exchange_strong( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); expected = atomics::detail::union_cast< value_type >(old_value); @@ -456,9 +513,13 @@ public: return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak( - value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + // failure_order must not be stronger than success_order + BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = 
operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
         expected = atomics::detail::union_cast< value_type >(old_value);
@@ -553,62 +614,62 @@ public:
     BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&) volatile)
 };
 
-typedef atomic<char> atomic_char;
-typedef atomic<unsigned char> atomic_uchar;
-typedef atomic<signed char> atomic_schar;
-typedef atomic<uint8_t> atomic_uint8_t;
-typedef atomic<int8_t> atomic_int8_t;
-typedef atomic<unsigned short> atomic_ushort;
-typedef atomic<short> atomic_short;
-typedef atomic<uint16_t> atomic_uint16_t;
-typedef atomic<int16_t> atomic_int16_t;
-typedef atomic<unsigned int> atomic_uint;
-typedef atomic<int> atomic_int;
-typedef atomic<uint32_t> atomic_uint32_t;
-typedef atomic<int32_t> atomic_int32_t;
-typedef atomic<unsigned long> atomic_ulong;
-typedef atomic<long> atomic_long;
-typedef atomic<uint64_t> atomic_uint64_t;
-typedef atomic<int64_t> atomic_int64_t;
+typedef atomic< char > atomic_char;
+typedef atomic< unsigned char > atomic_uchar;
+typedef atomic< signed char > atomic_schar;
+typedef atomic< uint8_t > atomic_uint8_t;
+typedef atomic< int8_t > atomic_int8_t;
+typedef atomic< unsigned short > atomic_ushort;
+typedef atomic< short > atomic_short;
+typedef atomic< uint16_t > atomic_uint16_t;
+typedef atomic< int16_t > atomic_int16_t;
+typedef atomic< unsigned int > atomic_uint;
+typedef atomic< int > atomic_int;
+typedef atomic< uint32_t > atomic_uint32_t;
+typedef atomic< int32_t > atomic_int32_t;
+typedef atomic< unsigned long > atomic_ulong;
+typedef atomic< long > atomic_long;
+typedef atomic< uint64_t > atomic_uint64_t;
+typedef atomic< int64_t > atomic_int64_t;
 #ifdef BOOST_HAS_LONG_LONG
-typedef atomic<boost::ulong_long_type> atomic_ullong;
-typedef atomic<boost::long_long_type> atomic_llong;
+typedef atomic< boost::ulong_long_type > atomic_ullong;
+typedef atomic< boost::long_long_type > atomic_llong;
 #endif
-typedef atomic<void*> atomic_address;
-typedef atomic<bool> atomic_bool;
-typedef atomic<wchar_t> atomic_wchar_t;
+typedef atomic< void* > atomic_address;
+typedef atomic< bool > atomic_bool;
+typedef atomic< wchar_t > atomic_wchar_t;
 #if !defined(BOOST_NO_CXX11_CHAR16_T)
-typedef atomic<char16_t> atomic_char16_t;
+typedef atomic< char16_t > atomic_char16_t;
 #endif
 #if !defined(BOOST_NO_CXX11_CHAR32_T)
-typedef atomic<char32_t> atomic_char32_t;
+typedef atomic< char32_t > atomic_char32_t;
 #endif
 
-typedef atomic<int_least8_t> atomic_int_least8_t;
-typedef atomic<uint_least8_t> atomic_uint_least8_t;
-typedef atomic<int_least16_t> atomic_int_least16_t;
-typedef atomic<uint_least16_t> atomic_uint_least16_t;
-typedef atomic<int_least32_t> atomic_int_least32_t;
-typedef atomic<uint_least32_t> atomic_uint_least32_t;
-typedef atomic<int_least64_t> atomic_int_least64_t;
-typedef atomic<uint_least64_t> atomic_uint_least64_t;
-typedef atomic<int_fast8_t> atomic_int_fast8_t;
-typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
-typedef atomic<int_fast16_t> atomic_int_fast16_t;
-typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
-typedef atomic<int_fast32_t> atomic_int_fast32_t;
-typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
-typedef atomic<int_fast64_t> atomic_int_fast64_t;
-typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
-typedef atomic<intmax_t> atomic_intmax_t;
-typedef atomic<uintmax_t> atomic_uintmax_t;
+typedef atomic< int_least8_t > atomic_int_least8_t;
+typedef atomic< uint_least8_t > atomic_uint_least8_t;
+typedef atomic< int_least16_t > atomic_int_least16_t;
+typedef atomic< uint_least16_t > atomic_uint_least16_t;
+typedef atomic< int_least32_t > atomic_int_least32_t;
+typedef atomic< uint_least32_t > atomic_uint_least32_t;
+typedef atomic< int_least64_t > atomic_int_least64_t;
+typedef atomic< uint_least64_t > atomic_uint_least64_t;
+typedef atomic< int_fast8_t > atomic_int_fast8_t;
+typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
+typedef atomic< int_fast16_t > atomic_int_fast16_t;
+typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
+typedef atomic< int_fast32_t > atomic_int_fast32_t;
+typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
+typedef atomic< int_fast64_t > atomic_int_fast64_t;
+typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
+typedef atomic< intmax_t > atomic_intmax_t;
+typedef atomic< uintmax_t > atomic_uintmax_t;
 
-typedef atomic<std::size_t> atomic_size_t;
-typedef atomic<std::ptrdiff_t> atomic_ptrdiff_t;
+typedef atomic< std::size_t > atomic_size_t;
+typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
 
 #if defined(BOOST_HAS_INTPTR_T)
-typedef atomic<intptr_t> atomic_intptr_t;
-typedef atomic<uintptr_t> atomic_uintptr_t;
+typedef atomic< intptr_t > atomic_intptr_t;
+typedef atomic< uintptr_t > atomic_uintptr_t;
 #endif
 
 } // namespace atomics
diff --git a/include/boost/atomic/detail/caps_gcc_sync.hpp b/include/boost/atomic/detail/caps_gcc_sync.hpp
index 43065fe..7fac07a 100644
--- a/include/boost/atomic/detail/caps_gcc_sync.hpp
+++ b/include/boost/atomic/detail/caps_gcc_sync.hpp
@@ -22,6 +22,14 @@
 #pragma once
 #endif
 
+#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
 #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
     || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
     || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
diff --git a/include/boost/atomic/detail/lockpool.hpp b/include/boost/atomic/detail/lockpool.hpp
index 10b9b85..4e249aa 100644
--- a/include/boost/atomic/detail/lockpool.hpp
+++ b/include/boost/atomic/detail/lockpool.hpp
@@ -33,12 +33,15 @@ struct lockpool
         void* m_lock;
 
     public:
-        explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr);
-        BOOST_ATOMIC_DECL ~scoped_lock();
+        explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr) BOOST_NOEXCEPT;
+        BOOST_ATOMIC_DECL ~scoped_lock() BOOST_NOEXCEPT;
 
         BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
         BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
     };
+
+    static BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
+    static BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
 };
 
 } // namespace detail
diff --git a/include/boost/atomic/detail/ops_cas_based.hpp b/include/boost/atomic/detail/ops_cas_based.hpp
index 6619396..b9039ec 100644
--- a/include/boost/atomic/detail/ops_cas_based.hpp
+++ b/include/boost/atomic/detail/ops_cas_based.hpp
@@ -31,54 +31,54 @@ struct cas_based_operations :
 {
     typedef typename Base::storage_type storage_type;
 
-    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
     {
         storage_type old_val = Base::load(storage, memory_order_relaxed);
         while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
         return old_val;
     }
 
-    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT
+    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
     {
         storage_type old_val = Base::load(storage, memory_order_relaxed);
         while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
         return old_val;
     }
 
-    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage,
storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { storage_type old_val = Base::load(storage, memory_order_relaxed); while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {} return old_val; } - static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { storage_type old_val = Base::load(storage, memory_order_relaxed); while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {} return old_val; } - static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { storage_type old_val = Base::load(storage, memory_order_relaxed); while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {} return old_val; } - static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { storage_type old_val = Base::load(storage, memory_order_relaxed); while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {} return old_val; } - static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { return exchange(storage, (storage_type)1, order) != (storage_type)0; } - static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { Base::store(storage, (storage_type)0, order); } diff --git a/include/boost/atomic/detail/ops_emulated.hpp b/include/boost/atomic/detail/ops_emulated.hpp index 367534f..2fa0f96 100644 --- a/include/boost/atomic/detail/ops_emulated.hpp +++ b/include/boost/atomic/detail/ops_emulated.hpp @@ -35,19 +35,19 @@ struct emulated_operations { typedef T storage_type; - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { lockpool::scoped_lock lock(&storage); const_cast< storage_type& >(storage) = v; } - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT { lockpool::scoped_lock lock(&storage); return const_cast< storage_type const& >(storage); } - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static 
BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { storage_type& s = const_cast< storage_type& >(storage); lockpool::scoped_lock lock(&storage); @@ -56,7 +56,7 @@ struct emulated_operations return old_val; } - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { storage_type& s = const_cast< storage_type& >(storage); lockpool::scoped_lock lock(&storage); @@ -65,7 +65,7 @@ struct emulated_operations return old_val; } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { storage_type& s = const_cast< storage_type& >(storage); lockpool::scoped_lock lock(&storage); @@ -94,7 +94,7 @@ struct emulated_operations return compare_exchange_strong(storage, expected, desired, success_order, failure_order); } - static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { storage_type& s = const_cast< storage_type& >(storage); lockpool::scoped_lock lock(&storage); @@ -103,7 +103,7 @@ struct emulated_operations return old_val; } - static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { storage_type& s = const_cast< storage_type& >(storage); lockpool::scoped_lock lock(&storage); @@ -112,7 +112,7 @@ struct emulated_operations return old_val; } - static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { storage_type& s = const_cast< storage_type& >(storage); lockpool::scoped_lock lock(&storage); @@ -121,12 +121,12 @@ struct emulated_operations return old_val; } - static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { return exchange(storage, (storage_type)1, order) != (storage_type)0; } - static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { store(storage, (storage_type)0, order); } @@ -195,27 +195,6 @@ struct operations : }; } // namespace detail - -#if BOOST_ATOMIC_THREAD_FENCE == 0 -BOOST_FORCEINLINE void atomic_thread_fence(memory_order) -{ - // Emulate full fence by locking/unlocking a mutex - detail::lockpool::scoped_lock lock(0); -} -#endif - -#if BOOST_ATOMIC_SIGNAL_FENCE == 0 -BOOST_FORCEINLINE void atomic_signal_fence(memory_order) -{ - // We can't use pthread functions in signal 
handlers, so only use lock pool if it is based on atomic_flags. - // However, any reasonable backend with a lockfree atomic_flag should provide fence primitives already. - // So this condition is more for completeness sake. -#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2 - detail::lockpool::scoped_lock lock(0); -#endif -} -#endif - } // namespace atomics } // namespace boost diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp index 3ed68dd..5f81f19 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -14,12 +14,13 @@ #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_ -#include #include #include #include #include +#include #if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)) +#include #include #endif @@ -43,27 +44,27 @@ struct gcc_atomic_operations { typedef T storage_type; - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } @@ -90,27 +91,27 @@ struct gcc_atomic_operations ); } - static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_or(storage_type 
volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order)); } - static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order)); } @@ -153,7 +154,7 @@ struct clang_dcas_x86 { typedef storage64_t storage_type; - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { if ((((uint32_t)&storage) & 0x00000007) == 0) { @@ -202,7 +203,7 @@ struct clang_dcas_x86 } } - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type value; @@ -288,7 +289,7 @@ struct clang_dcas_x86_64 { typedef storage128_t storage_type; - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { uint64_t const* p_value = (uint64_t const*)&value; __asm__ __volatile__ @@ -299,12 +300,12 @@ struct clang_dcas_x86_64 "1: lock; cmpxchg16b 0(%[dest])\n\t" "jne 1b" : - : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (ptr) + : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage) : "memory", "cc", "rax", "rdx" ); } - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order = memory_order_seq_cst) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type value = storage_type(); return __sync_val_compare_and_swap(&storage, value, value); @@ -347,18 +348,18 @@ struct operations< 16u > : #endif #endif -} // namespace detail - -BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT { __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order)); } -BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) +BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT { __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order)); } +} // namespace detail + } // namespace 
atomics
} // namespace boost

diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp
new file mode 100644
index 0000000..bd17578
--- /dev/null
+++ b/include/boost/atomic/detail/ops_gcc_sync.hpp
@@ -0,0 +1,284 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2011 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_sync.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T >
+struct gcc_sync_operations
+{
+    typedef T storage_type;
+
+    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before_store(order);
+        storage = v;
+        fence_after_store(order);
+    }
+
+    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type v = storage;
+        fence_after_load(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+    {
+        return __sync_fetch_and_add(&storage, v);
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+    {
+        return __sync_fetch_and_sub(&storage, v);
+    }
+
+    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's
+        // implementation of std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures actually
+        // don't support this, we can always add a check here and fall back to a CAS loop.
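+        // For illustration only, such a fallback (assuming nothing beyond
+        // __sync_val_compare_and_swap is available) could be sketched as:
+        //
+        //     storage_type old_val = storage;
+        //     while (true)
+        //     {
+        //         storage_type witnessed = __sync_val_compare_and_swap(&storage, old_val, v);
+        //         if (witnessed == old_val)
+        //             return witnessed;
+        //         old_val = witnessed;
+        //     }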
+ if ((order & memory_order_release) != 0) + __sync_synchronize(); + return __sync_lock_test_and_set(&storage, v); + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + storage_type expected2 = expected; + storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired); + + if (old_val == expected2) + { + return true; + } + else + { + expected = old_val; + return false; + } + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + return __sync_fetch_and_and(&storage, v); + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + return __sync_fetch_and_or(&storage, v); + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + return __sync_fetch_and_xor(&storage, v); + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + if ((order & memory_order_release) != 0) + __sync_synchronize(); + return __sync_lock_test_and_set(&storage, 1); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + __sync_lock_release(&storage); + if (order == memory_order_seq_cst) + __sync_synchronize(); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } + +private: + static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __sync_synchronize(); + break; + } + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + if (order == memory_order_seq_cst) + __sync_synchronize(); + } + + static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_consume: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __sync_synchronize(); + break; + } + } +}; + +#if BOOST_ATOMIC_INT8_LOCK_FREE > 0 +template< > +struct operations< 1u > : + public gcc_sync_operations< +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) + storage8_t +#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) + storage16_t +#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) + storage32_t +#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) + storage64_t +#else + storage128_t +#endif + > +{ +}; +#endif + +#if BOOST_ATOMIC_INT16_LOCK_FREE > 0 +template< > +struct operations< 2u > : + public gcc_sync_operations< +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) + storage16_t +#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) + storage32_t +#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) + storage64_t +#else + storage128_t +#endif + > +{ +}; +#endif 
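+// The size-dispatched specializations above and below share one idea: when GCC
+// does not define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_N for the exact operand size,
+// the operations are instantiated on the next wider storage type for which the
+// __sync intrinsics are known to be available.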
+ +#if BOOST_ATOMIC_INT32_LOCK_FREE > 0 +template< > +struct operations< 4u > : + public gcc_sync_operations< +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) + storage32_t +#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) + storage64_t +#else + storage128_t +#endif + > +{ +}; +#endif + +#if BOOST_ATOMIC_INT64_LOCK_FREE > 0 +template< > +struct operations< 8u > : + public gcc_sync_operations< +#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) + storage64_t +#else + storage128_t +#endif + > +{ +}; + +#endif + +#if BOOST_ATOMIC_INT128_LOCK_FREE > 0 +template< > +struct operations< 16u > : + public gcc_sync_operations< storage128_t > +{ +}; +#endif + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_relaxed: + break; + case memory_order_release: + case memory_order_consume: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __sync_synchronize(); + break; + } +} + +BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } +} + +} // namespace detail + +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_x86.hpp b/include/boost/atomic/detail/ops_gcc_x86.hpp new file mode 100644 index 0000000..f4585d7 --- /dev/null +++ b/include/boost/atomic/detail/ops_gcc_x86.hpp @@ -0,0 +1,815 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_gcc_x86.hpp + * + * This header contains implementation of the \c operations template. 
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) +#include +#include +#endif + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#if defined(__x86_64__) +#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "rdx" +#else +#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "edx" +#endif + +namespace boost { +namespace atomics { +namespace detail { + +struct gcc_x86_operations_base +{ + static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + __asm__ __volatile__ ("" ::: "memory"); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } + } + + static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + __asm__ __volatile__ ("" ::: "memory"); + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } + } + + static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + __asm__ __volatile__ ("" ::: "memory"); + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } + } +}; + +template< typename T, typename Derived > +struct gcc_x86_operations : + public gcc_x86_operations_base +{ + typedef T storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + if (order != memory_order_seq_cst) + { + fence_before(order); + storage = v; + fence_after(order); + } + else + { + Derived::exchange(storage, v, order); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = storage; + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return Derived::fetch_add(storage, -v, order); + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return Derived::exchange(storage, (storage_type)1, order); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, (storage_type)0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 1u > : + public gcc_x86_operations< storage8_t, operations< 1u > > +{ 
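+    // 8-bit operations. The byte-sized instructions (xaddb, xchgb, cmpxchgb)
+    // need a byte-addressable register operand, hence the "q" constraint here
+    // instead of the generic "r" used by the wider specializations.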
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "lock; xaddb %0, %1" + : "+q" (v), "+m" (storage) + : + : "cc" + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "xchgb %0, %1" + : "+q" (v), "+m" (storage) + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + bool success; + __asm__ __volatile__ + ( + "lock; cmpxchgb %3, %1\n\t" + "sete %2" + : "+a" (previous), "+m" (storage), "=q" (success) + : "q" (desired) + : "cc" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + expected = previous; + return success; + } + +#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\ + __asm__ __volatile__\ + (\ + "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\ + ".align 16\n\t"\ + "1: movb %[arg], %%dl\n\t"\ + op " %%al, %%dl\n\t"\ + "lock; cmpxchgb %%dl, %[storage]\n\t"\ + "jne 1b"\ + : [res] "+a" (result), [storage] "+m" (storage)\ + : [arg] "q" (argument)\ + : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\ + ) + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res); + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res); + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res); + return res; + } + +#undef BOOST_ATOMIC_DETAIL_CAS_LOOP +}; + +template< > +struct operations< 2u > : + public gcc_x86_operations< storage16_t, operations< 2u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "lock; xaddw %0, %1" + : "+q" (v), "+m" (storage) + : + : "cc" + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "xchgw %0, %1" + : "+q" (v), "+m" (storage) + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + bool success; + __asm__ __volatile__ + ( + "lock; cmpxchgw %3, %1\n\t" + "sete %2" + : "+a" (previous), "+m" (storage), "=q" (success) + : "q" (desired) + : "cc" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + 
expected = previous; + return success; + } + +#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\ + __asm__ __volatile__\ + (\ + "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\ + ".align 16\n\t"\ + "1: movw %[arg], %%dx\n\t"\ + op " %%ax, %%dx\n\t"\ + "lock; cmpxchgw %%dx, %[storage]\n\t"\ + "jne 1b"\ + : [res] "+a" (result), [storage] "+m" (storage)\ + : [arg] "q" (argument)\ + : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\ + ) + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res); + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res); + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res); + return res; + } + +#undef BOOST_ATOMIC_DETAIL_CAS_LOOP +}; + +template< > +struct operations< 4u > : + public gcc_x86_operations< storage32_t, operations< 4u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "lock; xaddl %0, %1" + : "+r" (v), "+m" (storage) + : + : "cc" + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "xchgl %0, %1" + : "+r" (v), "+m" (storage) + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + bool success; + __asm__ __volatile__ + ( + "lock; cmpxchgl %3, %1\n\t" + "sete %2" + : "+a" (previous), "+m" (storage), "=q" (success) + : "r" (desired) + : "cc" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + expected = previous; + return success; + } + +#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\ + __asm__ __volatile__\ + (\ + "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\ + ".align 16\n\t"\ + "1: movl %[arg], %%edx\n\t"\ + op " %%eax, %%edx\n\t"\ + "lock; cmpxchgl %%edx, %[storage]\n\t"\ + "jne 1b"\ + : [res] "+a" (result), [storage] "+m" (storage)\ + : [arg] "r" (argument)\ + : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\ + ) + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res); + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res); + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type 
res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res); + return res; + } + +#undef BOOST_ATOMIC_DETAIL_CAS_LOOP +}; + +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) + +struct gcc_dcas_x86 +{ + typedef storage64_t storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + if ((((uint32_t)&storage) & 0x00000007) == 0) + { +#if defined(__SSE2__) + __asm__ __volatile__ + ( +#if defined(__AVX__) + "vmovq %1, %%xmm4\n\t" + "vmovq %%xmm4, %0\n\t" +#else + "movq %1, %%xmm4\n\t" + "movq %%xmm4, %0\n\t" +#endif + : "=m" (storage) + : "m" (v) + : "memory", "xmm4" + ); +#else + __asm__ __volatile__ + ( + "fildll %1\n\t" + "fistpll %0\n\t" + : "=m" (storage) + : "m" (v) + : "memory" + ); +#endif + } + else + { + uint32_t scratch; + __asm__ __volatile__ + ( + "movl %%ebx, %[scratch]\n\t" + "movl %[value_lo], %%ebx\n\t" + "movl 0(%[dest]), %%eax\n\t" + "movl 4(%[dest]), %%edx\n\t" + ".align 16\n\t" + "1: lock; cmpxchg8b 0(%[dest])\n\t" + "jne 1b\n\t" + "movl %[scratch], %%ebx" + : [scratch] "=m,m" (scratch) + : [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) + : "memory", "cc", "edx" + ); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT + { + storage_type value; + + if ((((uint32_t)&storage) & 0x00000007) == 0) + { +#if defined(__SSE2__) + __asm__ __volatile__ + ( +#if defined(__AVX__) + "vmovq %1, %%xmm4\n\t" + "vmovq %%xmm4, %0\n\t" +#else + "movq %1, %%xmm4\n\t" + "movq %%xmm4, %0\n\t" +#endif + : "=m" (value) + : "m" (storage) + : "memory", "xmm4" + ); +#else + __asm__ __volatile__ + ( + "fildll %1\n\t" + "fistpll %0\n\t" + : "=m" (value) + : "m" (storage) + : "memory" + ); +#endif + } + else + { + // We don't care for comparison result here; the previous value will be stored into value anyway. + // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b. + __asm__ __volatile__ + ( + "movl %%ebx, %%eax\n\t" + "movl %%ecx, %%edx\n\t" + "lock; cmpxchg8b %[storage]" + : "=&A" (value) + : [storage] "m" (storage) + : "cc", "memory" + ); + } + + return value; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { +#if defined(__PIC__) + // Make sure ebx is saved and restored properly in case + // of position independent code. To make this work + // setup register constraints such that ebx can not be + // used by accident e.g. as base address for the variable + // to be modified. Accessing "scratch" should always be okay, + // as it can only be placed on the stack (and therefore + // accessed through ebp or esp only). + // + // In theory, could push/pop ebx onto/off the stack, but movs + // to a prepared stack slot turn out to be faster. 
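+        // As an illustration (not the code actually used), the push/pop variant
+        // would look roughly like:
+        //
+        //     "pushl %%ebx\n\t"
+        //     "movl %[desired_lo], %%ebx\n\t"
+        //     "lock; cmpxchg8b %[dest]\n\t"
+        //     "popl %%ebx\n\t"
+        //
+        // with the additional caveat that %[dest] must then not be addressed
+        // relative to esp, since push/pop shift the stack pointer.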
+
+        uint32_t scratch;
+        bool success;
+        __asm__ __volatile__
+        (
+            "movl %%ebx, %[scratch]\n\t"
+            "movl %[desired_lo], %%ebx\n\t"
+            "lock; cmpxchg8b %[dest]\n\t"
+            "movl %[scratch], %%ebx\n\t"
+            "sete %[success]"
+            : "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
+            : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32))
+            : "cc", "memory"
+        );
+        return success;
+#else
+        // ebx is usable directly when not compiling as PIC, so no scratch slot is needed
+        bool success;
+        __asm__ __volatile__
+        (
+            "lock; cmpxchg8b %[dest]\n\t"
+            "sete %[success]"
+            : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
+            : "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32))
+            : "cc", "memory"
+        );
+        return success;
+#endif
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_weak(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+template< >
+struct operations< 8u > :
+    public cas_based_operations< gcc_dcas_x86 >
+{
+};
+
+#elif defined(__x86_64__)
+
+template< >
+struct operations< 8u > :
+    public gcc_x86_operations< storage64_t, operations< 8u > >
+{
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        __asm__ __volatile__
+        (
+            "lock; xaddq %0, %1"
+            : "+r" (v), "+m" (storage)
+            :
+            : "cc"
+        );
+        fence_after(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        __asm__ __volatile__
+        (
+            "xchgq %0, %1"
+            : "+r" (v), "+m" (storage)
+        );
+        fence_after(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        storage_type previous = expected;
+        fence_before(success_order);
+        bool success;
+        __asm__ __volatile__
+        (
+            "lock; cmpxchgq %3, %1\n\t"
+            "sete %2"
+            : "+a" (previous), "+m" (storage), "=q" (success)
+            : "r" (desired)
+            : "cc"
+        );
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = previous;
+        return success;
+    }
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+    __asm__ __volatile__\
+    (\
+        "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
+        ".align 16\n\t"\
+        "1: movq %[arg], %%rdx\n\t"\
+        op " %%rax, %%rdx\n\t"\
+        "lock; cmpxchgq %%rdx, %[storage]\n\t"\
+        "jne 1b"\
+        : [res] "+a" (result), [storage] "+m" (storage)\
+        : [arg] "r" (argument)\
+        : "cc", BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+    )
+
+    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+    {
+        storage_type res = storage;
+        BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
+        return res;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+    {
+        storage_type res = storage;
+        BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
+        return res;
+    }
+
+    static
BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type res = storage; + BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res); + return res; + } + +#undef BOOST_ATOMIC_DETAIL_CAS_LOOP +}; + +#endif + +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) + +struct gcc_dcas_x86_64 +{ + typedef storage128_t storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + uint64_t const* p_value = (uint64_t const*)&v; + __asm__ __volatile__ + ( + "movq 0(%[dest]), %%rax\n\t" + "movq 8(%[dest]), %%rdx\n\t" + ".align 16\n\t" + "1: lock; cmpxchg16b 0(%[dest])\n\t" + "jne 1b" + : + : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage) + : "memory", "cc", "rax", "rdx" + ); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT + { + storage_type value; + + // We don't care for comparison result here; the previous value will be stored into value anyway. + // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b. + __asm__ __volatile__ + ( + "movq %%rbx, %%rax\n\t" + "movq %%rcx, %%rdx\n\t" + "lock; cmpxchg16b %[storage]" + : "=&A" (value) + : [storage] "m" (storage) + : "cc", "memory" + ); + + return value; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + uint64_t const* p_desired = (uint64_t const*)&desired; + bool success; + __asm__ __volatile__ + ( + "lock; cmpxchg16b %[dest]\n\t" + "sete %[success]" + : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success) + : "b,b" (p_desired[0]), "c,c" (p_desired[1]) + : "cc", "memory" + ); + return success; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 16u > : + public cas_based_operations< gcc_dcas_x86_64 > +{ +}; + +#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + case memory_order_release: + case memory_order_acq_rel: + __asm__ __volatile__ ("" ::: "memory"); + break; + case memory_order_seq_cst: + __asm__ __volatile__ + ( +#if defined(__x86_64__) || defined(__SSE2__) + "mfence\n" +#else + "lock; addl $0, (%%esp)\n" +#endif + ::: "memory" + ); + break; + default:; + } +} + +BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#undef BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_ diff --git 
a/include/boost/atomic/fences.hpp b/include/boost/atomic/fences.hpp new file mode 100644 index 0000000..66b5ac5 --- /dev/null +++ b/include/boost/atomic/fences.hpp @@ -0,0 +1,62 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2011 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/fences.hpp + * + * This header contains definition of \c atomic_thread_fence and \c atomic_signal_fence functions. + */ + +#ifndef BOOST_ATOMIC_FENCES_HPP_INCLUDED_ +#define BOOST_ATOMIC_FENCES_HPP_INCLUDED_ + +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { + +namespace atomics { + +#if BOOST_ATOMIC_THREAD_FENCE > 0 +BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) BOOST_NOEXCEPT +{ + detail::thread_fence(order); +} +#else +BOOST_FORCEINLINE void atomic_thread_fence(memory_order) BOOST_NOEXCEPT +{ + detail::lockpool::thread_fence(); +} +#endif + +#if BOOST_ATOMIC_SIGNAL_FENCE > 0 +BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) BOOST_NOEXCEPT +{ + detail::signal_fence(order); +} +#else +BOOST_FORCEINLINE void atomic_signal_fence(memory_order) BOOST_NOEXCEPT +{ + detail::lockpool::signal_fence(); +} +#endif + +} // namespace atomics + +using atomics::atomic_thread_fence; +using atomics::atomic_signal_fence; + +} // namespace boost + +#endif // BOOST_ATOMIC_FENCES_HPP_INCLUDED_ diff --git a/src/lockpool.cpp b/src/lockpool.cpp index 9f9dbf9..a6e6b8a 100644 --- a/src/lockpool.cpp +++ b/src/lockpool.cpp @@ -90,7 +90,7 @@ static padded_lock g_lock_pool[41] #if !defined(BOOST_ATOMIC_USE_PTHREAD) // NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes. 
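 // For illustration: with the 41-entry pool above, an address addr maps to
 //   g_lock_pool[reinterpret_cast< std::size_t >(addr) % 41]
 // so distinct atomics may share a lock; that is correct, merely a potential
 // source of contention.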
-BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) : +BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT : m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock) { while (padded_lock::operations::test_and_set(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_acquire)) @@ -99,26 +99,46 @@ BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) } } -BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() +BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT { padded_lock::operations::clear(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_release); } +BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT; + #else // !defined(BOOST_ATOMIC_USE_PTHREAD) -BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) : +BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT : m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock) { BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0); } -BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() +BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT { BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0); } #endif // !defined(BOOST_ATOMIC_USE_PTHREAD) +BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT +{ +#if BOOST_ATOMIC_THREAD_FENCE > 0 + atomics::detail::thread_fence(memory_order_seq_cst); +#else + // Emulate full fence by locking/unlocking a mutex + scoped_lock lock(0); +#endif +} + +BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT +{ + // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier. +#if BOOST_ATOMIC_SIGNAL_FENCE > 0 + atomics::detail::signal_fence(memory_order_seq_cst); +#endif +} + } // namespace detail } // namespace atomics } // namespace boost diff --git a/test/lockfree.cpp b/test/lockfree.cpp index 523f784..1cd2f5f 100644 --- a/test/lockfree.cpp +++ b/test/lockfree.cpp @@ -60,7 +60,7 @@ verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_ #define EXPECT_INT_LOCK_FREE 2 #define EXPECT_LONG_LOCK_FREE 2 #define EXPECT_LLONG_LOCK_FREE 2 -#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && defined(BOOST_HAS_INT128) +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) #define EXPECT_INT128_LOCK_FREE 2 #else #define EXPECT_INT128_LOCK_FREE 0 From 931a5dc2aa104fc36ef36e5c3efc65e0e04f13a3 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 4 May 2014 05:22:59 +0400 Subject: [PATCH 08/23] Improved assertion messages. --- .../boost/atomic/detail/atomic_template.hpp | 30 +++++++++---------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp index 6153f26..2ebe43d 100644 --- a/include/boost/atomic/detail/atomic_template.hpp +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -37,6 +37,12 @@ BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order deduce_failure_order(memory_order return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? 
memory_order_relaxed : order); } +BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT +{ + return ((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) + && (failure_order & memory_order_consume) <= (success_order & memory_order_consume); +} + template< typename T, bool IsInt = boost::is_integral< T >::value > struct classify { @@ -111,8 +117,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = static_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); @@ -129,8 +134,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = static_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); @@ -263,8 +267,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); @@ -281,8 +284,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); @@ -366,8 +368,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must 
not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); @@ -384,8 +385,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); @@ -499,8 +499,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); @@ -517,8 +516,7 @@ public: { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); - // failure_order must not be stronger than success_order - BOOST_ASSERT(((failure_order | success_order) & ~memory_order_consume) == (success_order & ~memory_order_consume) && (failure_order & memory_order_consume) <= (success_order & memory_order_consume)); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); storage_type old_value = atomics::detail::union_cast< storage_type >(expected); const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order); From 20dd8bd827fd21ab5111faf680d08b1c639a9351 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 4 May 2014 23:41:16 +0400 Subject: [PATCH 09/23] Implemented MSVC and Windows backends. 
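
The integer base_atomic now also routes all value conversions through
make_unsigned< value_type >::type, presumably so that the unused upper bits of
a wider storage type stay in a deterministic (zero) state and compare_exchange
comparisons on padded storage behave predictably. For illustration (the types
below are chosen only for the example): converting a negative signed value
straight to a wider unsigned type sign-extends it, while going through the
same-width unsigned type first keeps the upper bits zero:

    signed char v = -1;
    unsigned int direct = static_cast< unsigned int >(v);        // 0xFFFFFFFF
    unsigned int via_u8 = static_cast< unsigned int >(
        static_cast< unsigned char >(v));                        // 0x000000FF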
--- .../boost/atomic/detail/atomic_template.hpp | 136 ++- include/boost/atomic/detail/caps_msvc_arm.hpp | 1 + include/boost/atomic/detail/caps_msvc_x86.hpp | 2 +- include/boost/atomic/detail/interlocked.hpp | 215 ++++- include/boost/atomic/detail/ops_gcc_sync.hpp | 2 +- include/boost/atomic/detail/ops_gcc_x86.hpp | 2 +- include/boost/atomic/detail/ops_msvc_arm.hpp | 824 ++++++++++++++++ .../boost/atomic/detail/ops_msvc_common.hpp | 38 + include/boost/atomic/detail/ops_msvc_x86.hpp | 902 ++++++++++++++++++ include/boost/atomic/detail/ops_windows.hpp | 239 +++++ include/boost/atomic/detail/platform.hpp | 2 +- 11 files changed, 2337 insertions(+), 26 deletions(-) create mode 100644 include/boost/atomic/detail/ops_msvc_arm.hpp create mode 100644 include/boost/atomic/detail/ops_msvc_common.hpp create mode 100644 include/boost/atomic/detail/ops_msvc_x86.hpp create mode 100644 include/boost/atomic/detail/ops_windows.hpp diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp index 2ebe43d..2073ccc 100644 --- a/include/boost/atomic/detail/atomic_template.hpp +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,14 @@ #pragma once #endif +#if defined(BOOST_MSVC) +#pragma warning(push) +// 'boost::atomics::atomic' : multiple assignment operators specified +#pragma warning(disable: 4522) +// In case of atomic: 'unsigned char' : forcing value to bool 'true' or 'false' (performance warning) +#pragma warning(disable: 4800) +#endif + namespace boost { namespace atomics { namespace detail { @@ -63,8 +72,8 @@ template< typename T > class base_atomic< T, int > { private: - typedef base_atomic this_type; typedef T value_type; + typedef typename make_unsigned< value_type >::type unsigned_value_type; typedef T difference_type; typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; @@ -79,7 +88,7 @@ protected: public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {} + BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(static_cast< unsigned_value_type >(v)) {} void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { @@ -87,7 +96,7 @@ public: BOOST_ASSERT(order != memory_order_acquire); BOOST_ASSERT(order != memory_order_acq_rel); - operations::store(m_storage, static_cast< storage_type >(v), order); + operations::store(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order); } value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT @@ -100,17 +109,17 @@ public: value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order)); + return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); } value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order)); + return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); } value_type exchange(value_type v, memory_order order 
= memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order)); + return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); } bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT @@ -119,8 +128,8 @@ public: BOOST_ASSERT(failure_order != memory_order_acq_rel); BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); - storage_type old_value = static_cast< storage_type >(expected); - const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); + storage_type old_value = static_cast< storage_type >(static_cast< unsigned_value_type >(expected)); + const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(static_cast< unsigned_value_type >(desired)), success_order, failure_order); expected = static_cast< value_type >(old_value); return res; } @@ -136,8 +145,8 @@ public: BOOST_ASSERT(failure_order != memory_order_acq_rel); BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); - storage_type old_value = static_cast< storage_type >(expected); - const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); + storage_type old_value = static_cast< storage_type >(static_cast< unsigned_value_type >(expected)); + const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(static_cast< unsigned_value_type >(desired)), success_order, failure_order); expected = static_cast< value_type >(old_value); return res; } @@ -149,17 +158,17 @@ public: value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order)); + return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); } value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order)); + return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); } value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order)); + return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); } bool is_lock_free() const volatile BOOST_NOEXCEPT @@ -216,13 +225,98 @@ public: BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) }; +//! 
Implementation for bool +template< > +class base_atomic< bool, int > +{ +private: + typedef bool value_type; + typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; + +protected: + typedef value_type value_arg_type; + +public: + typedef operations::storage_type storage_type; + +protected: + storage_type m_storage; + +public: + BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) + BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {} + + void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + BOOST_ASSERT(order != memory_order_consume); + BOOST_ASSERT(order != memory_order_acquire); + BOOST_ASSERT(order != memory_order_acq_rel); + + operations::store(m_storage, static_cast< storage_type >(v), order); + } + + value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + { + BOOST_ASSERT(order != memory_order_release); + BOOST_ASSERT(order != memory_order_acq_rel); + + return !!operations::load(m_storage, order); + } + + value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return !!operations::exchange(m_storage, static_cast< storage_type >(v), order); + } + + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); + + storage_type old_value = static_cast< storage_type >(expected); + const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); + expected = !!old_value; + return res; + } + + bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + { + BOOST_ASSERT(failure_order != memory_order_release); + BOOST_ASSERT(failure_order != memory_order_acq_rel); + BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); + + storage_type old_value = static_cast< storage_type >(expected); + const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); + expected = !!old_value; + return res; + } + + bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + { + return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); + } + + bool is_lock_free() const volatile BOOST_NOEXCEPT + { + return operations::is_lock_free(m_storage); + } + + BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) + BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) +}; + //! 
Implementation for user-defined types, such as structs and enums template< typename T > class base_atomic< T, void > { private: - typedef base_atomic this_type; typedef T value_type; typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; @@ -308,11 +402,10 @@ public: //! Implementation for pointers -template +template< typename T > class base_atomic< T*, void* > { private: - typedef base_atomic this_type; typedef T* value_type; typedef std::ptrdiff_t difference_type; typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; @@ -327,7 +420,7 @@ protected: storage_type m_storage; public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) + BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) { } @@ -443,7 +536,6 @@ template< > class base_atomic< void*, void* > { private: - typedef base_atomic this_type; typedef void* value_type; typedef std::ptrdiff_t difference_type; typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; @@ -452,13 +544,13 @@ protected: typedef value_type value_arg_type; public: - typedef typename operations::storage_type storage_type; + typedef operations::storage_type storage_type; protected: storage_type m_storage; public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) + BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) { } @@ -673,4 +765,8 @@ typedef atomic< uintptr_t > atomic_uintptr_t; } // namespace atomics } // namespace boost +#if defined(BOOST_MSVC) +#pragma warning(pop) +#endif + #endif // BOOST_ATOMIC_DETAIL_ATOMIC_TEMPLATE_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/caps_msvc_arm.hpp b/include/boost/atomic/detail/caps_msvc_arm.hpp index 2c31521..6b3c61f 100644 --- a/include/boost/atomic/detail/caps_msvc_arm.hpp +++ b/include/boost/atomic/detail/caps_msvc_arm.hpp @@ -25,6 +25,7 @@ #define BOOST_ATOMIC_INT8_LOCK_FREE 2 #define BOOST_ATOMIC_INT16_LOCK_FREE 2 #define BOOST_ATOMIC_INT32_LOCK_FREE 2 +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 diff --git a/include/boost/atomic/detail/caps_msvc_x86.hpp b/include/boost/atomic/detail/caps_msvc_x86.hpp index 1b82b49..5661a5b 100644 --- a/include/boost/atomic/detail/caps_msvc_x86.hpp +++ b/include/boost/atomic/detail/caps_msvc_x86.hpp @@ -26,7 +26,7 @@ #define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1 #endif -#if defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B) +#if _MSC_VER >= 1500 && defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B) #define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1 #endif diff --git a/include/boost/atomic/detail/interlocked.hpp b/include/boost/atomic/detail/interlocked.hpp index ae8518d..99c3d31 100644 --- a/include/boost/atomic/detail/interlocked.hpp +++ b/include/boost/atomic/detail/interlocked.hpp @@ -48,6 +48,11 @@ #define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) #endif +#if _MSC_VER >= 1500 && defined(_M_AMD64) +#pragma intrinsic(_InterlockedCompareExchange128) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) _InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare)) 
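+// Note on the 128-bit variant above: unlike the narrower intrinsics, which
+// return the previous value, _InterlockedCompareExchange128 returns 1 on
+// success and 0 on failure, and writes the previous value of the destination
+// back through its last argument; hence the macro casts `compare` to a pointer.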
+#endif + #if _MSC_VER >= 1600 // MSVC 2010 and later provide intrinsics for 8 and 16 bit integers. @@ -105,13 +110,219 @@ #define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval)) #define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset)) -#else // defined(_M_AMD64) || defined(_M_IA64) +#elif defined(_M_IX86) #define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))) #define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval))) #define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset)) -#endif // defined(_M_AMD64) || defined(_M_IA64) +#endif + +#if _MSC_VER >= 1700 && defined(_M_ARM) + +#pragma intrinsic(_InterlockedExchangeAdd64) +#pragma intrinsic(_InterlockedExchange64) +#pragma intrinsic(_InterlockedAnd64) +#pragma intrinsic(_InterlockedOr64) +#pragma intrinsic(_InterlockedXor64) + +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg)) + +#pragma intrinsic(_InterlockedCompareExchange8_nf) +#pragma intrinsic(_InterlockedCompareExchange8_acq) +#pragma intrinsic(_InterlockedCompareExchange8_rel) +#pragma intrinsic(_InterlockedCompareExchange16_nf) +#pragma intrinsic(_InterlockedCompareExchange16_acq) +#pragma intrinsic(_InterlockedCompareExchange16_rel) +#pragma intrinsic(_InterlockedCompareExchange_nf) +#pragma intrinsic(_InterlockedCompareExchange_acq) +#pragma intrinsic(_InterlockedCompareExchange_rel) +#pragma intrinsic(_InterlockedCompareExchange64) +#pragma intrinsic(_InterlockedCompareExchange64_nf) +#pragma intrinsic(_InterlockedCompareExchange64_acq) +#pragma intrinsic(_InterlockedCompareExchange64_rel) +#pragma intrinsic(_InterlockedCompareExchangePointer) +#pragma intrinsic(_InterlockedCompareExchangePointer_nf) +#pragma intrinsic(_InterlockedCompareExchangePointer_acq) +#pragma intrinsic(_InterlockedCompareExchangePointer_rel) + +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(dest, exchange, compare) _InterlockedCompareExchange8_nf((char*)(dest), (char)(exchange), (char)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange8_acq((char*)(dest), (char)(exchange), (char)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(dest, exchange, compare) _InterlockedCompareExchange8_rel((char*)(dest), (char)(exchange), (char)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(dest, exchange, compare) _InterlockedCompareExchange16_nf((short*)(dest), (short)(exchange), (short)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange16_acq((short*)(dest), (short)(exchange), (short)(compare)) +#define 
BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(dest, exchange, compare) _InterlockedCompareExchange16_rel((short*)(dest), (short)(exchange), (short)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(dest, exchange, compare) _InterlockedCompareExchange_nf((long*)(dest), (long)(exchange), (long)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange_acq((long*)(dest), (long)(exchange), (long)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(dest, exchange, compare) _InterlockedCompareExchange_rel((long*)(dest), (long)(exchange), (long)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(dest, exchange, compare) _InterlockedCompareExchange64_nf((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchange64_acq((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(dest, exchange, compare) _InterlockedCompareExchange64_rel((__int64*)(dest), (__int64)(exchange), (__int64)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELAXED(dest, exchange, compare) _InterlockedCompareExchangePointer_nf((void**)(dest), (void*)(exchange), (void*)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_ACQUIRE(dest, exchange, compare) _InterlockedCompareExchangePointer_acq((void**)(dest), (void*)(exchange), (void*)(compare)) +#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELEASE(dest, exchange, compare) _InterlockedCompareExchangePointer_rel((void**)(dest), (void*)(exchange), (void*)(compare)) + +#pragma intrinsic(_InterlockedExchangeAdd8_nf) +#pragma intrinsic(_InterlockedExchangeAdd8_acq) +#pragma intrinsic(_InterlockedExchangeAdd8_rel) +#pragma intrinsic(_InterlockedExchangeAdd16_nf) +#pragma intrinsic(_InterlockedExchangeAdd16_acq) +#pragma intrinsic(_InterlockedExchangeAdd16_rel) +#pragma intrinsic(_InterlockedExchangeAdd_nf) +#pragma intrinsic(_InterlockedExchangeAdd_acq) +#pragma intrinsic(_InterlockedExchangeAdd_rel) +#pragma intrinsic(_InterlockedExchangeAdd64_nf) +#pragma intrinsic(_InterlockedExchangeAdd64_acq) +#pragma intrinsic(_InterlockedExchangeAdd64_rel) + +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(dest, addend) _InterlockedExchangeAdd8_nf((char*)(dest), (char)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(dest, addend) _InterlockedExchangeAdd8_acq((char*)(dest), (char)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(dest, addend) _InterlockedExchangeAdd8_rel((char*)(dest), (char)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(dest, addend) _InterlockedExchangeAdd16_nf((short*)(dest), (short)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(dest, addend) _InterlockedExchangeAdd16_acq((short*)(dest), (short)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(dest, addend) _InterlockedExchangeAdd16_rel((short*)(dest), (short)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(dest, addend) 
_InterlockedExchangeAdd_nf((long*)(dest), (long)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(dest, addend) _InterlockedExchangeAdd_acq((long*)(dest), (long)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(dest, addend) _InterlockedExchangeAdd_rel((long*)(dest), (long)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(dest, addend) _InterlockedExchangeAdd64_nf((__int64*)(dest), (__int64)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(dest, addend) _InterlockedExchangeAdd64_acq((__int64*)(dest), (__int64)(addend)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(dest, addend) _InterlockedExchangeAdd64_rel((__int64*)(dest), (__int64)(addend)) + +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED((long*)(dest), byte_offset)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE((long*)(dest), byte_offset)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE((long*)(dest), byte_offset)) + +#pragma intrinsic(_InterlockedExchange8_nf) +#pragma intrinsic(_InterlockedExchange8_acq) +#pragma intrinsic(_InterlockedExchange16_nf) +#pragma intrinsic(_InterlockedExchange16_acq) +#pragma intrinsic(_InterlockedExchange_nf) +#pragma intrinsic(_InterlockedExchange_acq) +#pragma intrinsic(_InterlockedExchange64_nf) +#pragma intrinsic(_InterlockedExchange64_acq) +#pragma intrinsic(_InterlockedExchangePointer) +#pragma intrinsic(_InterlockedExchangePointer_nf) +#pragma intrinsic(_InterlockedExchangePointer_acq) +#if _MSC_VER >= 1800 +#pragma intrinsic(_InterlockedExchange8_rel) +#pragma intrinsic(_InterlockedExchange16_rel) +#pragma intrinsic(_InterlockedExchange_rel) +#pragma intrinsic(_InterlockedExchange64_rel) +#pragma intrinsic(_InterlockedExchangePointer_rel) +#endif + +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(dest, newval) _InterlockedExchange8_nf((char*)(dest), (char)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(dest, newval) _InterlockedExchange8_acq((char*)(dest), (char)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(dest, newval) _InterlockedExchange16_nf((short*)(dest), (short)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(dest, newval) _InterlockedExchange16_acq((short*)(dest), (short)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(dest, newval) _InterlockedExchange_nf((long*)(dest), (long)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(dest, newval) _InterlockedExchange_acq((long*)(dest), (long)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(dest, newval) _InterlockedExchange64_nf((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(dest, newval) _InterlockedExchange64_acq((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELAXED(dest, newval) _InterlockedExchangePointer_nf((void**)(dest), (void*)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_ACQUIRE(dest, newval) _InterlockedExchangePointer_acq((void**)(dest), 
(void*)(newval)) + +#if _MSC_VER >= 1800 +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) _InterlockedExchange8_rel((char*)(dest), (char)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) _InterlockedExchange16_rel((short*)(dest), (short)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) _InterlockedExchange_rel((long*)(dest), (long)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) _InterlockedExchange64_rel((__int64*)(dest), (__int64)(newval)) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) _InterlockedExchangePointer_rel((void**)(dest), (void*)(newval)) +#else +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) +#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) +#endif + +#pragma intrinsic(_InterlockedAnd8_nf) +#pragma intrinsic(_InterlockedAnd8_acq) +#pragma intrinsic(_InterlockedAnd8_rel) +#pragma intrinsic(_InterlockedAnd16_nf) +#pragma intrinsic(_InterlockedAnd16_acq) +#pragma intrinsic(_InterlockedAnd16_rel) +#pragma intrinsic(_InterlockedAnd_nf) +#pragma intrinsic(_InterlockedAnd_acq) +#pragma intrinsic(_InterlockedAnd_rel) +#pragma intrinsic(_InterlockedAnd64_nf) +#pragma intrinsic(_InterlockedAnd64_acq) +#pragma intrinsic(_InterlockedAnd64_rel) + +#define BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(dest, arg) _InterlockedAnd8_nf((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(dest, arg) _InterlockedAnd8_acq((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(dest, arg) _InterlockedAnd8_rel((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(dest, arg) _InterlockedAnd16_nf((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(dest, arg) _InterlockedAnd16_acq((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(dest, arg) _InterlockedAnd16_rel((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(dest, arg) _InterlockedAnd_nf((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(dest, arg) _InterlockedAnd_acq((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(dest, arg) _InterlockedAnd_rel((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(dest, arg) _InterlockedAnd64_nf((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(dest, arg) _InterlockedAnd64_acq((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(dest, arg) _InterlockedAnd64_rel((__int64*)(dest), (__int64)(arg)) + +#pragma intrinsic(_InterlockedOr8_nf) +#pragma intrinsic(_InterlockedOr8_acq) +#pragma intrinsic(_InterlockedOr8_rel) +#pragma intrinsic(_InterlockedOr16_nf) +#pragma intrinsic(_InterlockedOr16_acq) +#pragma intrinsic(_InterlockedOr16_rel) +#pragma intrinsic(_InterlockedOr_nf) +#pragma intrinsic(_InterlockedOr_acq) +#pragma intrinsic(_InterlockedOr_rel) +#pragma intrinsic(_InterlockedOr64_nf) +#pragma intrinsic(_InterlockedOr64_acq) +#pragma 
intrinsic(_InterlockedOr64_rel) + +#define BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(dest, arg) _InterlockedOr8_nf((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(dest, arg) _InterlockedOr8_acq((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(dest, arg) _InterlockedOr8_rel((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(dest, arg) _InterlockedOr16_nf((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(dest, arg) _InterlockedOr16_acq((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(dest, arg) _InterlockedOr16_rel((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(dest, arg) _InterlockedOr_nf((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(dest, arg) _InterlockedOr_acq((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(dest, arg) _InterlockedOr_rel((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(dest, arg) _InterlockedOr64_nf((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(dest, arg) _InterlockedOr64_acq((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(dest, arg) _InterlockedOr64_rel((__int64*)(dest), (__int64)(arg)) + +#pragma intrinsic(_InterlockedXor8_nf) +#pragma intrinsic(_InterlockedXor8_acq) +#pragma intrinsic(_InterlockedXor8_rel) +#pragma intrinsic(_InterlockedXor16_nf) +#pragma intrinsic(_InterlockedXor16_acq) +#pragma intrinsic(_InterlockedXor16_rel) +#pragma intrinsic(_InterlockedXor_nf) +#pragma intrinsic(_InterlockedXor_acq) +#pragma intrinsic(_InterlockedXor_rel) +#pragma intrinsic(_InterlockedXor64_nf) +#pragma intrinsic(_InterlockedXor64_acq) +#pragma intrinsic(_InterlockedXor64_rel) + +#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(dest, arg) _InterlockedXor8_nf((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(dest, arg) _InterlockedXor8_acq((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(dest, arg) _InterlockedXor8_rel((char*)(dest), (char)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(dest, arg) _InterlockedXor16_nf((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(dest, arg) _InterlockedXor16_acq((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(dest, arg) _InterlockedXor16_rel((short*)(dest), (short)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(dest, arg) _InterlockedXor_nf((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(dest, arg) _InterlockedXor_acq((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(dest, arg) _InterlockedXor_rel((long*)(dest), (long)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(dest, arg) _InterlockedXor64_nf((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg)) +#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg)) + +#endif // _MSC_VER >= 1700 && defined(_M_ARM) #else // defined(_MSC_VER) && _MSC_VER >= 1400 diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp index bd17578..79a7311 100644 --- a/include/boost/atomic/detail/ops_gcc_sync.hpp +++ b/include/boost/atomic/detail/ops_gcc_sync.hpp @@ -111,7 +111,7 @@ struct gcc_sync_operations { if ((order & memory_order_release) != 0) 
__sync_synchronize(); - return __sync_lock_test_and_set(&storage, 1); + return __sync_lock_test_and_set(&storage, 1) != 0; } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/ops_gcc_x86.hpp b/include/boost/atomic/detail/ops_gcc_x86.hpp index f4585d7..a2fe64f 100644 --- a/include/boost/atomic/detail/ops_gcc_x86.hpp +++ b/include/boost/atomic/detail/ops_gcc_x86.hpp @@ -142,7 +142,7 @@ struct gcc_x86_operations : static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { - return Derived::exchange(storage, (storage_type)1, order); + return Derived::exchange(storage, (storage_type)1, order) != 0; } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/ops_msvc_arm.hpp b/include/boost/atomic/detail/ops_msvc_arm.hpp new file mode 100644 index 0000000..f6f5626 --- /dev/null +++ b/include/boost/atomic/detail/ops_msvc_arm.hpp @@ -0,0 +1,824 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_msvc_arm.hpp + * + * This header contains implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p)) +#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p)) +#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p)) +#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p)) +#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v)) +#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v)) +#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v)) +#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v)) + +namespace boost { +namespace atomics { +namespace detail { + +struct msvc_arm_operations_base +{ + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT + { + __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later + } + + static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + + if ((order & memory_order_release) != 0) + hardware_full_fence(); + + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + + if (order == memory_order_seq_cst) + hardware_full_fence(); + + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + } + + static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + + if (order == memory_order_seq_cst) + hardware_full_fence(); + } + + static BOOST_FORCEINLINE BOOST_CONSTEXPR 
memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + // Combine order flags together and transform memory_order_consume to memory_order_acquire + return static_cast< memory_order >(((failure_order | success_order) & ~memory_order_consume) | (((failure_order | success_order) & memory_order_consume) >> 3u)); + } +}; + +template< typename T, typename Derived > +struct msvc_arm_operations : + public msvc_arm_operations_base +{ + typedef T storage_type; + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef typename make_signed< storage_type >::type signed_storage_type; + return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return Derived::exchange(storage, (storage_type)1, order) != 0; + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + Derived::store(storage, (storage_type)0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 1u > : + public msvc_arm_operations< storage8_t, operations< 1u > > +{ + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + if (order != memory_order_seq_cst) + { + fence_before_store(order); + BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v); + fence_after_store(order); + } + else + { + exchange(storage, v, order); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage); + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = 
static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected, old_val; + + switch (cas_common_order(success_order, failure_order)) + { + case memory_order_relaxed: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous)); + break; + case memory_order_consume: + case memory_order_acquire: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous)); + break; + case memory_order_release: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous)); + break; + } + expected = old_val; + + return (previous == old_val); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v)); 
+ break; + } + return v; + } +}; + +template< > +struct operations< 2u > : + public msvc_arm_operations< storage16_t, operations< 2u > > +{ + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + if (order != memory_order_seq_cst) + { + fence_before_store(order); + BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v); + fence_after_store(order); + } + else + { + exchange(storage, v, order); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage); + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected, old_val; + + switch (cas_common_order(success_order, failure_order)) + { + case memory_order_relaxed: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous)); + break; + case memory_order_consume: + case memory_order_acquire: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous)); + break; + case memory_order_release: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous)); + break; + } + expected = old_val; + + return (previous == old_val); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v)); + break; + } + return v; + } +}; + +template< > +struct operations< 4u > : + public msvc_arm_operations< storage32_t, operations< 4u > > +{ + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + if (order != memory_order_seq_cst) + { + fence_before_store(order); + BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v); + fence_after_store(order); + } + else + { + exchange(storage, v, order); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage); + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, 
memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected, old_val; + + switch (cas_common_order(success_order, failure_order)) + { + case memory_order_relaxed: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous)); + break; + case memory_order_consume: + case memory_order_acquire: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous)); + break; + case memory_order_release: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous)); + break; + } + expected = old_val; + + return (previous == old_val); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + 
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v)); + break; + } + return v; + } +}; + +template< > +struct operations< 8u > : + public msvc_arm_operations< storage64_t, operations< 8u > > +{ + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + if (order != memory_order_seq_cst) + { + fence_before_store(order); + BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v); + fence_after_store(order); + } + else + { + exchange(storage, v, order); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage); + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected, old_val; + + switch (cas_common_order(success_order, failure_order)) + { + case memory_order_relaxed: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous)); + break; + case memory_order_consume: + case memory_order_acquire: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous)); + break; + case memory_order_release: + old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old_val = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous)); + break; + } + expected = old_val; + + return (previous == old_val); + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v)); + break; + } + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_relaxed: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v)); + break; + case memory_order_consume: + case memory_order_acquire: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v)); + break; + case memory_order_release: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v)); + break; + } + return v; + } +}; + + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + if (order == memory_order_seq_cst) + msvc_arm_operations_base::hardware_full_fence(); + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); +} + +BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT +{ + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8 +#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16 +#undef BOOST_ATOMIC_DETAIL_ARM_LOAD32 +#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64 +#undef BOOST_ATOMIC_DETAIL_ARM_STORE8 +#undef BOOST_ATOMIC_DETAIL_ARM_STORE16 +#undef BOOST_ATOMIC_DETAIL_ARM_STORE32 +#undef BOOST_ATOMIC_DETAIL_ARM_STORE64 + +#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_msvc_common.hpp b/include/boost/atomic/detail/ops_msvc_common.hpp new file mode 100644 index 0000000..53628f3 --- /dev/null +++ b/include/boost/atomic/detail/ops_msvc_common.hpp @@ -0,0 +1,38 @@ +/* + * Distributed under the Boost Software License, Version 1.0. 
+ * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_msvc_common.hpp + * + * This header contains common tools for MSVC implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +// Define compiler barriers +#if defined(__INTEL_COMPILER) +#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() __memory_barrier() +#elif defined(_MSC_VER) && !defined(_WIN32_WCE) +extern "C" void _ReadWriteBarrier(void); +#pragma intrinsic(_ReadWriteBarrier) +#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() _ReadWriteBarrier() +#endif + +#ifndef BOOST_ATOMIC_DETAIL_COMPILER_BARRIER +#define BOOST_ATOMIC_DETAIL_COMPILER_BARRIER() +#endif + +#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_COMMON_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_msvc_x86.hpp b/include/boost/atomic/detail/ops_msvc_x86.hpp new file mode 100644 index 0000000..af94634 --- /dev/null +++ b/include/boost/atomic/detail/ops_msvc_x86.hpp @@ -0,0 +1,902 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_msvc_x86.hpp + * + * This header contains implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#include +#include +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) +#include +#include +#endif +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +struct msvc_x86_operations_base +{ + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT + { +#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2)) + // Use mfence only if SSE2 is available + _mm_mfence(); +#else + long tmp; + BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0); +#endif + } + + static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + } + + static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + } + + static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + + // On x86 and x86_64 there is no need for a hardware barrier, + // even if seq_cst memory order is requested, because all + // seq_cst writes are implemented with lock-prefixed operations + // or xchg which has implied lock prefix. Therefore normal loads + // are already ordered with seq_cst stores on these architectures. 
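        // As a sketch, in the store-buffering litmus test (both stores
        // compiled as xchg, both loads as plain mov):
        //
        //     ; thread 1                  ; thread 2
        //     xchg [x], eax  ; x = 1      xchg [y], eax  ; y = 1
        //     mov ebx, [y]                mov ebx, [x]
        //
        // The outcome "both threads read 0" is impossible: the implied lock
        // prefix of xchg drains the store buffer before the following load.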
+ } +}; + +template< typename T, typename Derived > +struct msvc_x86_operations : + public msvc_x86_operations_base +{ + typedef T storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + if (order != memory_order_seq_cst) + { + fence_before(order); + storage = v; + fence_after(order); + } + else + { + Derived::exchange(storage, v, order); + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = storage; + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef typename make_signed< storage_type >::type signed_storage_type; + return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return Derived::exchange(storage, (storage_type)1, order) != 0; + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, (storage_type)0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 4u > : + public msvc_x86_operations< storage32_t, operations< 4u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous)); + bool success = (previous == old_val); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + expected = old_val; + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_INTERLOCKED_AND) + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v)); + fence_after(order); + return v; +#else + storage_type res = storage; + while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {} + return res; +#endif + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, 
storage_type v, memory_order order) BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_INTERLOCKED_OR) + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v)); + fence_after(order); + return v; +#else + storage_type res = storage; + while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {} + return res; +#endif + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_INTERLOCKED_XOR) + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v)); + fence_after(order); + return v; +#else + storage_type res = storage; + while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {} + return res; +#endif + } +}; + +#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) + +template< > +struct operations< 1u > : + public msvc_x86_operations< storage8_t, operations< 1u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous)); + bool success = (previous == old_val); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + expected = old_val; + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v)); + fence_after(order); + return v; + } +}; + +#elif defined(_M_IX86) + +template< > +struct operations< 1u > : + public msvc_x86_operations< storage8_t, operations< 1u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + mov edx, storage + movzx eax, v + lock xadd byte ptr [edx], al + mov v, al + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) 
BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + mov edx, storage + movzx eax, v + xchg byte ptr [edx], al + mov v, al + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + fence_before(success_order); + bool success; + __asm + { + mov esi, expected + mov edi, storage + movzx eax, byte ptr [esi] + movzx edx, desired + lock cmpxchg byte ptr [edi], dl + mov byte ptr [esi], al + sete success + }; + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + xor edx, edx + mov edi, storage + movzx ebx, v + movzx eax, byte ptr [edi] + align 16 + again: + mov dl, al + and dl, bl + lock cmpxchg byte ptr [edi], dl + jne again + mov v, al + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + xor edx, edx + mov edi, storage + movzx ebx, v + movzx eax, byte ptr [edi] + align 16 + again: + mov dl, al + or dl, bl + lock cmpxchg byte ptr [edi], dl + jne again + mov v, al + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + xor edx, edx + mov edi, storage + movzx ebx, v + movzx eax, byte ptr [edi] + align 16 + again: + mov dl, al + xor dl, bl + lock cmpxchg byte ptr [edi], dl + jne again + mov v, al + }; + fence_after(order); + return v; + } +}; + +#else + +template< > +struct operations< 1u > : + public operations< 4u > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef make_signed< storage_type >::type signed_storage_type; + return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } +}; + +#endif + +#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16) + +template< > +struct operations< 2u > : + public msvc_x86_operations< storage16_t, operations< 2u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order 
success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous)); + bool success = (previous == old_val); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + expected = old_val; + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v)); + fence_after(order); + return v; + } +}; + +#elif defined(_M_IX86) + +template< > +struct operations< 2u > : + public msvc_x86_operations< storage16_t, operations< 2u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + mov edx, storage + movzx eax, v + lock xadd word ptr [edx], ax + mov v, ax + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + mov edx, storage + movzx eax, v + xchg word ptr [edx], ax + mov v, ax + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + fence_before(success_order); + bool success; + __asm + { + mov esi, expected + mov edi, storage + movzx eax, word ptr [esi] + movzx edx, desired + lock cmpxchg word ptr [edi], dx + mov word ptr [esi], ax + sete success + }; + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + xor edx, edx + mov edi, storage + movzx ebx, v + movzx eax, word ptr [edi] + align 16 + again: + mov dx, ax + and dx, bx + lock cmpxchg word ptr [edi], dx + jne again + mov v, ax + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + xor edx, edx + mov edi, storage + movzx ebx, v + movzx eax, word ptr [edi] + align 16 + again: + mov dx, ax + or dx, bx + lock cmpxchg word ptr [edi], dx + jne again + mov v, ax + }; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm + { + xor edx, edx + mov edi, storage + movzx ebx, v + movzx eax, 
word ptr [edi] + align 16 + again: + mov dx, ax + xor dx, bx + lock cmpxchg word ptr [edi], dx + jne again + mov v, ax + }; + fence_after(order); + return v; + } +}; + +#else + +template< > +struct operations< 2u > : + public operations< 4u > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef make_signed< storage_type >::type signed_storage_type; + return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } +}; + +#endif + + +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) + +struct msvc_dcas_x86 +{ + typedef storage64_t storage_type; + + // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations: + // + // The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically: + // * Reading or writing a quadword aligned on a 64-bit boundary + // + // Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64-bit native types for storage and dynamic memory allocations + // have at least 8-byte alignment. The only unfortunate case is when atomic<> is placed on the stack and is not 8-byte aligned (as on 32-bit Windows). + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type volatile* p = &storage; + if (((uint32_t)p & 0x00000007) == 0) + { +#if defined(_M_IX86_FP) && _M_IX86_FP >= 2 +#if defined(__AVX__) + __asm + { + mov edx, p + vmovq xmm4, v + vmovq qword ptr [edx], xmm4 + }; +#else + __asm + { + mov edx, p + movq xmm4, v + movq qword ptr [edx], xmm4 + }; +#endif +#else + __asm + { + mov edx, p + fild v + fistp qword ptr [edx] + }; +#endif + } + else + { + __asm + { + mov edi, p + mov ebx, dword ptr [v] + mov ecx, dword ptr [v + 4] + mov eax, dword ptr [edi] + mov edx, dword ptr [edi + 4] + align 16 + again: + lock cmpxchg8b qword ptr [edi] + jne again + }; + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT + { + storage_type const volatile* p = &storage; + storage_type value; + + if (((uint32_t)p & 0x00000007) == 0) + { +#if defined(_M_IX86_FP) && _M_IX86_FP >= 2 +#if defined(__AVX__) + __asm + { + mov edx, p + vmovq xmm4, qword ptr [edx] + vmovq value, xmm4 + }; +#else + __asm + { + mov edx, p + movq xmm4, qword ptr [edx] + movq value, xmm4 + }; +#endif +#else + __asm + { + mov edx, p + fild qword ptr [edx] + fistp value + }; +#endif + } + else + { + // We don't care about the comparison result here; the previous value will be stored into value anyway. + // Also, we don't care about the ebx and ecx values; they just have to be equal to eax and edx before cmpxchg8b.
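+ // (Illustrative note, not part of the original patch: in C-like pseudocode the
+ // cmpxchg8b-based load below amounts to
+ //
+ //     value = cmpxchg8b(p, /* expected = */ garbage, /* desired = */ the same garbage);
+ //
+ // If the compare happens to succeed, memory is rewritten with the very value it
+ // already held, which is harmless; if it fails, edx:eax receives the current
+ // value. Either way the full 64-bit value ends up in edx:eax, read atomically.)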
+ __asm + { + mov edi, p + mov eax, ebx + mov edx, ecx + lock cmpxchg8b qword ptr [edi] + mov dword ptr [value], eax + mov dword ptr [value + 4], edx + }; + } + + return value; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + storage_type volatile* p = &storage; +#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64) + const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected); + const bool result = (old_val == expected); + expected = old_val; + return result; +#else + bool result; + __asm + { + mov edi, p + mov esi, expected + mov ebx, dword ptr [desired] + mov ecx, dword ptr [desired + 4] + mov eax, dword ptr [esi] + mov edx, dword ptr [esi + 4] + lock cmpxchg8b qword ptr [edi] + mov dword ptr [esi], eax + mov dword ptr [esi + 4], edx + sete result + }; + return result; +#endif + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 8u > : + public cas_based_operations< msvc_dcas_x86 > +{ +}; + +#elif defined(_M_AMD64) + +template< > +struct operations< 8u > : + public msvc_x86_operations< storage64_t, operations< 8u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous)); + bool success = (previous == old_val); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + expected = old_val; + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v)); + fence_after(order); + return v; + } +}; + +#endif + +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) + +struct msvc_dcas_x86_64 +{ + typedef storage128_t storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + storage_type value = const_cast< storage_type& >(storage); + while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {} + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT + { + storage_type value = storage_type(); + BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value); + return value; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected); + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 16u > : + public cas_based_operations< msvc_dcas_x86_64 > +{ +}; + +#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + if (order == memory_order_seq_cst) + msvc_x86_operations_base::hardware_full_fence(); + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); +} + +BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT +{ + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_windows.hpp b/include/boost/atomic/detail/ops_windows.hpp new file mode 100644 index 0000000..ef22eda --- /dev/null +++ b/include/boost/atomic/detail/ops_windows.hpp @@ -0,0 +1,239 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_windows.hpp + * + * This header contains implementation of the \c operations template. + * + * This implementation is the most basic version for Windows. It should + * work for any non-MSVC-like compilers as long as there are Interlocked WinAPI + * functions available. This version is also used for WinCE. + * + * Notably, this implementation is not as efficient as other + * versions based on compiler intrinsics. 
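+ *
+ * As an illustrative sketch (not part of the original header), the backend
+ * below derives everything from a few Interlocked-based primitives:
+ *
+ *   load(storage)     -> fetch_add(storage, 0)   // read through an atomic RMW
+ *   store(storage, v) -> exchange(storage, v)    // write through an atomic RMW
+ *   full fence        -> InterlockedExchange on a dummy local variable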
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +struct windows_operations_base +{ + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT + { + long tmp; + BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0); + } + + static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + } + + static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT + { + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + } +}; + +template< typename T, typename Derived > +struct windows_operations : + public windows_operations_base +{ + typedef T storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + Derived::exchange(storage, v, order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return Derived::fetch_add(storage, (storage_type)0, order); + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef typename make_signed< storage_type >::type signed_storage_type; + return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return Derived::exchange(storage, (storage_type)1, order) != 0; + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, (storage_type)0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +template< > +struct operations< 4u > : + public windows_operations< storage32_t, operations< 4u > > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v)); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + storage_type previous = expected; + fence_before(success_order); + storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous)); + bool success = (previous == old_val); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + 
expected = old_val; + return success; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_INTERLOCKED_AND) + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v)); + fence_after(order); + return v; +#else + storage_type res = storage; + while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {} + return res; +#endif + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_INTERLOCKED_OR) + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v)); + fence_after(order); + return v; +#else + storage_type res = storage; + while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {} + return res; +#endif + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_INTERLOCKED_XOR) + fence_before(order); + v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v)); + fence_after(order); + return v; +#else + storage_type res = storage; + while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {} + return res; +#endif + } +}; + +template< > +struct operations< 1u > : + public operations< 4u > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef make_signed< storage_type >::type signed_storage_type; + return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } +}; + +template< > +struct operations< 2u > : + public operations< 4u > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + typedef make_signed< storage_type >::type signed_storage_type; + return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); + } +}; + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); + if (order == memory_order_seq_cst) + windows_operations_base::hardware_full_fence(); + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); +} + +BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT +{ + BOOST_ATOMIC_DETAIL_COMPILER_BARRIER(); +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index 87ddab5..927ea77 100644 --- 
a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -70,7 +70,7 @@ #define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86 -#elif defined(_MSC_VER) && defined(_M_ARM) +#elif defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM) #define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm From 652e87b7e7d84a8331e9a8eb6664d42134c4bf5d Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Wed, 7 May 2014 22:21:04 +0400 Subject: [PATCH 10/23] Added Linux ARM backend. --- .../boost/atomic/detail/ops_gcc_atomic.hpp | 1 - include/boost/atomic/detail/ops_gcc_sync.hpp | 58 ++++- include/boost/atomic/detail/ops_linux_arm.hpp | 232 ++++++++++++++++++ 3 files changed, 289 insertions(+), 2 deletions(-) create mode 100644 include/boost/atomic/detail/ops_linux_arm.hpp diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp index 5f81f19..8260780 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -359,7 +359,6 @@ BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT } } // namespace detail - } // namespace atomics } // namespace boost diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp index 79a7311..1e969cc 100644 --- a/include/boost/atomic/detail/ops_gcc_sync.hpp +++ b/include/boost/atomic/detail/ops_gcc_sync.hpp @@ -16,6 +16,7 @@ #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_ +#include // UINT64_C #include #include #include @@ -183,6 +184,20 @@ struct operations< 1u > : #endif > { +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return fetch_add(storage, -v, order); + } +#endif }; #endif @@ -201,6 +216,20 @@ struct operations< 2u > : #endif > { +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return fetch_add(storage, -v, order); + } +#endif }; #endif @@ -217,6 +246,20 @@ struct operations< 4u > : #endif > { +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & 0xffffffff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return fetch_add(storage, -v, order); + } +#endif }; 
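+// (Editorial illustration, not part of the original patch: the CAS loops above
+// are needed because the actual storage may be wider than the logical integer.
+// A plain __sync_fetch_and_add on the wide storage would let the addition carry
+// into the high bits; e.g. an 8-bit value held in wider storage must wrap as
+// 0xff + 1 -> 0x00 rather than become 0x100. Masking the new value inside the
+// CAS loop enforces this wrap-around:
+//
+//     while (!compare_exchange_strong(storage, res, (res + v) & 0xff, order, memory_order_relaxed)) {}
+// )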
#endif @@ -231,6 +274,20 @@ struct operations< 8u > : #endif > { +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) && defined(BOOST_HAS_INT128) + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_strong(storage, res, (res + v) & UINT64_C(0xffffffffffffffff), order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return fetch_add(storage, -v, order); + } +#endif }; #endif @@ -277,7 +334,6 @@ BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT } } // namespace detail - } // namespace atomics } // namespace boost diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp new file mode 100644 index 0000000..3ce4c1e --- /dev/null +++ b/include/boost/atomic/detail/ops_linux_arm.hpp @@ -0,0 +1,232 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009, 2011 Helge Bahmann + * Copyright (c) 2009 Phil Endecott + * Copyright (c) 2013 Tim Blechmann + * Linux-specific code by Phil Endecott + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_linux_arm.hpp + * + * This header contains implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +// Different ARM processors have different atomic instructions. In particular, +// architecture versions before v6 (which are still in widespread use, e.g. the +// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap. +// On Linux the kernel provides some support that lets us abstract away from +// these differences: it provides emulated CAS and barrier functions at special +// addresses that are guaranteed not to be interrupted by the kernel. Using +// this facility is slightly slower than inline assembler would be, but much +// faster than a system call. +// +// While this emulated CAS is "strong" in the sense that it does not fail +// "spuriously" (i.e.: it never fails to perform the exchange when the value +// found equals the value expected), it does not return the found value on +// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must +// return the found value on failure, and we have to manually load this value +// after the emulated CAS reports failure. This in turn introduces a race +// between the CAS failing (due to the "wrong" value being found) and subsequently +// loading (which might turn up the "right" value). From an application's +// point of view this looks like "spurious failure", and therefore the +// emulated CAS is only good enough to provide compare_exchange_weak +// semantics. 
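+// (Editorial sketch, not part of the original patch: the kernel publishes these
+// helpers at fixed addresses in the vector page, so invoking them is just an
+// indirect call through a casted constant. A return value of 0 means the
+// exchange was performed:
+//
+//     typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int* ptr); // hypothetical name
+//     bool cas(volatile int* p, int oldval, int newval)
+//     {
+//         return ((kuser_cmpxchg_t)0xffff0fc0)(oldval, newval, p) == 0;
+//     }
+//
+// The memory barrier helper is reached the same way at address 0xffff0fa0.)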
+ +struct linux_arm_cas +{ + typedef storage32_t storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before_store(order); + storage = v; + fence_after_store(order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = storage; + fence_after_load(order); + return v; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + while (true) + { + storage_type tmp = expected; + if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order)) + return true; + if (tmp != expected) + { + expected = tmp; + return false; + } + } + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { + typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr); + + if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0) + { + return true; + } + else + { + expected = storage; + return false; + } + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } + + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT + { + typedef void (*kernel_dmb_t)(void); + ((kernel_dmb_t)0xffff0fa0)(); + } + +private: + static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + hardware_full_fence(); + break; + case memory_order_consume: + default:; + } + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + if (order == memory_order_seq_cst) + hardware_full_fence(); + } + + static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + hardware_full_fence(); + break; + default:; + } + } +}; + +template< > +struct operations< 1u > : + public cas_based_operations< linux_arm_cas > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_weak(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return fetch_add(storage, -v, order); + } +}; + +template< > +struct operations< 2u > : + public cas_based_operations< linux_arm_cas > +{ + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + // We must resort to a CAS loop to handle overflows + storage_type res = storage; + while (!compare_exchange_weak(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} + return res; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + return fetch_add(storage, -v, order); 
+ } +}; + +template< > +struct operations< 4u > : + public cas_based_operations< linux_arm_cas > +{ +}; + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_relaxed: + break; + case memory_order_release: + case memory_order_consume: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + linux_arm_cas::hardware_full_fence(); + break; + } +} + +BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_ From 4cb51c6b767bffcdb6db01c2238c891ce27481c1 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Fri, 9 May 2014 18:53:06 +0400 Subject: [PATCH 11/23] Signed flag is now forwarded to the backends. Forwarding the "signed" property allows to specialize operations implementation, which may be desired for some architectures. This also eliminates the formally implementation-defined result of unsigned-to- target type. Also for x86 implementations removed compiler barriers since the compilers already treat intrinsics/asm blocks as ones. --- include/boost/atomic/detail/atomic_flag.hpp | 2 +- .../boost/atomic/detail/atomic_template.hpp | 39 +- .../boost/atomic/detail/operations_fwd.hpp | 2 +- include/boost/atomic/detail/ops_emulated.hpp | 56 +-- .../atomic/detail/ops_extending_cas_based.hpp | 65 +++ .../boost/atomic/detail/ops_gcc_atomic.hpp | 48 +-- include/boost/atomic/detail/ops_gcc_sync.hpp | 116 ++---- include/boost/atomic/detail/ops_gcc_x86.hpp | 142 +++---- include/boost/atomic/detail/ops_linux_arm.hpp | 46 +-- include/boost/atomic/detail/ops_msvc_arm.hpp | 60 +-- include/boost/atomic/detail/ops_msvc_x86.hpp | 373 +++++++----------- include/boost/atomic/detail/ops_windows.hpp | 79 ++-- include/boost/atomic/detail/storage_types.hpp | 99 ++++- src/lockpool.cpp | 2 +- 14 files changed, 515 insertions(+), 614 deletions(-) create mode 100644 include/boost/atomic/detail/ops_extending_cas_based.hpp diff --git a/include/boost/atomic/detail/atomic_flag.hpp b/include/boost/atomic/detail/atomic_flag.hpp index 3c274a0..4e0e2ba 100644 --- a/include/boost/atomic/detail/atomic_flag.hpp +++ b/include/boost/atomic/detail/atomic_flag.hpp @@ -34,7 +34,7 @@ namespace atomics { struct atomic_flag { - typedef atomics::detail::operations< 1u > operations; + typedef atomics::detail::operations< 1u, false > operations; typedef operations::storage_type storage_type; storage_type m_storage; diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp index 2073ccc..bfed528 100644 --- a/include/boost/atomic/detail/atomic_template.hpp +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -19,8 +19,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -33,8 +33,6 @@ #pragma warning(push) // 'boost::atomics::atomic' : multiple assignment operators specified #pragma warning(disable: 4522) -// In case of atomic: 'unsigned char' : forcing value to bool 'true' or 'false' (performance warning) -#pragma warning(disable: 4800) #endif namespace boost { @@ -73,9 +71,8 @@ class base_atomic< T, int > { private: typedef T value_type; 
- typedef typename make_unsigned< value_type >::type unsigned_value_type; typedef T difference_type; - typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value, boost::is_signed< T >::value > operations; protected: typedef value_type value_arg_type; @@ -88,7 +85,7 @@ protected: public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(static_cast< unsigned_value_type >(v)) {} + BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {} void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { @@ -96,7 +93,7 @@ public: BOOST_ASSERT(order != memory_order_acquire); BOOST_ASSERT(order != memory_order_acq_rel); - operations::store(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order); + operations::store(m_storage, static_cast< storage_type >(v), order); } value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT @@ -109,17 +106,17 @@ public: value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); + return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order)); } value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); + return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order)); } value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); + return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order)); } bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT @@ -128,8 +125,8 @@ public: BOOST_ASSERT(failure_order != memory_order_acq_rel); BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); - storage_type old_value = static_cast< storage_type >(static_cast< unsigned_value_type >(expected)); - const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(static_cast< unsigned_value_type >(desired)), success_order, failure_order); + storage_type old_value = static_cast< storage_type >(expected); + const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); expected = static_cast< value_type >(old_value); return res; } @@ -145,8 +142,8 @@ public: BOOST_ASSERT(failure_order != memory_order_acq_rel); BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order)); - storage_type old_value = static_cast< storage_type >(static_cast< unsigned_value_type >(expected)); - const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(static_cast< 
unsigned_value_type >(desired)), success_order, failure_order); + storage_type old_value = static_cast< storage_type >(expected); + const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order); expected = static_cast< value_type >(old_value); return res; } @@ -158,17 +155,17 @@ public: value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); + return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order)); } value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); + return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order)); } value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { - return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(static_cast< unsigned_value_type >(v)), order)); + return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order)); } bool is_lock_free() const volatile BOOST_NOEXCEPT @@ -231,7 +228,7 @@ class base_atomic< bool, int > { private: typedef bool value_type; - typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations; protected: typedef value_type value_arg_type; @@ -318,7 +315,7 @@ class base_atomic< T, void > { private: typedef T value_type; - typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations; protected: typedef value_type const& value_arg_type; @@ -408,7 +405,7 @@ class base_atomic< T*, void* > private: typedef T* value_type; typedef std::ptrdiff_t difference_type; - typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations; protected: typedef value_type value_arg_type; @@ -538,7 +535,7 @@ class base_atomic< void*, void* > private: typedef void* value_type; typedef std::ptrdiff_t difference_type; - typedef atomics::detail::operations< storage_size_of< value_type >::value > operations; + typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations; protected: typedef value_type value_arg_type; diff --git a/include/boost/atomic/detail/operations_fwd.hpp b/include/boost/atomic/detail/operations_fwd.hpp index 3a26281..69049e4 100644 --- a/include/boost/atomic/detail/operations_fwd.hpp +++ b/include/boost/atomic/detail/operations_fwd.hpp @@ -24,7 +24,7 @@ namespace boost { namespace atomics { namespace detail { -template< unsigned int Size > +template< unsigned int Size, bool Signed > struct operations; } // namespace detail diff --git a/include/boost/atomic/detail/ops_emulated.hpp b/include/boost/atomic/detail/ops_emulated.hpp index 2fa0f96..298d7cd 100644 --- a/include/boost/atomic/detail/ops_emulated.hpp +++ b/include/boost/atomic/detail/ops_emulated.hpp @@ -14,7 +14,6 @@ #ifndef 
BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_ -#include #include #include #include @@ -137,60 +136,9 @@ struct emulated_operations } }; -template< unsigned int Size > -struct storage_t -{ - unsigned char data[Size]; - - bool operator== (storage_t const& that) const - { - return std::memcmp(data, that.data, Size) == 0; - } - bool operator!= (storage_t const& that) const - { - return std::memcmp(data, that.data, Size) != 0; - } -}; - -template< unsigned int Size > -struct default_storage_type -{ - typedef storage_t< Size > type; -}; - -template< > -struct default_storage_type< 1u > -{ - typedef storage8_t type; -}; - -template< > -struct default_storage_type< 2u > -{ - typedef storage16_t type; -}; - -template< > -struct default_storage_type< 4u > -{ - typedef storage32_t type; -}; - -template< > -struct default_storage_type< 8u > -{ - typedef storage64_t type; -}; - -template< > -struct default_storage_type< 16u > -{ - typedef storage128_t type; -}; - -template< unsigned int Size > +template< unsigned int Size, bool Signed > struct operations : - public emulated_operations< typename default_storage_type< Size >::type > + public emulated_operations< typename make_storage_type< Size, Signed >::type > { }; diff --git a/include/boost/atomic/detail/ops_extending_cas_based.hpp b/include/boost/atomic/detail/ops_extending_cas_based.hpp new file mode 100644 index 0000000..5945b15 --- /dev/null +++ b/include/boost/atomic/detail/ops_extending_cas_based.hpp @@ -0,0 +1,65 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_extending_cas_based.hpp + * + * This header contains a boilerplate of the \c operations template implementation that requires sign/zero extension in arithmetic operations. 
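+ *
+ * For example (an illustrative sketch, not part of the original header), an
+ * 8-bit fetch_add emulated on top of 32-bit storage recomputes the new value
+ * in the narrow type first, so the result wraps at 8 bits before being
+ * widened back into the storage type:
+ *
+ *   emulated_storage_type new_val =
+ *       static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
+ *   // then CAS the widened new_val into the storage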
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_ + +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +template< typename Base, unsigned int Size, bool Signed > +struct extending_cas_based_operations : + public Base +{ + typedef typename Base::storage_type storage_type; + typedef typename make_storage_type< Size, Signed >::type emulated_storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + emulated_storage_type new_val; + do + { + new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v); + } + while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed)); + return old_val; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type old_val = Base::load(storage, memory_order_relaxed); + emulated_storage_type new_val; + do + { + new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v); + } + while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed)); + return old_val; + } +}; + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp index 8260780..89668b7 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -123,25 +123,25 @@ struct gcc_atomic_operations }; #if BOOST_ATOMIC_INT8_LOCK_FREE > 0 -template< > -struct operations< 1u > : - public gcc_atomic_operations< storage8_t > +template< bool Signed > +struct operations< 1u, Signed > : + public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type > { }; #endif #if BOOST_ATOMIC_INT16_LOCK_FREE > 0 -template< > -struct operations< 2u > : - public gcc_atomic_operations< storage16_t > +template< bool Signed > +struct operations< 2u, Signed > : + public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type > { }; #endif #if BOOST_ATOMIC_INT32_LOCK_FREE > 0 -template< > -struct operations< 4u > : - public gcc_atomic_operations< storage32_t > +template< bool Signed > +struct operations< 4u, Signed > : + public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type > { }; #endif @@ -150,9 +150,10 @@ struct operations< 4u > : #if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) // Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355 +template< bool Signed > struct clang_dcas_x86 { - typedef storage64_t storage_type; + typedef typename make_storage_type< 8u, Signed >::type storage_type; static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { @@ -263,17 +264,17 @@ struct clang_dcas_x86 } }; -template< > -struct operations< 8u > : - public cas_based_operations< clang_dcas_x86 > +template< bool Signed > +struct operations< 8u, Signed > : + public cas_based_operations< clang_dcas_x86< Signed > > { }; #else 
-template< > -struct operations< 8u > : - public gcc_atomic_operations< storage64_t > +template< bool Signed > +struct operations< 8u, Signed > : + public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type > { }; @@ -285,9 +286,10 @@ struct operations< 8u > : // Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149 // Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 +template< bool Signed > struct clang_dcas_x86_64 { - typedef storage128_t storage_type; + typedef typename make_storage_type< 16u, Signed >::type storage_type; static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { @@ -331,17 +333,17 @@ struct clang_dcas_x86_64 } }; -template< > -struct operations< 16u > : - public cas_based_operations< clang_dcas_x86_64 > +template< bool Signed > +struct operations< 16u, Signed > : + public cas_based_operations< clang_dcas_x86_64< Signed > > { }; #else -template< > -struct operations< 16u > : - public gcc_atomic_operations< storage128_t > +template< bool Signed > +struct operations< 16u, Signed > : + public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type > { }; diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp index 1e969cc..78afd23 100644 --- a/include/boost/atomic/detail/ops_gcc_sync.hpp +++ b/include/boost/atomic/detail/ops_gcc_sync.hpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #ifdef BOOST_HAS_PRAGMA_ONCE @@ -168,134 +169,69 @@ private: }; #if BOOST_ATOMIC_INT8_LOCK_FREE > 0 -template< > -struct operations< 1u > : - public gcc_sync_operations< +template< bool Signed > +struct operations< 1u, Signed > : #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) - storage8_t + public gcc_sync_operations< typename make_storage_type< 1u, Signed >::type > #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) - storage16_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed > #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - storage32_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed > #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) - storage64_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed > #else - storage128_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed > #endif - > { -#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - return fetch_add(storage, -v, order); - } -#endif }; #endif #if BOOST_ATOMIC_INT16_LOCK_FREE > 0 -template< > -struct operations< 2u > : - public gcc_sync_operations< +template< bool Signed > +struct operations< 2u, Signed > : #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) - storage16_t + public gcc_sync_operations< typename make_storage_type< 2u, 
Signed >::type > #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - storage32_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed > #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) - storage64_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed > #else - storage128_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed > #endif - > { -#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - return fetch_add(storage, -v, order); - } -#endif }; #endif #if BOOST_ATOMIC_INT32_LOCK_FREE > 0 -template< > -struct operations< 4u > : - public gcc_sync_operations< +template< bool Signed > +struct operations< 4u, Signed > : #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - storage32_t + public gcc_sync_operations< typename make_storage_type< 4u, Signed >::type > #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) - storage64_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed > #else - storage128_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed > #endif - > { -#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0xffffffff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - return fetch_add(storage, -v, order); - } -#endif }; #endif #if BOOST_ATOMIC_INT64_LOCK_FREE > 0 -template< > -struct operations< 8u > : - public gcc_sync_operations< +template< bool Signed > +struct operations< 8u, Signed > : #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) - storage64_t + public gcc_sync_operations< typename make_storage_type< 8u, Signed >::type > #else - storage128_t + public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed > #endif - > { -#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) && defined(BOOST_HAS_INT128) - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & UINT64_C(0xffffffffffffffff), order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - return fetch_add(storage, -v, order); - } -#endif }; - #endif #if BOOST_ATOMIC_INT128_LOCK_FREE > 0 
-template< > -struct operations< 16u > : - public gcc_sync_operations< storage128_t > +template< bool Signed > +struct operations< 16u, Signed > : + public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type > { }; #endif diff --git a/include/boost/atomic/detail/ops_gcc_x86.hpp b/include/boost/atomic/detail/ops_gcc_x86.hpp index a2fe64f..de7baac 100644 --- a/include/boost/atomic/detail/ops_gcc_x86.hpp +++ b/include/boost/atomic/detail/ops_gcc_x86.hpp @@ -156,41 +156,41 @@ struct gcc_x86_operations : } }; -template< > -struct operations< 1u > : - public gcc_x86_operations< storage8_t, operations< 1u > > +template< bool Signed > +struct operations< 1u, Signed > : + public gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "lock; xaddb %0, %1" : "+q" (v), "+m" (storage) : - : "cc" + : "cc", "memory" ); - fence_after(order); return v; } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "xchgb %0, %1" : "+q" (v), "+m" (storage) + : + : "memory" ); - fence_after(order); return v; } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); bool success; __asm__ __volatile__ ( @@ -198,12 +198,8 @@ struct operations< 1u > : "sete %2" : "+a" (previous), "+m" (storage), "=q" (success) : "q" (desired) - : "cc" + : "cc", "memory" ); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = previous; return success; } @@ -246,41 +242,41 @@ struct operations< 1u > : #undef BOOST_ATOMIC_DETAIL_CAS_LOOP }; -template< > -struct operations< 2u > : - public gcc_x86_operations< storage16_t, operations< 2u > > +template< bool Signed > +struct operations< 2u, Signed > : + public gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "lock; xaddw %0, %1" : "+q" (v), "+m" (storage) : - : "cc" + : "cc", "memory" ); - fence_after(order); return v; } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, 
memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "xchgw %0, %1" : "+q" (v), "+m" (storage) + : + : "memory" ); - fence_after(order); return v; } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); bool success; __asm__ __volatile__ ( @@ -288,12 +284,8 @@ struct operations< 2u > : "sete %2" : "+a" (previous), "+m" (storage), "=q" (success) : "q" (desired) - : "cc" + : "cc", "memory" ); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = previous; return success; } @@ -336,41 +328,41 @@ struct operations< 2u > : #undef BOOST_ATOMIC_DETAIL_CAS_LOOP }; -template< > -struct operations< 4u > : - public gcc_x86_operations< storage32_t, operations< 4u > > +template< bool Signed > +struct operations< 4u, Signed > : + public gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "lock; xaddl %0, %1" : "+r" (v), "+m" (storage) : - : "cc" + : "cc", "memory" ); - fence_after(order); return v; } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "xchgl %0, %1" : "+r" (v), "+m" (storage) + : + : "memory" ); - fence_after(order); return v; } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); bool success; __asm__ __volatile__ ( @@ -378,12 +370,8 @@ struct operations< 4u > : "sete %2" : "+a" (previous), "+m" (storage), "=q" (success) : "r" (desired) - : "cc" + : "cc", "memory" ); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = previous; return success; } @@ -428,9 +416,10 @@ struct operations< 4u > : #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) +template< bool Signed > struct gcc_dcas_x86 { - typedef storage64_t storage_type; + typedef typename make_storage_type< 8u, Signed >::type storage_type; static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { @@ -476,7 +465,7 @@ struct gcc_dcas_x86 "movl %[scratch], %%ebx" : [scratch] "=m,m" (scratch) : [value_lo] "a,a" 
((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) - : "memory", "cc", "edx" + : "cc", "edx", "memory" ); } } @@ -585,49 +574,49 @@ struct gcc_dcas_x86 } }; -template< > -struct operations< 8u > : - public cas_based_operations< gcc_dcas_x86 > +template< bool Signed > +struct operations< 8u, Signed > : + public cas_based_operations< gcc_dcas_x86< Signed > > { }; #elif defined(__x86_64__) -template< > -struct operations< 8u > : - public gcc_x86_operations< storage64_t, operations< 8u > > +template< bool Signed > +struct operations< 8u, Signed > : + public gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "lock; xaddq %0, %1" : "+r" (v), "+m" (storage) : - : "cc" + : "cc", "memory" ); - fence_after(order); return v; } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); __asm__ __volatile__ ( "xchgq %0, %1" : "+r" (v), "+m" (storage) + : + : "memory" ); - fence_after(order); return v; } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); bool success; __asm__ __volatile__ ( @@ -635,12 +624,8 @@ struct operations< 8u > : "sete %2" : "+a" (previous), "+m" (storage), "=q" (success) : "r" (desired) - : "cc" + : "cc", "memory" ); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = previous; return success; } @@ -687,9 +672,10 @@ struct operations< 8u > : #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) +template< bool Signed > struct gcc_dcas_x86_64 { - typedef storage128_t storage_type; + typedef typename make_storage_type< 16u, Signed >::type storage_type; static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { @@ -703,7 +689,7 @@ struct gcc_dcas_x86_64 "jne 1b" : : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage) - : "memory", "cc", "rax", "rdx" + : "cc", "rax", "rdx", "memory" ); } @@ -754,9 +740,9 @@ struct gcc_dcas_x86_64 } }; -template< > -struct operations< 16u > : - public cas_based_operations< gcc_dcas_x86_64 > +template< bool Signed > +struct operations< 16u, Signed > : + public cas_based_operations< gcc_dcas_x86_64< Signed > > { }; diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp index 3ce4c1e..8533990 100644 --- a/include/boost/atomic/detail/ops_linux_arm.hpp +++ b/include/boost/atomic/detail/ops_linux_arm.hpp @@ -24,6 +24,7 @@ #include #include #include +#include #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -54,9 
+55,10 @@ namespace detail { // emulated CAS is only good enough to provide compare_exchange_weak // semantics. +template< bool Signed > struct linux_arm_cas { - typedef storage32_t storage_type; + typedef typename make_storage_type< 4u, Signed >::type storage_type; static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { @@ -150,45 +152,21 @@ private: } }; -template< > -struct operations< 1u > : - public cas_based_operations< linux_arm_cas > +template< bool Signed > +struct operations< 1u, Signed > : + public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 1u, Signed > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_weak(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - return fetch_add(storage, -v, order); - } }; -template< > -struct operations< 2u > : - public cas_based_operations< linux_arm_cas > +template< bool Signed > +struct operations< 2u, Signed > : + public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 2u, Signed > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_weak(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - return fetch_add(storage, -v, order); - } }; -template< > -struct operations< 4u > : - public cas_based_operations< linux_arm_cas > +template< bool Signed > +struct operations< 4u, Signed > : + public cas_based_operations< linux_arm_cas< Signed > > { }; diff --git a/include/boost/atomic/detail/ops_msvc_arm.hpp b/include/boost/atomic/detail/ops_msvc_arm.hpp index f6f5626..c640b87 100644 --- a/include/boost/atomic/detail/ops_msvc_arm.hpp +++ b/include/boost/atomic/detail/ops_msvc_arm.hpp @@ -118,17 +118,20 @@ struct msvc_arm_operations : } }; -template< > -struct operations< 1u > : - public msvc_arm_operations< storage8_t, operations< 1u > > +template< bool Signed > +struct operations< 1u, Signed > : + public msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > { + typedef msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { if (order != memory_order_seq_cst) { - fence_before_store(order); + base_type::fence_before_store(order); BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v); - fence_after_store(order); + base_type::fence_after_store(order); } else { @@ -139,7 +142,7 @@ struct operations< 1u > : static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage); - fence_after_load(order); + 
base_type::fence_after_load(order); return v; } @@ -287,17 +290,20 @@ struct operations< 1u > : } }; -template< > -struct operations< 2u > : - public msvc_arm_operations< storage16_t, operations< 2u > > +template< bool Signed > +struct operations< 2u, Signed > : + public msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > { + typedef msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { if (order != memory_order_seq_cst) { - fence_before_store(order); + base_type::fence_before_store(order); BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v); - fence_after_store(order); + base_type::fence_after_store(order); } else { @@ -308,7 +314,7 @@ struct operations< 2u > : static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage); - fence_after_load(order); + base_type::fence_after_load(order); return v; } @@ -456,17 +462,20 @@ struct operations< 2u > : } }; -template< > -struct operations< 4u > : - public msvc_arm_operations< storage32_t, operations< 4u > > +template< bool Signed > +struct operations< 4u, Signed > : + public msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > { + typedef msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { if (order != memory_order_seq_cst) { - fence_before_store(order); + base_type::fence_before_store(order); BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v); - fence_after_store(order); + base_type::fence_after_store(order); } else { @@ -477,7 +486,7 @@ struct operations< 4u > : static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage); - fence_after_load(order); + base_type::fence_after_load(order); return v; } @@ -625,17 +634,20 @@ struct operations< 4u > : } }; -template< > -struct operations< 8u > : - public msvc_arm_operations< storage64_t, operations< 8u > > +template< bool Signed > +struct operations< 8u, Signed > : + public msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > { + typedef msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { if (order != memory_order_seq_cst) { - fence_before_store(order); + base_type::fence_before_store(order); BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v); - fence_after_store(order); + base_type::fence_after_store(order); } else { @@ -646,7 +658,7 @@ struct operations< 8u > : static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage); - fence_after_load(order); + base_type::fence_after_load(order); return v; } diff --git a/include/boost/atomic/detail/ops_msvc_x86.hpp 
b/include/boost/atomic/detail/ops_msvc_x86.hpp index af94634..a0ab63d 100644 --- a/include/boost/atomic/detail/ops_msvc_x86.hpp +++ b/include/boost/atomic/detail/ops_msvc_x86.hpp @@ -17,7 +17,6 @@ #define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_ #include -#include #include #include #include @@ -28,6 +27,9 @@ #include #endif #include +#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)) +#include +#endif #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -127,155 +129,131 @@ struct msvc_x86_operations : } }; -template< > -struct operations< 4u > : - public msvc_x86_operations< storage32_t, operations< 4u > > +template< bool Signed > +struct operations< 4u, Signed > : + public msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v)); } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v)); } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous)); - bool success = (previous == old_val); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = old_val; - return success; + return (previous == old_val); } +#if defined(BOOST_ATOMIC_INTERLOCKED_AND) + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v)); + } +#else static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { -#if defined(BOOST_ATOMIC_INTERLOCKED_AND) - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v)); - fence_after(order); - return v; -#else storage_type res = storage; while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {} return res; -#endif } +#endif +#if defined(BOOST_ATOMIC_INTERLOCKED_OR) + static BOOST_FORCEINLINE storage_type 
fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v)); + } +#else static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { -#if defined(BOOST_ATOMIC_INTERLOCKED_OR) - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v)); - fence_after(order); - return v; -#else storage_type res = storage; while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {} return res; -#endif } +#endif +#if defined(BOOST_ATOMIC_INTERLOCKED_XOR) + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v)); + } +#else static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { -#if defined(BOOST_ATOMIC_INTERLOCKED_XOR) - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v)); - fence_after(order); - return v; -#else storage_type res = storage; while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {} return res; -#endif } +#endif }; #if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) -template< > -struct operations< 1u > : - public msvc_x86_operations< storage8_t, operations< 1u > > +template< bool Signed > +struct operations< 1u, Signed > : + public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v)); } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v)); } static BOOST_FORCEINLINE bool compare_exchange_strong( storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous)); - bool success = (previous == old_val); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = old_val; - return success; + return (previous == old_val); } - static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, 
memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v)); } - static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v)); } - static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v)); } }; #elif defined(_M_IX86) -template< > -struct operations< 1u > : - public msvc_x86_operations< storage8_t, operations< 1u > > +template< bool Signed > +struct operations< 1u, Signed > : + public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > { + typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { mov edx, storage @@ -283,13 +261,13 @@ struct operations< 1u > : lock xadd byte ptr [edx], al mov v, al }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { mov edx, storage @@ -297,14 +275,14 @@ struct operations< 1u > : xchg byte ptr [edx], al mov v, al }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT { - fence_before(success_order); + base_type::fence_before(success_order); bool success; __asm { @@ -316,16 +294,14 @@ struct operations< 1u > : mov byte ptr [esi], al sete success }; - if (success) - fence_after(success_order); - else - fence_after(failure_order); + // The success and failure fences are equivalent anyway + base_type::fence_after(success_order); return success; } static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { xor edx, edx @@ -340,13 +316,13 @@ struct operations< 1u > : jne again mov v, 
al }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { xor edx, edx @@ -361,13 +337,13 @@ struct operations< 1u > : jne again mov v, al }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { xor edx, edx @@ -382,105 +358,77 @@ struct operations< 1u > : jne again mov v, al }; - fence_after(order); + base_type::fence_after(order); return v; } }; #else -template< > -struct operations< 1u > : - public operations< 4u > +template< bool Signed > +struct operations< 1u, Signed > : + public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - typedef make_signed< storage_type >::type signed_storage_type; - return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); - } }; #endif #if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16) -template< > -struct operations< 2u > : - public msvc_x86_operations< storage16_t, operations< 2u > > +template< bool Signed > +struct operations< 2u, Signed > : + public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v)); } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v)); } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); 
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous)); - bool success = (previous == old_val); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = old_val; - return success; + return (previous == old_val); } - static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v)); } - static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v)); } - static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v)); } }; #elif defined(_M_IX86) -template< > -struct operations< 2u > : - public msvc_x86_operations< storage16_t, operations< 2u > > +template< bool Signed > +struct operations< 2u, Signed > : + public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > { + typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { mov edx, storage @@ -488,13 +436,13 @@ struct operations< 2u > : lock xadd word ptr [edx], ax mov v, ax }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { mov edx, storage @@ -502,14 +450,14 @@ struct operations< 2u > : xchg word ptr [edx], ax mov v, ax }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT { - fence_before(success_order); + base_type::fence_before(success_order); bool success; __asm { @@ -521,16 +469,14 @@ struct operations< 2u > : mov word ptr [esi], ax sete success }; - if (success) - fence_after(success_order); - 
else - fence_after(failure_order); + // The success and failure fences are equivalent anyway + base_type::fence_after(success_order); return success; } static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { xor edx, edx @@ -545,13 +491,13 @@ struct operations< 2u > : jne again mov v, ax }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { xor edx, edx @@ -566,13 +512,13 @@ struct operations< 2u > : jne again mov v, ax }; - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); __asm { xor edx, edx @@ -587,30 +533,17 @@ struct operations< 2u > : jne again mov v, ax }; - fence_after(order); + base_type::fence_after(order); return v; } }; #else -template< > -struct operations< 2u > : - public operations< 4u > +template< bool Signed > +struct operations< 2u, Signed > : + public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - typedef make_signed< storage_type >::type signed_storage_type; - return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); - } }; #endif @@ -618,9 +551,10 @@ struct operations< 2u > : #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) +template< bool Signed > struct msvc_dcas_x86 { - typedef storage64_t storage_type; + typedef typename make_storage_type< 8u, Signed >::type storage_type; // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. 
Guaranteed Atomic Operations: // @@ -767,71 +701,53 @@ struct msvc_dcas_x86 } }; -template< > -struct operations< 8u > : - public cas_based_operations< msvc_dcas_x86 > +template< bool Signed > +struct operations< 8u, Signed > : + public cas_based_operations< msvc_dcas_x86< Signed > > { }; #elif defined(_M_AMD64) -template< > -struct operations< 8u > : - public msvc_x86_operations< storage64_t, operations< 8u > > +template< bool Signed > +struct operations< 8u, Signed > : + public msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + typedef msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v)); } - static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v)); } static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous)); - bool success = (previous == old_val); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = old_val; - return success; + return (previous == old_val); } - static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v)); } - static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v)); } - static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) 
BOOST_NOEXCEPT + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { - fence_before(order); - v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v)); - fence_after(order); - return v; + return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v)); } }; @@ -839,9 +755,10 @@ struct operations< 8u > : #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) +template< bool Signed > struct msvc_dcas_x86_64 { - typedef storage128_t storage_type; + typedef typename make_storage_type< 16u, Signed >::type storage_type; static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT { @@ -874,9 +791,9 @@ struct msvc_dcas_x86_64 } }; -template< > -struct operations< 16u > : - public cas_based_operations< msvc_dcas_x86_64 > +template< bool Signed > +struct operations< 16u, Signed > : + public cas_based_operations< msvc_dcas_x86_64< Signed > > { }; diff --git a/include/boost/atomic/detail/ops_windows.hpp b/include/boost/atomic/detail/ops_windows.hpp index ef22eda..87d33fc 100644 --- a/include/boost/atomic/detail/ops_windows.hpp +++ b/include/boost/atomic/detail/ops_windows.hpp @@ -31,6 +31,7 @@ #include #include #include +#include #ifdef BOOST_HAS_PRAGMA_ONCE #pragma once @@ -103,23 +104,26 @@ struct windows_operations : } }; -template< > -struct operations< 4u > : - public windows_operations< storage32_t, operations< 4u > > +template< bool Signed > +struct operations< 4u, Signed > : + public windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > { + typedef windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type; + typedef typename base_type::storage_type storage_type; + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v)); - fence_after(order); + base_type::fence_after(order); return v; } static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { - fence_before(order); + base_type::fence_before(order); v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v)); - fence_after(order); + base_type::fence_after(order); return v; } @@ -127,23 +131,20 @@ struct operations< 4u > : storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT { storage_type previous = expected; - fence_before(success_order); + base_type::fence_before(success_order); storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous)); - bool success = (previous == old_val); - if (success) - fence_after(success_order); - else - fence_after(failure_order); expected = old_val; - return success; + // The success and failure fences are the same anyway + base_type::fence_after(success_order); + return (previous == old_val); } static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { #if defined(BOOST_ATOMIC_INTERLOCKED_AND) - fence_before(order); + base_type::fence_before(order); v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v)); - 
fence_after(order); + base_type::fence_after(order); return v; #else storage_type res = storage; @@ -155,9 +156,9 @@ struct operations< 4u > : static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { #if defined(BOOST_ATOMIC_INTERLOCKED_OR) - fence_before(order); + base_type::fence_before(order); v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v)); - fence_after(order); + base_type::fence_after(order); return v; #else storage_type res = storage; @@ -169,9 +170,9 @@ struct operations< 4u > : static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT { #if defined(BOOST_ATOMIC_INTERLOCKED_XOR) - fence_before(order); + base_type::fence_before(order); v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v)); - fence_after(order); + base_type::fence_after(order); return v; #else storage_type res = storage; @@ -181,42 +182,16 @@ struct operations< 4u > : } }; -template< > -struct operations< 1u > : - public operations< 4u > +template< bool Signed > +struct operations< 1u, Signed > : + public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0x000000ff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - typedef make_signed< storage_type >::type signed_storage_type; - return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); - } }; -template< > -struct operations< 2u > : - public operations< 4u > +template< bool Signed > +struct operations< 2u, Signed > : + public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed > { - static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - // We must resort to a CAS loop to handle overflows - storage_type res = storage; - while (!compare_exchange_strong(storage, res, (res + v) & 0x0000ffff, order, memory_order_relaxed)) {} - return res; - } - - static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - typedef make_signed< storage_type >::type signed_storage_type; - return fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order); - } }; BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/storage_types.hpp b/include/boost/atomic/detail/storage_types.hpp index 1fda63b..a450609 100644 --- a/include/boost/atomic/detail/storage_types.hpp +++ b/include/boost/atomic/detail/storage_types.hpp @@ -16,6 +16,7 @@ #ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ #define BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ +#include #include #include @@ -27,17 +28,94 @@ namespace boost { namespace atomics { namespace detail { -typedef boost::uint8_t storage8_t; -typedef boost::uint16_t storage16_t; -typedef boost::uint32_t storage32_t; -typedef boost::uint64_t storage64_t; +template< unsigned int Size > +struct buffer_storage +{ + unsigned 
char data[Size]; + + bool operator== (buffer_storage const& that) const + { + return std::memcmp(data, that.data, Size) == 0; + } + bool operator!= (buffer_storage const& that) const + { + return std::memcmp(data, that.data, Size) != 0; + } +}; + +template< unsigned int Size, bool Signed > +struct make_storage_type +{ + typedef buffer_storage< Size > type; +}; + +template< > +struct make_storage_type< 1u, false > +{ + typedef boost::uint8_t type; +}; + +template< > +struct make_storage_type< 1u, true > +{ + typedef boost::int8_t type; +}; + +template< > +struct make_storage_type< 2u, false > +{ + typedef boost::uint16_t type; +}; + +template< > +struct make_storage_type< 2u, true > +{ + typedef boost::int16_t type; +}; + +template< > +struct make_storage_type< 4u, false > +{ + typedef boost::uint32_t type; +}; + +template< > +struct make_storage_type< 4u, true > +{ + typedef boost::int32_t type; +}; + +template< > +struct make_storage_type< 8u, false > +{ + typedef boost::uint64_t type; +}; + +template< > +struct make_storage_type< 8u, true > +{ + typedef boost::int64_t type; +}; #if defined(BOOST_HAS_INT128) -typedef boost::uint128_type storage128_t; -#else + +template< > +struct make_storage_type< 16u, false > +{ + typedef boost::uint128_type type; +}; + +template< > +struct make_storage_type< 16u, true > +{ + typedef boost::int128_type type; +}; + +#elif !defined(BOOST_NO_ALIGNMENT) + struct BOOST_ALIGNMENT(16) storage128_t { - storage64_t data[2]; + boost::uint64_t data[2]; }; BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT @@ -48,6 +126,13 @@ BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& { return !(left == right); } + +template< bool Signed > +struct make_storage_type< 16u, Signed > +{ + typedef storage128_t type; +}; + #endif template< typename T > diff --git a/src/lockpool.cpp b/src/lockpool.cpp index a6e6b8a..13269a1 100644 --- a/src/lockpool.cpp +++ b/src/lockpool.cpp @@ -55,7 +55,7 @@ struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock #if defined(BOOST_ATOMIC_USE_PTHREAD) typedef pthread_mutex_t lock_type; #else - typedef atomics::detail::operations< 1u > operations; + typedef atomics::detail::operations< 1u, false > operations; typedef operations::storage_type lock_type; #endif From d3af1bace981f23931dc930e0b52a2d9ffdc0ceb Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Fri, 9 May 2014 19:04:41 +0400 Subject: [PATCH 12/23] Fixed compilation. --- include/boost/atomic/detail/ops_linux_arm.hpp | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp index 8533990..0e83dc6 100644 --- a/include/boost/atomic/detail/ops_linux_arm.hpp +++ b/include/boost/atomic/detail/ops_linux_arm.hpp @@ -55,8 +55,18 @@ namespace detail { // emulated CAS is only good enough to provide compare_exchange_weak // semantics. 
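+// A strong CAS then has to be recovered on top of this with a retry loop. A +// minimal sketch of such a loop (an illustration only; "cas_strong" and "WeakCas" +// are hypothetical names, not identifiers from this patch): +// +// template< typename WeakCas, typename T > +// bool cas_strong(T volatile& storage, T& expected, T desired, memory_order order) +// { +// T const saved = expected; +// while (!WeakCas::compare_exchange_weak(storage, expected, desired, order, memory_order_relaxed)) +// { +// if (expected != saved) +// return false; // the observed value really differed: genuine failure +// } +// return true; // value still matched, so the failure was spurious and is retried +// }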
+struct linux_arm_cas_base +{ + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT + { + typedef void (*kernel_dmb_t)(void); + ((kernel_dmb_t)0xffff0fa0)(); + } +}; + template< bool Signed > -struct linux_arm_cas +struct linux_arm_cas : + public linux_arm_cas_base { typedef typename make_storage_type< 4u, Signed >::type storage_type; @@ -111,12 +121,6 @@ struct linux_arm_cas return true; } - static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT - { - typedef void (*kernel_dmb_t)(void); - ((kernel_dmb_t)0xffff0fa0)(); - } - private: static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT { @@ -181,7 +185,7 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT case memory_order_acquire: case memory_order_acq_rel: case memory_order_seq_cst: - linux_arm_cas::hardware_full_fence(); + linux_arm_cas_base::hardware_full_fence(); break; } } From f53911de3d389688c9a59f7d80ed5bc68505a874 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Fri, 9 May 2014 20:57:02 +0400 Subject: [PATCH 13/23] Implemented PowerPC backend. --- include/boost/atomic/detail/ops_gcc_ppc.hpp | 785 ++++++++++++++++++++ 1 file changed, 785 insertions(+) create mode 100644 include/boost/atomic/detail/ops_gcc_ppc.hpp diff --git a/include/boost/atomic/detail/ops_gcc_ppc.hpp b/include/boost/atomic/detail/ops_gcc_ppc.hpp new file mode 100644 index 0000000..a332763 --- /dev/null +++ b/include/boost/atomic/detail/ops_gcc_ppc.hpp @@ -0,0 +1,785 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_gcc_ppc.hpp + * + * This header contains implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_ + +#include +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +/* + Refer to: Motorola: "Programming Environments Manual for 32-Bit + Implementations of the PowerPC Architecture", Appendix E: + "Synchronization Programming Examples" for an explanation of what is + going on here (can be found on the web at various places by the + name "MPCFPE32B.pdf", Google is your friend...) + + Most of the atomic operations map to instructions in a relatively + straightforward fashion, but "load"s may at first glance appear + a bit strange as they map to: + + lwz %rX, addr + cmpw %rX, %rX + bne- 1f + 1: + + That is, the CPU is forced to perform a branch that "formally" depends + on the value retrieved from memory. This scheme has an overhead of + about 1-2 clock cycles per load, but it allows mapping "acquire" to + the "isync" instruction instead of "sync" uniformly for all types + of atomic operations. Since "isync" has a cost of about 15 clock + cycles, while "sync" has a cost of about 50 clock cycles, the saving + more than compensates for the small penalty on atomic loads. + + Byte- and halfword-sized atomic values are realized by encoding the + value to be represented into a word, performing sign/zero extension + as appropriate. This means that after add/sub operations the value + needs fixing up to accurately preserve the wrap-around semantic of + the smaller type.
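+ + For example, with an 8-bit unsigned value, 0xFF + 1 must wrap around to 0x00, + but the "add" is performed in a full 32-bit register and leaves 0x100 there. + This is why the byte and halfword fetch_add/fetch_sub below mask the result + back into range ("rlwinm %1, %1, 0, 0xff" and "rlwinm %1, %1, 0, 0xffff") for + unsigned types and sign-extend it ("extsb" and "extsh") for signed ones before + the "stwcx." store is attempted.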
(Nothing special needs to be done for the bit-wise + and the "exchange type" operators as the compiler already sees to + it that values carried in registers are extended appropriately and + everything falls into place naturally). + + The register constraint "b" instructs gcc to use any register + except r0; this is sometimes required because the encoding for + r0 is used to signify "constant zero" in a number of instructions, + making r0 unusable in this place. For simplicity this constraint + is used everywhere since I am too lazy to look this up on a + per-instruction basis, and ppc has enough registers for this not + to pose a problem. +*/ + +struct gcc_ppc_operations_base +{ + static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT + { + switch(order) + { + case memory_order_release: + case memory_order_acq_rel: +#if defined(__powerpc64__) + __asm__ __volatile__ ("lwsync" ::: "memory"); + break; +#endif + case memory_order_seq_cst: + __asm__ __volatile__ ("sync" ::: "memory"); + default:; + } + } + + static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT + { + switch(order) + { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("isync"); + case memory_order_consume: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + if (order == memory_order_seq_cst) + __asm__ __volatile__ ("sync"); + } +}; + + +template< bool Signed > +struct operations< 4u, Signed > : + public gcc_ppc_operations_base +{ + typedef typename make_storage_type< 4u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "stw %1, %0\n" + : "+m"(storage) + : "r" (v) + ); + fence_after_store(order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v; + __asm__ __volatile__ + ( + "lwz %0, %1\n" + "cmpw %0, %0\n" + "bne- 1f\n" + "1:\n" + : "=&r" (v) + : "m" (storage) + : "cr0" + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y1\n" + "stwcx. %2,%y1\n" + "bne- 1b\n" + : "=&b" (original), "+Z"(storage) + : "b" (v) + : "cr0" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + int success; + fence_before(success_order); + __asm__ __volatile__ + ( + "li %1, 0\n" + "lwarx %0,%y2\n" + "cmpw %0, %3\n" + "bne- 1f\n" + "stwcx.
%4,%y2\n" + "bne- 1f\n" + "li %1, 1\n" + "1:" + : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "b" (expected), "b" (desired) + : "cr0" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + int success; + fence_before(success_order); + __asm__ __volatile__ + ( + "li %1, 0\n" + "0: lwarx %0,%y2\n" + "cmpw %0, %3\n" + "bne- 1f\n" + "stwcx. %4,%y2\n" + "bne- 0b\n" + "li %1, 1\n" + "1:" + : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "b" (expected), "b" (desired) + : "cr0" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "add %1,%0,%3\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "sub %1,%0,%3\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "and %1,%0,%3\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "or %1,%0,%3\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "xor %1,%0,%3\n" + "stwcx. 
%1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return !!exchange(storage, (storage_type)1, order); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, 0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + + +template< > +struct operations< 1u, false > : + public operations< 4u, false > +{ + typedef operations< 4u, false > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "add %1,%0,%3\n" + "rlwinm %1, %1, 0, 0xff\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "sub %1,%0,%3\n" + "rlwinm %1, %1, 0, 0xff\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } +}; + +template< > +struct operations< 1u, true > : + public operations< 4u, true > +{ + typedef operations< 4u, true > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "add %1,%0,%3\n" + "extsb %1, %1\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "sub %1,%0,%3\n" + "extsb %1, %1\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } +}; + + +template< > +struct operations< 2u, false > : + public operations< 4u, false > +{ + typedef operations< 4u, false > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "add %1,%0,%3\n" + "rlwinm %1, %1, 0, 0xffff\n" + "stwcx. 
%1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "sub %1,%0,%3\n" + "rlwinm %1, %1, 0, 0xffff\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } +}; + +template< > +struct operations< 2u, true > : + public operations< 4u, true > +{ + typedef operations< 4u, true > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "add %1,%0,%3\n" + "extsh %1, %1\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "lwarx %0,%y2\n" + "sub %1,%0,%3\n" + "extsh %1, %1\n" + "stwcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } +}; + + +#if defined(__powerpc64__) + +template< bool Signed > +struct operations< 8u, Signed > : + public gcc_ppc_operations_base +{ + typedef typename make_storage_type< 8u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ + ( + "std %1, %0\n" + : "+m"(storage) + : "r" (v) + ); + fence_after_store(order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v; + __asm__ __volatile__ + ( + "ld %0, %1\n" + "cmpd %0, %0\n" + "bne- 1f\n" + "1:\n" + : "=&b"(v) + : "m" (storage) + : "cr0" + ); + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldarx %0,%y1\n" + "stdcx. %2,%y1\n" + "bne- 1b\n" + : "=&b" (original), "+Z"(storage) + : "b" (v) + : "cr0" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + int success; + fence_before(success_order); + __asm__ __volatile__ + ( + "li %1, 0\n" + "ldarx %0,%y2\n" + "cmpd %0, %3\n" + "bne- 1f\n" + "stdcx. 
%4,%y2\n" + "bne- 1f\n" + "li %1, 1\n" + "1:" + : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "b" (expected), "b" (desired) + : "cr0" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + int success; + fence_before(success_order); + __asm__ __volatile__ + ( + "li %1, 0\n" + "0: ldarx %0,%y2\n" + "cmpd %0, %3\n" + "bne- 1f\n" + "stdcx. %4,%y2\n" + "bne- 0b\n" + "li %1, 1\n" + "1:" + : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "b" (expected), "b" (desired) + : "cr0" + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldarx %0,%y2\n" + "add %1,%0,%3\n" + "stdcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldarx %0,%y2\n" + "sub %1,%0,%3\n" + "stdcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldarx %0,%y2\n" + "and %1,%0,%3\n" + "stdcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldarx %0,%y2\n" + "or %1,%0,%3\n" + "stdcx. %1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldarx %0,%y2\n" + "xor %1,%0,%3\n" + "stdcx. 
%1,%y2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "b" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return !!exchange(storage, (storage_type)1, order); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, 0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +#endif // defined(__powerpc64__) + + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_acquire: + __asm__ __volatile__ ("isync" ::: "memory"); + break; + case memory_order_release: +#if defined(__powerpc64__) + __asm__ __volatile__ ("lwsync" ::: "memory"); + break; +#endif + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("sync" ::: "memory"); + default:; + } +} + +BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT +{ + switch (order) + { + case memory_order_acquire: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + break; + default:; + } +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_ From 4e9632a6695a7989186c7eef373282c665ef482a Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Fri, 9 May 2014 22:24:37 +0400 Subject: [PATCH 14/23] Implemented gcc SPARCv9 backend. The backend also adds support for 64-bit atomics. --- .../boost/atomic/detail/caps_gcc_sparcv9.hpp | 1 + .../boost/atomic/detail/ops_gcc_sparcv9.hpp | 272 ++++++++++++++++++ include/boost/atomic/detail/ops_gcc_sync.hpp | 1 - include/boost/atomic/detail/ops_linux_arm.hpp | 73 +++-- 4 files changed, 309 insertions(+), 38 deletions(-) create mode 100644 include/boost/atomic/detail/ops_gcc_sparcv9.hpp diff --git a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp index 2c645e0..caea997 100644 --- a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp +++ b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp @@ -25,6 +25,7 @@ #define BOOST_ATOMIC_INT8_LOCK_FREE 2 #define BOOST_ATOMIC_INT16_LOCK_FREE 2 #define BOOST_ATOMIC_INT32_LOCK_FREE 2 +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 diff --git a/include/boost/atomic/detail/ops_gcc_sparcv9.hpp b/include/boost/atomic/detail/ops_gcc_sparcv9.hpp new file mode 100644 index 0000000..09c750e --- /dev/null +++ b/include/boost/atomic/detail/ops_gcc_sparcv9.hpp @@ -0,0 +1,272 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2010 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_gcc_sparcv9.hpp + * + * This header contains implementation of the \c operations template. 
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_sparcv9_cas_base
+{
+    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+    {
+        switch (order)
+        {
+        case memory_order_relaxed:
+        case memory_order_acquire:
+        case memory_order_consume:
+            break;
+        case memory_order_release:
+        case memory_order_acq_rel:
+            __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+            break;
+        case memory_order_seq_cst:
+            __asm__ __volatile__ ("membar #Sync" ::: "memory");
+            break;
+        }
+    }
+
+    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+    {
+        switch (order)
+        {
+        case memory_order_relaxed:
+        case memory_order_consume:
+        case memory_order_release:
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+            __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+            break;
+        case memory_order_seq_cst:
+            __asm__ __volatile__ ("membar #Sync" ::: "memory");
+            break;
+        default:;
+        }
+    }
+
+    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+    {
+        if (order == memory_order_seq_cst)
+            __asm__ __volatile__ ("membar #Sync" ::: "memory");
+    }
+
+
+    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+    {
+        fence_after(order);
+    }
+};
+
+template< bool Signed >
+struct gcc_sparcv9_cas32 :
+    public gcc_sparcv9_cas_base
+{
+    typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        storage = v;
+        fence_after_store(order);
+    }
+
+    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type v = storage;
+        fence_after_load(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        fence_before(success_order);
+        storage_type previous = expected;
+        __asm__ __volatile__
+        (
+            "cas [%1], %2, %0"
+            : "+r" (desired)
+            : "r" (&storage), "r" (previous)
+            : "memory"
+        );
+        const bool success = (desired == previous);
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = desired;
+        return success;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_weak(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+    public cas_based_operations< gcc_sparcv9_cas32< Signed > >
+{
+    typedef cas_based_operations< gcc_sparcv9_cas32< Signed > > base_type;
+    typedef typename base_type::storage_type storage_type;
+
+    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        base_type::fence_before(order);
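+        // Exchange maps directly onto the swap instruction, so no CAS retry loop is needed here.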
+        __asm__ __volatile__
+        (
+            "swap [%1], %0"
+            : "+r" (v)
+            : "r" (&storage)
+            : "memory"
+        );
+        base_type::fence_after(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        return exchange(storage, (storage_type)1, order) != (storage_type)0;
+    }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+template< bool Signed >
+struct gcc_sparcv9_cas64 :
+    public gcc_sparcv9_cas_base
+{
+    typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        storage = v;
+        fence_after_store(order);
+    }
+
+    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type v = storage;
+        fence_after_load(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        fence_before(success_order);
+        storage_type previous = expected;
+        __asm__ __volatile__
+        (
+            "casx [%1], %2, %0"
+            : "+r" (desired)
+            : "r" (&storage), "r" (previous)
+            : "memory"
+        );
+        const bool success = (desired == previous);
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = desired;
+        return success;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_weak(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+    public cas_based_operations< gcc_sparcv9_cas64< Signed > >
+{
+};
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+    switch (order)
+    {
+    case memory_order_relaxed:
+        break;
+    case memory_order_release:
+        __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+        break;
+    case memory_order_acquire:
+        __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+        break;
+    case memory_order_acq_rel:
+        __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
+        break;
+    case memory_order_consume:
+        break;
+    case memory_order_seq_cst:
+        __asm__ __volatile__ ("membar #Sync" ::: "memory");
+        break;
+    default:;
+    }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+    __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp
index 78afd23..e5ef05f 100644
--- a/include/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/include/boost/atomic/detail/ops_gcc_sync.hpp
@@ -16,7 +16,6 @@
 #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
 #define
BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_ -#include // UINT64_C #include #include #include diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp index 0e83dc6..a8413d7 100644 --- a/include/boost/atomic/detail/ops_linux_arm.hpp +++ b/include/boost/atomic/detail/ops_linux_arm.hpp @@ -15,8 +15,8 @@ * This header contains implementation of the \c operations template. */ -#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_ -#define BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_ +#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_ #include #include @@ -57,6 +57,39 @@ namespace detail { struct linux_arm_cas_base { + static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + hardware_full_fence(); + break; + case memory_order_consume: + default:; + } + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + if (order == memory_order_seq_cst) + hardware_full_fence(); + } + + static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + hardware_full_fence(); + break; + default:; + } + } + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT { typedef void (*kernel_dmb_t)(void); @@ -120,40 +153,6 @@ struct linux_arm_cas : { return true; } - -private: - static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT - { - switch (order) - { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - hardware_full_fence(); - break; - case memory_order_consume: - default:; - } - } - - static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT - { - if (order == memory_order_seq_cst) - hardware_full_fence(); - } - - static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT - { - switch (order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - hardware_full_fence(); - break; - default:; - } - } }; template< bool Signed > @@ -211,4 +210,4 @@ BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT } // namespace atomics } // namespace boost -#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_ +#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_ From 684917a6febe97db99f422c81aa841330832e451 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 02:36:15 +0400 Subject: [PATCH 15/23] Implemented gcc ARM backend. Some files renamed. 
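For reference, every lock-free operation in the new backend reduces to a
ldrex/strex retry loop. A minimal sketch of the idea (illustrative only, not
part of the patch; it assumes ARMv7, where no Thumb mode switching is needed,
and a hypothetical raw_exchange() helper):

    // Spin until strex reports that the exclusive store succeeded.
    static inline unsigned int raw_exchange(volatile unsigned int* p, unsigned int v)
    {
        unsigned int original, failed;
        __asm__ __volatile__
        (
            "1:\n"
            "ldrex %0, [%2]\n"     // original = *p, open the exclusive monitor
            "strex %1, %3, [%2]\n" // try to store v; failed != 0 if exclusivity was lost
            "teq   %1, #0\n"
            "bne   1b\n"           // another thread intervened - retry
            : "=&r" (original), "=&r" (failed)
            : "r" (p), "r" (v)
            : "cc", "memory"
        );
        return original;
    }

The fetch_xxx operations are this same loop with the appropriate ALU
instruction inserted between the ldrex and the strex, and the 8/16-bit
variants additionally sign- or zero-extend the result before storing it.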
--- include/boost/atomic/detail/caps_gcc_arm.hpp | 18 + ...aps_gcc_sparcv9.hpp => caps_gcc_sparc.hpp} | 8 +- include/boost/atomic/detail/ops_cas_based.hpp | 2 +- include/boost/atomic/detail/ops_emulated.hpp | 4 +- .../atomic/detail/ops_extending_cas_based.hpp | 2 +- include/boost/atomic/detail/ops_gcc_arm.hpp | 984 ++++++++++++++++++ .../boost/atomic/detail/ops_gcc_atomic.hpp | 2 +- include/boost/atomic/detail/ops_gcc_ppc.hpp | 6 +- ...{ops_gcc_sparcv9.hpp => ops_gcc_sparc.hpp} | 29 +- include/boost/atomic/detail/ops_gcc_sync.hpp | 4 +- include/boost/atomic/detail/ops_gcc_x86.hpp | 4 +- include/boost/atomic/detail/ops_linux_arm.hpp | 2 +- include/boost/atomic/detail/ops_msvc_arm.hpp | 4 +- include/boost/atomic/detail/ops_msvc_x86.hpp | 4 +- include/boost/atomic/detail/ops_windows.hpp | 4 +- include/boost/atomic/detail/platform.hpp | 6 +- .../{storage_types.hpp => storage_type.hpp} | 28 +- 17 files changed, 1064 insertions(+), 47 deletions(-) rename include/boost/atomic/detail/{caps_gcc_sparcv9.hpp => caps_gcc_sparc.hpp} (75%) create mode 100644 include/boost/atomic/detail/ops_gcc_arm.hpp rename include/boost/atomic/detail/{ops_gcc_sparcv9.hpp => ops_gcc_sparc.hpp} (91%) rename include/boost/atomic/detail/{storage_types.hpp => storage_type.hpp} (77%) diff --git a/include/boost/atomic/detail/caps_gcc_arm.hpp b/include/boost/atomic/detail/caps_gcc_arm.hpp index 5cadf09..9c33242 100644 --- a/include/boost/atomic/detail/caps_gcc_arm.hpp +++ b/include/boost/atomic/detail/caps_gcc_arm.hpp @@ -24,9 +24,27 @@ #pragma once #endif +#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) +// ARMv7 and later have dmb instruction +#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1 +#endif + +#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__)) +// ARMv6k and ARMv7 have 8 and 16 ldrex/strex variants +#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1 +#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1 +#if !defined(__ARM_ARCH_7M__) +// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants +#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1 +#endif +#endif + #define BOOST_ATOMIC_INT8_LOCK_FREE 2 #define BOOST_ATOMIC_INT16_LOCK_FREE 2 #define BOOST_ATOMIC_INT32_LOCK_FREE 2 +#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD) +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 +#endif #define BOOST_ATOMIC_POINTER_LOCK_FREE 2 #define BOOST_ATOMIC_THREAD_FENCE 2 diff --git a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp b/include/boost/atomic/detail/caps_gcc_sparc.hpp similarity index 75% rename from include/boost/atomic/detail/caps_gcc_sparcv9.hpp rename to include/boost/atomic/detail/caps_gcc_sparc.hpp index caea997..5806684 100644 --- a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp +++ b/include/boost/atomic/detail/caps_gcc_sparc.hpp @@ -8,13 +8,13 @@ * Copyright (c) 2014 Andrey Semashev */ /*! 
- * \file atomic/detail/caps_gcc_sparcv9.hpp
+ * \file atomic/detail/caps_gcc_sparc.hpp
  *
  * This header defines feature capabilities macros
  */
 
-#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARCV9_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARCV9_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
 
 #include
 
@@ -31,4 +31,4 @@
 #define BOOST_ATOMIC_THREAD_FENCE 2
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
 
-#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARCV9_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SPARC_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/ops_cas_based.hpp b/include/boost/atomic/detail/ops_cas_based.hpp
index b9039ec..7f8d288 100644
--- a/include/boost/atomic/detail/ops_cas_based.hpp
+++ b/include/boost/atomic/detail/ops_cas_based.hpp
@@ -75,7 +75,7 @@ struct cas_based_operations :
 
     static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
     {
-        return exchange(storage, (storage_type)1, order) != (storage_type)0;
+        return !!exchange(storage, (storage_type)1, order);
     }
 
     static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
diff --git a/include/boost/atomic/detail/ops_emulated.hpp b/include/boost/atomic/detail/ops_emulated.hpp
index 298d7cd..597490f 100644
--- a/include/boost/atomic/detail/ops_emulated.hpp
+++ b/include/boost/atomic/detail/ops_emulated.hpp
@@ -16,7 +16,7 @@
 
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -122,7 +122,7 @@ struct emulated_operations
 
     static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
     {
-        return exchange(storage, (storage_type)1, order) != (storage_type)0;
+        return !!exchange(storage, (storage_type)1, order);
     }
 
     static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
diff --git a/include/boost/atomic/detail/ops_extending_cas_based.hpp b/include/boost/atomic/detail/ops_extending_cas_based.hpp
index 5945b15..d7f3c5f 100644
--- a/include/boost/atomic/detail/ops_extending_cas_based.hpp
+++ b/include/boost/atomic/detail/ops_extending_cas_based.hpp
@@ -16,7 +16,7 @@
 
 #include
 #include
-#include
+#include
 
 #ifdef BOOST_HAS_PRAGMA_ONCE
 #pragma once
diff --git a/include/boost/atomic/detail/ops_gcc_arm.hpp b/include/boost/atomic/detail/ops_gcc_arm.hpp
new file mode 100644
index 0000000..d3e9e07
--- /dev/null
+++ b/include/boost/atomic/detail/ops_gcc_arm.hpp
@@ -0,0 +1,984 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_arm.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// From the ARM Architecture Reference Manual for architecture v6:
+//
+// LDREX{<cond>} <Rd>, [<Rn>]
+// <Rd> Specifies the destination register for the memory word addressed by <Rn>.
+// <Rn> Specifies the register containing the address.
+//
+// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
+// <Rd> Specifies the destination register for the returned status value.
+//      0 if the operation updates memory
+//      1 if the operation fails to update memory
+// <Rm> Specifies the register containing the word to be stored to memory.
+// <Rn> Specifies the register containing the address.
+// Rd must not be the same register as Rm or Rn.
+//
+// ARM v7 is like ARM v6 plus:
+// There are half-word and byte versions of the LDREX and STREX instructions,
+// LDREXH, LDREXB, STREXH and STREXB.
+// There are also double-word versions, LDREXD and STREXD.
+// (Actually it looks like these are available from version 6k onwards.)
+// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
+// I think you can supply an immediate offset to the address.
+//
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
+//
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" attribute in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
+// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
+// So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8: "
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9: "
+#else
+// The tmpreg may be wasted in this case, which is non-optimal.
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) +#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) +#endif + +struct gcc_arm_operations_base +{ + static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + hardware_full_fence(); + break; + default:; + } + } + + static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT + { + switch (order) + { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + hardware_full_fence(); + break; + default:; + } + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + if (order == memory_order_seq_cst) + hardware_full_fence(); + } + + static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT + { +#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB) + __asm__ __volatile__ + ( + "dmb ish\n" + : + : + : "memory" + ); +#else + int tmp; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0) + "mcr\tp15, 0, r0, c7, c10, 5\n" + BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0) + : "=&l" (tmp) + : + : "memory" + ); +#endif + } +}; + + +template< bool Signed > +struct operations< 4u, Signed > : + public gcc_arm_operations_base +{ + typedef typename make_storage_type< 4u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + storage = v; + fence_after_store(order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = storage; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original; + fence_before(order); + uint32_t tmp; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + "1:\n" + "ldrex %[original], %[storage]\n" // load the original value + "strex %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed + "teq %[tmp], #0\n" // check if store succeeded + "bne 1b\n" + BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp]) + : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage) + : [value] "r" (v) + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + fence_before(success_order); + uint32_t success; + uint32_t tmp; + storage_type original; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + "mov %[success], #0\n" // success = 0 + "ldrex %[original], %[storage]\n" // original = *(&storage) + "teq %[original], %[expected]\n" // flags = original==expected + "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal] + "strexeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed + "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. 
make it 1 if store succeeded)
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [success] "=&r" (success),    // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [expected] "r" (expected),    // %4
+              [desired] "r" (desired)       // %5
+            : "cc"
+        );
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = original;
+        return !!success;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        fence_before(success_order);
+        uint32_t success;
+        uint32_t tmp;
+        storage_type original;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "mov %[success], #0\n"                       // success = 0
+            "1:\n"
+            "ldrex %[original], %[storage]\n"            // original = *(&storage)
+            "teq %[original], %[expected]\n"             // flags = original==expected
+            "bne 2f\n"                                   // if (!flags.equal) goto end
+            "strex %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+            "eors %[success], %[success], #1\n"          // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+            "beq 1b\n"                                   // if (flags.equal) goto retry
+            "2:\n"
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [success] "=&r" (success),    // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [expected] "r" (expected),    // %4
+              [desired] "r" (desired)       // %5
+            : "cc"
+        );
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = original;
+        return !!success;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "add %[result], %[original], %[value]\n" // result = original + value
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "sub %[result], %[original], %[value]\n" // result = original - value
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
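+        // Same ldrex/strex retry loop as fetch_add above, with a bitwise AND between the load and the store.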
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "and %[result], %[original], %[value]\n" // result = original & value
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "orr %[result], %[original], %[value]\n" // result = original | value
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "eor %[result], %[original], %[value]\n" // result = original ^ value
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        return !!exchange(storage, (storage_type)1, order);
+    }
+
+    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        store(storage, 0, order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+
+template< >
+struct operations< 1u, false > :
+    public operations< 4u, false >
+{
+    typedef operations< 4u, false > base_type;
+    typedef base_type::storage_type storage_type;
+
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "add %[result], %[original], %[value]\n" // result = original + value
+            "uxtb %[result], %[result]\n"            // zero extend result from 8 to 32 bits
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
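+    // fetch_sub likewise zero-extends (uxtb) the result so only the low 8 bits are kept in storage.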
%[storage]\n" // *(&storage) = result, tmp = store failed + "teq %[tmp], #0\n" // flags = tmp==0 + "bne 1b\n" // if (!flags.equal) goto retry + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + : [original] "=&r" (original), // %0 + [result] "=&r" (result), // %1 + [tmp] "=&l" (tmp), // %2 + [storage] "+Q" (storage) // %3 + : [value] "r" (v), // %4 + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + uint32_t tmp; + storage_type original, result; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + "1:\n" + "ldrex %[original], %[storage]\n" // original = *(&storage) + "sub %[result], %[original], %[value]\n" // result = original - value + "uxtb %[result], %[result]\n" // zero extend result from 8 to 32 bits + "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed + "teq %[tmp], #0\n" // flags = tmp==0 + "bne 1b\n" // if (!flags.equal) goto retry + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + : [original] "=&r" (original), // %0 + [result] "=&r" (result), // %1 + [tmp] "=&l" (tmp), // %2 + [storage] "+Q" (storage) // %3 + : [value] "r" (v), // %4 + : "cc" + ); + fence_after(order); + return original; + } +}; + +template< > +struct operations< 1u, true > : + public operations< 4u, true > +{ + typedef operations< 4u, true > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + uint32_t tmp; + storage_type original, result; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + "1:\n" + "ldrex %[original], %[storage]\n" // original = *(&storage) + "add %[result], %[original], %[value]\n" // result = original + value + "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits + "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed + "teq %[tmp], #0\n" // flags = tmp==0 + "bne 1b\n" // if (!flags.equal) goto retry + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + : [original] "=&r" (original), // %0 + [result] "=&r" (result), // %1 + [tmp] "=&l" (tmp), // %2 + [storage] "+Q" (storage) // %3 + : [value] "r" (v), // %4 + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + uint32_t tmp; + storage_type original, result; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + "1:\n" + "ldrex %[original], %[storage]\n" // original = *(&storage) + "sub %[result], %[original], %[value]\n" // result = original - value + "sxtb %[result], %[result]\n" // sign extend result from 8 to 32 bits + "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed + "teq %[tmp], #0\n" // flags = tmp==0 + "bne 1b\n" // if (!flags.equal) goto retry + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + : [original] "=&r" (original), // %0 + [result] "=&r" (result), // %1 + [tmp] "=&l" (tmp), // %2 + [storage] "+Q" (storage) // %3 + : [value] "r" (v), // %4 + : "cc" + ); + fence_after(order); + return original; + } +}; + + +template< > +struct operations< 2u, false > : + public operations< 4u, false > +{ + typedef operations< 4u, false > base_type; + typedef base_type::storage_type storage_type; 
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "add %[result], %[original], %[value]\n" // result = original + value
+            "uxth %[result], %[result]\n"            // zero extend result from 16 to 32 bits
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "sub %[result], %[original], %[value]\n" // result = original - value
+            "uxth %[result], %[result]\n"            // zero extend result from 16 to 32 bits
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+};
+
+template< >
+struct operations< 2u, true > :
+    public operations< 4u, true >
+{
+    typedef operations< 4u, true > base_type;
+    typedef base_type::storage_type storage_type;
+
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+            "1:\n"
+            "ldrex %[original], %[storage]\n"        // original = *(&storage)
+            "add %[result], %[original], %[value]\n" // result = original + value
+            "sxth %[result], %[result]\n"            // sign extend result from 16 to 32 bits
+            "strex %[tmp], %[result], %[storage]\n"  // *(&storage) = result, tmp = store failed
+            "teq %[tmp], #0\n"                       // flags = tmp==0
+            "bne 1b\n"                               // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+            : [original] "=&r" (original),  // %0
+              [result] "=&r" (result),      // %1
+              [tmp] "=&l" (tmp),            // %2
+              [storage] "+Q" (storage)      // %3
+            : [value] "r" (v)               // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
"teq %[tmp], #0\n" // flags = tmp==0 + "bne 1b\n" // if (!flags.equal) goto retry + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp]) + : [original] "=&r" (original), // %0 + [result] "=&r" (result), // %1 + [tmp] "=&l" (tmp), // %2 + [storage] "+Q" (storage) // %3 + : [value] "r" (v), // %4 + : "cc" + ); + fence_after(order); + return original; + } +}; + + +#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD) + +// Unlike 32-bit operations, for 64-bit loads and stores we must use ldrexd/strexd. +// Any other instructions result in a non-atomic sequence of 32-bit accesses. +// See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition", +// Section A3.5.3 "Atomicity in the ARM architecture". + +// In the asm blocks below we have to use 32-bit register pairs to compose 64-bit values. +// In order to pass the 64-bit operands to/from asm blocks, we use undocumented gcc feature: +// the lower half (Rt) of the operand is accessible normally, via the numbered placeholder (e.g. %0), +// and the upper half (Rt2) - via the same placeholder with an 'H' after the '%' sign (e.g. %H0). +// See: http://hardwarebug.org/2010/07/06/arm-inline-asm-secrets/ + +template< bool Signed > +struct operations< 8u, Signed > : + public gcc_ppc_operations_base +{ + typedef typename make_storage_type< 8u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + exchange(storage, v, order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type original; + uint32_t tmp; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0) + "ldrexd %1, %H1, %2\n" + BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0) + : "=&l" (tmp), // %0 + "=&r" (original), // %1 + : "Q" (storage) // %2 + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original; + fence_before(order); + uint32_t tmp; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0) + "1:\n" + "ldrexd %1, %H1, %2\n" // load the original value + "strexd %0, %3, %H3, %2\n" // store the replacement, tmp = store failed + "teq %0, #0\n" // check if store succeeded + "bne 1b\n" + BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0) + : "=&l" (tmp), // %0 + "=&r" (original), // %1 + "+Q" (storage) // %2 + : "r" (v) // %3 + : "cc" + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + fence_before(success_order); + uint32_t success; + uint32_t tmp; + storage_type original; + __asm__ __volatile__ + ( + BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2) + "ldrexd %0, %H0, %3\n" // original = *(&storage) + "eor %1, %0, %4\n" // The three instructions are just a fancy way of comparing 2 64-bit integers: + "eor %2, %H0, %H4\n" // success = original[lo] ^ expected[lo]; tmp = original[hi] ^ expected[hi]; + "orrs %1, %1, %2\n" // success = success | tmp (i.e. 0 if original==expected); flags = original==expected + "itte eq\n" // [hint that the following 3 instructions are conditional on flags.equal] + "strexdeq %1, %5, %H5, %3\n" // if (flags.equal) *(&storage) = desired, success = store failed + "eoreq %1, %1, #1\n" // if (flags.equal) success ^= 1 (i.e. 
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        fence_before(success_order);
+        uint32_t success;
+        uint32_t tmp;
+        storage_type original;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
+            "1:\n"
+            "ldrexd %0, %H0, %3\n"     // original = *(&storage)
+            "eor %1, %0, %4\n"         // The three instructions are just a fancy way of comparing 2 64-bit integers:
+            "eor %2, %H0, %H4\n"       // success = original[lo] ^ expected[lo]; tmp = original[hi] ^ expected[hi];
+            "orrs %1, %1, %2\n"        // success = success | tmp (i.e. 0 if original==expected); flags = original==expected
+            "itt ne\n"                 // [hint that the following 2 instructions are conditional on flags.equal]
+            "movne %1, #0\n"           // if (!flags.equal) success = 0
+            "bne 2f\n"                 // if (!flags.equal) goto end
+            "strexd %1, %5, %H5, %3\n" // *(&storage) = desired, success = store failed
+            "eors %1, %1, #1\n"        // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+            "beq 1b\n"                 // if (flags.equal) goto retry
+            "2:\n"
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
+            : "=&r" (original), // %0
+              "=&r" (success),  // %1
+              "=&l" (tmp),      // %2
+              "+Q" (storage)    // %3
+            : "r" (expected),   // %4
+              "r" (desired)     // %5
+            : "cc"
+        );
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = original;
+        return !!success;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
+            "1:\n"
+            "ldrexd %0, %H0, %3\n"     // original = *(&storage)
+            "adds %1, %0, %4\n"        // result = original + value
+            "adc %H1, %H0, %H4\n"
+            "strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
+            "teq %2, #0\n"             // flags = tmp==0
+            "bne 1b\n"                 // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
+            : "=&r" (original), // %0
+              "=&r" (result),   // %1
+              "=&l" (tmp),      // %2
+              "+Q" (storage)    // %3
+            : "r" (v)           // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
+            "1:\n"
+            "ldrexd %0, %H0, %3\n"     // original = *(&storage)
+            "subs %1, %0, %4\n"        // result = original - value
+            "sbc %H1, %H0, %H4\n"
+            "strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
+            "teq %2, #0\n"             // flags = tmp==0
+            "bne 1b\n"                 // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
+            : "=&r" (original), // %0
+              "=&r" (result),   // %1
+              "=&l" (tmp),      // %2
+              "+Q" (storage)    // %3
+            : "r" (v)           // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
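+
+    // The bitwise operations below apply the operation to each half of the register pair (lo and hi) before strexd.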
+    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
+            "1:\n"
+            "ldrexd %0, %H0, %3\n"     // original = *(&storage)
+            "and %1, %0, %4\n"         // result = original & value
+            "and %H1, %H0, %H4\n"
+            "strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
+            "teq %2, #0\n"             // flags = tmp==0
+            "bne 1b\n"                 // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
+            : "=&r" (original), // %0
+              "=&r" (result),   // %1
+              "=&l" (tmp),      // %2
+              "+Q" (storage)    // %3
+            : "r" (v)           // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
+            "1:\n"
+            "ldrexd %0, %H0, %3\n"     // original = *(&storage)
+            "orr %1, %0, %4\n"         // result = original | value
+            "orr %H1, %H0, %H4\n"
+            "strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
+            "teq %2, #0\n"             // flags = tmp==0
+            "bne 1b\n"                 // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
+            : "=&r" (original), // %0
+              "=&r" (result),   // %1
+              "=&l" (tmp),      // %2
+              "+Q" (storage)    // %3
+            : "r" (v)           // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        uint32_t tmp;
+        storage_type original, result;
+        __asm__ __volatile__
+        (
+            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%2)
+            "1:\n"
+            "ldrexd %0, %H0, %3\n"     // original = *(&storage)
+            "eor %1, %0, %4\n"         // result = original ^ value
+            "eor %H1, %H0, %H4\n"
+            "strexd %2, %1, %H1, %3\n" // *(&storage) = result, tmp = store failed
+            "teq %2, #0\n"             // flags = tmp==0
+            "bne 1b\n"                 // if (!flags.equal) goto retry
+            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%2)
+            : "=&r" (original), // %0
+              "=&r" (result),   // %1
+              "=&l" (tmp),      // %2
+              "+Q" (storage)    // %3
+            : "r" (v)           // %4
+            : "cc"
+        );
+        fence_after(order);
+        return original;
+    }
+
+    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        return !!exchange(storage, (storage_type)1, order);
+    }
+
+    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        store(storage, 0, order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+    switch (order)
+    {
+    case memory_order_acquire:
+    case memory_order_release:
+    case memory_order_acq_rel:
+    case memory_order_seq_cst:
+        gcc_arm_operations_base::hardware_full_fence();
+        break;
+    default:;
+    }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+    switch (order)
+    {
+    case memory_order_acquire:
+    case memory_order_release:
+    case memory_order_acq_rel:
+    case memory_order_seq_cst:
+        __asm__ __volatile__ ("" ::: "memory");
+        break;
+    default:;
+    }
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp
index 89668b7..918bc42 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include #if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)) diff --git a/include/boost/atomic/detail/ops_gcc_ppc.hpp b/include/boost/atomic/detail/ops_gcc_ppc.hpp index a332763..528e275 100644 --- a/include/boost/atomic/detail/ops_gcc_ppc.hpp +++ b/include/boost/atomic/detail/ops_gcc_ppc.hpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #include @@ -77,7 +77,7 @@ struct gcc_ppc_operations_base { static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT { - switch(order) + switch (order) { case memory_order_release: case memory_order_acq_rel: @@ -93,7 +93,7 @@ struct gcc_ppc_operations_base static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT { - switch(order) + switch (order) { case memory_order_acquire: case memory_order_acq_rel: diff --git a/include/boost/atomic/detail/ops_gcc_sparcv9.hpp b/include/boost/atomic/detail/ops_gcc_sparc.hpp similarity index 91% rename from include/boost/atomic/detail/ops_gcc_sparcv9.hpp rename to include/boost/atomic/detail/ops_gcc_sparc.hpp index 09c750e..b444d58 100644 --- a/include/boost/atomic/detail/ops_gcc_sparcv9.hpp +++ b/include/boost/atomic/detail/ops_gcc_sparc.hpp @@ -8,17 +8,17 @@ * Copyright (c) 2014 Andrey Semashev */ /*! - * \file atomic/detail/ops_gcc_sparcv9.hpp + * \file atomic/detail/ops_gcc_sparc.hpp * * This header contains implementation of the \c operations template. */ -#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_ -#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_ +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_ #include #include -#include +#include #include #include #include @@ -32,7 +32,7 @@ namespace boost { namespace atomics { namespace detail { -struct gcc_sparcv9_cas_base +struct gcc_sparc_cas_base { static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT { @@ -77,7 +77,6 @@ struct gcc_sparcv9_cas_base __asm__ __volatile__ ("membar #Sync" ::: "memory"); } - static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT { fence_after(order); @@ -85,8 +84,8 @@ struct gcc_sparcv9_cas_base }; template< bool Signed > -struct gcc_sparcv9_cas32 : - public gcc_sparcv9_cas_base +struct gcc_sparc_cas32 : + public gcc_sparc_cas_base { typedef typename make_storage_type< 4u, Signed >::type storage_type; @@ -139,9 +138,9 @@ struct gcc_sparcv9_cas32 : template< bool Signed > struct operations< 4u, Signed > : - public cas_based_operations< gcc_sparcv9_cas32< Signed > > + public cas_based_operations< gcc_sparc_cas32< Signed > > { - typedef cas_based_operations< gcc_sparcv9_cas32< Signed > > base_type; + typedef cas_based_operations< gcc_sparc_cas32< Signed > > base_type; typedef typename base_type::storage_type storage_type; static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT @@ -160,7 +159,7 @@ struct operations< 4u, Signed > : static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { - return exchange(storage, (storage_type)1, order) != (storage_type)0; + return !!exchange(storage, (storage_type)1, order); } }; @@ -177,8 +176,8 @@ struct 
operations< 2u, Signed > : }; template< bool Signed > -struct gcc_sparcv9_cas64 : - public gcc_sparcv9_cas_base +struct gcc_sparc_cas64 : + public gcc_sparc_cas_base { typedef typename make_storage_type< 8u, Signed >::type storage_type; @@ -231,7 +230,7 @@ struct gcc_sparcv9_cas64 : template< bool Signed > struct operations< 8u, Signed > : - public cas_based_operations< gcc_sparcv9_cas64< Signed > > + public cas_based_operations< gcc_sparc_cas64< Signed > > { }; @@ -269,4 +268,4 @@ BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT } // namespace atomics } // namespace boost -#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_ +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp index e5ef05f..695e079 100644 --- a/include/boost/atomic/detail/ops_gcc_sync.hpp +++ b/include/boost/atomic/detail/ops_gcc_sync.hpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #include @@ -112,7 +112,7 @@ struct gcc_sync_operations { if ((order & memory_order_release) != 0) __sync_synchronize(); - return __sync_lock_test_and_set(&storage, 1) != 0; + return !!__sync_lock_test_and_set(&storage, 1); } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/ops_gcc_x86.hpp b/include/boost/atomic/detail/ops_gcc_x86.hpp index de7baac..8a1d7c5 100644 --- a/include/boost/atomic/detail/ops_gcc_x86.hpp +++ b/include/boost/atomic/detail/ops_gcc_x86.hpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) @@ -142,7 +142,7 @@ struct gcc_x86_operations : static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { - return Derived::exchange(storage, (storage_type)1, order) != 0; + return !!Derived::exchange(storage, (storage_type)1, order); } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp index a8413d7..8c393df 100644 --- a/include/boost/atomic/detail/ops_linux_arm.hpp +++ b/include/boost/atomic/detail/ops_linux_arm.hpp @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include #include diff --git a/include/boost/atomic/detail/ops_msvc_arm.hpp b/include/boost/atomic/detail/ops_msvc_arm.hpp index c640b87..409bcea 100644 --- a/include/boost/atomic/detail/ops_msvc_arm.hpp +++ b/include/boost/atomic/detail/ops_msvc_arm.hpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -104,7 +104,7 @@ struct msvc_arm_operations : static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { - return Derived::exchange(storage, (storage_type)1, order) != 0; + return !!Derived::exchange(storage, (storage_type)1, order); } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/ops_msvc_x86.hpp b/include/boost/atomic/detail/ops_msvc_x86.hpp index a0ab63d..404535c 100644 --- a/include/boost/atomic/detail/ops_msvc_x86.hpp +++ b/include/boost/atomic/detail/ops_msvc_x86.hpp @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #if 
defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) @@ -115,7 +115,7 @@ struct msvc_x86_operations : static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { - return Derived::exchange(storage, (storage_type)1, order) != 0; + return !!Derived::exchange(storage, (storage_type)1, order); } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/ops_windows.hpp b/include/boost/atomic/detail/ops_windows.hpp index 87d33fc..30c0d44 100644 --- a/include/boost/atomic/detail/ops_windows.hpp +++ b/include/boost/atomic/detail/ops_windows.hpp @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include #include @@ -90,7 +90,7 @@ struct windows_operations : static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT { - return Derived::exchange(storage, (storage_type)1, order) != 0; + return !!Derived::exchange(storage, (storage_type)1, order); } static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index 927ea77..d53b2ad 100644 --- a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -45,8 +45,8 @@ #elif defined(__GNUC__) &&\ (\ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\ - defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) ||\ - defined(__ARM_ARCH_6K__) ||\ + defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\ + defined(__ARM_ARCH_6ZK__) ||\ defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\ defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\ @@ -56,7 +56,7 @@ #elif defined(__GNUC__) && defined(__sparc_v9__) -#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparcv9 +#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) diff --git a/include/boost/atomic/detail/storage_types.hpp b/include/boost/atomic/detail/storage_type.hpp similarity index 77% rename from include/boost/atomic/detail/storage_types.hpp rename to include/boost/atomic/detail/storage_type.hpp index a450609..a024f1d 100644 --- a/include/boost/atomic/detail/storage_types.hpp +++ b/include/boost/atomic/detail/storage_type.hpp @@ -8,13 +8,13 @@ * Copyright (c) 2013 - 2014 Andrey Semashev */ /*! - * \file atomic/detail/storage_types.hpp + * \file atomic/detail/storage_type.hpp * * This header defines underlying types used as storage */ -#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ -#define BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ +#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_ #include #include @@ -33,11 +33,22 @@ struct buffer_storage { unsigned char data[Size]; - bool operator== (buffer_storage const& that) const + BOOST_FORCEINLINE bool operator! 
() const BOOST_NOEXCEPT + { + bool result = true; + for (unsigned int i = 0; i < Size && result; ++i) + { + result &= data[i] == 0; + } + return result; + } + + BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT { return std::memcmp(data, that.data, Size) == 0; } - bool operator!= (buffer_storage const& that) const + + BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT { return std::memcmp(data, that.data, Size) != 0; } @@ -116,6 +127,11 @@ struct make_storage_type< 16u, true > struct BOOST_ALIGNMENT(16) storage128_t { boost::uint64_t data[2]; + + BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT + { + return data[0] == 0 && data[1] == 0; + } }; BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT @@ -149,4 +165,4 @@ struct storage_size_of } // namespace atomics } // namespace boost -#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPES_HPP_INCLUDED_ +#endif // BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_ From c196e6da0cd559fdbd1b93546d58a067209dca1b Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 14:18:49 +0400 Subject: [PATCH 16/23] Fixed compilation of ARM backend. Simplified fences implementation. --- include/boost/atomic/detail/ops_gcc_arm.hpp | 78 +++++----------- include/boost/atomic/detail/ops_gcc_ppc.hpp | 41 +++------ include/boost/atomic/detail/ops_gcc_sparc.hpp | 56 ++++-------- include/boost/atomic/detail/ops_gcc_sync.hpp | 88 ++++++------------- include/boost/atomic/detail/ops_gcc_x86.hpp | 83 ++--------------- include/boost/atomic/detail/ops_linux_arm.hpp | 44 +--------- include/boost/atomic/detail/platform.hpp | 4 +- 7 files changed, 91 insertions(+), 303 deletions(-) diff --git a/include/boost/atomic/detail/ops_gcc_arm.hpp b/include/boost/atomic/detail/ops_gcc_arm.hpp index d3e9e07..12ce7b7 100644 --- a/include/boost/atomic/detail/ops_gcc_arm.hpp +++ b/include/boost/atomic/detail/ops_gcc_arm.hpp @@ -92,28 +92,14 @@ struct gcc_arm_operations_base { static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & memory_order_release) != 0) hardware_full_fence(); - break; - default:; - } } static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & memory_order_acquire) != 0) hardware_full_fence(); - break; - default:; - } } static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT @@ -278,7 +264,7 @@ struct operations< 4u, Signed > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -304,7 +290,7 @@ struct operations< 4u, Signed > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -330,7 +316,7 @@ struct operations< 4u, Signed > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -356,7 +342,7 @@ struct operations< 4u, Signed > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); 
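
A note on the (order & memory_order_release) != 0 and (order & memory_order_acquire) != 0 tests this patch introduces: they work because Boost's memory_order enumerators are bit-encoded, so acq_rel and seq_cst contain both the acquire and the release bits. The values below mirror boost/memory_order.hpp of this era and are quoted for illustration only:

    enum memory_order
    {
        memory_order_relaxed = 0,
        memory_order_consume = 1,
        memory_order_acquire = 2,
        memory_order_release = 4,
        memory_order_acq_rel = 6,  // acquire | release
        memory_order_seq_cst = 14  // acq_rel | 8
    };

    // release, acq_rel and seq_cst all carry the release bit,
    // so each of them triggers the pre-operation fence...
    static_assert((memory_order_seq_cst & memory_order_release) != 0, "");
    static_assert((memory_order_acq_rel & memory_order_release) != 0, "");
    // ...while a pure acquire does not.
    static_assert((memory_order_acquire & memory_order_release) == 0, "");
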
fence_after(order); @@ -382,7 +368,7 @@ struct operations< 4u, Signed > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -433,7 +419,7 @@ struct operations< 1u, false > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -460,7 +446,7 @@ struct operations< 1u, false > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -495,7 +481,7 @@ struct operations< 1u, true > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -522,7 +508,7 @@ struct operations< 1u, true > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -558,7 +544,7 @@ struct operations< 2u, false > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -585,7 +571,7 @@ struct operations< 2u, false > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -620,7 +606,7 @@ struct operations< 2u, true > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -647,7 +633,7 @@ struct operations< 2u, true > : [result] "=&r" (result), // %1 [tmp] "=&l" (tmp), // %2 [storage] "+Q" (storage) // %3 - : [value] "r" (v), // %4 + : [value] "r" (v) // %4 : "cc" ); fence_after(order); @@ -671,7 +657,7 @@ struct operations< 2u, true > : template< bool Signed > struct operations< 8u, Signed > : - public gcc_ppc_operations_base + public gcc_arm_operations_base { typedef typename make_storage_type< 8u, Signed >::type storage_type; @@ -690,7 +676,7 @@ struct operations< 8u, Signed > : "ldrexd %1, %H1, %2\n" BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0) : "=&l" (tmp), // %0 - "=&r" (original), // %1 + "=&r" (original) // %1 : "Q" (storage) // %2 ); fence_after(order); @@ -815,7 +801,7 @@ struct operations< 8u, Signed > : "=&r" (result), // %1 "=&l" (tmp), // %2 "+Q" (storage) // %3 - : "r" (v), // %4 + : "r" (v) // %4 : "cc" ); fence_after(order); @@ -842,7 +828,7 @@ struct operations< 8u, Signed > : "=&r" (result), // %1 "=&l" (tmp), // %2 "+Q" (storage) // %3 - : "r" (v), // %4 + : "r" (v) // %4 : "cc" ); fence_after(order); @@ -869,7 +855,7 @@ struct operations< 8u, Signed > : "=&r" (result), // %1 "=&l" (tmp), // %2 "+Q" (storage) // %3 - : "r" (v), // %4 + : "r" (v) // %4 : "cc" ); fence_after(order); @@ -896,7 +882,7 @@ struct operations< 8u, Signed > : "=&r" (result), // %1 "=&l" (tmp), // %2 "+Q" (storage) // %3 - : "r" (v), // %4 + : "r" (v) // %4 : "cc" ); fence_after(order); @@ -923,7 +909,7 @@ struct operations< 8u, Signed > : "=&r" (result), // %1 "=&l" (tmp), // %2 "+Q" (storage) // %3 - : "r" (v), // %4 + : "r" (v) // %4 : "cc" ); fence_after(order); @@ -951,30 +937,14 @@ struct operations< 8u, Signed > : BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT { - switch 
(order) - { - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & (memory_order_acquire | memory_order_release)) != 0) gcc_arm_operations_base::hardware_full_fence(); - break; - default:; - } } BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & ~memory_order_consume) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } } // namespace detail diff --git a/include/boost/atomic/detail/ops_gcc_ppc.hpp b/include/boost/atomic/detail/ops_gcc_ppc.hpp index 528e275..278c7ed 100644 --- a/include/boost/atomic/detail/ops_gcc_ppc.hpp +++ b/include/boost/atomic/detail/ops_gcc_ppc.hpp @@ -77,38 +77,29 @@ struct gcc_ppc_operations_base { static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_release: - case memory_order_acq_rel: #if defined(__powerpc64__) - __asm__ __volatile__ ("lwsync" ::: "memory"); - break; -#endif - case memory_order_seq_cst: + if (order == memory_order_seq_cst) __asm__ __volatile__ ("sync" ::: "memory"); - default:; - } + else if ((order & memory_order_release) != 0) + __asm__ __volatile__ ("lwsync" ::: "memory"); +#else + if ((order & memory_order_release) != 0) + __asm__ __volatile__ ("sync" ::: "memory"); +#endif } static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("isync"); - case memory_order_consume: + if ((order & memory_order_acquire) != 0) + __asm__ __volatile__ ("isync" ::: "memory"); + else if (order == memory_order_consume) __asm__ __volatile__ ("" ::: "memory"); - default:; - } } static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT { if (order == memory_order_seq_cst) - __asm__ __volatile__ ("sync"); + __asm__ __volatile__ ("sync" ::: "memory"); } }; @@ -766,16 +757,8 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & ~memory_order_consume) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } } // namespace detail diff --git a/include/boost/atomic/detail/ops_gcc_sparc.hpp b/include/boost/atomic/detail/ops_gcc_sparc.hpp index b444d58..bbeaca1 100644 --- a/include/boost/atomic/detail/ops_gcc_sparc.hpp +++ b/include/boost/atomic/detail/ops_gcc_sparc.hpp @@ -36,39 +36,18 @@ struct gcc_sparc_cas_base { static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory"); - break; - case memory_order_seq_cst: + if (order == memory_order_seq_cst) __asm__ __volatile__ ("membar #Sync" ::: "memory"); - break; - } + else if ((order & memory_order_release) != 0) + __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory"); } static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case 
memory_order_relaxed:
-        case memory_order_consume:
-        case memory_order_release:
-            break;
-        case memory_order_acquire:
-        case memory_order_acq_rel:
-            __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
-            break;
-        case memory_order_seq_cst:
+        if (order == memory_order_seq_cst)
             __asm__ __volatile__ ("membar #Sync" ::: "memory");
-            break;
-        default:;
-        }
+        else if ((order & memory_order_acquire) != 0)
+            __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
     }
 
     static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
@@ -76,11 +55,6 @@ struct gcc_sparc_cas_base
         if (order == memory_order_seq_cst)
             __asm__ __volatile__ ("membar #Sync" ::: "memory");
     }
-
-    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
-    {
-        fence_after(order);
-    }
 };
 
 template< bool Signed >
@@ -99,7 +73,7 @@ struct gcc_sparc_cas32 :
     static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
     {
         storage_type v = storage;
-        fence_after_load(order);
+        fence_after(order);
         return v;
     }
 
@@ -191,7 +165,7 @@ struct gcc_sparc_cas64 :
     static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
     {
         storage_type v = storage;
-        fence_after_load(order);
+        fence_after(order);
        return v;
     }
 
@@ -237,10 +211,8 @@ struct operations< 8u, Signed > :
 BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
 {
-    switch(order)
+    switch (order)
     {
-    case memory_order_relaxed:
-        break;
     case memory_order_release:
         __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
         break;
@@ -250,18 +222,20 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
     case memory_order_acq_rel:
         __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
         break;
-    case memory_order_consume:
-        break;
     case memory_order_seq_cst:
         __asm__ __volatile__ ("membar #Sync" ::: "memory");
         break;
-    default:;
+    case memory_order_consume:
+    case memory_order_relaxed:
+    default:
+        break;
     }
 }
 
 BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
 {
-    __asm__ __volatile__ ("" ::: "memory");
+    if ((order & ~memory_order_consume) != 0)
+        __asm__ __volatile__ ("" ::: "memory");
 }
 
 } // namespace detail
diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp
index 695e079..e51b9e6 100644
--- a/include/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/include/boost/atomic/detail/ops_gcc_sync.hpp
@@ -31,8 +31,30 @@ namespace boost {
 namespace atomics {
 namespace detail {
 
+struct gcc_sync_operations_base
+{
+    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+    {
+        if ((order & memory_order_release) != 0)
+            __sync_synchronize();
+    }
+
+    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+    {
+        if (order == memory_order_seq_cst)
+            __sync_synchronize();
+    }
+
+    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+    {
+        if ((order & (memory_order_acquire | memory_order_consume)) != 0)
+            __sync_synchronize();
+    }
+};
+
 template< typename T >
-struct gcc_sync_operations
+struct gcc_sync_operations :
+    public gcc_sync_operations_base
 {
     typedef T storage_type;
 
@@ -126,45 +148,6 @@ struct gcc_sync_operations
     {
         return true;
     }
-
-private:
-    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
-    {
-        switch (order)
-        {
-        case memory_order_relaxed:
-        case memory_order_acquire:
- case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - __sync_synchronize(); - break; - } - } - - static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT - { - if (order == memory_order_seq_cst) - __sync_synchronize(); - } - - static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT - { - switch (order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_consume: - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - __sync_synchronize(); - break; - } - } }; #if BOOST_ATOMIC_INT8_LOCK_FREE > 0 @@ -237,35 +220,14 @@ struct operations< 16u, Signed > : BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - break; - case memory_order_release: - case memory_order_consume: - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: + if (order != memory_order_relaxed) __sync_synchronize(); - break; - } } BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - case memory_order_consume: - break; - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & ~memory_order_consume) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } } // namespace detail diff --git a/include/boost/atomic/detail/ops_gcc_x86.hpp b/include/boost/atomic/detail/ops_gcc_x86.hpp index 8a1d7c5..acbe9bc 100644 --- a/include/boost/atomic/detail/ops_gcc_x86.hpp +++ b/include/boost/atomic/detail/ops_gcc_x86.hpp @@ -44,61 +44,14 @@ struct gcc_x86_operations_base { static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: + if ((order & memory_order_release) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: + if ((order & memory_order_acquire) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_consume: - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } - } - - static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT - { - switch (order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_consume: - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } }; @@ -125,7 +78,7 @@ struct gcc_x86_operations : static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT { storage_type v = storage; - fence_after_load(order); + fence_after(order); return v; } @@ -750,17 +703,8 @@ struct operations< 16u, Signed > : BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) + if (order == 
memory_order_seq_cst) { - case memory_order_relaxed: - case memory_order_consume: - break; - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_seq_cst: __asm__ __volatile__ ( #if defined(__x86_64__) || defined(__SSE2__) @@ -770,26 +714,17 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT #endif ::: "memory" ); - break; - default:; + } + else if ((order & (memory_order_acquire | memory_order_release)) != 0) + { + __asm__ __volatile__ ("" ::: "memory"); } } BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - case memory_order_consume: - break; - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & ~memory_order_consume) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } } // namespace detail diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp index 8c393df..e695297 100644 --- a/include/boost/atomic/detail/ops_linux_arm.hpp +++ b/include/boost/atomic/detail/ops_linux_arm.hpp @@ -59,16 +59,8 @@ struct linux_arm_cas_base { static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & memory_order_release) != 0) hardware_full_fence(); - break; - case memory_order_consume: - default:; - } } static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT @@ -79,15 +71,8 @@ struct linux_arm_cas_base static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & memory_order_acquire) != 0) hardware_full_fence(); - break; - default:; - } } static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT @@ -175,35 +160,14 @@ struct operations< 4u, Signed > : BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - break; - case memory_order_release: - case memory_order_consume: - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & (memory_order_acquire | memory_order_release)) != 0) linux_arm_cas_base::hardware_full_fence(); - break; - } } BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT { - switch (order) - { - case memory_order_relaxed: - case memory_order_consume: - break; - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + if ((order & ~memory_order_consume) != 0) __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } } } // namespace detail diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index d53b2ad..1fbd344 100644 --- a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -25,13 +25,13 @@ #define BOOST_ATOMIC_DETAIL_PLATFORM emulated #define BOOST_ATOMIC_EMULATED - +/* // Intel compiler does not support __atomic* intrinsics properly, although defines them (tested with 13.0.1 and 13.1.1 on Linux) #elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407) && !defined(BOOST_INTEL_CXX_VERSION))\ || (defined(BOOST_CLANG) && 
((__clang_major__ * 100 + __clang_minor__) >= 302)) #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic - +*/ #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86 From c319c6efd12b843c5c943f0da0dec525d5ae8d9c Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 17:00:00 +0400 Subject: [PATCH 17/23] Enabled gcc atomic backend for Intel Compiler on Linux. --- include/boost/atomic/detail/atomic_flag.hpp | 11 +- .../boost/atomic/detail/atomic_template.hpp | 153 +++++++++--------- .../boost/atomic/detail/ops_gcc_atomic.hpp | 32 ++++ include/boost/atomic/detail/platform.hpp | 6 +- include/boost/atomic/fences.hpp | 5 + 5 files changed, 127 insertions(+), 80 deletions(-) diff --git a/include/boost/atomic/detail/atomic_flag.hpp b/include/boost/atomic/detail/atomic_flag.hpp index 4e0e2ba..6a6667d 100644 --- a/include/boost/atomic/detail/atomic_flag.hpp +++ b/include/boost/atomic/detail/atomic_flag.hpp @@ -23,6 +23,11 @@ #pragma once #endif +/* + * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE, + * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp. + */ + namespace boost { namespace atomics { @@ -39,16 +44,16 @@ struct atomic_flag storage_type m_storage; - BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0) + BOOST_FORCEINLINE BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0) { } - bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return operations::test_and_set(m_storage, order); } - void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_acquire); BOOST_ASSERT(order != memory_order_acq_rel); diff --git a/include/boost/atomic/detail/atomic_template.hpp b/include/boost/atomic/detail/atomic_template.hpp index bfed528..a4daff0 100644 --- a/include/boost/atomic/detail/atomic_template.hpp +++ b/include/boost/atomic/detail/atomic_template.hpp @@ -35,6 +35,11 @@ #pragma warning(disable: 4522) #endif +/* + * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE, + * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp. 
+ */ + namespace boost { namespace atomics { namespace detail { @@ -87,7 +92,7 @@ public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {} - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_consume); BOOST_ASSERT(order != memory_order_acquire); @@ -96,7 +101,7 @@ public: operations::store(m_storage, static_cast< storage_type >(v), order); } - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_release); BOOST_ASSERT(order != memory_order_acq_rel); @@ -104,22 +109,22 @@ public: return static_cast< value_type >(operations::load(m_storage, order)); } - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order)); } - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order)); } - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order)); } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -131,12 +136,12 @@ public: return res; } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -148,72 +153,72 @@ public: return res; } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool 
compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order)); } - value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order)); } - value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order)); } - bool is_lock_free() const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT { return operations::is_lock_free(m_storage); } - value_type operator++(int) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT { return fetch_add(1); } - value_type operator++() volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT { return fetch_add(1) + 1; } - value_type operator--(int) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT { return fetch_sub(1); } - value_type operator--() volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT { return fetch_sub(1) - 1; } - value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT { return fetch_add(v) + v; } - value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT { return fetch_sub(v) - v; } - value_type operator&=(value_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator&=(value_type v) volatile BOOST_NOEXCEPT { return fetch_and(v) & v; } - value_type operator|=(value_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator|=(value_type v) volatile BOOST_NOEXCEPT { return fetch_or(v) | v; } - value_type operator^=(value_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator^=(value_type v) volatile BOOST_NOEXCEPT { return fetch_xor(v) ^ v; } @@ -243,7 +248,7 @@ public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {} - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_consume); BOOST_ASSERT(order != memory_order_acquire); @@ -252,7 +257,7 @@ public: operations::store(m_storage, static_cast< storage_type >(v), order); } - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE 
value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_release); BOOST_ASSERT(order != memory_order_acq_rel); @@ -260,12 +265,12 @@ public: return !!operations::load(m_storage, order); } - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return !!operations::exchange(m_storage, static_cast< storage_type >(v), order); } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -277,12 +282,12 @@ public: return res; } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -294,12 +299,12 @@ public: return res; } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool is_lock_free() const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT { return operations::is_lock_free(m_storage); } @@ -328,11 +333,11 @@ protected: public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) + BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) { } - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_consume); BOOST_ASSERT(order != memory_order_acquire); @@ -341,7 +346,7 @@ public: operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); } - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_release); BOOST_ASSERT(order != memory_order_acq_rel); @@ -349,12 
+354,12 @@ public: return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); } - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -366,12 +371,12 @@ public: return res; } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -383,12 +388,12 @@ public: return res; } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool is_lock_free() const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT { return operations::is_lock_free(m_storage); } @@ -418,11 +423,11 @@ protected: public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) + BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) { } - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_consume); BOOST_ASSERT(order != memory_order_acquire); @@ -431,7 +436,7 @@ public: operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); } - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_release); BOOST_ASSERT(order != memory_order_acq_rel); @@ -439,22 +444,22 @@ public: return atomics::detail::union_cast< value_type >(operations::load(m_storage, 
order)); } - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); } - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order)); } - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -466,12 +471,12 @@ public: return res; } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -483,42 +488,42 @@ public: return res; } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool is_lock_free() const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT { return operations::is_lock_free(m_storage); } - value_type operator++(int) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT { return fetch_add(1); } - value_type operator++() volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT { return fetch_add(1) + 1; } - value_type operator--(int) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT { return fetch_sub(1); } - value_type operator--() volatile BOOST_NOEXCEPT + 
BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT { return fetch_sub(1) - 1; } - value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT { return fetch_add(v) + v; } - value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT { return fetch_sub(v) - v; } @@ -548,11 +553,11 @@ protected: public: BOOST_DEFAULTED_FUNCTION(base_atomic(), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) + BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v)) { } - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_consume); BOOST_ASSERT(order != memory_order_acquire); @@ -561,7 +566,7 @@ public: operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order); } - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT { BOOST_ASSERT(order != memory_order_release); BOOST_ASSERT(order != memory_order_acq_rel); @@ -569,22 +574,22 @@ public: return atomics::detail::union_cast< value_type >(operations::load(m_storage, order)); } - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order)); } - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order)); } - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order)); } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -596,12 +601,12 @@ public: return res; } - bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_strong(expected, desired, order, 
atomics::detail::deduce_failure_order(order)); } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT { BOOST_ASSERT(failure_order != memory_order_release); BOOST_ASSERT(failure_order != memory_order_acq_rel); @@ -613,42 +618,42 @@ public: return res; } - bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT { return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order)); } - bool is_lock_free() const volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT { return operations::is_lock_free(m_storage); } - value_type operator++(int) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT { return fetch_add(1); } - value_type operator++() volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT { return (char*)fetch_add(1) + 1; } - value_type operator--(int) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT { return fetch_sub(1); } - value_type operator--() volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT { return (char*)fetch_sub(1) - 1; } - value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT { return (char*)fetch_add(v) + v; } - value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT + BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT { return (char*)fetch_sub(v) - v; } @@ -678,7 +683,7 @@ public: // operator=(value_arg_type) is considered ambiguous with operator=(atomic const&) // in assignment expressions, even though conversion to atomic<> is less preferred // than conversion to value_arg_type. - explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {} + BOOST_FORCEINLINE explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {} BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT { diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp index 918bc42..1052754 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -28,10 +28,42 @@ #pragma once #endif +#if defined(__INTEL_COMPILER) +// This is used to suppress warning #32013 described below for Intel Compiler. +// In debug builds the compiler does not inline any functions, so basically +// every atomic function call results in this warning. I don't know any other +// way to selectively disable just this one warning. +#pragma system_header +#endif + namespace boost { namespace atomics { namespace detail { +/*! + * The function converts \c boost::memory_order values to the compiler-specific constants. + * + * NOTE: The intention is that the function is optimized away by the compiler, and the + * compiler-specific constants are passed to the intrinsics. 
I know constexpr doesn't
+ * work in this case because the standard atomics interface requires memory ordering
+ * constants to be passed as function arguments, at which point they stop being constexpr.
+ * However, it is crucial that the compiler sees constants and not runtime values,
+ * because otherwise it just ignores the ordering value and always uses seq_cst.
+ * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
+ * gcc 4.8.2. Intel Compiler issues a warning in this case:
+ *
+ * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
+ *
+ * while gcc acts silently.
+ *
+ * To mitigate the problem ALL functions, including the atomic<> members, must be
+ * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
+ * all functions are called with constant orderings and call intrinsics properly.
+ *
+ * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
+ * inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic
+ * operations will be executed with seq_cst semantics.
+ */
 BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
 {
     return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp
index 1fbd344..50609ac 100644
--- a/include/boost/atomic/detail/platform.hpp
+++ b/include/boost/atomic/detail/platform.hpp
@@ -25,13 +25,13 @@
 #define BOOST_ATOMIC_DETAIL_PLATFORM emulated
 #define BOOST_ATOMIC_EMULATED
-/*
+
 // Intel compiler does not support __atomic* intrinsics properly, although defines them (tested with 13.0.1 and 13.1.1 on Linux)
-#elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407) && !defined(BOOST_INTEL_CXX_VERSION))\
+#elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407))\
     || (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))
 #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic
-*/
+
 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
 #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86
diff --git a/include/boost/atomic/fences.hpp b/include/boost/atomic/fences.hpp
index 66b5ac5..31e3040 100644
--- a/include/boost/atomic/fences.hpp
+++ b/include/boost/atomic/fences.hpp
@@ -24,6 +24,11 @@
 #pragma once
 #endif
+/*
+ * IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
+ * see comment for convert_memory_order_to_gcc in ops_gcc_atomic.hpp.
+ */
+
 namespace boost {
 namespace atomics {
From 5cb6e92ed0b0b709a6525e740f58d751a74fa7db Mon Sep 17 00:00:00 2001
From: Andrey Semashev
Date: Sun, 11 May 2014 17:33:09 +0400
Subject: [PATCH 18/23] Extracted common x86 DCAS code to a separate header.
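
A concrete illustration of the forced-inlining note from the previous patch: once the ordering reaches a GCC/clang __atomic intrinsic as a compile-time constant, the compiler can emit the cheapest conforming barrier, while a runtime value degrades to seq_cst (gcc silently, Intel with warning #32013). The standalone helpers below are mine and only sketch the effect; they are not code from this series:

    #include <cstdint>

    inline std::uint32_t load_acquire(const std::uint32_t* p)
    {
        // Constant ordering: the intrinsic lowers to an acquire load.
        return __atomic_load_n(p, __ATOMIC_ACQUIRE);
    }

    inline std::uint32_t load_runtime_order(const std::uint32_t* p, int order)
    {
        // Runtime ordering: the compiler cannot pick a weaker barrier and
        // falls back to seq_cst, which is exactly what BOOST_FORCEINLINE
        // on every interface function is meant to avoid.
        return __atomic_load_n(p, order);
    }
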
--- .../boost/atomic/detail/ops_gcc_atomic.hpp | 167 +--------- include/boost/atomic/detail/ops_gcc_x86.hpp | 228 +------------ .../boost/atomic/detail/ops_gcc_x86_dcas.hpp | 308 ++++++++++++++++++ include/boost/atomic/detail/platform.hpp | 1 - 4 files changed, 312 insertions(+), 392 deletions(-) create mode 100644 include/boost/atomic/detail/ops_gcc_x86_dcas.hpp diff --git a/include/boost/atomic/detail/ops_gcc_atomic.hpp b/include/boost/atomic/detail/ops_gcc_atomic.hpp index 1052754..2297791 100644 --- a/include/boost/atomic/detail/ops_gcc_atomic.hpp +++ b/include/boost/atomic/detail/ops_gcc_atomic.hpp @@ -20,7 +20,7 @@ #include #include #if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)) -#include +#include #include #endif @@ -182,123 +182,9 @@ struct operations< 4u, Signed > : #if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) // Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355 -template< bool Signed > -struct clang_dcas_x86 -{ - typedef typename make_storage_type< 8u, Signed >::type storage_type; - - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - if ((((uint32_t)&storage) & 0x00000007) == 0) - { -#if defined(__SSE2__) - __asm__ __volatile__ - ( -#if defined(__AVX__) - "vmovq %1, %%xmm4\n\t" - "vmovq %%xmm4, %0\n\t" -#else - "movq %1, %%xmm4\n\t" - "movq %%xmm4, %0\n\t" -#endif - : "=m" (storage) - : "m" (v) - : "memory", "xmm4" - ); -#else - __asm__ __volatile__ - ( - "fildll %1\n\t" - "fistpll %0\n\t" - : "=m" (storage) - : "m" (v) - : "memory" - ); -#endif - } - else - { - uint32_t scratch; - __asm__ __volatile__ - ( - "movl %%ebx, %[scratch]\n\t" - "movl %[value_lo], %%ebx\n\t" - "movl 0(%[dest]), %%eax\n\t" - "movl 4(%[dest]), %%edx\n\t" - ".align 16\n\t" - "1: lock; cmpxchg8b 0(%[dest])\n\t" - "jne 1b\n\t" - "movl %[scratch], %%ebx" - : [scratch] "=m,m" (scratch) - : [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) - : "memory", "cc", "edx" - ); - } - } - - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT - { - storage_type value; - - if ((((uint32_t)&storage) & 0x00000007) == 0) - { -#if defined(__SSE2__) - __asm__ __volatile__ - ( -#if defined(__AVX__) - "vmovq %1, %%xmm4\n\t" - "vmovq %%xmm4, %0\n\t" -#else - "movq %1, %%xmm4\n\t" - "movq %%xmm4, %0\n\t" -#endif - : "=m" (value) - : "m" (storage) - : "memory", "xmm4" - ); -#else - __asm__ __volatile__ - ( - "fildll %1\n\t" - "fistpll %0\n\t" - : "=m" (value) - : "m" (storage) - : "memory" - ); -#endif - } - else - { - // We don't care for comparison result here; the previous value will be stored into value anyway. 
- value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0); - } - - return value; - } - - static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT - { - storage_type old_expected = expected; - expected = __sync_val_compare_and_swap(&storage, old_expected, desired); - return expected == old_expected; - } - - static BOOST_FORCEINLINE bool compare_exchange_weak( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT - { - return compare_exchange_strong(storage, expected, desired, success_order, failure_order); - } - - static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT - { - return true; - } -}; - template< bool Signed > struct operations< 8u, Signed > : - public cas_based_operations< clang_dcas_x86< Signed > > + public cas_based_operations< gcc_dcas_x86< Signed > > { }; @@ -318,56 +204,9 @@ struct operations< 8u, Signed > : // Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149 // Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 -template< bool Signed > -struct clang_dcas_x86_64 -{ - typedef typename make_storage_type< 16u, Signed >::type storage_type; - - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT - { - uint64_t const* p_value = (uint64_t const*)&value; - __asm__ __volatile__ - ( - "movq 0(%[dest]), %%rax\n\t" - "movq 8(%[dest]), %%rdx\n\t" - ".align 16\n\t" - "1: lock; cmpxchg16b 0(%[dest])\n\t" - "jne 1b" - : - : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage) - : "memory", "cc", "rax", "rdx" - ); - } - - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT - { - storage_type value = storage_type(); - return __sync_val_compare_and_swap(&storage, value, value); - } - - static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT - { - storage_type old_expected = expected; - expected = __sync_val_compare_and_swap(&storage, old_expected, desired); - return expected == old_expected; - } - - static BOOST_FORCEINLINE bool compare_exchange_weak( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT - { - return compare_exchange_strong(storage, expected, desired, success_order, failure_order); - } - - static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT - { - return true; - } -}; - template< bool Signed > struct operations< 16u, Signed > : - public cas_based_operations< clang_dcas_x86_64< Signed > > + public cas_based_operations< gcc_dcas_x86_64< Signed > > { }; diff --git a/include/boost/atomic/detail/ops_gcc_x86.hpp b/include/boost/atomic/detail/ops_gcc_x86.hpp index acbe9bc..c3c87c5 100644 --- a/include/boost/atomic/detail/ops_gcc_x86.hpp +++ b/include/boost/atomic/detail/ops_gcc_x86.hpp @@ -22,7 +22,7 @@ #include #include #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) -#include +#include #include #endif @@ -369,164 +369,6 @@ struct operations< 4u, Signed > : #if 
defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) -template< bool Signed > -struct gcc_dcas_x86 -{ - typedef typename make_storage_type< 8u, Signed >::type storage_type; - - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT - { - if ((((uint32_t)&storage) & 0x00000007) == 0) - { -#if defined(__SSE2__) - __asm__ __volatile__ - ( -#if defined(__AVX__) - "vmovq %1, %%xmm4\n\t" - "vmovq %%xmm4, %0\n\t" -#else - "movq %1, %%xmm4\n\t" - "movq %%xmm4, %0\n\t" -#endif - : "=m" (storage) - : "m" (v) - : "memory", "xmm4" - ); -#else - __asm__ __volatile__ - ( - "fildll %1\n\t" - "fistpll %0\n\t" - : "=m" (storage) - : "m" (v) - : "memory" - ); -#endif - } - else - { - uint32_t scratch; - __asm__ __volatile__ - ( - "movl %%ebx, %[scratch]\n\t" - "movl %[value_lo], %%ebx\n\t" - "movl 0(%[dest]), %%eax\n\t" - "movl 4(%[dest]), %%edx\n\t" - ".align 16\n\t" - "1: lock; cmpxchg8b 0(%[dest])\n\t" - "jne 1b\n\t" - "movl %[scratch], %%ebx" - : [scratch] "=m,m" (scratch) - : [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) - : "cc", "edx", "memory" - ); - } - } - - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT - { - storage_type value; - - if ((((uint32_t)&storage) & 0x00000007) == 0) - { -#if defined(__SSE2__) - __asm__ __volatile__ - ( -#if defined(__AVX__) - "vmovq %1, %%xmm4\n\t" - "vmovq %%xmm4, %0\n\t" -#else - "movq %1, %%xmm4\n\t" - "movq %%xmm4, %0\n\t" -#endif - : "=m" (value) - : "m" (storage) - : "memory", "xmm4" - ); -#else - __asm__ __volatile__ - ( - "fildll %1\n\t" - "fistpll %0\n\t" - : "=m" (value) - : "m" (storage) - : "memory" - ); -#endif - } - else - { - // We don't care for comparison result here; the previous value will be stored into value anyway. - // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b. - __asm__ __volatile__ - ( - "movl %%ebx, %%eax\n\t" - "movl %%ecx, %%edx\n\t" - "lock; cmpxchg8b %[storage]" - : "=&A" (value) - : [storage] "m" (storage) - : "cc", "memory" - ); - } - - return value; - } - - static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT - { -#if defined(__PIC__) - // Make sure ebx is saved and restored properly in case - // of position independent code. To make this work - // setup register constraints such that ebx can not be - // used by accident e.g. as base address for the variable - // to be modified. Accessing "scratch" should always be okay, - // as it can only be placed on the stack (and therefore - // accessed through ebp or esp only). - // - // In theory, could push/pop ebx onto/off the stack, but movs - // to a prepared stack slot turn out to be faster. 
- - uint32_t scratch; - bool success; - __asm__ __volatile__ - ( - "movl %%ebx, %[scratch]\n\t" - "movl %[desired_lo], %%ebx\n\t" - "lock; cmpxchg8b %[dest]\n\t" - "movl %[scratch], %%ebx\n\t" - "sete %[success]" - : "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success) - : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32)) - : "cc", "memory" - ); - return success; -#else - bool success; - __asm__ __volatile__ - ( - "lock; cmpxchg8b %[dest]\n\t" - "sete %[success]" - : "+A,A" (expected), [dest] "+m,m" (storage), [scratch] "=m,m" (scratch), [success] "=q,m" (success) - : "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32)) - : "cc", "memory" - ); - return success; -#endif - } - - static BOOST_FORCEINLINE bool compare_exchange_weak( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT - { - return compare_exchange_strong(storage, expected, desired, success_order, failure_order); - } - - static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT - { - return true; - } -}; - template< bool Signed > struct operations< 8u, Signed > : public cas_based_operations< gcc_dcas_x86< Signed > > @@ -625,74 +467,6 @@ struct operations< 8u, Signed > : #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) -template< bool Signed > -struct gcc_dcas_x86_64 -{ - typedef typename make_storage_type< 16u, Signed >::type storage_type; - - static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT - { - uint64_t const* p_value = (uint64_t const*)&v; - __asm__ __volatile__ - ( - "movq 0(%[dest]), %%rax\n\t" - "movq 8(%[dest]), %%rdx\n\t" - ".align 16\n\t" - "1: lock; cmpxchg16b 0(%[dest])\n\t" - "jne 1b" - : - : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage) - : "cc", "rax", "rdx", "memory" - ); - } - - static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT - { - storage_type value; - - // We don't care for comparison result here; the previous value will be stored into value anyway. - // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b. 
- __asm__ __volatile__ - ( - "movq %%rbx, %%rax\n\t" - "movq %%rcx, %%rdx\n\t" - "lock; cmpxchg16b %[storage]" - : "=&A" (value) - : [storage] "m" (storage) - : "cc", "memory" - ); - - return value; - } - - static BOOST_FORCEINLINE bool compare_exchange_strong( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT - { - uint64_t const* p_desired = (uint64_t const*)&desired; - bool success; - __asm__ __volatile__ - ( - "lock; cmpxchg16b %[dest]\n\t" - "sete %[success]" - : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success) - : "b,b" (p_desired[0]), "c,c" (p_desired[1]) - : "cc", "memory" - ); - return success; - } - - static BOOST_FORCEINLINE bool compare_exchange_weak( - storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT - { - return compare_exchange_strong(storage, expected, desired, success_order, failure_order); - } - - static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT - { - return true; - } -}; - template< bool Signed > struct operations< 16u, Signed > : public cas_based_operations< gcc_dcas_x86_64< Signed > > diff --git a/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp new file mode 100644 index 0000000..4241a87 --- /dev/null +++ b/include/boost/atomic/detail/ops_gcc_x86_dcas.hpp @@ -0,0 +1,308 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2012 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_gcc_x86_dcas.hpp + * + * This header contains implementation of the double-width CAS primitive for x86. 
+ */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_ + +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) + +template< bool Signed > +struct gcc_dcas_x86 +{ + typedef typename make_storage_type< 8u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT + { + if ((((uint32_t)&storage) & 0x00000007) == 0) + { +#if defined(__SSE2__) + __asm__ __volatile__ + ( +#if defined(__AVX__) + "vmovq %1, %%xmm4\n\t" + "vmovq %%xmm4, %0\n\t" +#else + "movq %1, %%xmm4\n\t" + "movq %%xmm4, %0\n\t" +#endif + : "=m" (storage) + : "m" (v) + : "memory", "xmm4" + ); +#else + __asm__ __volatile__ + ( + "fildll %1\n\t" + "fistpll %0\n\t" + : "=m" (storage) + : "m" (v) + : "memory" + ); +#endif + } + else + { +#if defined(__PIC__) + uint32_t scratch; + __asm__ __volatile__ + ( + "movl %%ebx, %[scratch]\n\t" + "movl %[value_lo], %%ebx\n\t" + "movl 0(%[dest]), %%eax\n\t" + "movl 4(%[dest]), %%edx\n\t" + ".align 16\n\t" + "1: lock; cmpxchg8b 0(%[dest])\n\t" + "jne 1b\n\t" + "movl %[scratch], %%ebx" + : [scratch] "=m,m" (scratch) + : [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) + : "cc", "edx", "memory" + ); +#else + __asm__ __volatile__ + ( + "movl 0(%[dest]), %%eax\n\t" + "movl 4(%[dest]), %%edx\n\t" + ".align 16\n\t" + "1: lock; cmpxchg8b 0(%[dest])\n\t" + "jne 1b\n\t" + : + : [value_lo] "b,b" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage) + : "cc", "eax", "edx", "memory" + ); +#endif + } + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT + { + storage_type value; + + if ((((uint32_t)&storage) & 0x00000007) == 0) + { +#if defined(__SSE2__) + __asm__ __volatile__ + ( +#if defined(__AVX__) + "vmovq %1, %%xmm4\n\t" + "vmovq %%xmm4, %0\n\t" +#else + "movq %1, %%xmm4\n\t" + "movq %%xmm4, %0\n\t" +#endif + : "=m" (value) + : "m" (storage) + : "memory", "xmm4" + ); +#else + __asm__ __volatile__ + ( + "fildll %1\n\t" + "fistpll %0\n\t" + : "=m" (value) + : "m" (storage) + : "memory" + ); +#endif + } + else + { +#if defined(__clang__) + // Clang cannot allocate eax:edx register pairs but it has sync intrinsics + value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0); +#else + // We don't care for comparison result here; the previous value will be stored into value anyway. + // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b. + __asm__ __volatile__ + ( + "movl %%ebx, %%eax\n\t" + "movl %%ecx, %%edx\n\t" + "lock; cmpxchg8b %[storage]" + : "=&A" (value) + : [storage] "m" (storage) + : "cc", "memory" + ); +#endif + } + + return value; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { +#if defined(__clang__) + // Clang cannot allocate eax:edx register pairs but it has sync intrinsics + storage_type old_expected = expected; + expected = __sync_val_compare_and_swap(&storage, old_expected, desired); + return expected == old_expected; +#elif defined(__PIC__) + // Make sure ebx is saved and restored properly in case + // of position independent code. 
To make this work
+        // setup register constraints such that ebx cannot be
+        // used by accident e.g. as base address for the variable
+        // to be modified. Accessing "scratch" should always be okay,
+        // as it can only be placed on the stack (and therefore
+        // accessed through ebp or esp only).
+        //
+        // In theory, could push/pop ebx onto/off the stack, but movs
+        // to a prepared stack slot turn out to be faster.
+
+        uint32_t scratch;
+        bool success;
+        __asm__ __volatile__
+        (
+            "movl %%ebx, %[scratch]\n\t"
+            "movl %[desired_lo], %%ebx\n\t"
+            "lock; cmpxchg8b %[dest]\n\t"
+            "movl %[scratch], %%ebx\n\t"
+            "sete %[success]"
+            : "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
+            : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32))
+            : "cc", "memory"
+        );
+        return success;
+#else
+        bool success;
+        __asm__ __volatile__
+        (
+            "lock; cmpxchg8b %[dest]\n\t"
+            "sete %[success]"
+            : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
+            : "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32))
+            : "cc", "memory"
+        );
+        return success;
+#endif
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_weak(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
+
+#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+
+template< bool Signed >
+struct gcc_dcas_x86_64
+{
+    typedef typename make_storage_type< 16u, Signed >::type storage_type;
+
+    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+    {
+        uint64_t const* p_value = (uint64_t const*)&v;
+        __asm__ __volatile__
+        (
+            "movq 0(%[dest]), %%rax\n\t"
+            "movq 8(%[dest]), %%rdx\n\t"
+            ".align 16\n\t"
+            "1: lock; cmpxchg16b 0(%[dest])\n\t"
+            "jne 1b"
+            :
+            : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage)
+            : "cc", "rax", "rdx", "memory"
+        );
+    }
+
+    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
+    {
+#if defined(__clang__)
+        // Clang cannot allocate eax:edx register pairs but it has sync intrinsics
+        storage_type value = storage_type();
+        return __sync_val_compare_and_swap(&storage, value, value);
+#else
+        storage_type value;
+
+        // We don't care for comparison result here; the previous value will be stored into value anyway.
+        // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.
+ __asm__ __volatile__ + ( + "movq %%rbx, %%rax\n\t" + "movq %%rcx, %%rdx\n\t" + "lock; cmpxchg16b %[storage]" + : "=&A" (value) + : [storage] "m" (storage) + : "cc", "memory" + ); + + return value; +#endif + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT + { +#if defined(__clang__) + // Clang cannot allocate rax:rdx register pairs but it has sync intrinsics + storage_type old_expected = expected; + expected = __sync_val_compare_and_swap(&storage, old_expected, desired); + return expected == old_expected; +#else + uint64_t const* p_desired = (uint64_t const*)&desired; + bool success; + __asm__ __volatile__ + ( + "lock; cmpxchg16b %[dest]\n\t" + "sete %[success]" + : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success) + : "b,b" (p_desired[0]), "c,c" (p_desired[1]) + : "cc", "memory" + ); + return success; +#endif + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + return compare_exchange_strong(storage, expected, desired, success_order, failure_order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + +#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_DCAS_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index 50609ac..f60eec4 100644 --- a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -26,7 +26,6 @@ #define BOOST_ATOMIC_DETAIL_PLATFORM emulated #define BOOST_ATOMIC_EMULATED -// Intel compiler does not support __atomic* intrinsics properly, although defines them (tested with 13.0.1 and 13.1.1 on Linux) #elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407))\ || (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302)) From 5c032ccf3afe5cad68e662f6056f517fda888d01 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 20:21:41 +0400 Subject: [PATCH 19/23] Added gcc Alpha backend. Completely untested. --- .../boost/atomic/detail/caps_gcc_alpha.hpp | 34 + include/boost/atomic/detail/ops_gcc_alpha.hpp | 874 ++++++++++++++++++ include/boost/atomic/detail/ops_gcc_ppc.hpp | 55 +- include/boost/atomic/detail/platform.hpp | 8 +- 4 files changed, 941 insertions(+), 30 deletions(-) create mode 100644 include/boost/atomic/detail/caps_gcc_alpha.hpp create mode 100644 include/boost/atomic/detail/ops_gcc_alpha.hpp diff --git a/include/boost/atomic/detail/caps_gcc_alpha.hpp b/include/boost/atomic/detail/caps_gcc_alpha.hpp new file mode 100644 index 0000000..861432f --- /dev/null +++ b/include/boost/atomic/detail/caps_gcc_alpha.hpp @@ -0,0 +1,34 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! 
+ * \file atomic/detail/caps_gcc_alpha.hpp + * + * This header defines feature capabilities macros + */ + +#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_ + +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +#define BOOST_ATOMIC_INT8_LOCK_FREE 2 +#define BOOST_ATOMIC_INT16_LOCK_FREE 2 +#define BOOST_ATOMIC_INT32_LOCK_FREE 2 +#define BOOST_ATOMIC_INT64_LOCK_FREE 2 +#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 + +#define BOOST_ATOMIC_THREAD_FENCE 2 +#define BOOST_ATOMIC_SIGNAL_FENCE 2 + +#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ALPHA_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_alpha.hpp b/include/boost/atomic/detail/ops_gcc_alpha.hpp new file mode 100644 index 0000000..90742a3 --- /dev/null +++ b/include/boost/atomic/detail/ops_gcc_alpha.hpp @@ -0,0 +1,874 @@ +/* + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Copyright (c) 2009 Helge Bahmann + * Copyright (c) 2013 Tim Blechmann + * Copyright (c) 2014 Andrey Semashev + */ +/*! + * \file atomic/detail/ops_gcc_alpha.hpp + * + * This header contains implementation of the \c operations template. + */ + +#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_ +#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_ + +#include +#include +#include +#include +#include + +#ifdef BOOST_HAS_PRAGMA_ONCE +#pragma once +#endif + +namespace boost { +namespace atomics { +namespace detail { + +/* + Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html + (HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual. + */ + +/* + NB: The most natural thing would be to write the increment/decrement + operators along the following lines: + + __asm__ __volatile__ + ( + "1: ldl_l %0,%1 \n" + "addl %0,1,%0 \n" + "stl_c %0,%1 \n" + "beq %0,1b\n" + : "=&b" (tmp) + : "m" (value) + : "cc" + ); + + However according to the comments on the HP website and matching + comments in the Linux kernel sources this defies branch prediction, + as the cpu assumes that backward branches are always taken; so + instead copy the trick from the Linux kernel, introduce a forward + branch and back again. + + I have, however, had a hard time measuring the difference between + the two versions in microbenchmarks -- I am leaving it in nevertheless + as it apparently does not hurt either. 
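+
+  For reference, the transformed loop, as used by the operations below,
+  looks roughly like this (illustrative only; the real operations also
+  route the updated value through a second temporary register):
+
+    __asm__ __volatile__
+    (
+        "1: ldl_l %0,%1 \n"
+        "addl %0,1,%0 \n"
+        "stl_c %0,%1 \n"
+        "beq %0,2f\n"
+
+        ".subsection 2\n"
+        "2: br 1b\n"
+        ".previous\n"
+
+        : "=&b" (tmp)
+        : "m" (value)
+        : "cc"
+    );
+
+  The store-conditional failure path becomes a forward branch to an
+  out-of-line stub that jumps back to the retry label, so the common
+  (successful) path is predicted as a fall-through.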
+*/ + +struct gcc_alpha_operations_base +{ + static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT + { + if ((order & (memory_order_release | memory_order_consume)) != 0) + __asm__ __volatile__ ("mb" ::: "memory"); + } + + static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT + { + if ((order & memory_order_acquire) != 0) + __asm__ __volatile__ ("mb" ::: "memory"); + } + + static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT + { + if (order == memory_order_seq_cst) + __asm__ __volatile__ ("mb" ::: "memory"); + } +}; + + +template< bool Signed > +struct operations< 4u, Signed > : + public gcc_alpha_operations_base +{ + typedef typename make_storage_type< 4u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + storage = v; + fence_after_store(order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = storage; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "mov %3, %1\n" + "ldl_l %0, %2\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (tmp) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + fence_before(success_order); + int success; + storage_type current; + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %2, %4\n" // current = *(&storage) + "cmpeq %2, %0, %3\n" // success = current == expected + "mov %2, %0\n" // expected = current + "beq %3, 2f\n" // if (success == 0) goto end + "stl_c %1, %4\n" // storage = desired; desired = store succeeded + "mov %1, %3\n" // success = desired + "2:\n" + : "+&r" (expected), // %0 + "+&r" (desired), // %1 + "=&r" (current), // %2 + "=&r" (success) // %3 + : "m" (storage) // %4 + : + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + int success; + storage_type current, tmp; + fence_before(success_order); + __asm__ __volatile__ + ( + "1:\n" + "mov %5, %1\n" // tmp = desired + "ldl_l %2, %4\n" // current = *(&storage) + "cmpeq %2, %0, %3\n" // success = current == expected + "mov %2, %0\n" // expected = current + "beq %3, 2f\n" // if (success == 0) goto end + "stl_c %1, %4\n" // storage = tmp; tmp = store succeeded + "beq %1, 3f\n" // if (tmp == 0) goto retry + "mov %1, %3\n" // success = tmp + "2:\n" + + ".subsection 2\n" + "3: br 1b\n" + ".previous\n" + + : "+&r" (expected), // %0 + "=&r" (tmp), // %1 + "=&r" (current), // %2 + "=&r" (success) // %3 + : "m" (storage), // %4 + "r" (desired) // %5 + : + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE 
storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "addl %0, %3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "subl %0, %3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "and %0, %3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "bis %0, %3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "xor %0, %3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return !!exchange(storage, (storage_type)1, order); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, 0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + + +template< > +struct operations< 1u, false > : + public operations< 4u, false > +{ + typedef operations< 4u, false > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "addl %0, %3, %1\n" + "zapnot %1, #1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" 
(original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "subl %0, %3, %1\n" + "zapnot %1, #1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } +}; + +template< > +struct operations< 1u, true > : + public operations< 4u, true > +{ + typedef operations< 4u, true > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "addl %0, %3, %1\n" + "sextb %1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "subl %0, %3, %1\n" + "sextb %1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } +}; + + +template< > +struct operations< 2u, false > : + public operations< 4u, false > +{ + typedef operations< 4u, false > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "addl %0, %3, %1\n" + "zapnot %1, #3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "subl %0, %3, %1\n" + "zapnot %1, #3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } +}; + +template< > +struct operations< 2u, true > : + public operations< 4u, true > +{ + typedef operations< 4u, true > base_type; + typedef base_type::storage_type storage_type; + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + 
__asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "addl %0, %3, %1\n" + "sextw %1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldl_l %0, %2\n" + "subl %0, %3, %1\n" + "sextw %1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } +}; + + +template< bool Signed > +struct operations< 8u, Signed > : + public gcc_alpha_operations_base +{ + typedef typename make_storage_type< 8u, Signed >::type storage_type; + + static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + fence_before(order); + storage = v; + fence_after_store(order); + } + + static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT + { + storage_type v = storage; + fence_after(order); + return v; + } + + static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, tmp; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "mov %3, %1\n" + "ldq_l %0, %2\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (tmp) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool compare_exchange_weak( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + fence_before(success_order); + int success; + storage_type current; + __asm__ __volatile__ + ( + "1:\n" + "ldq_l %2, %4\n" // current = *(&storage) + "cmpeq %2, %0, %3\n" // success = current == expected + "mov %2, %0\n" // expected = current + "beq %3, 2f\n" // if (success == 0) goto end + "stq_c %1, %4\n" // storage = desired; desired = store succeeded + "mov %1, %3\n" // success = desired + "2:\n" + : "+&r" (expected), // %0 + "+&r" (desired), // %1 + "=&r" (current), // %2 + "=&r" (success) // %3 + : "m" (storage) // %4 + : + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE bool compare_exchange_strong( + storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT + { + int success; + storage_type current, tmp; + fence_before(success_order); + __asm__ __volatile__ + ( + "1:\n" + "mov %5, %1\n" // tmp = desired + "ldq_l %2, %4\n" // current = *(&storage) + "cmpeq %2, %0, %3\n" // success = current == expected + "mov %2, %0\n" // expected = current + "beq %3, 2f\n" // if (success == 0) goto end + "stq_c %1, %4\n" // storage = tmp; tmp = store succeeded + "beq %1, 3f\n" // if (tmp == 0) goto retry + "mov %1, %3\n" // success = tmp + "2:\n" + + ".subsection 2\n" + "3: br 1b\n" + ".previous\n" + + : "+&r" (expected), 
// %0 + "=&r" (tmp), // %1 + "=&r" (current), // %2 + "=&r" (success) // %3 + : "m" (storage), // %4 + "r" (desired) // %5 + : + ); + if (success) + fence_after(success_order); + else + fence_after(failure_order); + return !!success; + } + + static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldq_l %0, %2\n" + "addq %0, %3, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldq_l %0, %2\n" + "subq %0, %3, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldq_l %0, %2\n" + "and %0, %3, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldq_l %0, %2\n" + "bis %0, %3, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT + { + storage_type original, modified; + fence_before(order); + __asm__ __volatile__ + ( + "1:\n" + "ldq_l %0, %2\n" + "xor %0, %3, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), // %0 + "=&r" (modified) // %1 + : "m" (storage), // %2 + "r" (v) // %3 + : + ); + fence_after(order); + return original; + } + + static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + return !!exchange(storage, (storage_type)1, order); + } + + static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT + { + store(storage, 0, order); + } + + static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT + { + return true; + } +}; + + +BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT +{ + if (order != memory_order_relaxed) + __asm__ __volatile__ ("mb" ::: "memory"); +} + +BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT +{ + if ((order & ~memory_order_consume) != 0) + __asm__ __volatile__ ("" ::: 
"memory"); +} + +} // namespace detail +} // namespace atomics +} // namespace boost + +#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_ diff --git a/include/boost/atomic/detail/ops_gcc_ppc.hpp b/include/boost/atomic/detail/ops_gcc_ppc.hpp index 278c7ed..68df536 100644 --- a/include/boost/atomic/detail/ops_gcc_ppc.hpp +++ b/include/boost/atomic/detail/ops_gcc_ppc.hpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #ifdef BOOST_HAS_PRAGMA_ONCE @@ -116,7 +115,7 @@ struct operations< 4u, Signed > : __asm__ __volatile__ ( "stw %1, %0\n" - : "+m"(storage) + : "+m" (storage) : "r" (v) ); fence_after_store(order); @@ -149,7 +148,7 @@ struct operations< 4u, Signed > : "lwarx %0,%y1\n" "stwcx. %2,%y1\n" "bne- 1b\n" - : "=&b" (original), "+Z"(storage) + : "=&b" (original), "+Z" (storage) : "b" (v) : "cr0" ); @@ -172,7 +171,7 @@ struct operations< 4u, Signed > : "bne- 1f\n" "li %1, 1\n" "1:" - : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "=&b" (expected), "=&b" (success), "+Z" (storage) : "b" (expected), "b" (desired) : "cr0" ); @@ -198,7 +197,7 @@ struct operations< 4u, Signed > : "bne- 0b\n" "li %1, 1\n" "1:" - : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "=&b" (expected), "=&b" (success), "+Z" (storage) : "b" (expected), "b" (desired) : "cr0" ); @@ -220,7 +219,7 @@ struct operations< 4u, Signed > : "add %1,%0,%3\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -239,7 +238,7 @@ struct operations< 4u, Signed > : "sub %1,%0,%3\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -258,7 +257,7 @@ struct operations< 4u, Signed > : "and %1,%0,%3\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -277,7 +276,7 @@ struct operations< 4u, Signed > : "or %1,%0,%3\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -296,7 +295,7 @@ struct operations< 4u, Signed > : "xor %1,%0,%3\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -340,7 +339,7 @@ struct operations< 1u, false > : "rlwinm %1, %1, 0, 0xff\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -360,7 +359,7 @@ struct operations< 1u, false > : "rlwinm %1, %1, 0, 0xff\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -388,7 +387,7 @@ struct operations< 1u, true > : "extsb %1, %1\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -408,7 +407,7 @@ struct operations< 1u, true > : "extsb %1, %1\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -437,7 +436,7 @@ struct operations< 2u, false > : "rlwinm %1, %1, 0, 0xffff\n" "stwcx. 
%1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -457,7 +456,7 @@ struct operations< 2u, false > : "rlwinm %1, %1, 0, 0xffff\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -485,7 +484,7 @@ struct operations< 2u, true > : "extsh %1, %1\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -505,7 +504,7 @@ struct operations< 2u, true > : "extsh %1, %1\n" "stwcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -529,7 +528,7 @@ struct operations< 8u, Signed > : __asm__ __volatile__ ( "std %1, %0\n" - : "+m"(storage) + : "+m" (storage) : "r" (v) ); fence_after_store(order); @@ -544,7 +543,7 @@ struct operations< 8u, Signed > : "cmpd %0, %0\n" "bne- 1f\n" "1:\n" - : "=&b"(v) + : "=&b" (v) : "m" (storage) : "cr0" ); @@ -562,7 +561,7 @@ struct operations< 8u, Signed > : "ldarx %0,%y1\n" "stdcx. %2,%y1\n" "bne- 1b\n" - : "=&b" (original), "+Z"(storage) + : "=&b" (original), "+Z" (storage) : "b" (v) : "cr0" ); @@ -585,7 +584,7 @@ struct operations< 8u, Signed > : "bne- 1f\n" "li %1, 1\n" "1:" - : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "=&b" (expected), "=&b" (success), "+Z" (storage) : "b" (expected), "b" (desired) : "cr0" ); @@ -611,7 +610,7 @@ struct operations< 8u, Signed > : "bne- 0b\n" "li %1, 1\n" "1:" - : "=&b" (expected), "=&b" (success), "+Z"(storage) + : "=&b" (expected), "=&b" (success), "+Z" (storage) : "b" (expected), "b" (desired) : "cr0" ); @@ -633,7 +632,7 @@ struct operations< 8u, Signed > : "add %1,%0,%3\n" "stdcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -652,7 +651,7 @@ struct operations< 8u, Signed > : "sub %1,%0,%3\n" "stdcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -671,7 +670,7 @@ struct operations< 8u, Signed > : "and %1,%0,%3\n" "stdcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -690,7 +689,7 @@ struct operations< 8u, Signed > : "or %1,%0,%3\n" "stdcx. %1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); @@ -709,7 +708,7 @@ struct operations< 8u, Signed > : "xor %1,%0,%3\n" "stdcx. 
%1,%y2\n" "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(storage) + : "=&b" (original), "=&b" (tmp), "+Z" (storage) : "b" (v) : "cc" ); diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp index f60eec4..d0423d2 100644 --- a/include/boost/atomic/detail/platform.hpp +++ b/include/boost/atomic/detail/platform.hpp @@ -25,12 +25,12 @@ #define BOOST_ATOMIC_DETAIL_PLATFORM emulated #define BOOST_ATOMIC_EMULATED - +/* #elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407))\ || (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302)) #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic - +*/ #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86 @@ -57,6 +57,10 @@ #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc +#elif defined(__GNUC__) && defined(__alpha__) + +#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha + #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync From 3a81a36bbef67d004a60614adca7ef8b72dab6cb Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 20:28:12 +0400 Subject: [PATCH 20/23] Removed no longer used files. --- include/boost/atomic/detail/base.hpp | 605 ---- include/boost/atomic/detail/cas128strong.hpp | 290 -- include/boost/atomic/detail/cas32strong.hpp | 939 ----- include/boost/atomic/detail/cas32weak.hpp | 1017 ------ .../boost/atomic/detail/cas64strong-ptr.hpp | 247 -- include/boost/atomic/detail/cas64strong.hpp | 264 -- include/boost/atomic/detail/gcc-alpha.hpp | 368 -- include/boost/atomic/detail/gcc-armv6plus.hpp | 255 -- include/boost/atomic/detail/gcc-atomic.hpp | 1367 -------- include/boost/atomic/detail/gcc-cas.hpp | 163 - include/boost/atomic/detail/gcc-ppc.hpp | 3046 ----------------- include/boost/atomic/detail/gcc-sparcv9.hpp | 1335 -------- include/boost/atomic/detail/gcc-x86.hpp | 2021 ----------- include/boost/atomic/detail/generic-cas.hpp | 206 -- include/boost/atomic/detail/linux-arm.hpp | 192 -- include/boost/atomic/detail/windows.hpp | 1789 ---------- 16 files changed, 14104 deletions(-) delete mode 100644 include/boost/atomic/detail/base.hpp delete mode 100644 include/boost/atomic/detail/cas128strong.hpp delete mode 100644 include/boost/atomic/detail/cas32strong.hpp delete mode 100644 include/boost/atomic/detail/cas32weak.hpp delete mode 100644 include/boost/atomic/detail/cas64strong-ptr.hpp delete mode 100644 include/boost/atomic/detail/cas64strong.hpp delete mode 100644 include/boost/atomic/detail/gcc-alpha.hpp delete mode 100644 include/boost/atomic/detail/gcc-armv6plus.hpp delete mode 100644 include/boost/atomic/detail/gcc-atomic.hpp delete mode 100644 include/boost/atomic/detail/gcc-cas.hpp delete mode 100644 include/boost/atomic/detail/gcc-ppc.hpp delete mode 100644 include/boost/atomic/detail/gcc-sparcv9.hpp delete mode 100644 include/boost/atomic/detail/gcc-x86.hpp delete mode 100644 include/boost/atomic/detail/generic-cas.hpp delete mode 100644 include/boost/atomic/detail/linux-arm.hpp delete mode 100644 include/boost/atomic/detail/windows.hpp diff --git a/include/boost/atomic/detail/base.hpp b/include/boost/atomic/detail/base.hpp deleted file mode 100644 index eb105b9..0000000 --- a/include/boost/atomic/detail/base.hpp +++ /dev/null @@ -1,605 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_BASE_HPP -#define BOOST_ATOMIC_DETAIL_BASE_HPP - -// Copyright (c) 2009 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann -// -// Distributed under the 
Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -// Base class definition and fallback implementation. -// To be overridden (through partial specialization) by -// platform implementations. - -#include - -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -#define BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ - bool \ - compare_exchange_strong( \ - value_type & expected, \ - value_type desired, \ - memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT \ - { \ - return compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); \ - } \ - \ - bool \ - compare_exchange_weak( \ - value_type & expected, \ - value_type desired, \ - memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT \ - { \ - return compare_exchange_weak(expected, desired, order, calculate_failure_order(order)); \ - } \ - -#define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \ - value_type \ - operator++(int) volatile BOOST_NOEXCEPT \ - { \ - return fetch_add(1); \ - } \ - \ - value_type \ - operator++(void) volatile BOOST_NOEXCEPT \ - { \ - return fetch_add(1) + 1; \ - } \ - \ - value_type \ - operator--(int) volatile BOOST_NOEXCEPT \ - { \ - return fetch_sub(1); \ - } \ - \ - value_type \ - operator--(void) volatile BOOST_NOEXCEPT \ - { \ - return fetch_sub(1) - 1; \ - } \ - \ - value_type \ - operator+=(difference_type v) volatile BOOST_NOEXCEPT \ - { \ - return fetch_add(v) + v; \ - } \ - \ - value_type \ - operator-=(difference_type v) volatile BOOST_NOEXCEPT \ - { \ - return fetch_sub(v) - v; \ - } \ - -#define BOOST_ATOMIC_DECLARE_VOID_POINTER_ADDITIVE_OPERATORS \ - value_type \ - operator++(int) volatile BOOST_NOEXCEPT \ - { \ - return fetch_add(1); \ - } \ - \ - value_type \ - operator++(void) volatile BOOST_NOEXCEPT \ - { \ - return (char*)fetch_add(1) + 1; \ - } \ - \ - value_type \ - operator--(int) volatile BOOST_NOEXCEPT \ - { \ - return fetch_sub(1); \ - } \ - \ - value_type \ - operator--(void) volatile BOOST_NOEXCEPT \ - { \ - return (char*)fetch_sub(1) - 1; \ - } \ - \ - value_type \ - operator+=(difference_type v) volatile BOOST_NOEXCEPT \ - { \ - return (char*)fetch_add(v) + v; \ - } \ - \ - value_type \ - operator-=(difference_type v) volatile BOOST_NOEXCEPT \ - { \ - return (char*)fetch_sub(v) - v; \ - } \ - -#define BOOST_ATOMIC_DECLARE_BIT_OPERATORS \ - value_type \ - operator&=(difference_type v) volatile BOOST_NOEXCEPT \ - { \ - return fetch_and(v) & v; \ - } \ - \ - value_type \ - operator|=(difference_type v) volatile BOOST_NOEXCEPT \ - { \ - return fetch_or(v) | v; \ - } \ - \ - value_type \ - operator^=(difference_type v) volatile BOOST_NOEXCEPT\ - { \ - return fetch_xor(v) ^ v; \ - } \ - -#define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS \ - BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ - BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \ - -#define BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS \ - BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ - BOOST_ATOMIC_DECLARE_VOID_POINTER_ADDITIVE_OPERATORS \ - -#define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS \ - BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ - BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \ - BOOST_ATOMIC_DECLARE_BIT_OPERATORS \ - -namespace boost { -namespace atomics { -namespace detail { - -inline memory_order -calculate_failure_order(memory_order order) -{ - switch(order) - { - case memory_order_acq_rel: - return memory_order_acquire; - case memory_order_release: - return memory_order_relaxed; - default: - return 
order; - } -} - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef lockpool::scoped_lock guard_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(v) - {} - - void - store(value_type const& v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - char * storage = storage_ptr(); - guard_type guard(storage); - - memcpy(storage, &v, sizeof(value_type)); - } - - value_type - load(memory_order /*order*/ = memory_order_seq_cst) volatile const BOOST_NOEXCEPT - { - char * storage = storage_ptr(); - guard_type guard(storage); - - value_type v; - memcpy(&v, storage, sizeof(value_type)); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order /*success_order*/, - memory_order /*failure_order*/) volatile BOOST_NOEXCEPT - { - char * storage = storage_ptr(); - guard_type guard(storage); - - if (memcmp(storage, &expected, sizeof(value_type)) == 0) { - memcpy(storage, &desired, sizeof(value_type)); - return true; - } else { - memcpy(&expected, storage, sizeof(value_type)); - return false; - } - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - exchange(value_type const& v, memory_order /*order*/=memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - char * storage = storage_ptr(); - guard_type guard(storage); - - value_type tmp; - memcpy(&tmp, storage, sizeof(value_type)); - - memcpy(storage, &v, sizeof(value_type)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return false; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - char * storage_ptr() volatile const BOOST_NOEXCEPT - { - return const_cast(&reinterpret_cast(v_)); - } - - T v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef lockpool::scoped_lock guard_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - v_ = v; - } - - value_type - load(memory_order /*order*/ = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type v = const_cast(v_); - return v; - } - - value_type - exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ = v; - return old; - } - - bool - compare_exchange_strong(value_type & expected, value_type desired, - memory_order /*success_order*/, - memory_order /*failure_order*/) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - if (v_ == expected) { - v_ = desired; - return true; - } else { - expected = v_; - return false; - } - } - - bool - compare_exchange_weak(value_type & expected, 
value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ += v; - return old; - } - - value_type - fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ -= v; - return old; - } - - value_type - fetch_and(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ &= v; - return old; - } - - value_type - fetch_or(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ |= v; - return old; - } - - value_type - fetch_xor(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ ^= v; - return old; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return false; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - typedef lockpool::scoped_lock guard_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - v_ = v; - } - - value_type - load(memory_order /*order*/ = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type v = const_cast(v_); - return v; - } - - value_type - exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ = v; - return old; - } - - bool - compare_exchange_strong(value_type & expected, value_type desired, - memory_order /*success_order*/, - memory_order /*failure_order*/) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - if (v_ == expected) { - v_ = desired; - return true; - } else { - expected = v_; - return false; - } - } - - bool - compare_exchange_weak(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ += v; - return old; - } - - value_type fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ -= v; - return old; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return false; - } - - 
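// [Editor's note] A minimal standalone sketch of the fallback idea removed in
// this hunk: every operation is serialized through a lock keyed on the object's
// address, so the memory_order arguments can be ignored (the lock already gives
// full ordering), and the two-argument compare_exchange overloads derive the
// failure order via calculate_failure_order (acq_rel -> acquire, release ->
// relaxed), matching the C++11 rule that the failure order may not be stronger
// than the success order. The pool size and hash below are assumptions for
// illustration, not Boost.Atomic's actual lockpool:
#include <cstddef>
#include <cstring>
#include <mutex>

static std::mutex g_lock_pool[41]; // hypothetical pool size

static std::mutex& lock_for(const volatile void* addr)
{
    // Hash the address into the pool, as a lock pool would.
    return g_lock_pool[reinterpret_cast<std::size_t>(addr) % 41];
}

template<typename T>
bool locked_compare_exchange(volatile T& storage, T& expected, T desired)
{
    std::lock_guard<std::mutex> guard(lock_for(&storage));
    T current;
    std::memcpy(&current, const_cast<const T*>(&storage), sizeof(T));
    if (std::memcmp(&current, &expected, sizeof(T)) == 0)
    {
        std::memcpy(const_cast<T*>(&storage), &desired, sizeof(T));
        return true;  // stored value matched: swap performed
    }
    std::memcpy(&expected, &current, sizeof(T)); // report observed value
    return false;
}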
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void * value_type; - typedef lockpool::scoped_lock guard_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - v_ = v; - } - - value_type - load(memory_order /*order*/ = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type v = const_cast(v_); - return v; - } - - value_type - exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - v_ = v; - return old; - } - - bool - compare_exchange_strong(value_type & expected, value_type desired, - memory_order /*success_order*/, - memory_order /*failure_order*/) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - if (v_ == expected) { - v_ = desired; - return true; - } else { - expected = v_; - return false; - } - } - - bool - compare_exchange_weak(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return false; - } - - value_type fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - char * cv = reinterpret_cast(old); - cv += v; - v_ = cv; - return old; - } - - value_type fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile - { - guard_type guard(const_cast(&v_)); - - value_type old = v_; - char * cv = reinterpret_cast(old); - cv -= v; - v_ = cv; - return old; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/cas128strong.hpp b/include/boost/atomic/detail/cas128strong.hpp deleted file mode 100644 index dcb4d7d..0000000 --- a/include/boost/atomic/detail/cas128strong.hpp +++ /dev/null @@ -1,290 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_CAS128STRONG_HPP -#define BOOST_ATOMIC_DETAIL_CAS128STRONG_HPP - -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann, Andrey Semashev - -// Build 128-bit atomic operation on integers/UDTs from platform_cmpxchg128_strong -// primitive. It is assumed that 128-bit loads/stores are not -// atomic, so they are implemented through platform_load128/platform_store128. 
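// [Editor's note] Every read-modify-write operation in this deleted header is
// a retry loop over the strong 128-bit CAS primitive. A self-contained sketch
// of that shape; cmpxchg128_strong below is a mutex-based stand-in for the
// platform_cmpxchg128_strong primitive named above (the real one is lock-free):
#include <cstdint>
#include <mutex>

struct u128 { std::uint64_t lo, hi; };

static std::mutex g_u128_lock; // stand-in only

// Atomically replaces *p with desired if *p == expected; otherwise loads *p
// into expected. Returns whether the swap happened.
inline bool cmpxchg128_strong(u128& expected, u128 desired, volatile u128* p)
{
    std::lock_guard<std::mutex> guard(g_u128_lock);
    u128* q = const_cast<u128*>(p);
    if (q->lo == expected.lo && q->hi == expected.hi)
    {
        *q = desired;
        return true;
    }
    expected = *q;
    return false;
}

// The exchange/fetch_add/fetch_and/... loops all share this shape:
inline u128 fetch_add_low64(volatile u128* p, std::uint64_t v)
{
    u128 original = *const_cast<u128*>(p); // initial read may be stale;
    u128 desired;                          // the CAS below validates it
    do
    {
        desired = original;
        desired.lo += v; // carry into 'hi' omitted in this sketch
    }
    while (!cmpxchg128_strong(original, desired, p));
    return original;    // fetch_* returns the pre-operation value
}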
- -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -/* integral types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - platform_store128(v, &v_); - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = platform_load128(&v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg128_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - 
value_type v_; -}; - -/* generic types */ - -#if defined(BOOST_HAS_INT128) - -typedef boost::uint128_type storage128_type; - -#else // defined(BOOST_HAS_INT128) - -struct BOOST_ALIGNMENT(16) storage128_type -{ - uint64_t data[2]; -}; - -inline bool operator== (storage128_type const& left, storage128_type const& right) -{ - return left.data[0] == right.data[0] && left.data[1] == right.data[1]; -} -inline bool operator!= (storage128_type const& left, storage128_type const& right) -{ - return !(left == right); -} - -#endif // defined(BOOST_HAS_INT128) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef storage128_type storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT - { - memset(&v_, 0, sizeof(v_)); - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& value, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type value_s; - memset(&value_s, 0, sizeof(value_s)); - memcpy(&value_s, &value, sizeof(value_type)); - platform_fence_before_store(order); - platform_store128(value_s, &v_); - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type value_s = platform_load128(&v_); - platform_fence_after_load(order); - value_type value; - memcpy(&value, &value_s, sizeof(value_type)); - return value; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memset(&expected_s, 0, sizeof(expected_s)); - memset(&desired_s, 0, sizeof(desired_s)); - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - bool success = platform_cmpxchg128_strong(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/cas32strong.hpp b/include/boost/atomic/detail/cas32strong.hpp deleted file mode 100644 index 7a35d45..0000000 --- a/include/boost/atomic/detail/cas32strong.hpp +++ /dev/null @@ -1,939 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP -#define BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP - -// Distributed under the Boost Software License, Version 1.0. 
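// [Editor's note] When a small object is CASed through a wider storage word
// (storage128_type above, uint32_t in the header below), every copy is
// zero-filled before the payload is copied in, so the bits beyond
// sizeof(value_type) always compare equal; otherwise a bitwise CAS could fail
// spuriously on garbage padding. A minimal illustration of that conversion:
#include <cstdint>
#include <cstring>

struct rgb { unsigned char r, g, b; }; // 3 bytes, held in a 4-byte word

inline std::uint32_t to_storage(rgb v)
{
    std::uint32_t s = 0;              // zero the padding byte first
    std::memcpy(&s, &v, sizeof(rgb)); // then copy the payload
    return s;
}

inline rgb from_storage(std::uint32_t s)
{
    rgb v;
    std::memcpy(&v, &s, sizeof(rgb));
    return v;
}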
-// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann - - -// Build 8-, 16- and 32-bit atomic operations from -// a platform_cmpxchg32_strong primitive. - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -/* integral types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef uint32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - storage_type expected_s = (storage_type) expected; - storage_type desired_s = (storage_type) desired; - - bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - expected = (value_type) expected_s; - } - - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type 
original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef uint32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - storage_type expected_s = (storage_type) expected; - storage_type desired_s = (storage_type) desired; - - bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - expected = (value_type) expected_s; - } - - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { 
- } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg32_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - 
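// [Editor's note] The 8- and 16-bit specializations above widen the value to a
// 32-bit storage word so the platform's 32-bit CAS can be reused. The store and
// the CAS must widen the same way (plain integral conversion here) so the
// in-memory representation always matches what the comparison expects. A tiny
// runnable check of the round-trip a failed CAS performs:
#include <cstdint>
#include <cassert>

int main()
{
    typedef std::int8_t   value_type;
    typedef std::uint32_t storage_type;

    value_type v = -5;
    storage_type s = (storage_type)v; // widened exactly as store() does
    value_type back = (value_type)s; // narrowed as a failed CAS does
    assert(back == v);               // modular conversion preserves the value
    return 0;
}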
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -/* pointer types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef void * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg32_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, (char*)original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, (char*)original - v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = 
load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg32_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -/* generic types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_); - - if 
(success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile - { - storage_type tmp = const_cast(v_); - 
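// [Editor's note] store()/load() in these classes bracket a plain volatile
// access with platform fences. Expressed with C++11 fences as a rough analogue
// of that pattern (an illustration of the idea, not the library's actual
// per-platform mapping; a volatile access plus fence is not a portable atomic):
#include <atomic>
#include <cstdint>

inline void store_release(volatile std::uint32_t* p, std::uint32_t v)
{
    std::atomic_thread_fence(std::memory_order_release); // fence_before_store
    *p = v;                                              // plain store
    // seq_cst would additionally need a trailing fence (fence_after_store)
}

inline std::uint32_t load_acquire(const volatile std::uint32_t* p)
{
    std::uint32_t v = *p;                                // plain load
    std::atomic_thread_fence(std::memory_order_acquire); // fence_after_load
    return v;
}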
platform_fence_after_load(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - is_lock_free(void) const volatile - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/cas32weak.hpp b/include/boost/atomic/detail/cas32weak.hpp deleted file mode 100644 index d75215d..0000000 --- a/include/boost/atomic/detail/cas32weak.hpp +++ /dev/null @@ -1,1017 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_CAS32WEAK_HPP -#define BOOST_ATOMIC_DETAIL_CAS32WEAK_HPP - -// Distributed under the Boost Software License, Version 1.0. 
-// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann - - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -/* integral types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef uint32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - storage_type expected_s = (storage_type) expected; - storage_type desired_s = (storage_type) desired; - - bool success = platform_cmpxchg32(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - expected = (value_type) expected_s; - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile 
BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef uint32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - storage_type expected_s = (storage_type) expected; - storage_type desired_s = (storage_type) desired; - - bool success = platform_cmpxchg32(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - expected = (value_type) expected_s; - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return 
original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg32(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = 
memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -/* pointer types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef void * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg32(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, (char*)original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, (char*)original - v, order, memory_order_relaxed)); - return original; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - 
store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg32(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -/* generic types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - 
value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - - bool success = platform_cmpxchg32(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - - bool success = platform_cmpxchg32(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - 
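// [Editor's note] The defining trick of this deleted cas32weak header is the
// loop above that builds compare_exchange_strong from the weak primitive:
// retry while the weak CAS fails spuriously (failure reported but the observed
// value still equals 'expected'), and report a real failure as soon as the
// observed value differs. The generic shape, usable with e.g. std::atomic<T>:
template<typename Atomic, typename T>
bool strong_from_weak(Atomic& a, T& expected, T desired)
{
    while (true)
    {
        T tmp = expected;
        if (a.compare_exchange_weak(tmp, desired))
            return true;      // swap happened
        if (tmp != expected)
        {
            expected = tmp;   // genuine mismatch: report observed value
            return false;
        }
        // tmp == expected: spurious failure, retry
    }
}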
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - - bool success = platform_cmpxchg32(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - while (true) - { - value_type tmp = expected; - if (compare_exchange_weak(tmp, desired, success_order, failure_order)) - return true; - if (tmp != expected) - { - expected = tmp; - return false; - } - } - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/cas64strong-ptr.hpp b/include/boost/atomic/detail/cas64strong-ptr.hpp deleted file mode 100644 index 2f04112..0000000 --- a/include/boost/atomic/detail/cas64strong-ptr.hpp +++ /dev/null @@ -1,247 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_CAS64STRONG_PTR_HPP -#define BOOST_ATOMIC_DETAIL_CAS64STRONG_PTR_HPP - -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann - -// Build 64-bit atomic operation on pointers from platform_cmpxchg64_strong -// primitive. It is assumed that 64-bit loads/stores are not -// atomic, so they are implemented through platform_load64/platform_store64. 
-// -// The reason for extracting pointer specializations to a separate header is -// that 64-bit CAS is available on some 32-bit platforms (notably, x86). -// On these platforms there is no need for 64-bit pointer specializations, -// since they will never be used. - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -/* pointer types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef void * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - platform_store64(v, &v_); - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = platform_load64(&v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg64_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, (char*)original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, (char*)original - v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - platform_store64(v, &v_); - 
platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = platform_load64(&v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg64_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/cas64strong.hpp b/include/boost/atomic/detail/cas64strong.hpp deleted file mode 100644 index c283f98..0000000 --- a/include/boost/atomic/detail/cas64strong.hpp +++ /dev/null @@ -1,264 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP -#define BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP - -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2011 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann - -// Build 64-bit atomic operation on integers/UDTs from platform_cmpxchg64_strong -// primitive. It is assumed that 64-bit loads/stores are not -// atomic, so they are implemented through platform_load64/platform_store64. 
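The header comment above notes that plain 64-bit loads and stores are assumed non-atomic and are routed through platform_load64/platform_store64. One common way to provide those helpers on such targets is to derive them from the same strong CAS; the sketch below shows that derivation using GCC's __sync_val_compare_and_swap builtin in place of platform_cmpxchg64_strong. It is illustrative rather than a copy of the actual platform headers, and it requires a target with a native 8-byte CAS (for example cmpxchg8b on 32-bit x86).

#include <cstdint>

// Atomic 64-bit read built from a strong CAS alone. CASing 0 -> 0 either
// succeeds (the value really was 0, memory is unchanged) or fails and
// returns the value currently in memory; either way the read is atomic.
inline std::uint64_t load64(volatile std::uint64_t* p)
{
    return __sync_val_compare_and_swap(p, std::uint64_t(0), std::uint64_t(0));
}

// Atomic 64-bit write: retry the CAS until the exchange takes effect.
inline void store64(volatile std::uint64_t* p, std::uint64_t v)
{
    std::uint64_t old = *p; // possibly torn read; only used as the first guess
    while (true)
    {
        const std::uint64_t seen = __sync_val_compare_and_swap(p, old, v);
        if (seen == old)
            break;  // the exchange took effect
        old = seen; // lost the race; retry with the freshly observed value
    }
}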
- -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -/* integral types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before_store(order); - platform_store64(v, &v_); - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = platform_load64(&v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - - bool success = platform_cmpxchg64_strong(expected, desired, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - } - - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - 
value_type v_; -}; - -/* generic types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint64_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& value, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type value_s = 0; - memcpy(&value_s, &value, sizeof(value_type)); - platform_fence_before_store(order); - platform_store64(value_s, &v_); - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type value_s = platform_load64(&v_); - platform_fence_after_load(order); - value_type value; - memcpy(&value, &value_s, sizeof(value_type)); - return value; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original = load(memory_order_relaxed); - do { - } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - platform_fence_before(success_order); - bool success = platform_cmpxchg64_strong(expected_s, desired_s, &v_); - - if (success) { - platform_fence_after(success_order); - } else { - platform_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - } - - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/gcc-alpha.hpp b/include/boost/atomic/detail/gcc-alpha.hpp deleted file mode 100644 index 2775499..0000000 --- a/include/boost/atomic/detail/gcc-alpha.hpp +++ /dev/null @@ -1,368 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP -#define BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP - -// Copyright (c) 2009 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -/* - Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html - (HP OpenVMS systems documentation) and the alpha reference manual. 
- */ - -/* - NB: The most natural thing would be to write the increment/decrement - operators along the following lines: - - __asm__ __volatile__( - "1: ldl_l %0,%1 \n" - "addl %0,1,%0 \n" - "stl_c %0,%1 \n" - "beq %0,1b\n" - : "=&b" (tmp) - : "m" (value) - : "cc" - ); - - However according to the comments on the HP website and matching - comments in the Linux kernel sources this defies branch prediction, - as the cpu assumes that backward branches are always taken; so - instead copy the trick from the Linux kernel, introduce a forward - branch and back again. - - I have, however, had a hard time measuring the difference between - the two versions in microbenchmarks -- I am leaving it in nevertheless - as it apparently does not hurt either. -*/ - -namespace boost { -namespace atomics { -namespace detail { - -inline void fence_before(memory_order order) -{ - switch(order) { - case memory_order_consume: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("mb" ::: "memory"); - default:; - } -} - -inline void fence_after(memory_order order) -{ - switch(order) { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("mb" ::: "memory"); - default:; - } -} - -template<> -inline void platform_atomic_thread_fence(memory_order order) -{ - switch(order) { - case memory_order_acquire: - case memory_order_consume: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("mb" ::: "memory"); - default:; - } -} - -template -class atomic_alpha_32 -{ -public: - typedef T integral_type; - BOOST_CONSTEXPR atomic_alpha_32(T v) BOOST_NOEXCEPT: i(v) {} - atomic_alpha_32() {} - T load(memory_order order=memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - T v=*reinterpret_cast(&i); - fence_after(order); - return v; - } - void store(T v, memory_order order=memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - fence_before(order); - *reinterpret_cast(&i)=(int)v; - } - bool compare_exchange_weak( - T &expected, - T desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - fence_before(success_order); - int current, success; - __asm__ __volatile__( - "1: ldl_l %2, %4\n" - "cmpeq %2, %0, %3\n" - "mov %2, %0\n" - "beq %3, 3f\n" - "stl_c %1, %4\n" - "2:\n" - - ".subsection 2\n" - "3: mov %3, %1\n" - "br 2b\n" - ".previous\n" - - : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success) - : "m" (i) - : - ); - if (desired) fence_after(success_order); - else fence_after(failure_order); - return desired; - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT {return true;} -protected: - inline T fetch_add_var(T c, memory_order order) volatile BOOST_NOEXCEPT - { - fence_before(order); - T original, modified; - __asm__ __volatile__( - "1: ldl_l %0, %2\n" - "addl %0, %3, %1\n" - "stl_c %1, %2\n" - "beq %1, 2f\n" - - ".subsection 2\n" - "2: br 1b\n" - ".previous\n" - - : "=&r" (original), "=&r" (modified) - : "m" (i), "r" (c) - : - ); - fence_after(order); - return original; - } - inline T fetch_inc(memory_order order) volatile BOOST_NOEXCEPT - { - fence_before(order); - int original, modified; - __asm__ __volatile__( - "1: ldl_l %0, %2\n" - "addl %0, 1, %1\n" - "stl_c %1, %2\n" - "beq %1, 2f\n" - - ".subsection 2\n" - "2: br 1b\n" - ".previous\n" - - : "=&r" (original), "=&r" (modified) - : "m" (i) - : - ); - fence_after(order); - return original; - } - inline T fetch_dec(memory_order order) volatile 
BOOST_NOEXCEPT - { - fence_before(order); - int original, modified; - __asm__ __volatile__( - "1: ldl_l %0, %2\n" - "subl %0, 1, %1\n" - "stl_c %1, %2\n" - "beq %1, 2f\n" - - ".subsection 2\n" - "2: br 1b\n" - ".previous\n" - - : "=&r" (original), "=&r" (modified) - : "m" (i) - : - ); - fence_after(order); - return original; - } -private: - T i; -}; - -template -class atomic_alpha_64 -{ -public: - typedef T integral_type; - BOOST_CONSTEXPR atomic_alpha_64(T v) BOOST_NOEXCEPT: i(v) {} - atomic_alpha_64() {} - T load(memory_order order=memory_order_seq_cst) const volatile - { - T v=*reinterpret_cast(&i); - fence_after(order); - return v; - } - void store(T v, memory_order order=memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - fence_before(order); - *reinterpret_cast(&i)=v; - } - bool compare_exchange_weak( - T &expected, - T desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - fence_before(success_order); - int current, success; - __asm__ __volatile__( - "1: ldq_l %2, %4\n" - "cmpeq %2, %0, %3\n" - "mov %2, %0\n" - "beq %3, 3f\n" - "stq_c %1, %4\n" - "2:\n" - - ".subsection 2\n" - "3: mov %3, %1\n" - "br 2b\n" - ".previous\n" - - : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success) - : "m" (i) - : - ); - if (desired) fence_after(success_order); - else fence_after(failure_order); - return desired; - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT {return true;} -protected: - inline T fetch_add_var(T c, memory_order order) volatile BOOST_NOEXCEPT - { - fence_before(order); - T original, modified; - __asm__ __volatile__( - "1: ldq_l %0, %2\n" - "addq %0, %3, %1\n" - "stq_c %1, %2\n" - "beq %1, 2f\n" - - ".subsection 2\n" - "2: br 1b\n" - ".previous\n" - - : "=&r" (original), "=&r" (modified) - : "m" (i), "r" (c) - : - ); - fence_after(order); - return original; - } - inline T fetch_inc(memory_order order) volatile BOOST_NOEXCEPT - { - fence_before(order); - T original, modified; - __asm__ __volatile__( - "1: ldq_l %0, %2\n" - "addq %0, 1, %1\n" - "stq_c %1, %2\n" - "beq %1, 2f\n" - - ".subsection 2\n" - "2: br 1b\n" - ".previous\n" - - : "=&r" (original), "=&r" (modified) - : "m" (i) - : - ); - fence_after(order); - return original; - } - inline T fetch_dec(memory_order order) volatile BOOST_NOEXCEPT - { - fence_before(order); - T original, modified; - __asm__ __volatile__( - "1: ldq_l %0, %2\n" - "subq %0, 1, %1\n" - "stq_c %1, %2\n" - "beq %1, 2f\n" - - ".subsection 2\n" - "2: br 1b\n" - ".previous\n" - - : "=&r" (original), "=&r" (modified) - : "m" (i) - : - ); - fence_after(order); - return original; - } -private: - T i; -}; - -template -class platform_atomic_integral : - public build_atomic_from_typical > > -{ -public: - typedef build_atomic_from_typical > > super; - BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {} - platform_atomic_integral(void) {} -}; - -template -class platform_atomic_integral : - public build_atomic_from_typical > > -{ -public: - typedef build_atomic_from_typical > > super; - BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {} - platform_atomic_integral(void) {} -}; - -template -class platform_atomic_integral : - public build_atomic_from_larger_type, T> -{ -public: - typedef build_atomic_from_larger_type, T> super; - BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {} - platform_atomic_integral(void) {} -}; - -template -class platform_atomic_integral : - public build_atomic_from_larger_type, T> -{ -public: - typedef 
build_atomic_from_larger_type, T> super; - BOOST_CONSTEXPR platform_atomic_integral(T v) BOOST_NOEXCEPT: super(v) {} - platform_atomic_integral(void) {} -}; - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/gcc-armv6plus.hpp b/include/boost/atomic/detail/gcc-armv6plus.hpp deleted file mode 100644 index cccd111..0000000 --- a/include/boost/atomic/detail/gcc-armv6plus.hpp +++ /dev/null @@ -1,255 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP -#define BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP - -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2009 Helge Bahmann -// Copyright (c) 2009 Phil Endecott -// Copyright (c) 2013 Tim Blechmann -// ARM Code by Phil Endecott, based on other architectures. - -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -// From the ARM Architecture Reference Manual for architecture v6: -// -// LDREX{} , [] -// Specifies the destination register for the memory word addressed by -// Specifies the register containing the address. -// -// STREX{} , , [] -// Specifies the destination register for the returned status value. -// 0 if the operation updates memory -// 1 if the operation fails to update memory -// Specifies the register containing the word to be stored to memory. -// Specifies the register containing the address. -// Rd must not be the same register as Rm or Rn. -// -// ARM v7 is like ARM v6 plus: -// There are half-word and byte versions of the LDREX and STREX instructions, -// LDREXH, LDREXB, STREXH and STREXB. -// There are also double-word versions, LDREXD and STREXD. -// (Actually it looks like these are available from version 6k onwards.) -// FIXME these are not yet used; should be mostly a matter of copy-and-paste. -// I think you can supply an immediate offset to the address. -// -// A memory barrier is effected using a "co-processor 15" instruction, -// though a separate assembler mnemonic is available for it in v7. - -namespace boost { -namespace atomics { -namespace detail { - -// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It -// doesn't include all instructions and in particular it doesn't include the co-processor -// instruction used for the memory barrier or the load-locked/store-conditional -// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our -// asm blocks with code to temporarily change to ARM mode. -// -// You can only change between ARM and Thumb modes when branching using the bx instruction. -// bx takes an address specified in a register. The least significant bit of the address -// indicates the mode, so 1 is added to indicate that the destination code is Thumb. -// A temporary register is needed for the address and is passed as an argument to these -// macros. It must be one of the "low" registers accessible to Thumb code, specified -// using the "l" attribute in the asm statement. -// -// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM -// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal -// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing -// we need to do to make this "universal" assembler mode work is to insert "IT" instructions -// to annotate the conditional instructions. These are ignored in other modes (e.g. v6), -// so they can always be present. 
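To make the LDREX/STREX description above concrete, here is a standalone 32-bit fetch_add written in the "universal assembler" style the comment mentions, targeting ARMv7 so that no Thumb-1 mode-switch wrapper is needed. It is purely illustrative: the code in this file builds its fetch operations from platform_cmpxchg32 retry loops instead.

inline int arm_fetch_add(volatile int* p, int v)
{
    int original, tmp, status;
    __asm__ __volatile__
    (
        "1: ldrex %0, %3\n" // original = *p, and open the exclusive monitor
        "add %1, %0, %4\n"  // tmp = original + v
        "strex %2, %1, %3\n" // try to store tmp; status = 0 on success
        "teq %2, #0\n"
        "bne 1b\n"          // the monitor was lost to another agent; retry
        : "=&r" (original), "=&r" (tmp), "=&r" (status), "+Q" (*p)
        : "r" (v)
        : "cc"
    );
    return original;
}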
- -#if defined(__thumb__) && !defined(__thumb2__) -#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: " -#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: " -#else -// The tmpreg is wasted in this case, which is non-optimal. -#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) -#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) -#endif - -#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__) -#define BOOST_ATOMIC_ARM_DMB "dmb\n" -#else -#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n" -#endif - -inline void -arm_barrier(void) BOOST_NOEXCEPT -{ - int brtmp; - __asm__ __volatile__ - ( - BOOST_ATOMIC_ARM_ASM_START(%0) - BOOST_ATOMIC_ARM_DMB - BOOST_ATOMIC_ARM_ASM_END(%0) - : "=&l" (brtmp) :: "memory" - ); -} - -inline void -platform_fence_before(memory_order order) BOOST_NOEXCEPT -{ - switch(order) - { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - arm_barrier(); - case memory_order_consume: - default:; - } -} - -inline void -platform_fence_after(memory_order order) BOOST_NOEXCEPT -{ - switch(order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - arm_barrier(); - default:; - } -} - -inline void -platform_fence_before_store(memory_order order) BOOST_NOEXCEPT -{ - platform_fence_before(order); -} - -inline void -platform_fence_after_store(memory_order order) BOOST_NOEXCEPT -{ - if (order == memory_order_seq_cst) - arm_barrier(); -} - -inline void -platform_fence_after_load(memory_order order) BOOST_NOEXCEPT -{ - platform_fence_after(order); -} - -template -inline bool -platform_cmpxchg32(T & expected, T desired, volatile T * ptr) BOOST_NOEXCEPT -{ - int success; - int tmp; - __asm__ __volatile__ - ( - BOOST_ATOMIC_ARM_ASM_START(%2) - "mov %1, #0\n" // success = 0 - "ldrex %0, %3\n" // expected' = *(&i) - "teq %0, %4\n" // flags = expected'==expected - "ittt eq\n" - "strexeq %2, %5, %3\n" // if (flags.equal) *(&i) = desired, tmp = !OK - "teqeq %2, #0\n" // if (flags.equal) flags = tmp==0 - "moveq %1, #1\n" // if (flags.equal) success = 1 - BOOST_ATOMIC_ARM_ASM_END(%2) - : "=&r" (expected), // %0 - "=&r" (success), // %1 - "=&l" (tmp), // %2 - "+Q" (*ptr) // %3 - : "r" (expected), // %4 - "r" (desired) // %5 - : "cc" - ); - return success; -} - -} -} - -#define BOOST_ATOMIC_THREAD_FENCE 2 -inline void -atomic_thread_fence(memory_order order) -{ - switch(order) - { - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - atomics::detail::arm_barrier(); - default:; - } -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -inline void -atomic_signal_fence(memory_order) -{ - __asm__ __volatile__ ("" ::: "memory"); -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before_store(order); - const_cast(v_) = 0; - atomics::detail::platform_fence_after_store(order); - } - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before(order); - uint32_t expected = v_; - do { - if (expected == 1) - break; - } while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_)); 
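// Note on the test_and_set loop above: the early break makes the operation
// store-free once the flag is already set. If a 1 is observed, either
// initially or via the expected value updated by a failed CAS, the loop
// exits without writing, and the observed value (true) is returned after
// the trailing fence applies the requested ordering.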
- atomics::detail::platform_fence_after(order); - return expected; - } - - BOOST_DELETED_FUNCTION(atomic_flag(const atomic_flag &)) - BOOST_DELETED_FUNCTION(atomic_flag& operator=(const atomic_flag &)) -}; - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -} - -#undef BOOST_ATOMIC_ARM_ASM_START -#undef BOOST_ATOMIC_ARM_ASM_END - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -#include - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ - -#endif diff --git a/include/boost/atomic/detail/gcc-atomic.hpp b/include/boost/atomic/detail/gcc-atomic.hpp deleted file mode 100644 index 9504e0e..0000000 --- a/include/boost/atomic/detail/gcc-atomic.hpp +++ /dev/null @@ -1,1367 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_HPP -#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_HPP - -// Copyright (c) 2013 Andrey Semashev -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -#if (defined(__i386__) && defined(__SSE2__)) || defined(__x86_64__) -#define BOOST_ATOMIC_X86_PAUSE() __asm__ __volatile__ ("pause\n") -#endif - -#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) -#define BOOST_ATOMIC_X86_HAS_CMPXCHG8B 1 -#endif - -#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) -#define BOOST_ATOMIC_X86_HAS_CMPXCHG16B 1 -#endif - -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG16B) && defined(__clang__) -// Worraround for bug: http://llvm.org/bugs/show_bug.cgi?id=19149 -// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 -#define BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS -#endif - -BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT -{ - return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME : - (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE : - (order == memory_order_acq_rel ? 
__ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST))))); -} - -} // namespace detail -} // namespace atomics - -#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2 - -class atomic_flag -{ -private: - bool v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(false) {} - - bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_test_and_set(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_clear(const_cast(&v_), atomics::detail::convert_memory_order_to_gcc(order)); - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) -}; - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -#endif // __GCC_ATOMIC_BOOL_LOCK_FREE == 2 - -} // namespace boost - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#if __GCC_ATOMIC_CHAR_LOCK_FREE == 2 -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_SHORT_LOCK_FREE == 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_INT_LOCK_FREE == 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_LONG_LOCK_FREE == 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#endif -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) -#define BOOST_ATOMIC_INT128_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_POINTER_LOCK_FREE == 2 -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#endif -#if __GCC_ATOMIC_BOOL_LOCK_FREE == 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 -#endif - -namespace boost { - -#define BOOST_ATOMIC_THREAD_FENCE 2 -BOOST_FORCEINLINE void atomic_thread_fence(memory_order order) -{ - __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order)); -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -BOOST_FORCEINLINE void atomic_signal_fence(memory_order order) -{ - __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order)); -} - -namespace atomics { -namespace detail { - -#if defined(BOOST_ATOMIC_CHAR_LOCK_FREE) && BOOST_ATOMIC_CHAR_LOCK_FREE > 0 - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, v, 
atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_and(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_or(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_xor(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint8_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : - v_(reinterpret_cast(v)) - { - } - - void store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store(&v_, (storage_type*)&v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __atomic_load(&v_, (storage_type*)&v, atomics::detail::convert_memory_order_to_gcc(order)); - return v; - } - - value_type exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type r; - __atomic_exchange(&v_, (storage_type*)&v, (storage_type*)&r, atomics::detail::convert_memory_order_to_gcc(order)); - return r; - } - - bool compare_exchange_strong( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange(&v_, (storage_type*)&expected, (storage_type*)&desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile 
BOOST_NOEXCEPT - { - return __atomic_compare_exchange(&v_, (storage_type*)&expected, (storage_type*)&desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#endif // defined(BOOST_ATOMIC_CHAR_LOCK_FREE) && BOOST_ATOMIC_CHAR_LOCK_FREE > 0 - -#if defined(BOOST_ATOMIC_SHORT_LOCK_FREE) && BOOST_ATOMIC_SHORT_LOCK_FREE > 0 - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_and(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_or(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_xor(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - 
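Every specialization in this backend follows the same uniform pattern visible above: each member function forwards to the matching GCC __atomic builtin, with the boost::memory_order argument translated by convert_memory_order_to_gcc. A minimal standalone equivalent for one operand size is sketched below; the memory orders are hard-coded only to keep the sketch short, and the function names are ours, not Boost's.

#include <cstdint>

inline std::uint32_t fetch_add_seq_cst(std::uint32_t* p, std::uint32_t v)
{
    return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
}

inline bool cas_weak_acq_rel(std::uint32_t* p, std::uint32_t& expected, std::uint32_t desired)
{
    // 'true' selects the weak form: it may fail spuriously, which is fine
    // inside a retry loop and can be cheaper on LL/SC architectures; the
    // strong form used by compare_exchange_strong passes 'false' instead.
    return __atomic_compare_exchange_n(p, &expected, desired, true,
                                       __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}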
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint16_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : - v_(reinterpret_cast(v)) - { - } - - void store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store(&v_, (storage_type*)&v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __atomic_load(&v_, (storage_type*)&v, atomics::detail::convert_memory_order_to_gcc(order)); - return v; - } - - value_type exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type r; - __atomic_exchange(&v_, (storage_type*)&v, (storage_type*)&r, atomics::detail::convert_memory_order_to_gcc(order)); - return r; - } - - bool compare_exchange_strong( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange(&v_, (storage_type*)&expected, (storage_type*)&desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange(&v_, (storage_type*)&expected, (storage_type*)&desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#endif // defined(BOOST_ATOMIC_SHORT_LOCK_FREE) && BOOST_ATOMIC_SHORT_LOCK_FREE > 0 - -#if defined(BOOST_ATOMIC_INT_LOCK_FREE) && BOOST_ATOMIC_INT_LOCK_FREE > 0 - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, 
v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_and(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_or(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_xor(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - __atomic_store_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - tmp = __atomic_exchange_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool compare_exchange_strong( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, 
&expected_s, desired_s, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool compare_exchange_weak( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#endif // defined(BOOST_ATOMIC_INT_LOCK_FREE) && BOOST_ATOMIC_INT_LOCK_FREE > 0 - -#if defined(BOOST_ATOMIC_LLONG_LOCK_FREE) && BOOST_ATOMIC_LLONG_LOCK_FREE > 0 - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_and(&v_, v, 
atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_or(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_xor(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint64_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - __atomic_store_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - tmp = __atomic_exchange_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool compare_exchange_strong( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool compare_exchange_weak( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - 
storage_type v_; -}; - -#endif // defined(BOOST_ATOMIC_LLONG_LOCK_FREE) && BOOST_ATOMIC_LLONG_LOCK_FREE > 0 - -#if defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 && !defined(BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_and(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_or(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_xor(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#if defined(BOOST_HAS_INT128) - -typedef boost::uint128_type storage128_type; - -#else // defined(BOOST_HAS_INT128) - -struct BOOST_ALIGNMENT(16) storage128_type -{ - uint64_t data[2]; -}; - -inline bool operator== (storage128_type const& left, storage128_type const& right) -{ - return left.data[0] == right.data[0] && 
left.data[1] == right.data[1]; -} -inline bool operator!= (storage128_type const& left, storage128_type const& right) -{ - return !(left == right); -} - -#endif // defined(BOOST_HAS_INT128) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef storage128_type storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT - { - memset(&v_, 0, sizeof(v_)); - memcpy(&v_, &v, sizeof(value_type)); - } - - void store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp; - memset(&tmp, 0, sizeof(tmp)); - memcpy(&tmp, &v, sizeof(value_type)); - __atomic_store_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp; - memset(&tmp, 0, sizeof(tmp)); - memcpy(&tmp, &v, sizeof(value_type)); - tmp = __atomic_exchange_n(&v_, tmp, atomics::detail::convert_memory_order_to_gcc(order)); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool compare_exchange_strong( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memset(&expected_s, 0, sizeof(expected_s)); - memset(&desired_s, 0, sizeof(desired_s)); - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool compare_exchange_weak( - value_type& expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memset(&expected_s, 0, sizeof(expected_s)); - memset(&desired_s, 0, sizeof(desired_s)); - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - const bool success = __atomic_compare_exchange_n(&v_, &expected_s, desired_s, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#endif // defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 && !defined(BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS) - - -/* pointers */ - -#if defined(BOOST_ATOMIC_POINTER_LOCK_FREE) && BOOST_ATOMIC_POINTER_LOCK_FREE > 0 - -template -class base_atomic -{ -private: - typedef base_atomic 
this_type; - typedef T* value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v * sizeof(T), atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, v * sizeof(T), atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef void* value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - __atomic_store_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - return __atomic_load_n(&v_, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_add(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return __atomic_fetch_sub(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 
BOOST_NOEXCEPT - { - return __atomic_exchange_n(&v_, v, atomics::detail::convert_memory_order_to_gcc(order)); - } - - bool compare_exchange_strong( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, false, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool compare_exchange_weak( - value_type& expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return __atomic_compare_exchange_n(&v_, &expected, desired, true, - atomics::detail::convert_memory_order_to_gcc(success_order), - atomics::detail::convert_memory_order_to_gcc(failure_order)); - } - - bool is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return __atomic_is_lock_free(sizeof(v_), &v_); - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#endif // defined(BOOST_ATOMIC_POINTER_LOCK_FREE) && BOOST_ATOMIC_POINTER_LOCK_FREE > 0 - -#if defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 && defined(BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS) - -inline void platform_fence_before(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* release */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void platform_fence_after(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* acquire */ - break; - case memory_order_consume: - /* consume */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void platform_fence_after_load(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_consume: - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } -} - -inline void platform_fence_before_store(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* release */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void platform_fence_after_store(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* acquire */ - break; - case memory_order_consume: - /* consume */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -template -inline bool 
platform_cmpxchg128_strong(T& expected, T desired, volatile T* ptr) BOOST_NOEXCEPT
-{
-    // __sync_val_compare_and_swap returns the value actually observed at
-    // *ptr; the swap succeeded iff that equals the old expected value.
-    T old_expected = expected;
-    expected = __sync_val_compare_and_swap(ptr, old_expected, desired);
-    return expected == old_expected;
-}
-
-template<typename T>
-inline void platform_store128(T value, volatile T* ptr) BOOST_NOEXCEPT
-{
-    // Seed rax:rdx with the current contents of *ptr, then loop on
-    // lock cmpxchg16b until the 16-byte value in rbx:rcx is installed.
-    uint64_t const* p_value = (uint64_t const*)&value;
-    __asm__ __volatile__
-    (
-        "movq 0(%[dest]), %%rax\n\t"
-        "movq 8(%[dest]), %%rdx\n\t"
-        ".align 16\n\t"
-        "1: lock; cmpxchg16b 0(%[dest])\n\t"
-        "jne 1b"
-        :
-        : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (ptr)
-        : "memory", "cc", "rax", "rdx"
-    );
-}
-
-template<typename T>
-inline T platform_load128(const volatile T* ptr) BOOST_NOEXCEPT
-{
-    // A CAS with identical comparand and new value leaves *ptr unchanged
-    // but returns its current contents, yielding an atomic 16-byte load.
-    T value = T();
-    return __sync_val_compare_and_swap(ptr, value, value);
-}
-
-#endif // defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 && defined(BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS)
-
-} // namespace detail
-} // namespace atomics
-} // namespace boost
-
-#if defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 && defined(BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS)
-#undef BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS
-#include
-#endif // defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 && defined(BOOST_ATOMIC_X86_NO_GCC_128_BIT_ATOMIC_INTRINSICS)
-
-#endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)
-
-#endif // BOOST_ATOMIC_DETAIL_GCC_ATOMIC_HPP
diff --git a/include/boost/atomic/detail/gcc-cas.hpp b/include/boost/atomic/detail/gcc-cas.hpp
deleted file mode 100644
index da85fa8..0000000
--- a/include/boost/atomic/detail/gcc-cas.hpp
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright (c) 2011 Helge Bahmann
-// Copyright (c) 2013 Tim Blechmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Use the GNU builtin __sync_val_compare_and_swap to build
-// atomic operations for 32-bit and smaller types.
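-
-// Illustrative aside (not part of the original file): schematically, the
-// read-modify-write operations below are derived from this single CAS
-// builtin with a retry loop. The helper name cas_based_fetch_add is made
-// up for this sketch.
-//
-//     template<typename T>
-//     T cas_based_fetch_add(volatile T* ptr, T v)
-//     {
-//         T expected = *ptr;
-//         for (;;)
-//         {
-//             // returns the value actually observed at *ptr
-//             T found = __sync_val_compare_and_swap(ptr, expected, (T)(expected + v));
-//             if (found == expected)
-//                 return found;   // swap succeeded; return the previous value
-//             expected = found;   // raced with another thread; retry
-//         }
-//     }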
- -#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP -#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP - -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { - -#define BOOST_ATOMIC_THREAD_FENCE 2 -inline void -atomic_thread_fence(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - break; - case memory_order_release: - case memory_order_consume: - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - __sync_synchronize(); - break; - } -} - -namespace atomics { -namespace detail { - -inline void -platform_fence_before(memory_order) -{ - /* empty, as compare_and_swap is synchronizing already */ -} - -inline void -platform_fence_after(memory_order) -{ - /* empty, as compare_and_swap is synchronizing already */ -} - -inline void -platform_fence_before_store(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - __sync_synchronize(); - break; - } -} - -inline void -platform_fence_after_store(memory_order order) -{ - if (order == memory_order_seq_cst) - __sync_synchronize(); -} - -inline void -platform_fence_after_load(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_consume: - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - __sync_synchronize(); - break; - } -} - -template -inline bool -platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr) -{ - T found = __sync_val_compare_and_swap(ptr, expected, desired); - bool success = (found == expected); - expected = found; - return success; -} - -} -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before_store(order); - const_cast(v_) = 0; - atomics::detail::platform_fence_after_store(order); - } - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before(order); - uint32_t expected = v_; - do { - if (expected == 1) - break; - } while (!atomics::detail::platform_cmpxchg32_strong(expected, (uint32_t)1, &v_)); - atomics::detail::platform_fence_after(order); - return expected; - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) -}; - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -} - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE (__SIZEOF_LONG__ <= 4 ? 2 : 0) -#define BOOST_ATOMIC_LLONG_LOCK_FREE (__SIZEOF_LONG_LONG__ <= 4 ? 2 : 0) -#define BOOST_ATOMIC_POINTER_LOCK_FREE (__SIZEOF_POINTER__ <= 4 ? 
2 : 0)
-#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
-
-#include
-
-#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
-
-#endif
diff --git a/include/boost/atomic/detail/gcc-ppc.hpp b/include/boost/atomic/detail/gcc-ppc.hpp
deleted file mode 100644
index ce75c20..0000000
--- a/include/boost/atomic/detail/gcc-ppc.hpp
+++ /dev/null
@@ -1,3046 +0,0 @@
-#ifndef BOOST_ATOMIC_DETAIL_GCC_PPC_HPP
-#define BOOST_ATOMIC_DETAIL_GCC_PPC_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-// Copyright (c) 2013 Tim Blechmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include
-#include
-#include
-#include
-
-#ifdef BOOST_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-/*
-  Refer to: Motorola: "Programming Environments Manual for 32-Bit
-  Implementations of the PowerPC Architecture", Appendix E:
-  "Synchronization Programming Examples" for an explanation of what is
-  going on here (can be found on the web at various places by the
-  name "MPCFPE32B.pdf", Google is your friend...)
-
-  Most of the atomic operations map to instructions in a relatively
-  straightforward fashion, but "load"s may at first glance appear
-  a bit strange as they map to:
-
-  lwz %rX, addr
-  cmpw %rX, %rX
-  bne- 1f
-  1:
-
-  That is, the CPU is forced to perform a branch that "formally" depends
-  on the value retrieved from memory. This scheme has an overhead of
-  about 1-2 clock cycles per load, but it allows "acquire" to be mapped
-  to the "isync" instruction instead of "sync", uniformly and for all
-  types of atomic operations. Since "isync" costs about 15 clock cycles
-  while "sync" costs about 50, the small penalty added to atomic loads
-  is more than made up for.
-
-  Byte- and halfword-sized atomic values are realized by encoding the
-  value to be represented into a word, performing sign/zero extension
-  as appropriate. This means that after add/sub operations the value
-  needs fixing up to accurately preserve the wrap-around semantics of
-  the smaller type. (Nothing special needs to be done for the bit-wise
-  and the "exchange type" operators, as the compiler already sees to
-  it that values carried in registers are extended appropriately and
-  everything falls into place naturally.)
-
-  The register constraint "b" instructs gcc to use any register
-  except r0; this is sometimes required because the encoding for
-  r0 is used to signify "constant zero" in a number of instructions,
-  making r0 unusable in this place. For simplicity this constraint
-  is used everywhere, since I am too lazy to look this up on a
-  per-instruction basis, and ppc has enough registers for this not
-  to pose a problem.
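-
-  Illustrative aside (not part of the original file): the fix-up after an
-  add/sub on an 8-bit value amounts to re-truncating the full-width
-  result, which is what the "extsb" (signed) and "rlwinm ..., 0xff"
-  (unsigned) instructions in the fetch_add/fetch_sub implementations
-  below achieve. In C terms (helper name made up for this sketch):
-
-      // after a 32-bit add, keep only the low byte so the 8-bit
-      // wrap-around semantics are preserved
-      uint32_t add_fixup_u8(uint32_t word, uint32_t v)
-      {
-          return (word + v) & 0xffu;
-      }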
-*/ - -namespace boost { -namespace atomics { -namespace detail { - -inline void -ppc_fence_before(memory_order order) -{ - switch(order) - { - case memory_order_release: - case memory_order_acq_rel: -#if defined(__powerpc64__) - __asm__ __volatile__ ("lwsync" ::: "memory"); - break; -#endif - case memory_order_seq_cst: - __asm__ __volatile__ ("sync" ::: "memory"); - default:; - } -} - -inline void -ppc_fence_after(memory_order order) -{ - switch(order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("isync"); - case memory_order_consume: - __asm__ __volatile__ ("" ::: "memory"); - default:; - } -} - -inline void -ppc_fence_after_store(memory_order order) -{ - switch(order) - { - case memory_order_seq_cst: - __asm__ __volatile__ ("sync"); - default:; - } -} - -} -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::ppc_fence_before(order); - const_cast(v_) = 0; - atomics::detail::ppc_fence_after_store(order); - } - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - uint32_t original; - atomics::detail::ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (1) - : "cr0" - ); - atomics::detail::ppc_fence_after(order); - return original; - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) -}; - -} /* namespace boost */ - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#if defined(__powerpc64__) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#else -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#endif -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -namespace boost { -namespace atomics { -namespace detail { - -/* integral types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef int32_t storage_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m"(v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=&r" (v) - : "m" (v_) - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. 
%2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "extsb %1, %1\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "extsb %1, %1\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "and %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "or %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "xor %1,%0,%3\n" - "stwcx. 
%1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m"(v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=&r" (v) - : "m" (v_) - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "rlwinm %1, %1, 0, 0xff\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "rlwinm %1, %1, 0, 0xff\n" - "stwcx. 
%1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "and %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "or %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "xor %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef int32_t storage_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m"(v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=&r" (v) - : "m" (v_) - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. 
%4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "extsh %1, %1\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "extsh %1, %1\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "and %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "or %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "xor %1,%0,%3\n" - "stwcx. 
%1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m"(v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=&r" (v) - : "m" (v_) - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "rlwinm %1, %1, 0, 0xffff\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "rlwinm %1, %1, 0, 0xffff\n" - "stwcx. 
%1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "and %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "or %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "xor %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - const_cast(v_) = v; - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - __asm__ __volatile__ - ( - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "+b"(v) - : - : "cr0" - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. 
%4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "and %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "or %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "xor %1,%0,%3\n" - "stwcx. 
%1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#if defined(__powerpc64__) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - const_cast(v_) = v; - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - __asm__ __volatile__ - ( - "cmpd %0, %0\n" - "bne- 1f\n" - "1:\n" - : "+b"(v) - : - : "cr0" - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y1\n" - "stdcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "add %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "sub %1,%0,%3\n" - "stdcx. 
%1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "and %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "or %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "xor %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#endif - -/* pointer types */ - -#if !defined(__powerpc64__) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void * value_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m" (v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(v) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. 
%4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m" (v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(v) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. 
%4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "add %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y2\n" - "sub %1,%0,%3\n" - "stwcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#else - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void * value_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "std %1, %0\n" - : "+m" (v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "ld %0, %1\n" - "cmpd %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(v) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y1\n" - "stdcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. 
%4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "add %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "sub %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - ppc_fence_before(order); - __asm__ __volatile__ - ( - "std %1, %0\n" - : "+m" (v_) - : "r" (v) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v; - __asm__ __volatile__ - ( - "ld %0, %1\n" - "cmpd %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(v) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type original; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y1\n" - "stdcx. %2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (v) - : "cr0" - ); - ppc_fence_after(order); - return original; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. 
%4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected), "=&b" (success), "+Z"(v_) - : "b" (expected), "b" (desired) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - return success; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "add %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - value_type original, tmp; - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y2\n" - "sub %1,%0,%3\n" - "stdcx. %1,%y2\n" - "bne- 1b\n" - : "=&b" (original), "=&b" (tmp), "+Z"(v_) - : "b" (v) - : "cc" - ); - ppc_fence_after(order); - return original; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#endif - -/* generic */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m" (v_) - : "r" (tmp) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(tmp) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0, original; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. 
%2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (tmp) - : "cr0" - ); - ppc_fence_after(order); - value_type res; - memcpy(&res, &original, sizeof(value_type)); - return res; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m" (v_) - : "r" (tmp) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(tmp) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0, original; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. 
%2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (tmp) - : "cr0" - ); - ppc_fence_after(order); - value_type res; - memcpy(&res, &original, sizeof(value_type)); - return res; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "stw %1, %0\n" - : "+m" (v_) - : "r" (tmp) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp; - __asm__ __volatile__ - ( - "lwz %0, %1\n" - "cmpw %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(tmp) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0, original; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "lwarx %0,%y1\n" - "stwcx. 
%2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (tmp) - : "cr0" - ); - ppc_fence_after(order); - value_type res; - memcpy(&res, &original, sizeof(value_type)); - return res; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: lwarx %0,%y2\n" - "cmpw %0, %3\n" - "bne- 1f\n" - "stwcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#if defined(__powerpc64__) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint64_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "std %1, %0\n" - : "+m" (v_) - : "r" (tmp) - ); - ppc_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp; - __asm__ __volatile__ - ( - "ld %0, %1\n" - "cmpd %0, %0\n" - "bne- 1f\n" - "1:\n" - : "=r"(tmp) - : "m"(v_) - : "cr0" - ); - ppc_fence_after(order); - - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0, original; - memcpy(&tmp, &v, sizeof(value_type)); - ppc_fence_before(order); - __asm__ __volatile__ - ( - "1:\n" - "ldarx %0,%y1\n" - "stdcx. 
%2,%y1\n" - "bne- 1b\n" - : "=&b" (original), "+Z"(v_) - : "b" (tmp) - : "cr0" - ); - ppc_fence_after(order); - value_type res; - memcpy(&res, &original, sizeof(value_type)); - return res; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. %4,%y2\n" - "bne- 1f\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - - int success; - ppc_fence_before(success_order); - __asm__ __volatile__ - ( - "li %1, 0\n" - "0: ldarx %0,%y2\n" - "cmpd %0, %3\n" - "bne- 1f\n" - "stdcx. %4,%y2\n" - "bne- 0b\n" - "li %1, 1\n" - "1:" - : "=&b" (expected_s), "=&b" (success), "+Z"(v_) - : "b" (expected_s), "b" (desired_s) - : "cr0" - ); - if (success) - ppc_fence_after(success_order); - else - ppc_fence_after(failure_order); - memcpy(&expected, &expected_s, sizeof(value_type)); - return success; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#endif - -} -} - -#define BOOST_ATOMIC_THREAD_FENCE 2 -inline void -atomic_thread_fence(memory_order order) -{ - switch(order) - { - case memory_order_acquire: - __asm__ __volatile__ ("isync" ::: "memory"); - break; - case memory_order_release: -#if defined(__powerpc64__) - __asm__ __volatile__ ("lwsync" ::: "memory"); - break; -#endif - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("sync" ::: "memory"); - default:; - } -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -inline void -atomic_signal_fence(memory_order order) -{ - switch(order) - { - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } -} - -} - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ - -#endif diff --git a/include/boost/atomic/detail/gcc-sparcv9.hpp b/include/boost/atomic/detail/gcc-sparcv9.hpp deleted file mode 100644 index aa4f93f..0000000 --- a/include/boost/atomic/detail/gcc-sparcv9.hpp +++ /dev/null @@ -1,1335 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_GCC_SPARC_HPP -#define BOOST_ATOMIC_DETAIL_GCC_SPARC_HPP - -// Copyright (c) 2010 Helge Bahmann -// Copyright (c) 2013 Tim Blechmann -// -// Distributed under the Boost Software License, Version 1.0. 
-// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -inline void -platform_fence_before(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory"); - /* release */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("membar #Sync" ::: "memory"); - /* seq */ - break; - } -} - -inline void -platform_fence_after(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory"); - /* acquire */ - break; - case memory_order_consume: - /* consume */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("membar #Sync" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void -platform_fence_after_store(memory_order order) -{ - switch(order) - { - case memory_order_seq_cst: - __asm__ __volatile__ ("membar #Sync" ::: "memory"); - default:; - } -} - - -inline void -platform_fence_after_load(memory_order order) -{ - platform_fence_after(order); -} - -} -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before(order); - const_cast(v_) = 0; - atomics::detail::platform_fence_after_store(order); - } - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before(order); - uint32_t tmp = 1; - __asm__ ( - "cas [%1], %2, %0" - : "+r" (tmp) - : "r" (&v_), "r" (0) - : "memory" - ); - atomics::detail::platform_fence_after(order); - return tmp; - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) -}; - -} /* namespace boost */ - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -namespace boost { - -#define BOOST_ATOMIC_THREAD_FENCE 2 -inline void -atomic_thread_fence(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - break; - case memory_order_release: - __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory"); - break; - case memory_order_acquire: - __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory"); - break; - case memory_order_acq_rel: - __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory"); - break; - case memory_order_consume: - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("membar #Sync" ::: "memory"); - break; - default:; - } -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -inline void -atomic_signal_fence(memory_order) -{ - 
__asm__ __volatile__ ("" ::: "memory"); -} - -namespace atomics { -namespace detail { - -/* integral types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef int32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp - v, order, memory_order_relaxed)); - return tmp; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - storage_type desired_s = desired; - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" ((storage_type)expected) - : "memory" - ); - desired = desired_s; - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - 
typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef uint32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp - v, order, memory_order_relaxed)); - return tmp; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - storage_type desired_s = desired; - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" ((storage_type)expected) - : "memory" - ); - desired = desired_s; - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef int32_t storage_type; - -protected: - typedef value_type 
value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp - v, order, memory_order_relaxed)); - return tmp; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - storage_type desired_s = desired; - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" ((storage_type)expected) - : "memory" - ); - desired = desired_s; - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - typedef uint32_t storage_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} 
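
The cas-based loop above is the template for every read-modify-write operation in this deleted file: SPARCv9 has no fetch-and-add instruction, so fetch_add, fetch_sub, exchange and the bitwise ops are all synthesized from compare-and-swap retries. A minimal standalone sketch of that pattern, assuming a sparc_cas32() wrapper equivalent to the "cas [%1], %2, %0" sequence used in compare_exchange_strong (the wrapper name is illustrative, not Boost API):

#include <cstdint>

std::int32_t sparc_cas32(volatile std::int32_t* addr,
                         std::int32_t expected,
                         std::int32_t desired)
{
    // cas stores 'desired' into *addr iff *addr == expected, and always
    // leaves the previous contents of *addr in the 'desired' register.
    __asm__ __volatile__
    (
        "cas [%1], %2, %0"
        : "+r" (desired)
        : "r" (addr), "r" (expected)
        : "memory"
    );
    return desired;
}

std::int32_t cas_fetch_add(volatile std::int32_t* addr, std::int32_t v)
{
    std::int32_t old = *addr;
    for (;;)
    {
        std::int32_t prev = sparc_cas32(addr, old, old + v);
        if (prev == old)
            return old; // the swap took effect; report the original value
        old = prev;     // another thread won the race; retry with its value
    }
}
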
- - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp - v, order, memory_order_relaxed)); - return tmp; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - storage_type desired_s = desired; - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" ((storage_type)expected) - : "memory" - ); - desired = desired_s; - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - 
platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst)const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp - v, order, memory_order_relaxed)); - return tmp; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired) - : "r" (&v_), "r" (expected) - : "memory" - ); - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -/* pointer types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void * value_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type load(memory_order order = memory_order_seq_cst)const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - 
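
For plain loads and stores this backend relies on fence placement rather than special instructions: platform_fence_before() precedes a release store and platform_fence_after_load() follows an acquire load. A minimal sketch of that bracketing, assuming SPARCv9 and reusing the same membar arguments as the fence helpers above:

#include <cstdint>

void release_store_u32(volatile std::uint32_t* p, std::uint32_t v)
{
    // Order all prior loads and stores before the store (release).
    __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
    *p = v; // plain store; the ordering comes from the preceding membar
}

std::uint32_t acquire_load_u32(const volatile std::uint32_t* p)
{
    std::uint32_t v = *p; // plain load
    // Order the load before all subsequent loads and stores (acquire).
    __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
    return v;
}
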
value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired) - : "r" (&v_), "r" (expected) - : "memory" - ); - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - - bool compare_exchange_weak(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, (char*)tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, (char*)tmp - v, order, memory_order_relaxed)); - return tmp; - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - const_cast(v_) = v; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst)const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - platform_fence_before(success_order); - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired) - : "r" (&v_), "r" (expected) - : "memory" - ); - bool success = (desired == expected); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = desired; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - 
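
Because the SPARCv9 cas instruction cannot fail spuriously, compare_exchange_weak can simply forward to compare_exchange_strong, as it does throughout this file. Callers are still expected to write the standard weak retry loop; a hypothetical fetch_max() helper (not part of the patch) sketches that usage:

#include <boost/memory_order.hpp>

// On failure, compare_exchange_weak refreshes 'old' with the currently
// stored value, so the loop makes progress whether or not the platform's
// weak CAS can fail spuriously.
template< typename Atomic, typename T >
T fetch_max(Atomic& a, T v)
{
    T old = a.load(boost::memory_order_relaxed);
    while (old < v)
    {
        if (a.compare_exchange_weak(old, v,
            boost::memory_order_seq_cst, boost::memory_order_relaxed))
            break;
        // 'old' now holds the latest value; re-test the condition
    }
    return old;
}
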
value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp + v, order, memory_order_relaxed)); - return tmp; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, tmp - v, order, memory_order_relaxed)); - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -/* generic types */ - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst)const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" (expected_s) - : "memory" - ); - bool success = (desired_s == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &desired_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, 
&v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst)const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); - __asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" (expected_s) - : "memory" - ); - bool success = (desired_s == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &desired_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - platform_fence_after_store(order); - } - - value_type - load(memory_order order = memory_order_seq_cst)const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed)); - return tmp; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); - 
__asm__ ( - "cas [%1], %2, %0" - : "+r" (desired_s) - : "r" (&v_), "r" (expected_s) - : "memory" - ); - bool success = (desired_s == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &desired_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ - -} -} -} - -#endif diff --git a/include/boost/atomic/detail/gcc-x86.hpp b/include/boost/atomic/detail/gcc-x86.hpp deleted file mode 100644 index bb198b0..0000000 --- a/include/boost/atomic/detail/gcc-x86.hpp +++ /dev/null @@ -1,2021 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_GCC_X86_HPP -#define BOOST_ATOMIC_DETAIL_GCC_X86_HPP - -// Copyright (c) 2009 Helge Bahmann -// Copyright (c) 2012 Tim Blechmann -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -#if defined(__x86_64__) || defined(__SSE2__) -# define BOOST_ATOMIC_X86_FENCE_INSTR "mfence\n" -#else -# define BOOST_ATOMIC_X86_FENCE_INSTR "lock ; addl $0, (%%esp)\n" -#endif - -#define BOOST_ATOMIC_X86_PAUSE() __asm__ __volatile__ ("pause\n") - -#if defined(__i386__) &&\ - (\ - defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\ - defined(__i586__) || defined(__i686__) || defined(__pentium4__) || defined(__nocona__) || defined(__core2__) || defined(__corei7__) ||\ - defined(__k6__) || defined(__athlon__) || defined(__k8__) || defined(__amdfam10__) || defined(__bdver1__) || defined(__bdver2__) || defined(__bdver3__) || defined(__btver1__) || defined(__btver2__)\ - ) -#define BOOST_ATOMIC_X86_HAS_CMPXCHG8B 1 -#endif - -#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) -#define BOOST_ATOMIC_X86_HAS_CMPXCHG16B 1 -#endif - -inline void -platform_fence_before(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* release */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void -platform_fence_after(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* acquire */ - break; - case memory_order_consume: - /* consume */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void -platform_fence_after_load(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case 
memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_consume: - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - break; - default:; - } -} - -inline void -platform_fence_before_store(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_acquire: - case memory_order_consume: - break; - case memory_order_release: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* release */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -inline void -platform_fence_after_store(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - case memory_order_release: - break; - case memory_order_acquire: - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - /* acquire */ - break; - case memory_order_consume: - /* consume */ - break; - case memory_order_seq_cst: - __asm__ __volatile__ ("" ::: "memory"); - /* seq */ - break; - default:; - } -} - -} -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - uint32_t v = 1; - atomics::detail::platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgl %0, %1" - : "+r" (v), "+m" (v_) - ); - atomics::detail::platform_fence_after(order); - return v; - } - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order == memory_order_seq_cst) { - uint32_t v = 0; - __asm__ __volatile__ - ( - "xchgl %0, %1" - : "+r" (v), "+m" (v_) - ); - } else { - atomics::detail::platform_fence_before(order); - v_ = 0; - } - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) -}; - -} /* namespace boost */ - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 - -#if defined(__x86_64__) || defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#endif - -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT)) -#define BOOST_ATOMIC_INT128_LOCK_FREE 2 -#endif - -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -namespace boost { - -#define BOOST_ATOMIC_THREAD_FENCE 2 -inline void -atomic_thread_fence(memory_order order) -{ - switch(order) - { - case memory_order_relaxed: - break; - case memory_order_release: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_acquire: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_acq_rel: - __asm__ __volatile__ ("" ::: "memory"); - break; - case memory_order_consume: - break; - case memory_order_seq_cst: - __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory"); - break; - default:; - } -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -inline void -atomic_signal_fence(memory_order) -{ - __asm__ __volatile__ ("" ::: "memory"); -} - -namespace atomics { -namespace detail { - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T 
value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddb %0, %1" - : "+q" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgb %0, %1" - : "+q" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgb %3, %1\n\t" - "sete %2" - : "+a" (previous), "+m" (v_), "=q" (success) - : "q" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) 
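// [Editor's sketch, not part of this patch.] The fetch_and/fetch_or/fetch_xor
// members above derive an arbitrary read-modify-write from compare_exchange_weak,
// because x86 `lock and/or/xor` does not return the previous value. The same
// retry loop, restated for an integral T with std::atomic purely for exposition:

#include <atomic>

template< typename T >
T fetch_and_sketch(std::atomic< T >& a, T mask, std::memory_order order)
{
    T old = a.load(std::memory_order_relaxed);
    // On failure compare_exchange_weak reloads `old` with the value it found;
    // a "pause" hint (BOOST_ATOMIC_X86_PAUSE above) would ease contention here.
    while (!a.compare_exchange_weak(old, old & mask, order, std::memory_order_relaxed))
    {
    }
    return old; // the value observed immediately before the successful exchange
}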
- BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddw %0, %1" - : "+q" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgw %0, %1" - : "+q" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgw %3, %1\n\t" - "sete %2" - : "+a" (previous), "+m" (v_), "=q" (success) - : "q" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) 
volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddl %0, %1" - : "+r" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgl %0, %1" - : "+r" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgl %3, %1\n\t" - "sete %2" - : "+a,a" (previous), "+m,m" (v_), "=q,m" (success) - : "r,r" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#if defined(__x86_64__) -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; 
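// [Editor's sketch, not part of this patch.] On x86 a plain store already has
// release semantics, which is why the weaker orders above need only a compiler
// barrier; the seq_cst branch just below routes the write through exchange()
// instead, so it is carried by a locked instruction (xchg). Restated with
// std::atomic:

#include <atomic>

void store_seq_cst(std::atomic< int >& a, int v)
{
    // Discarding the exchange result leaves just the store plus a full fence.
    (void)a.exchange(v, std::memory_order_seq_cst);
}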
- } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddq %0, %1" - : "+r" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgq %0, %1" - : "+r" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgq %3, %1\n\t" - "sete %2" - : "+a,a" (previous), "+m,m" (v_), "=q,m" (success) - : "r,r" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - value_type tmp = load(memory_order_relaxed); - while (!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#endif - -/* pointers */ - -// NOTE: x32 target is still regarded as x86_64 and can only be detected by the size of pointers -#if !defined(__x86_64__) || (defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ == 4) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void * value_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - 
if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgl %0, %1" - : "+r" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool compare_exchange_strong(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgl %3, %1\n\t" - "sete %2" - : "+a,a" (previous), "+m,m" (v_), "=q,m" (success) - : "r,r" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool compare_exchange_weak(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddl %0, %1" - : "+r" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return reinterpret_cast(v); - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgl %0, %1" - : "+r" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgl %3, %1\n\t" - "sete %2" - : "+a,a" (previous), "+m,m" (v_), "=q,m" (success) - : "r,r" (desired) 
- : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddl %0, %1" - : "+r" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return reinterpret_cast(v); - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#else - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void * value_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgq %0, %1" - : "+r" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool compare_exchange_strong(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgq %3, %1\n\t" - "sete %2" - : "+a,a" (previous), "+m,m" (v_), "=q,m" (success) - : "r,r" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool compare_exchange_weak(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddq %0, %1" - : "+r" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return reinterpret_cast(v); - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); 
- } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T * value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before(order); - const_cast(v_) = v; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = const_cast(v_); - platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgq %0, %1" - : "+r" (v), "+m" (v_) - ); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgq %3, %1\n\t" - "sete %2" - : "+a,a" (previous), "+m,m" (v_), "=q,m" (success) - : "r,r" (desired) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = previous; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - platform_fence_before(order); - __asm__ __volatile__ - ( - "lock ; xaddq %0, %1" - : "+r" (v), "+m" (v_) - : - : "cc" - ); - platform_fence_after(order); - return reinterpret_cast(v); - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -#endif - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint8_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : - v_(reinterpret_cast(v)) - { - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = 
memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgb %0, %1" - : "+q" (tmp), "+m" (v_) - ); - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - storage_type previous_s = expected_s; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgb %3, %1\n\t" - "sete %2" - : "+a" (previous_s), "+m" (v_), "=q" (success) - : "q" (desired_s) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &previous_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint16_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : - v_(reinterpret_cast(v)) - { - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgw %0, %1" - : "+q" (tmp), "+m" (v_) - ); - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s, desired_s; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - storage_type previous_s = 
expected_s; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgw %3, %1\n\t" - "sete %2" - : "+a" (previous_s), "+m" (v_), "=q" (success) - : "q" (desired_s) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &previous_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgl %0, %1" - : "+q" (tmp), "+m" (v_) - ); - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - storage_type previous_s = expected_s; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgl %3, %1\n\t" - "sete %2" - : "+a,a" (previous_s), "+m,m" (v_), "=q,m" (success) - : "q,q" (desired_s) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &previous_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: 
- storage_type v_; -}; - -#if defined(__x86_64__) -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint64_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - const_cast(v_) = tmp; - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = const_cast(v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - __asm__ __volatile__ - ( - "xchgq %0, %1" - : "+q" (tmp), "+m" (v_) - ); - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - storage_type previous_s = expected_s; - platform_fence_before(success_order); - bool success; - __asm__ __volatile__ - ( - "lock ; cmpxchgq %3, %1\n\t" - "sete %2" - : "+a,a" (previous_s), "+m,m" (v_), "=q,m" (success) - : "q,q" (desired_s) - : "cc" - ); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &previous_s, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; -#endif - -#if !defined(__x86_64__) && defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) - -template -inline bool -platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr) BOOST_NOEXCEPT -{ -#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 - const T oldval = __sync_val_compare_and_swap(ptr, expected, desired); - const bool result = (oldval == expected); - expected = oldval; - return result; -#else - uint32_t scratch; - /* Make sure ebx is saved and restored properly in case - this object is compiled as "position independent". Since - programmers on x86 tend to forget specifying -DPIC or - similar, always assume PIC. - - To make this work uniformly even in the non-PIC case, - setup register constraints such that ebx can not be - used by accident e.g. as base address for the variable - to be modified. 
Accessing "scratch" should always be okay, - as it can only be placed on the stack (and therefore - accessed through ebp or esp only). - - In theory, could push/pop ebx onto/off the stack, but movs - to a prepared stack slot turn out to be faster. */ - bool success; - __asm__ __volatile__ - ( - "movl %%ebx, %[scratch]\n\t" - "movl %[desired_lo], %%ebx\n\t" - "lock; cmpxchg8b %[dest]\n\t" - "movl %[scratch], %%ebx\n\t" - "sete %[success]" - : "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (*ptr), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success) - : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32)) - : "memory", "cc" - ); - return success; -#endif -} - -// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations: -// -// The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically: -// * Reading or writing a quadword aligned on a 64-bit boundary -// -// Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64 bit native types for storage and dynamic memory allocations -// have at least 8 byte alignment. The only unfortunate case is when atomic is placeod on the stack and it is not 8-byte aligned (like on 32 bit Windows). - -template -inline void -platform_store64(T value, volatile T * ptr) BOOST_NOEXCEPT -{ - if (((uint32_t)ptr & 0x00000007) == 0) - { -#if defined(__SSE2__) - __asm__ __volatile__ - ( - "movq %1, %%xmm4\n\t" - "movq %%xmm4, %0\n\t" - : "=m" (*ptr) - : "m" (value) - : "memory", "xmm4" - ); -#else - __asm__ __volatile__ - ( - "fildll %1\n\t" - "fistpll %0\n\t" - : "=m" (*ptr) - : "m" (value) - : "memory" - ); -#endif - } - else - { - uint32_t scratch; - __asm__ __volatile__ - ( - "movl %%ebx, %[scratch]\n\t" - "movl %[value_lo], %%ebx\n\t" - "movl 0(%[dest]), %%eax\n\t" - "movl 4(%[dest]), %%edx\n\t" - ".align 16\n\t" - "1: lock; cmpxchg8b 0(%[dest])\n\t" - "jne 1b\n\t" - "movl %[scratch], %%ebx" - : [scratch] "=m,m" (scratch) - : [value_lo] "a,a" ((uint32_t)value), "c,c" ((uint32_t)(value >> 32)), [dest] "D,S" (ptr) - : "memory", "cc", "edx" - ); - } -} - -template -inline T -platform_load64(const volatile T * ptr) BOOST_NOEXCEPT -{ - T value; - - if (((uint32_t)ptr & 0x00000007) == 0) - { -#if defined(__SSE2__) - __asm__ __volatile__ - ( - "movq %1, %%xmm4\n\t" - "movq %%xmm4, %0\n\t" - : "=m" (value) - : "m" (*ptr) - : "memory", "xmm4" - ); -#else - __asm__ __volatile__ - ( - "fildll %1\n\t" - "fistpll %0\n\t" - : "=m" (value) - : "m" (*ptr) - : "memory" - ); -#endif - } - else - { - // We don't care for comparison result here; the previous value will be stored into value anyway. - // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b. 
- __asm__ __volatile__ - ( - "movl %%ebx, %%eax\n\t" - "movl %%ecx, %%edx\n\t" - "lock; cmpxchg8b %[dest]" - : "=&A" (value) - : [dest] "m" (*ptr) - : "cc" - ); - } - - return value; -} - -#endif - -#if defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 - -template -inline bool -platform_cmpxchg128_strong(T& expected, T desired, volatile T* ptr) BOOST_NOEXCEPT -{ - uint64_t const* p_desired = (uint64_t const*)&desired; - bool success; - __asm__ __volatile__ - ( - "lock; cmpxchg16b %[dest]\n\t" - "sete %[success]" - : "+A,A" (expected), [dest] "+m,m" (*ptr), [success] "=q,m" (success) - : "b,b" (p_desired[0]), "c,c" (p_desired[1]) - : "memory", "cc" - ); - return success; -} - -template -inline void -platform_store128(T value, volatile T* ptr) BOOST_NOEXCEPT -{ - uint64_t const* p_value = (uint64_t const*)&value; - __asm__ __volatile__ - ( - "movq 0(%[dest]), %%rax\n\t" - "movq 8(%[dest]), %%rdx\n\t" - ".align 16\n\t" - "1: lock; cmpxchg16b 0(%[dest])\n\t" - "jne 1b" - : - : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (ptr) - : "memory", "cc", "rax", "rdx" - ); -} - -template -inline T -platform_load128(const volatile T* ptr) BOOST_NOEXCEPT -{ - T value; - - // We don't care for comparison result here; the previous value will be stored into value anyway. - // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b. - __asm__ __volatile__ - ( - "movq %%rbx, %%rax\n\t" - "movq %%rcx, %%rdx\n\t" - "lock; cmpxchg16b %[dest]" - : "=&A" (value) - : [dest] "m" (*ptr) - : "cc" - ); - - return value; -} - -#endif // defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 - -} -} -} - -/* pull in 64-bit atomic type using cmpxchg8b above */ -#if !defined(__x86_64__) && defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) -#include -#endif - -/* pull in 128-bit atomic type using cmpxchg16b above */ -#if defined(BOOST_ATOMIC_INT128_LOCK_FREE) && BOOST_ATOMIC_INT128_LOCK_FREE > 0 -#include -#endif - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ - -#endif diff --git a/include/boost/atomic/detail/generic-cas.hpp b/include/boost/atomic/detail/generic-cas.hpp deleted file mode 100644 index cf4a3d7..0000000 --- a/include/boost/atomic/detail/generic-cas.hpp +++ /dev/null @@ -1,206 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP -#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP - -// Copyright (c) 2009 Helge Bahmann -// -// Distributed under the Boost Software License, Version 1.0. 
-// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -/* fallback implementation for various compilation targets; -this is *not* efficient, particularly because all operations -are fully fenced (full memory barriers before and after -each operation) */ - -#if defined(__GNUC__) - namespace boost { namespace atomics { namespace detail { - inline int32_t - fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired) - { - return __sync_val_compare_and_swap_4(ptr, expected, desired); - } - #define BOOST_ATOMIC_HAVE_CAS32 1 - - #if defined(__amd64__) || defined(__i686__) - inline int64_t - fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired) - { - return __sync_val_compare_and_swap_8(ptr, expected, desired); - } - #define BOOST_ATOMIC_HAVE_CAS64 1 - #endif - }}} - -#elif defined(__ICL) || defined(_MSC_VER) - - #if defined(_MSC_VER) - #include - #include - #endif - - namespace boost { namespace atomics { namespace detail { - inline int32_t - fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired) - { - return _InterlockedCompareExchange(reinterpret_cast(ptr), desired, expected); - } - #define BOOST_ATOMIC_HAVE_CAS32 1 - #if defined(_WIN64) - inline int64_t - fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) - { - return _InterlockedCompareExchange64(ptr, desired, expected); - } - #define BOOST_ATOMIC_HAVE_CAS64 1 - #endif - }}} - -#elif (defined(__ICC) || defined(__ECC)) - namespace boost { namespace atomics { namespace detail { - inline int32_t - fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired) - { - return _InterlockedCompareExchange((void*)ptr, desired, expected); - } - #define BOOST_ATOMIC_HAVE_CAS32 1 - #if defined(__x86_64) - inline int64_t - fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) - { - return cas64(ptr, expected, desired); - } - #define BOOST_ATOMIC_HAVE_CAS64 1 - #elif defined(__ECC) //IA-64 version - inline int64_t - fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) - { - return _InterlockedCompareExchange64((void*)ptr, desired, expected); - } - #define BOOST_ATOMIC_HAVE_CAS64 1 - #endif - }}} - -#elif (defined(__SUNPRO_CC) && defined(__sparc)) - #include - namespace boost { namespace atomics { namespace detail { - inline int32_t - fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired) - { - return atomic_cas_32((volatile unsigned int*)ptr, expected, desired); - } - #define BOOST_ATOMIC_HAVE_CAS32 1 - - /* FIXME: check for 64 bit mode */ - inline int64_t - fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired) - { - return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired); - } - #define BOOST_ATOMIC_HAVE_CAS64 1 - }}} -#endif - - -namespace boost { -namespace atomics { -namespace detail { - -#ifdef BOOST_ATOMIC_HAVE_CAS32 -template -class atomic_generic_cas32 -{ -private: - typedef atomic_generic_cas32 this_type; -public: - explicit atomic_generic_cas32(T v) : i((int32_t)v) {} - atomic_generic_cas32() {} - T load(memory_order order=memory_order_seq_cst) const volatile - { - T expected=(T)i; - do { } while(!const_cast(this)->compare_exchange_weak(expected, expected, order, memory_order_relaxed)); - return expected; - } - void store(T v, 
memory_order order=memory_order_seq_cst) volatile - { - exchange(v); - } - bool compare_exchange_strong( - T &expected, - T desired, - memory_order success_order, - memory_order failure_order) volatile - { - T found; - found=(T)fenced_compare_exchange_strong_32(&i, (int32_t)expected, (int32_t)desired); - bool success=(found==expected); - expected=found; - return success; - } - bool compare_exchange_weak( - T &expected, - T desired, - memory_order success_order, - memory_order failure_order) volatile - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - T exchange(T r, memory_order order=memory_order_seq_cst) volatile - { - T expected=(T)i; - do { } while(!compare_exchange_weak(expected, r, order, memory_order_relaxed)); - return expected; - } - - bool is_lock_free(void) const volatile {return true;} - typedef T integral_type; -private: - mutable int32_t i; -}; - -template -class platform_atomic_integral : - public build_atomic_from_exchange > -{ -public: - typedef build_atomic_from_exchange > super; - explicit platform_atomic_integral(T v) : super(v) {} - platform_atomic_integral(void) {} -}; - -template -class platform_atomic_integral : - public build_atomic_from_larger_type, T> -{ -public: - typedef build_atomic_from_larger_type, T> super; - - explicit platform_atomic_integral(T v) : super(v) {} - platform_atomic_integral(void) {} -}; - -template -class platform_atomic_integral : - public build_atomic_from_larger_type, T> -{ -public: - typedef build_atomic_from_larger_type, T> super; - - explicit platform_atomic_integral(T v) : super(v) {} - platform_atomic_integral(void) {} -}; -#endif - -} } } - -#endif diff --git a/include/boost/atomic/detail/linux-arm.hpp b/include/boost/atomic/detail/linux-arm.hpp deleted file mode 100644 index 702ac3b..0000000 --- a/include/boost/atomic/detail/linux-arm.hpp +++ /dev/null @@ -1,192 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP -#define BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP - -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// Copyright (c) 2009, 2011 Helge Bahmann -// Copyright (c) 2009 Phil Endecott -// Copyright (c) 2013 Tim Blechmann -// Linux-specific code by Phil Endecott - -// Different ARM processors have different atomic instructions. In particular, -// architecture versions before v6 (which are still in widespread use, e.g. the -// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap. -// On Linux the kernel provides some support that lets us abstract away from -// these differences: it provides emulated CAS and barrier functions at special -// addresses that are guaranteed not to be interrupted by the kernel. Using -// this facility is slightly slower than inline assembler would be, but much -// faster than a system call. -// -// While this emulated CAS is "strong" in the sense that it does not fail -// "spuriously" (i.e.: it never fails to perform the exchange when the value -// found equals the value expected), it does not return the found value on -// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must -// return the found value on failure, and we have to manually load this value -// after the emulated CAS reports failure. This in turn introduces a race -// between the CAS failing (due to the "wrong" value being found) and subsequently -// loading (which might turn up the "right" value). 
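// [Editor's sketch, not part of this patch; it assumes a Linux/ARM target
// with the kernel "kuser" helpers mapped at their fixed addresses, as used by
// the code below.] The helper returns 0 iff it stored the new value; it never
// reports the value it actually found, so a failed CAS has to re-read *ptr
// afterwards. That racy re-read is exactly why only compare_exchange_weak
// semantics can be promised.

#include <stdint.h>

typedef int (*kuser_cmpxchg_t)(uint32_t oldval, uint32_t newval, volatile uint32_t* ptr);
#define KUSER_CMPXCHG ((kuser_cmpxchg_t)0xffff0fc0)

inline bool cas_weak(uint32_t& expected, uint32_t desired, volatile uint32_t* ptr)
{
    if (KUSER_CMPXCHG(expected, desired, ptr) == 0)
        return true;   // the exchange was performed
    expected = *ptr;   // may read back the expected value again: looks spurious
    return false;
}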
From an application's -// point of view this looks like "spurious failure", and therefore the -// emulated CAS is only good enough to provide compare_exchange_weak -// semantics. - -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost { -namespace atomics { -namespace detail { - -inline void -arm_barrier(void) -{ - void (*kernel_dmb)(void) = (void (*)(void)) 0xffff0fa0; - kernel_dmb(); -} - -inline void -platform_fence_before(memory_order order) -{ - switch(order) { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - arm_barrier(); - case memory_order_consume: - default:; - } -} - -inline void -platform_fence_after(memory_order order) -{ - switch(order) { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - arm_barrier(); - default:; - } -} - -inline void -platform_fence_before_store(memory_order order) -{ - platform_fence_before(order); -} - -inline void -platform_fence_after_store(memory_order order) -{ - if (order == memory_order_seq_cst) - arm_barrier(); -} - -inline void -platform_fence_after_load(memory_order order) -{ - platform_fence_after(order); -} - -template -inline bool -platform_cmpxchg32(T & expected, T desired, volatile T * ptr) -{ - typedef T (*kernel_cmpxchg32_t)(T oldval, T newval, volatile T * ptr); - - if (((kernel_cmpxchg32_t) 0xffff0fc0)(expected, desired, ptr) == 0) { - return true; - } else { - expected = *ptr; - return false; - } -} - -} -} - -#define BOOST_ATOMIC_THREAD_FENCE 2 -inline void -atomic_thread_fence(memory_order order) -{ - switch(order) { - case memory_order_acquire: - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - atomics::detail::arm_barrier(); - default:; - } -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -inline void -atomic_signal_fence(memory_order) -{ - __asm__ __volatile__ ("" ::: "memory"); -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before_store(order); - const_cast(v_) = 0; - atomics::detail::platform_fence_after_store(order); - } - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - atomics::detail::platform_fence_before(order); - uint32_t expected = v_; - do { - if (expected == 1) - break; - } while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_)); - atomics::detail::platform_fence_after(order); - return expected; - } - - BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&)) - BOOST_DELETED_FUNCTION(atomic_flag& operator= (atomic_flag const&)) -}; - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -} - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 -#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 -#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -#include - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ - -#endif diff --git a/include/boost/atomic/detail/windows.hpp b/include/boost/atomic/detail/windows.hpp deleted file mode 100644 index 02d1106..0000000 --- a/include/boost/atomic/detail/windows.hpp +++ 
/dev/null @@ -1,1789 +0,0 @@ -#ifndef BOOST_ATOMIC_DETAIL_WINDOWS_HPP -#define BOOST_ATOMIC_DETAIL_WINDOWS_HPP - -// Copyright (c) 2009 Helge Bahmann -// Copyright (c) 2012 Andrey Semashev -// Copyright (c) 2013 Tim Blechmann, Andrey Semashev -// -// Distributed under the Boost Software License, Version 1.0. -// See accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -#ifdef _MSC_VER -#pragma warning(push) -// 'order' : unreferenced formal parameter -#pragma warning(disable: 4100) -#endif - -#if defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86)) -extern "C" void _mm_pause(void); -#pragma intrinsic(_mm_pause) -#define BOOST_ATOMIC_X86_PAUSE() _mm_pause() -#else -#define BOOST_ATOMIC_X86_PAUSE() -#endif - -#if defined(_M_IX86) && _M_IX86 >= 500 -#define BOOST_ATOMIC_X86_HAS_CMPXCHG8B 1 -#endif - -// Define hardware barriers -#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2)) -extern "C" void _mm_mfence(void); -#pragma intrinsic(_mm_mfence) -#endif - -#if defined(BOOST_MSVC) && defined(_M_ARM) -extern "C" void __dmb(unsigned int); -#pragma intrinsic(__dmb) -extern "C" __int8 __iso_volatile_load8(const volatile __int8*); -#pragma intrinsic(__iso_volatile_load8) -extern "C" __int16 __iso_volatile_load16(const volatile __int16*); -#pragma intrinsic(__iso_volatile_load16) -extern "C" __int32 __iso_volatile_load32(const volatile __int32*); -#pragma intrinsic(__iso_volatile_load32) -extern "C" __int64 __iso_volatile_load64(const volatile __int64*); -#pragma intrinsic(__iso_volatile_load64) -extern "C" void __iso_volatile_store8(volatile __int8*, __int8); -#pragma intrinsic(__iso_volatile_store8) -extern "C" void __iso_volatile_store16(volatile __int16*, __int16); -#pragma intrinsic(__iso_volatile_store16) -extern "C" void __iso_volatile_store32(volatile __int32*, __int32); -#pragma intrinsic(__iso_volatile_store32) -extern "C" void __iso_volatile_store64(volatile __int64*, __int64); -#pragma intrinsic(__iso_volatile_store64) - -#define BOOST_ATOMIC_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p)) -#define BOOST_ATOMIC_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p)) -#define BOOST_ATOMIC_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p)) -#define BOOST_ATOMIC_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p)) -#define BOOST_ATOMIC_STORE8(p, v) __iso_volatile_store8((const volatile __int8*)(p), (__int8)(v)) -#define BOOST_ATOMIC_STORE16(p, v) __iso_volatile_store16((const volatile __int16*)(p), (__int16)(v)) -#define BOOST_ATOMIC_STORE32(p, v) __iso_volatile_store32((const volatile __int32*)(p), (__int32)(v)) -#define BOOST_ATOMIC_STORE64(p, v) __iso_volatile_store64((const volatile __int64*)(p), (__int64)(v)) - -#else - -#define BOOST_ATOMIC_LOAD8(p) *p -#define BOOST_ATOMIC_LOAD16(p) *p -#define BOOST_ATOMIC_LOAD32(p) *p -#define BOOST_ATOMIC_LOAD64(p) *p -#define BOOST_ATOMIC_STORE8(p, v) *p = v -#define BOOST_ATOMIC_STORE16(p, v) *p = v -#define BOOST_ATOMIC_STORE32(p, v) *p = v -#define BOOST_ATOMIC_STORE64(p, v) *p = v - -#endif - -// Define compiler barriers -#if defined(__INTEL_COMPILER) -#define BOOST_ATOMIC_COMPILER_BARRIER() __memory_barrier() -#elif defined(_MSC_VER) && !defined(_WIN32_WCE) -extern "C" void _ReadWriteBarrier(void); -#pragma intrinsic(_ReadWriteBarrier) -#define BOOST_ATOMIC_COMPILER_BARRIER() 
_ReadWriteBarrier() -#endif - -#ifndef BOOST_ATOMIC_COMPILER_BARRIER -#define BOOST_ATOMIC_COMPILER_BARRIER() -#endif - -namespace boost { -namespace atomics { -namespace detail { - -BOOST_FORCEINLINE void hardware_full_fence(void) BOOST_NOEXCEPT -{ -#if defined(BOOST_MSVC) && defined(_M_ARM) - __dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later -#elif defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2)) - // Use mfence only if SSE2 is available - _mm_mfence(); -#else - long tmp; - BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0); -#endif -} - -BOOST_FORCEINLINE void -platform_fence_before(memory_order order) BOOST_NOEXCEPT -{ - BOOST_ATOMIC_COMPILER_BARRIER(); - -#if defined(BOOST_MSVC) && defined(_M_ARM) - switch(order) - { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - hardware_full_fence(); - case memory_order_consume: - default:; - } - - BOOST_ATOMIC_COMPILER_BARRIER(); -#endif -} - -BOOST_FORCEINLINE void -platform_fence_after(memory_order order) BOOST_NOEXCEPT -{ - BOOST_ATOMIC_COMPILER_BARRIER(); - -#if defined(BOOST_MSVC) && defined(_M_ARM) - switch(order) - { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - hardware_full_fence(); - default:; - } - - BOOST_ATOMIC_COMPILER_BARRIER(); -#endif -} - -BOOST_FORCEINLINE void -platform_fence_before_store(memory_order order) BOOST_NOEXCEPT -{ - platform_fence_before(order); -} - -BOOST_FORCEINLINE void -platform_fence_after_store(memory_order order) BOOST_NOEXCEPT -{ - BOOST_ATOMIC_COMPILER_BARRIER(); - -#if defined(BOOST_MSVC) && defined(_M_ARM) - if (order == memory_order_seq_cst) - hardware_full_fence(); - - BOOST_ATOMIC_COMPILER_BARRIER(); -#endif -} - -BOOST_FORCEINLINE void -platform_fence_after_load(memory_order order) BOOST_NOEXCEPT -{ - BOOST_ATOMIC_COMPILER_BARRIER(); - - // On x86 and x86_64 there is no need for a hardware barrier, - // even if seq_cst memory order is requested, because all - // seq_cst writes are implemented with lock-prefixed operations - // or xchg which has implied lock prefix. Therefore normal loads - // are already ordered with seq_cst stores on these architectures. 
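-    // Editorial illustration (not part of the original file): with MSVC on
-    // x86 a seq_cst store compiles to a lock-prefixed RMW or to "xchg [v], eax",
-    // which already acts as a full fence, so the matching seq_cst load can be
-    // a plain "mov eax, [v]" followed by a compiler-only barrier.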
- -#if !(defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86))) - if (order == memory_order_seq_cst) - hardware_full_fence(); -#endif -} - -} // namespace detail -} // namespace atomics - -#define BOOST_ATOMIC_THREAD_FENCE 2 -BOOST_FORCEINLINE void -atomic_thread_fence(memory_order order) -{ - BOOST_ATOMIC_COMPILER_BARRIER(); - if (order == memory_order_seq_cst) - atomics::detail::hardware_full_fence(); -} - -#define BOOST_ATOMIC_SIGNAL_FENCE 2 -BOOST_FORCEINLINE void -atomic_signal_fence(memory_order) -{ - BOOST_ATOMIC_COMPILER_BARRIER(); -} - -class atomic_flag -{ -private: - uint32_t v_; - -public: - BOOST_CONSTEXPR atomic_flag(void) BOOST_NOEXCEPT : v_(0) {} - - bool - test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - BOOST_ATOMIC_COMPILER_BARRIER(); - const uint32_t old = (uint32_t)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, 1); - BOOST_ATOMIC_COMPILER_BARRIER(); - return old != 0; - } - - void - clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - atomics::detail::platform_fence_before_store(order); - BOOST_ATOMIC_STORE32(&v_, 0); - atomics::detail::platform_fence_after_store(order); - } else { - BOOST_ATOMIC_COMPILER_BARRIER(); - BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, 0); - BOOST_ATOMIC_COMPILER_BARRIER(); - } - } - - BOOST_DELETED_FUNCTION(atomic_flag(const atomic_flag&)) - BOOST_DELETED_FUNCTION(atomic_flag & operator= (const atomic_flag&)) -}; - -} // namespace boost - -#define BOOST_ATOMIC_FLAG_LOCK_FREE 2 - -#include - -#if !defined(BOOST_ATOMIC_FORCE_FALLBACK) - -#define BOOST_ATOMIC_CHAR_LOCK_FREE 2 -#define BOOST_ATOMIC_SHORT_LOCK_FREE 2 -#define BOOST_ATOMIC_INT_LOCK_FREE 2 -#define BOOST_ATOMIC_LONG_LOCK_FREE 2 -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64) -#define BOOST_ATOMIC_LLONG_LOCK_FREE 2 -#else -#define BOOST_ATOMIC_LLONG_LOCK_FREE 0 -#endif -#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 -#define BOOST_ATOMIC_BOOL_LOCK_FREE 2 - -namespace boost { -namespace atomics { -namespace detail { - -#if defined(_MSC_VER) -#pragma warning(push) -// 'char' : forcing value to bool 'true' or 'false' (performance warning) -#pragma warning(disable: 4800) -#endif - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 - typedef value_type storage_type; -#else - typedef uint32_t storage_type; -#endif - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before_store(order); - BOOST_ATOMIC_STORE8(&v_, static_cast< storage_type >(v)); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = static_cast< value_type >(BOOST_ATOMIC_LOAD8(&v_)); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); -#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8 - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&v_, v)); -#else - v = static_cast< value_type 
>(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v)); -#endif - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - typedef typename make_signed< value_type >::type signed_value_type; - return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); -#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, v)); -#else - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v)); -#endif - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 - value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&v_, desired, previous)); -#else - value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous)); -#endif - bool success = (previous == oldval); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = oldval; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#ifdef BOOST_ATOMIC_INTERLOCKED_AND8 - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&v_, v)); - platform_fence_after(order); - return v; -#elif defined(BOOST_ATOMIC_INTERLOCKED_AND) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#ifdef BOOST_ATOMIC_INTERLOCKED_OR8 - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&v_, v)); - platform_fence_after(order); - return v; -#elif defined(BOOST_ATOMIC_INTERLOCKED_OR) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#ifdef BOOST_ATOMIC_INTERLOCKED_XOR8 - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&v_, v)); - platform_fence_after(order); - return v; -#elif defined(BOOST_ATOMIC_INTERLOCKED_XOR) - platform_fence_before(order); - v = static_cast< value_type 
>(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#if defined(_MSC_VER) -#pragma warning(pop) -#endif - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 - typedef value_type storage_type; -#else - typedef uint32_t storage_type; -#endif - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before_store(order); - BOOST_ATOMIC_STORE16(&v_, static_cast< storage_type >(v)); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = static_cast< value_type >(BOOST_ATOMIC_LOAD16(&v_)); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); -#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16 - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&v_, v)); -#else - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v)); -#endif - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - typedef typename make_signed< value_type >::type signed_value_type; - return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); -#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE16 - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&v_, v)); -#else - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v)); -#endif - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 - value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&v_, desired, previous)); -#else - value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous)); -#endif - bool success = (previous == oldval); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = oldval; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, 
- memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#ifdef BOOST_ATOMIC_INTERLOCKED_AND16 - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&v_, v)); - platform_fence_after(order); - return v; -#elif defined(BOOST_ATOMIC_INTERLOCKED_AND) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#ifdef BOOST_ATOMIC_INTERLOCKED_OR16 - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&v_, v)); - platform_fence_after(order); - return v; -#elif defined(BOOST_ATOMIC_INTERLOCKED_OR) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#ifdef BOOST_ATOMIC_INTERLOCKED_XOR16 - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&v_, v)); - platform_fence_after(order); - return v; -#elif defined(BOOST_ATOMIC_INTERLOCKED_XOR) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef value_type storage_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before_store(order); - BOOST_ATOMIC_STORE32(&v_, static_cast< storage_type >(v)); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = static_cast< value_type >(BOOST_ATOMIC_LOAD32(&v_)); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - v = static_cast< value_type 
>(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v)); - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - typedef typename make_signed< value_type >::type signed_value_type; - return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v)); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous)); - bool success = (previous == oldval); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = oldval; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#if defined(BOOST_ATOMIC_INTERLOCKED_AND) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#if defined(BOOST_ATOMIC_INTERLOCKED_OR) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for(; !compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#if defined(BOOST_ATOMIC_INTERLOCKED_XOR) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -// MSVC 2012 fails to recognize sizeof(T) as a constant expression in template specializations -enum msvc_sizeof_pointer_workaround { sizeof_pointer = sizeof(void*) }; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef std::ptrdiff_t difference_type; - typedef void* value_type; - -protected: - typedef 
value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before_store(order); -#if defined(BOOST_MSVC) && defined(_M_ARM) - BOOST_ATOMIC_STORE32(&v_, v); -#else - const_cast(v_) = v; -#endif - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { -#if defined(BOOST_MSVC) && defined(_M_ARM) - value_type v = (value_type)BOOST_ATOMIC_LOAD32(&v_); -#else - value_type v = const_cast(v_); -#endif - platform_fence_after_load(order); - return v; - } - - value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - v = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(&v_, v); - platform_fence_after(order); - return v; - } - - bool compare_exchange_strong(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - value_type oldval = (value_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&v_, desired, previous); - bool success = (previous == oldval); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = oldval; - return success; - } - - bool compare_exchange_weak(value_type & expected, value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - value_type res = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(&v_, v); - platform_fence_after(order); - return res; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - BOOST_ATOMIC_DECLARE_VOID_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T* value_type; - typedef std::ptrdiff_t difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before_store(order); -#if defined(BOOST_MSVC) && defined(_M_ARM) - BOOST_ATOMIC_STORE32(&v_, v); -#else - const_cast(v_) = v; -#endif - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { -#if defined(BOOST_MSVC) && defined(_M_ARM) - value_type v = (value_type)BOOST_ATOMIC_LOAD32(&v_); -#else - value_type v = const_cast(v_); -#endif - 
platform_fence_after_load(order); - return v; - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - v = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(&v_, v); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - value_type oldval = (value_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&v_, desired, previous); - bool success = (previous == oldval); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = oldval; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - v = v * sizeof(*v_); - platform_fence_before(order); - value_type res = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(&v_, v); - platform_fence_after(order); - return res; - } - - value_type - fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - return fetch_add(-v, order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_POINTER_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - value_type v_; -}; - - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 - typedef uint8_t storage_type; -#else - typedef uint32_t storage_type; -#endif - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(reinterpret_cast< storage_type const& >(v)) - { - } -#else - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } -#endif - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - BOOST_ATOMIC_STORE8(&v_, tmp); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = (storage_type)BOOST_ATOMIC_LOAD8(&v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); -#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8 - tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, tmp)); -#else - tmp = static_cast< storage_type 
>(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp)); -#endif - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8 - storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&v_, desired_s, expected_s)); -#else - storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s)); -#endif - bool success = (oldval == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &oldval, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 - typedef uint16_t storage_type; -#else - typedef uint32_t storage_type; -#endif - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 - BOOST_CONSTEXPR explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(reinterpret_cast< storage_type const& >(v)) - { - } -#else - explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } -#endif - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - BOOST_ATOMIC_STORE16(&v_, tmp); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = (storage_type)BOOST_ATOMIC_LOAD16(&v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); -#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE16 - tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&v_, tmp)); -#else - tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp)); -#endif - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - 
memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); -#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16 - storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&v_, desired_s, expected_s)); -#else - storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s)); -#endif - bool success = (oldval == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &oldval, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint32_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - BOOST_ATOMIC_STORE32(&v_, tmp); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = (storage_type)BOOST_ATOMIC_LOAD32(&v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp)); - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); - storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s)); - bool success = (oldval == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &oldval, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order 
success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#if defined(_M_AMD64) || defined(_M_IA64) - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef value_type storage_type; - typedef T difference_type; - -protected: - typedef value_type value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : v_(v) {} - - void - store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - platform_fence_before_store(order); - BOOST_ATOMIC_STORE64(&v_, v); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - value_type v = static_cast< value_type >(BOOST_ATOMIC_LOAD64(&v_)); - platform_fence_after_load(order); - return v; - } - - value_type - fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&v_, v)); - platform_fence_after(order); - return v; - } - - value_type - fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - typedef typename make_signed< value_type >::type signed_value_type; - return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order); - } - - value_type - exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&v_, v)); - platform_fence_after(order); - return v; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - value_type previous = expected; - platform_fence_before(success_order); - value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&v_, desired, previous)); - bool success = (previous == oldval); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - expected = oldval; - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, success_order, failure_order); - } - - value_type - fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#if defined(BOOST_ATOMIC_INTERLOCKED_AND64) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_or(value_type v, memory_order order = 
memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#if defined(BOOST_ATOMIC_INTERLOCKED_OR64) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - value_type - fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { -#if defined(BOOST_ATOMIC_INTERLOCKED_XOR64) - platform_fence_before(order); - v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&v_, v)); - platform_fence_after(order); - return v; -#else - value_type tmp = load(memory_order_relaxed); - for (; !compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed);) - { - BOOST_ATOMIC_X86_PAUSE(); - } - return tmp; -#endif - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -template -class base_atomic -{ -private: - typedef base_atomic this_type; - typedef T value_type; - typedef uint64_t storage_type; - -protected: - typedef value_type const& value_arg_type; - -public: - BOOST_DEFAULTED_FUNCTION(base_atomic(void), {}) - explicit base_atomic(value_type const& v) : v_(0) - { - memcpy(&v_, &v, sizeof(value_type)); - } - - void - store(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - if (order != memory_order_seq_cst) { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before_store(order); - BOOST_ATOMIC_STORE64(&v_, tmp); - platform_fence_after_store(order); - } else { - exchange(v, order); - } - } - - value_type - load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT - { - storage_type tmp = (storage_type)BOOST_ATOMIC_LOAD64(&v_); - platform_fence_after_load(order); - value_type v; - memcpy(&v, &tmp, sizeof(value_type)); - return v; - } - - value_type - exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT - { - storage_type tmp = 0; - memcpy(&tmp, &v, sizeof(value_type)); - platform_fence_before(order); - tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&v_, tmp)); - platform_fence_after(order); - value_type res; - memcpy(&res, &tmp, sizeof(value_type)); - return res; - } - - bool - compare_exchange_strong( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - storage_type expected_s = 0, desired_s = 0; - memcpy(&expected_s, &expected, sizeof(value_type)); - memcpy(&desired_s, &desired, sizeof(value_type)); - platform_fence_before(success_order); - storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&v_, desired_s, expected_s)); - bool success = (oldval == expected_s); - if (success) - platform_fence_after(success_order); - else - platform_fence_after(failure_order); - memcpy(&expected, &oldval, sizeof(value_type)); - return success; - } - - bool - compare_exchange_weak( - value_type & expected, - value_type const& desired, - memory_order success_order, - memory_order failure_order) volatile BOOST_NOEXCEPT - { - return compare_exchange_strong(expected, desired, 
success_order, failure_order); - } - - bool - is_lock_free(void) const volatile BOOST_NOEXCEPT - { - return true; - } - - BOOST_ATOMIC_DECLARE_BASE_OPERATORS - - BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&)) - BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&)) - -private: - storage_type v_; -}; - -#elif defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) - -template -inline bool -platform_cmpxchg64_strong(T & expected, T desired, volatile T * p) BOOST_NOEXCEPT -{ -#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64) - const T oldval = BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected); - const bool result = (oldval == expected); - expected = oldval; - return result; -#else - bool result; - __asm - { - mov edi, p - mov esi, expected - mov ebx, dword ptr [desired] - mov ecx, dword ptr [desired + 4] - mov eax, dword ptr [esi] - mov edx, dword ptr [esi + 4] - lock cmpxchg8b qword ptr [edi] - mov dword ptr [esi], eax - mov dword ptr [esi + 4], edx - sete result - }; - return result; -#endif -} - -// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations: -// -// The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically: -// * Reading or writing a quadword aligned on a 64-bit boundary -// -// Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64 bit native types for storage and dynamic memory allocations -// have at least 8 byte alignment. The only unfortunate case is when atomic is placeod on the stack and it is not 8-byte aligned (like on 32 bit Windows). - -template -inline void -platform_store64(T value, volatile T * p) BOOST_NOEXCEPT -{ - if (((uint32_t)p & 0x00000007) == 0) - { -#if defined(_M_IX86_FP) && _M_IX86_FP >= 2 - __asm - { - mov edx, p - movq xmm4, value - movq qword ptr [edx], xmm4 - }; -#else - __asm - { - mov edx, p - fild value - fistp qword ptr [edx] - }; -#endif - } - else - { - __asm - { - mov edi, p - mov ebx, dword ptr [value] - mov ecx, dword ptr [value + 4] - mov eax, dword ptr [edi] - mov edx, dword ptr [edi + 4] - align 16 -again: - lock cmpxchg8b qword ptr [edi] - jne again - }; - } -} - -template -inline T -platform_load64(const volatile T * p) BOOST_NOEXCEPT -{ - T value; - - if (((uint32_t)p & 0x00000007) == 0) - { -#if defined(_M_IX86_FP) && _M_IX86_FP >= 2 - __asm - { - mov edx, p - movq xmm4, qword ptr [edx] - movq value, xmm4 - }; -#else - __asm - { - mov edx, p - fild qword ptr [edx] - fistp value - }; -#endif - } - else - { - // We don't care for comparison result here; the previous value will be stored into value anyway. - // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b. 
- __asm - { - mov edi, p - mov eax, ebx - mov edx, ecx - lock cmpxchg8b qword ptr [edi] - mov dword ptr [value], eax - mov dword ptr [value + 4], edx - }; - } - - return value; -} - -#endif - -} // namespace detail -} // namespace atomics -} // namespace boost - -#undef BOOST_ATOMIC_COMPILER_BARRIER -#undef BOOST_ATOMIC_LOAD8 -#undef BOOST_ATOMIC_LOAD16 -#undef BOOST_ATOMIC_LOAD32 -#undef BOOST_ATOMIC_LOAD64 -#undef BOOST_ATOMIC_STORE8 -#undef BOOST_ATOMIC_STORE16 -#undef BOOST_ATOMIC_STORE32 -#undef BOOST_ATOMIC_STORE64 - -/* pull in 64-bit atomic type using cmpxchg8b above */ -#if defined(BOOST_ATOMIC_X86_HAS_CMPXCHG8B) -#include -#endif - -#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */ - -#ifdef _MSC_VER -#pragma warning(pop) -#endif - -#endif From 890313b8d7be9597ed19a773cfa1620de3c4a67f Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 23:13:58 +0400 Subject: [PATCH 21/23] Added missing include. --- include/boost/atomic/detail/ops_msvc_x86.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/include/boost/atomic/detail/ops_msvc_x86.hpp b/include/boost/atomic/detail/ops_msvc_x86.hpp index 404535c..d25a5cd 100644 --- a/include/boost/atomic/detail/ops_msvc_x86.hpp +++ b/include/boost/atomic/detail/ops_msvc_x86.hpp @@ -17,6 +17,7 @@ #define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_ #include +#include #include #include #include From 08d30a232011ea8517fd656daa4f659505751657 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sun, 11 May 2014 23:14:47 +0400 Subject: [PATCH 22/23] Added missing include. --- include/boost/atomic/detail/ops_msvc_arm.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/include/boost/atomic/detail/ops_msvc_arm.hpp b/include/boost/atomic/detail/ops_msvc_arm.hpp index 409bcea..bde1e78 100644 --- a/include/boost/atomic/detail/ops_msvc_arm.hpp +++ b/include/boost/atomic/detail/ops_msvc_arm.hpp @@ -18,6 +18,7 @@ #include #include +#include #include #include #include From 00c21f9978e74c7cc2c2b7fb1ae18cd664abac10 Mon Sep 17 00:00:00 2001 From: Andrey Semashev Date: Sat, 17 May 2014 19:20:51 +0400 Subject: [PATCH 23/23] Documented new feature test and configuration macros. Fixed disabled gcc atomic backend. --- doc/atomic.hpp | 31 +++++-- doc/atomic.qbk | 109 +++++++++++++++++++---- include/boost/atomic/detail/platform.hpp | 4 +- 3 files changed, 121 insertions(+), 23 deletions(-) diff --git a/doc/atomic.hpp b/doc/atomic.hpp index 4e1cdbb..60e61c2 100644 --- a/doc/atomic.hpp +++ b/doc/atomic.hpp @@ -90,7 +90,7 @@ public: \param value Initial value Creates and initializes an atomic variable. */ - atomic(Type value); + explicit atomic(Type value); /** \brief Read the current value of the atomic variable @@ -496,15 +496,14 @@ public: */ Type operator--(int); -private: /** \brief Deleted copy constructor */ - atomic(const atomic &); + atomic(const atomic &) = delete; /** \brief Deleted copy assignment */ - void operator=(const atomic &); + const atomic & operator=(const atomic &) = delete; }; /** - \brief Insert explicit fence + \brief Insert explicit fence for thread synchronization \param order Memory ordering constraint Inserts an explicit fence. The exact semantic depends on the @@ -523,4 +522,26 @@ private: */ void atomic_thread_fence(memory_order order); +/** + \brief Insert explicit fence for synchronization with a signal handler + \param order Memory ordering constraint + + Inserts an explicit fence to synchronize with a signal handler called within + the context of the same thread. 
The fence ensures the corresponding operations
+ around it are complete and/or not started. The exact semantic depends on the
+ type of fence inserted:
+
+ - \c memory_order_relaxed: No operation
+ - \c memory_order_release: Ensures the operations before the fence are complete
+ - \c memory_order_acquire or \c memory_order_consume: Ensures the operations
+   after the fence are not started.
+ - \c memory_order_acq_rel or \c memory_order_seq_cst: Ensures the operations
+   around the fence do not cross it.
+
+ Note that this call does not affect the visibility order of memory operations
+ to other threads. It is functionally similar to \c atomic_thread_fence, only
+ it does not generate any instructions to synchronize hardware threads.
+*/
+void atomic_signal_fence(memory_order order);
+
 }
diff --git a/doc/atomic.qbk b/doc/atomic.qbk
index bbd2257..6307335 100644
--- a/doc/atomic.qbk
+++ b/doc/atomic.qbk
@@ -1,5 +1,6 @@
 [/
  / Copyright (c) 2009 Helge Bahmann
+ / Copyright (c) 2014 Andrey Semashev
  /
  / Distributed under the Boost Software License, Version 1.0. (See accompanying
  / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
@@ -7,7 +8,7 @@
 [library Boost.Atomic
     [quickbook 1.4]
-    [authors [Bahmann, Helge]]
+    [authors [Bahmann, Helge][Semashev, Andrey]]
     [copyright 2011 Helge Bahmann]
     [copyright 2012 Tim Blechmann]
     [copyright 2013 Andrey Semashev]
@@ -90,6 +91,7 @@ hierarchies may generally reorder memory references at will. As a
 consequence a program such as:

 [c++]
+
   int x = 0, int y = 0;

   thread1:
@@ -339,8 +341,53 @@ on the same or different atomic variables, or use a "stand-alone"

 [section:interface Programming interfaces]

+[section:configuration Configuration and building]
+
+The library contains header-only and compiled parts. It is
+header-only for lock-free cases but requires a separate binary to
+implement the lock-based emulation. Users can detect whether
+linking to the compiled part is required by checking the
+[link atomic.interface.feature_macros feature macros].
+
+The following macros affect library behavior:
+
+[table
+    [[Macro] [Description]]
+    [[`BOOST_ATOMIC_NO_CMPXCHG16B`] [Affects 64-bit x86 MSVC builds. When defined,
+    the library assumes the target CPU does not support the `cmpxchg16b` instruction used
+    to implement 128-bit atomic operations. This is the case with some early 64-bit AMD CPUs;
+    all Intel CPUs and current AMD CPUs support this instruction. The library does not
+    perform runtime detection of this instruction, so running code that uses 128-bit
+    atomics on such CPUs will result in crashes, unless this macro is defined. Note that
+    the macro does not affect GCC and compatible compilers because the library infers
+    this information from the compiler-defined macros.]]
+    [[`BOOST_ATOMIC_FORCE_FALLBACK`] [When defined, all operations are implemented with locks.
+    This is mostly used for testing and should not be used in real-world projects.]]
+    [[`BOOST_ATOMIC_DYN_LINK` and `BOOST_ALL_DYN_LINK`] [Control library linking. If defined,
+    the library assumes dynamic linking, otherwise static. The latter macro affects all Boost
+    libraries, not just [*Boost.Atomic].]]
+    [[`BOOST_ATOMIC_NO_LIB` and `BOOST_ALL_NO_LIB`] [Control library auto-linking on Windows.
+    When defined, disables auto-linking. The latter macro affects all Boost libraries,
+    not just [*Boost.Atomic].]]
+]
+
+Besides macros, it is important to specify the correct compiler options for the target CPU.
+With GCC and compatible compilers, this affects whether particular atomic operations are
+lock-free or not.
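[Editor's note: to make the capability check described in this new section concrete, here is a minimal sketch, not part of the patch, of how an application can test at compile time whether `boost::atomic<int>` is always lock-free, and hence whether the compiled part of the library is needed at all; the output strings are illustrative only.]

    #include <iostream>
    #include <boost/atomic.hpp>

    boost::atomic<int> counter(0);

    int main()
    {
        counter.fetch_add(1, boost::memory_order_relaxed);
    #if BOOST_ATOMIC_INT_LOCK_FREE == 2
        // Always lock-free: this translation unit never touches the
        // lock pool, so no linking against boost_atomic is required.
        std::cout << "atomic<int> is always lock-free" << std::endl;
    #else
        // 0 or 1: the lock-based emulation may be used at run time,
        // so the program must link against the compiled library.
        std::cout << "atomic<int> may fall back to locks" << std::endl;
    #endif
        return 0;
    }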
+
+The Boost building process is described in the [@http://www.boost.org/doc/libs/release/more/getting_started/ Getting Started guide].
+For example, you can build [*Boost.Atomic] with the following command line:
+
+[pre
+    bjam --with-atomic variant=release instruction-set=core2 stage
+]
+
+[endsect]
+
 [section:interface_memory_order Memory order]

+    #include <boost/memory_order.hpp>
+
 The enumeration [^boost::memory_order] defines the following
 values to represent memory ordering constraints:

@@ -352,7 +399,8 @@ values to represent memory ordering constraints:
         operation. This constraint is suitable only when
         either a) further operations do not depend on the outcome of
         the atomic operation or b) ordering is enforced through
-        stand-alone `atomic_thread_fence` operations
+        stand-alone `atomic_thread_fence` operations. The operation on
+        the atomic value itself is still atomic though.
     ]]
     [[`memory_order_release`] [
         Perform `release` operation. Informally speaking,
@@ -365,8 +413,8 @@ values to represent memory ordering constraints:
         before this point.
     ]]
     [[`memory_order_consume`] [
-        Perform `consume` operation. More restrictive (and
-        usually more efficient) than `memory_order_acquire`
+        Perform `consume` operation. More relaxed (and
+        on some architectures more efficient) than `memory_order_acquire`
         as it only affects succeeding operations that are
         computationally-dependent on the value retrieved from
         an atomic variable.
     ]]
     [[`memory_order_acq_rel`] [Perform both `release` and `acquire` operations]]
     [[`memory_order_seq_cst`] [
         Enforce sequential consistency. Implies `memory_order_acq_rel`, but
-        additional enforces total order for all operations such qualified.
+        additionally enforces total order for all such qualified operations.
     ]]
 ]

@@ -385,6 +433,8 @@ of the various ordering constraints.

 [section:interface_atomic_object Atomic objects]

+    #include <boost/atomic/atomic.hpp>
+
 [^boost::atomic<['T]>] provides methods for atomically accessing
 variables of a suitable type [^['T]]. The type is suitable if
 it satisfies one of the following constraints:
@@ -551,6 +601,8 @@ constraint.

 [section:interface_fences Fences]

+    #include <boost/atomic/fences.hpp>
+
 [table
     [[Syntax] [Description]]
     [
@@ -567,6 +619,8 @@ constraint.

 [section:feature_macros Feature testing macros]

+    #include <boost/atomic/capabilities.hpp>
+
 [*Boost.Atomic] defines a number of macros to allow compile-time
 detection whether an atomic data type is implemented using
 "true" atomic operations, or whether an internal "lock" is
@@ -617,10 +671,6 @@ sometimes require a lock, and to `2` if they are always lock-free:
         [`BOOST_ATOMIC_LLONG_LOCK_FREE`]
         [Indicate whether `atomic<long long>` (including signed/unsigned variants) is lock-free]
     ]
-    [
-        [`BOOST_ATOMIC_INT128_LOCK_FREE`]
-        [Indicate whether `atomic` (including signed/unsigned variants) is lock-free. This macro is a non-standard extension.]
-    ]
     [
         [`BOOST_ATOMIC_ADDRESS_LOCK_FREE` or `BOOST_ATOMIC_POINTER_LOCK_FREE`]
         [Indicate whether `atomic<T *>` is lock-free]
@@ -635,6 +685,36 @@ sometimes require a lock, and to `2` if they are always lock-free:
     ]
 ]
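[Editor's note: the following fragment, again editorial rather than part of the patch, illustrates the typical use of these macros: turning a missing capability into a compile-time error instead of a silent fall-back to locking. The `node` type is hypothetical.]

    #include <boost/static_assert.hpp>
    #include <boost/atomic.hpp>

    // Require truly lock-free pointer operations, e.g. because this
    // structure will be placed in process-shared memory.
    BOOST_STATIC_ASSERT(BOOST_ATOMIC_POINTER_LOCK_FREE == 2);

    struct node { node* next; };
    boost::atomic<node*> stack_head; // head of an intrusive lock-free stack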
+In addition to these standard macros, [*Boost.Atomic] also defines a number of extension
+macros, which can be equally useful. Like the standard ones, these macros are defined to the
+values `0`, `1` and `2` to indicate whether the corresponding operations are lock-free or not.
+
+[table
+    [[Macro] [Description]]
+    [
+        [`BOOST_ATOMIC_INT8_LOCK_FREE`]
+        [Indicate whether `atomic<int8_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT16_LOCK_FREE`]
+        [Indicate whether `atomic<int16_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT32_LOCK_FREE`]
+        [Indicate whether `atomic<int32_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT64_LOCK_FREE`]
+        [Indicate whether `atomic<int64_type>` is lock-free.]
+    ]
+    [
+        [`BOOST_ATOMIC_INT128_LOCK_FREE`]
+        [Indicate whether `atomic<int128_type>` is lock-free.]
+    ]
+]
+
+In the table above, `intN_type` is a type that fits in `N` contiguous bits of storage,
+suitably aligned for atomic operations.
+
 [endsect]

 [endsect]
@@ -684,7 +764,7 @@ limitations that cannot be lifted without compiler support:
   cases this may result in less efficient code than a C++11 compiler
   could generate.
 * [*No interprocess fallback]: using `atomic<T>` in shared memory only works
-  correctly, if `atomic<T>::is_lock_free() == true`
+  correctly if `atomic<T>::is_lock_free() == true`.

 [endsect]
@@ -729,17 +809,14 @@ implementation behaves as expected:
 [*Boost.Atomic] has been tested on and is known to work on the
 following compilers/platforms:

-* gcc 4.x: i386, x86_64, ppc32, ppc64, armv5, armv6, alpha
-* Visual Studio Express 2008/Windows XP, i386
-
-If you have an unsupported platform, contact me and I will
-work to add support for it.
+* gcc 4.x: i386, x86_64, ppc32, ppc64, sparcv9, armv6, alpha
+* Visual Studio Express 2008/Windows XP, x86, x64, ARM

 [endsect]

 [section:acknowledgements Acknowledgements]

-* Adam Wulkiewicz created the logo used on the [@https://github.com/boostorg/log GitHub project page]. The logo was taken from his [@https://github.com/awulkiew/boost-logos collection] of Boost logos.
+* Adam Wulkiewicz created the logo used on the [@https://github.com/boostorg/atomic GitHub project page]. The logo was taken from his [@https://github.com/awulkiew/boost-logos collection] of Boost logos.

 [endsect]
diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp
index d0423d2..85e6a12 100644
--- a/include/boost/atomic/detail/platform.hpp
+++ b/include/boost/atomic/detail/platform.hpp
@@ -25,12 +25,12 @@
 #define BOOST_ATOMIC_DETAIL_PLATFORM emulated
 #define BOOST_ATOMIC_EMULATED

-/*
+
 #elif (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407))\
     || (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))

 #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic

-*/
+
 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

 #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86