
Converted memory_order to a scoped enum in C++11 and later.

This follows the change expected in C++2a, where proposal P0439 has been
accepted into working draft N4713. The old memory order constants are still
available for backward compatibility.

http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0439r0.html
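
As a quick illustration of the pattern this commit adopts — a minimal standalone sketch, not the Boost header itself (the demo namespace and assertion messages are illustrative):

namespace demo {

// Scoped enum whose enumerator values double as bit flags, mirroring the
// layout boost::memory_order uses so that ordering strength can be tested
// with simple masks.
enum class memory_order : unsigned int
{
    relaxed = 0,
    consume = 1,
    acquire = 2,
    release = 4,
    acq_rel = 6,  // acquire | release
    seq_cst = 14  // acq_rel | 8
};

// Backward-compatible constants: pre-existing code that spells
// demo::memory_order_acquire keeps compiling unchanged.
constexpr memory_order memory_order_acquire = memory_order::acquire;
constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;

} // namespace demo

// The old and new spellings name the same enumerator.
static_assert(demo::memory_order_acquire == demo::memory_order::acquire, "same value");

Because a scoped enum no longer converts to an integer implicitly, every bit test in the hunks below gains an explicit static_cast< unsigned int >.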
Andrey Semashev
2018-01-29 00:38:44 +03:00
parent 9c8bca1d01
commit 10c61bb25d
12 changed files with 78 additions and 50 deletions

View File

@@ -56,7 +56,7 @@ BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_th
 {
 // 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp
 // Given the enum values we can test the strength of memory order requirements with this single condition.
-return (failure_order & 15u) <= (success_order & 15u);
+return (static_cast< unsigned int >(failure_order) & 15u) <= (static_cast< unsigned int >(success_order) & 15u);
 }
 template< typename T, bool IsFunction = boost::atomics::detail::is_function< T >::value >
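
Editorial note: the single-condition strength test above leans on the enumerator values defined in memory_order.hpp (relaxed = 0, consume = 1, acquire = 2, release = 4, acq_rel = 6, seq_cst = 14). A few illustrative compile-time checks, not part of the Boost sources, spell the claim out:

// failure_order must not be stronger than success_order:
// the masked comparison (failure & 15) <= (success & 15) captures this.
static_assert((0u & 15u) <= (14u & 15u), "relaxed failure with seq_cst success is allowed");
static_assert((1u & 15u) <= (2u & 15u), "consume failure with acquire success is allowed");
static_assert(!((14u & 15u) <= (2u & 15u)), "seq_cst failure with acquire success is rejected");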

View File

@@ -68,13 +68,13 @@ struct gcc_alpha_operations_base
 static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __asm__ __volatile__ ("mb" ::: "memory");
 }
 static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 __asm__ __volatile__ ("mb" ::: "memory");
 }

View File

@@ -78,13 +78,13 @@ struct gcc_arm_operations_base
 static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 hardware_full_fence();
 }
 static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 hardware_full_fence();
 }

View File

@@ -54,10 +54,10 @@ namespace detail {
 * The function converts \c boost::memory_order values to the compiler-specific constants.
 *
 * NOTE: The intention is that the function is optimized away by the compiler, and the
-* compiler-specific constants are passed to the intrinsics. I know constexpr doesn't
+* compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
 * work in this case because the standard atomics interface require memory ordering
 * constants to be passed as function arguments, at which point they stop being constexpr.
-* However it is crucial that the compiler sees constants and not runtime values,
+* However, it is crucial that the compiler sees constants and not runtime values,
 * because otherwise it just ignores the ordering value and always uses seq_cst.
 * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
 * gcc 4.8.2. Intel Compiler issues a warning in this case:
@@ -71,8 +71,8 @@ namespace detail {
 * all functions are called with constant orderings and call intrinstcts properly.
 *
 * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
-* inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic
-* operaions will be executed with seq_cst semantics.
+* propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
+* all atomic operaions will be executed with seq_cst semantics.
 */
 BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
 {
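
The body of convert_memory_order_to_gcc falls outside this hunk's context; as a hedged sketch of the kind of mapping the comment describes (assuming GCC/Clang's predefined __ATOMIC_* constants, and not the literal Boost implementation):

#include <boost/memory_order.hpp>

// Translate boost::memory_order into the constants expected by the
// __atomic_* intrinsics; once force-inlined with a constant argument,
// the whole chain folds to a single compile-time constant.
constexpr int convert_memory_order_to_gcc_sketch(boost::memory_order order) noexcept
{
    return order == boost::memory_order_relaxed ? __ATOMIC_RELAXED :
           order == boost::memory_order_consume ? __ATOMIC_CONSUME :
           order == boost::memory_order_acquire ? __ATOMIC_ACQUIRE :
           order == boost::memory_order_release ? __ATOMIC_RELEASE :
           order == boost::memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST;
}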

View File

@@ -105,7 +105,7 @@ struct operations< 4u, Signed > :
 storage_type v;
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("sync" ::: "memory");
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 {
 __asm__ __volatile__
 (
@@ -336,7 +336,7 @@ struct operations< 1u, Signed > :
 storage_type v;
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("sync" ::: "memory");
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 {
 __asm__ __volatile__
 (
@@ -667,7 +667,7 @@ struct operations< 2u, Signed > :
 storage_type v;
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("sync" ::: "memory");
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 {
 __asm__ __volatile__
 (
@@ -997,7 +997,7 @@ struct operations< 8u, Signed > :
 storage_type v;
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("sync" ::: "memory");
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 {
 __asm__ __volatile__
 (
@@ -1204,20 +1204,16 @@ struct operations< 8u, Signed > :
 BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
 {
-switch (order)
+if (order != memory_order_relaxed)
 {
-case memory_order_consume:
-case memory_order_acquire:
-case memory_order_release:
-case memory_order_acq_rel:
 #if defined(__powerpc64__) || defined(__PPC64__)
-__asm__ __volatile__ ("lwsync" ::: "memory");
-break;
-#endif
-case memory_order_seq_cst:
+if (order != memory_order_seq_cst)
+__asm__ __volatile__ ("lwsync" ::: "memory");
+else
+__asm__ __volatile__ ("sync" ::: "memory");
+#else
 __asm__ __volatile__ ("sync" ::: "memory");
-break;
-default:;
 #endif
 }
 }

View File

@@ -47,17 +47,17 @@ struct gcc_ppc_operations_base
 #if defined(__powerpc64__) || defined(__PPC64__)
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("sync" ::: "memory");
-else if ((order & memory_order_release) != 0)
+else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __asm__ __volatile__ ("lwsync" ::: "memory");
 #else
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __asm__ __volatile__ ("sync" ::: "memory");
 #endif
 }
 static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 __asm__ __volatile__ ("isync" ::: "memory");
 }
 };

View File

@@ -41,7 +41,7 @@ struct gcc_sparc_cas_base
 {
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("membar #Sync" ::: "memory");
-else if ((order & memory_order_release) != 0)
+else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
 }
@@ -49,7 +49,7 @@ struct gcc_sparc_cas_base
 {
 if (order == memory_order_seq_cst)
 __asm__ __volatile__ ("membar #Sync" ::: "memory");
-else if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
 }

View File

@@ -38,7 +38,7 @@ struct gcc_sync_operations_base
 static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __sync_synchronize();
 }
@@ -50,7 +50,7 @@ struct gcc_sync_operations_base
 static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & (memory_order_acquire | memory_order_consume)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
 __sync_synchronize();
 }
 };
@@ -90,7 +90,7 @@ struct gcc_sync_operations :
 // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
 // std::atomic<> uses this intrinsic unconditionally. We do so as well. In case if some architectures actually don't support this, we can always
 // add a check here and fall back to a CAS loop.
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __sync_synchronize();
 return __sync_lock_test_and_set(&storage, v);
 }
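
The comment above mentions a possible CAS-loop fallback for targets where __sync_lock_test_and_set does not provide full exchange semantics; a hedged sketch of such a fallback (illustrative only, not part of Boost), built on the documented __sync_val_compare_and_swap builtin:

// Emulate exchange with a compare-and-swap loop: keep retrying until the
// CAS observes the value we expected, then return that previous value.
template< typename T >
T exchange_via_cas(T volatile& storage, T desired)
{
    T expected = storage;
    while (true)
    {
        T observed = __sync_val_compare_and_swap(&storage, expected, desired);
        if (observed == expected)
            return observed;   // CAS succeeded; 'observed' is the old value
        expected = observed;   // lost the race; retry with the fresh value
    }
}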
@@ -135,7 +135,7 @@ struct gcc_sync_operations :
 static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __sync_synchronize();
 return !!__sync_lock_test_and_set(&storage, 1);
 }

View File

@@ -41,13 +41,13 @@ struct gcc_x86_operations_base
 static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 __asm__ __volatile__ ("" ::: "memory");
 }
 static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_acquire) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 __asm__ __volatile__ ("" ::: "memory");
 }
 };
@@ -543,7 +543,7 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
::: "memory"
);
}
else if ((order & (memory_order_acquire | memory_order_release)) != 0)
else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
{
__asm__ __volatile__ ("" ::: "memory");
}

View File

@@ -62,7 +62,7 @@ struct linux_arm_cas_base
 static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 hardware_full_fence();
 }
@@ -74,7 +74,7 @@ struct linux_arm_cas_base
 static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
 {
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 hardware_full_fence();
 }

View File

@@ -65,7 +65,7 @@ struct msvc_arm_operations_base
 {
 BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-if ((order & memory_order_release) != 0)
+if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
 hardware_full_fence();
 BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
@@ -85,7 +85,7 @@ struct msvc_arm_operations_base
 {
 BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
-if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
 hardware_full_fence();
 BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
@@ -94,7 +94,8 @@ struct msvc_arm_operations_base
 static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
 {
 // Combine order flags together and promote memory_order_consume to memory_order_acquire
-return static_cast< memory_order >(((failure_order | success_order) & ~memory_order_consume) | (((failure_order | success_order) & memory_order_consume) << 1u));
+return static_cast< memory_order >(((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & ~static_cast< unsigned int >(memory_order_consume))
+| (((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & static_cast< unsigned int >(memory_order_consume)) << 1u));
 }
 };
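
A note on the promotion trick in cas_common_order: because memory_order_consume occupies bit 0 and memory_order_acquire bit 1, shifting the consume bit left by one upgrades a consume requirement to acquire. Two illustrative compile-time checks (not Boost source) with those raw values:

// combined = success | failure; result = (combined & ~consume) | ((combined & consume) << 1)
static_assert((((1u | 0u) & ~1u) | (((1u | 0u) & 1u) << 1u)) == 2u,
              "consume combined with relaxed promotes to acquire (2)");
static_assert((((4u | 1u) & ~1u) | (((4u | 1u) & 1u) << 1u)) == 6u,
              "release combined with consume promotes to acq_rel (6)");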

View File

@@ -1,22 +1,22 @@
-#ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED
-#define BOOST_MEMORY_ORDER_HPP_INCLUDED
-// MS compatible compilers support #pragma once
-#if defined(_MSC_VER) && (_MSC_VER >= 1020)
-# pragma once
-#endif
 // boost/memory_order.hpp
 //
 // Defines enum boost::memory_order per the C++0x working draft
 //
 // Copyright (c) 2008, 2009 Peter Dimov
+// Copyright (c) 2018 Andrey Semashev
 //
 // Distributed under the Boost Software License, Version 1.0.
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
+#ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED
+#define BOOST_MEMORY_ORDER_HPP_INCLUDED
+#include <boost/config.hpp>
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
 namespace boost
 {
@@ -42,6 +42,35 @@ namespace boost
 // efficiently in compare_exchange methods.
 //
+#if !defined(BOOST_NO_CXX11_SCOPED_ENUMS)
+enum class memory_order : unsigned int
+{
+relaxed = 0,
+consume = 1,
+acquire = 2,
+release = 4,
+acq_rel = 6, // acquire | release
+seq_cst = 14 // acq_rel | 8
+};
+#if !defined(BOOST_NO_CXX17_INLINE_VARIABLES)
+#define BOOST_MEMORY_ORDER_INLINE_VARIABLE inline
+#else
+#define BOOST_MEMORY_ORDER_INLINE_VARIABLE
+#endif
+BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_relaxed = memory_order::relaxed;
+BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_consume = memory_order::consume;
+BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_acquire = memory_order::acquire;
+BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_release = memory_order::release;
+BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_acq_rel = memory_order::acq_rel;
+BOOST_MEMORY_ORDER_INLINE_VARIABLE BOOST_CONSTEXPR_OR_CONST memory_order memory_order_seq_cst = memory_order::seq_cst;
+#undef BOOST_MEMORY_ORDER_INLINE_VARIABLE
+#else // !defined(BOOST_NO_CXX11_SCOPED_ENUMS)
 enum memory_order
 {
 memory_order_relaxed = 0,
@@ -52,6 +81,8 @@ enum memory_order
 memory_order_seq_cst = 14 // acq_rel | 8
 };
+#endif // !defined(BOOST_NO_CXX11_SCOPED_ENUMS)
 } // namespace boost
 #endif // #ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED
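
For completeness, a hedged usage sketch against the new header (the function name and assertions are illustrative, not part of Boost). It shows that the old spellings keep compiling, while the loss of implicit integer conversion is what forces the explicit casts added throughout the backends above:

#include <boost/memory_order.hpp>

// Old-style spelling still works thanks to the compatibility constants; the
// scoped spelling is available too when BOOST_NO_CXX11_SCOPED_ENUMS is not defined.
constexpr boost::memory_order legacy_spelling = boost::memory_order_acquire;
constexpr boost::memory_order scoped_spelling = boost::memory_order::acquire;
static_assert(legacy_spelling == scoped_spelling, "both names refer to the same enumerator");

// Bit tests now require explicit casts, exactly as the converted backends do.
constexpr bool needs_acquire_fence(boost::memory_order order)
{
    return (static_cast< unsigned int >(order) &
            (static_cast< unsigned int >(boost::memory_order_consume) |
             static_cast< unsigned int >(boost::memory_order_acquire))) != 0u;
}

static_assert(needs_acquire_fence(boost::memory_order_acq_rel), "acq_rel implies acquire");
static_assert(!needs_acquire_fence(boost::memory_order_release), "release alone carries no acquire bit");

The BOOST_MEMORY_ORDER_INLINE_VARIABLE macro is presumably there so that, under C++17, the compatibility constants become inline variables shared across translation units rather than per-TU internal-linkage copies.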