From 6d7c0ec2eea408ade98b5308bd2d0b7d8c07380e Mon Sep 17 00:00:00 2001
From: Andrey Semashev
Date: Sun, 13 Sep 2015 21:25:58 +0300
Subject: [PATCH] Added generic implementation of extended ops. Reorganized
 platform macros.

---
 include/boost/atomic/capabilities.hpp            |   2 +-
 .../boost/atomic/detail/ext_ops_generic.hpp      | 142 ++++++++++++++++++
 .../atomic/detail/operations_lockfree.hpp        |   2 +-
 include/boost/atomic/detail/platform.hpp         |  86 ++++++-----
 4 files changed, 192 insertions(+), 40 deletions(-)
 create mode 100644 include/boost/atomic/detail/ext_ops_generic.hpp

diff --git a/include/boost/atomic/capabilities.hpp b/include/boost/atomic/capabilities.hpp
index 05bbb0f..7e5205d 100644
--- a/include/boost/atomic/capabilities.hpp
+++ b/include/boost/atomic/capabilities.hpp
@@ -19,7 +19,7 @@
 #include <boost/atomic/detail/int_sizes.hpp>
 
 #if !defined(BOOST_ATOMIC_EMULATED)
-#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_)
+#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/caps_)
 #endif
 
 #ifdef BOOST_HAS_PRAGMA_ONCE
diff --git a/include/boost/atomic/detail/ext_ops_generic.hpp b/include/boost/atomic/detail/ext_ops_generic.hpp
new file mode 100644
index 0000000..46a789e
--- /dev/null
+++ b/include/boost/atomic/detail/ext_ops_generic.hpp
@@ -0,0 +1,142 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2015 Andrey Semashev
+ */
+/*!
+ * \file   atomic/detail/ext_ops_generic.hpp
+ *
+ * This header contains a generic implementation of the extended atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXT_OPS_GENERIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXT_OPS_GENERIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base, std::size_t Size, bool Signed >
+struct generic_extended_operations :
+    public Base
+{
+    typedef Base base_type;
+    typedef typename base_type::storage_type storage_type;
+    typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;
+    typedef typename make_storage_type< Size, false >::type unsigned_emulated_storage_type;
+
+    static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type old_val;
+        atomics::detail::non_atomic_load(storage, old_val);
+        while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(-static_cast< emulated_storage_type >(old_val)), order, memory_order_relaxed)) {}
+        return old_val;
+    }
+
+    static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        return Base::fetch_xor(storage, static_cast< storage_type >(~static_cast< emulated_storage_type >(0)), order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        Base::fetch_add(storage, v, order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        Base::fetch_sub(storage, v, order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        fetch_negate(storage, order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        Base::fetch_and(storage, v, order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        Base::fetch_or(storage, v, order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        Base::fetch_xor(storage, v, order);
+    }
+
+    static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        fetch_complement(storage, order);
+    }
+
+    static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type old_val = Base::fetch_add(storage, v, order);
+        emulated_storage_type new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
+        return !new_val;
+    }
+
+    static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type old_val = Base::fetch_sub(storage, v, order);
+        emulated_storage_type new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);
+        return !new_val;
+    }
+
+    static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        return !(Base::fetch_and(storage, v, order) & v);
+    }
+
+    static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        return !(Base::fetch_or(storage, v, order) | v);
+    }
+
+    static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        return !(Base::fetch_xor(storage, v, order) ^ v);
+    }
+
+    static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
+        storage_type old_val = Base::fetch_or(storage, mask, order);
+        return !!(old_val & mask);
+    }
+
+    static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
+        storage_type old_val = Base::fetch_and(storage, ~mask, order);
+        return !!(old_val & mask);
+    }
+
+    static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
+        storage_type old_val = Base::fetch_xor(storage, mask, order);
+        return !!(old_val & mask);
+    }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXT_OPS_GENERIC_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/operations_lockfree.hpp b/include/boost/atomic/detail/operations_lockfree.hpp
index b465403..62b4583 100644
--- a/include/boost/atomic/detail/operations_lockfree.hpp
+++ b/include/boost/atomic/detail/operations_lockfree.hpp
@@ -18,7 +18,7 @@
 #include <boost/atomic/detail/platform.hpp>
 
 #if !defined(BOOST_ATOMIC_EMULATED)
-#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_)
+#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/ops_)
 #else
 #include <boost/atomic/detail/ops_emulated.hpp>
 #endif
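Every extended operation in the new header reduces to a primitive the Base backend already provides: fetch_negate runs a compare_exchange_weak loop, the *_and_test helpers recompute the new value from what fetch_add/fetch_sub/fetch_and/fetch_or/fetch_xor return, and the bit_test_and_* helpers test the affected bit in the returned previous value. Below is a minimal standalone sketch of the first and last of these techniques, written against std::atomic rather than the Boost.Atomic backend interface; the function names mirror the patch, but the code is an illustration, not part of it.

#include <atomic>
#include <cassert>

// CAS-loop negation, as in generic_extended_operations::fetch_negate.
// compare_exchange_weak reloads old_val on failure, so the loop body is
// empty. (The patch seeds old_val with a non-atomic read; a relaxed load
// is the closest portable equivalent here.)
template< typename T >
T fetch_negate(std::atomic< T >& storage, std::memory_order order)
{
    T old_val = storage.load(std::memory_order_relaxed);
    while (!storage.compare_exchange_weak(old_val, static_cast< T >(-old_val), order, std::memory_order_relaxed)) {}
    return old_val;
}

// Bit test-and-set via fetch_or: the returned previous value tells whether
// the bit was already set.
template< typename T >
bool bit_test_and_set(std::atomic< T >& storage, unsigned int bit_number, std::memory_order order)
{
    const T mask = static_cast< T >(T(1u) << bit_number);
    return (storage.fetch_or(mask, order) & mask) != 0;
}

int main()
{
    std::atomic< int > x(5);
    assert(fetch_negate(x, std::memory_order_acq_rel) == 5);
    assert(x.load() == -5);

    std::atomic< unsigned int > flags(0u);
    assert(!bit_test_and_set(flags, 3, std::memory_order_acq_rel)); // bit 3 was clear
    assert(flags.load() == 8u);
    return 0;
}

The weak form of compare-exchange is the right choice in such loops: a spurious failure simply costs one more iteration, and on LL/SC architectures it avoids the nested retry loop a strong compare-exchange would carry.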
diff --git a/include/boost/atomic/detail/platform.hpp b/include/boost/atomic/detail/platform.hpp
index 786b1f1..a95ec53 100644
--- a/include/boost/atomic/detail/platform.hpp
+++ b/include/boost/atomic/detail/platform.hpp
@@ -23,27 +23,11 @@
 
 #if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
 
-// Compiler-based backends
-#if (defined(__ibmxl__) || defined(__IBMCPP__)) && defined(__PPC__)
+// Determine the target platform.
+// The target platform describes the compiler and target architecture. It can be used by more generic backends, such as the ones
+// based on compiler intrinsics, to implement specialized operations in a non-generic way.
 
-// IBM XL C++ Compiler has to be checked before GCC/Clang as it pretends to be one but does not support __atomic* intrinsics.
-// It does support GCC inline assembler though.
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
-
-#elif ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
-    (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
-    (\
-        (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
-        (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\
-        (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\
-        (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\
-        (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\
-        (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\
-    )
-
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic
-
-#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && (defined(__i386__) || defined(__x86_64__))
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
 
 #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86
 
@@ -74,17 +58,6 @@
 
 #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha
 
-#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\
-    (\
-        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) ||\
-        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) ||\
-        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) ||\
-        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
-        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\
-    )
-
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync
-
 #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
 
 #define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
 
@@ -95,28 +68,65 @@
 
 #endif
 
+// Compiler-based backends
+
+// IBM XL C++ Compiler has to be checked before GCC/Clang as it pretends to be one but does not support __atomic* intrinsics.
+// It does support GCC inline assembler though.
+#if !(defined(__ibmxl__) || defined(__IBMCPP__)) &&\
+    ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
+    (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
+    (\
+        (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
+        (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\
+        (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\
+        (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\
+        (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\
+        (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\
+    )
+
+#define BOOST_ATOMIC_DETAIL_BACKEND gcc_atomic
+
+#elif defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+
+#define BOOST_ATOMIC_DETAIL_BACKEND BOOST_ATOMIC_DETAIL_PLATFORM
+
+#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\
+    (\
+        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) ||\
+        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) ||\
+        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) ||\
+        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
+        defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\
+    )
+
+#define BOOST_ATOMIC_DETAIL_BACKEND gcc_sync
+
+#endif
+
 // OS-based backends
-#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+
+#if !defined(BOOST_ATOMIC_DETAIL_BACKEND)
 
 #if defined(__linux__) && defined(__arm__)
 
-#define BOOST_ATOMIC_DETAIL_PLATFORM linux_arm
+#define BOOST_ATOMIC_DETAIL_BACKEND linux_arm
 
 #elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
 
-#define BOOST_ATOMIC_DETAIL_PLATFORM windows
+#define BOOST_ATOMIC_DETAIL_BACKEND windows
 
 #endif
 
-#endif // !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+#endif // !defined(BOOST_ATOMIC_DETAIL_BACKEND)
 
 #endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)
 
-#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
-#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
+#if !defined(BOOST_ATOMIC_DETAIL_BACKEND)
+#define BOOST_ATOMIC_DETAIL_BACKEND emulated
 #define BOOST_ATOMIC_EMULATED
 #endif
 
-#define BOOST_ATOMIC_DETAIL_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_PLATFORM).hpp>
+#define BOOST_ATOMIC_DETAIL_PLATFORM_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_PLATFORM).hpp>
+#define BOOST_ATOMIC_DETAIL_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_BACKEND).hpp>
 
 #endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
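After this reorganization, BOOST_ATOMIC_DETAIL_PLATFORM names the compiler/architecture pair (and keeps its dedicated header macro), while BOOST_ATOMIC_DETAIL_BACKEND names whichever implementation was actually selected: the gcc_atomic intrinsics, the platform-specific backend, gcc_sync, an OS-based backend, or the emulated fallback. Both *_HEADER macros turn those names into include paths by token pasting. A minimal sketch of that dispatch technique, using hypothetical local macros (MY_JOIN standing in for Boost.Config's BOOST_JOIN):

// Two-level join so that MY_BACKEND is expanded to its value before pasting.
#define MY_JOIN_I(a, b) a##b
#define MY_JOIN(a, b) MY_JOIN_I(a, b)

#define MY_BACKEND gcc_atomic
#define MY_BACKEND_HEADER(prefix) <MY_JOIN(prefix, MY_BACKEND).hpp>

// The directive
//     #include MY_BACKEND_HEADER(boost/atomic/detail/caps_)
// expands to
//     #include <boost/atomic/detail/caps_gcc_atomic.hpp>
// which is how capabilities.hpp and operations_lockfree.hpp pull in the
// backend-specific header without enumerating every backend by hand.

Only the last identifier of the prefix (caps_ or ops_) participates in the paste, so the slashes in the path pass through untouched; the two-level indirection is what allows the backend macro to expand before ## is applied.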