diff --git a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp
index 2c645e0..caea997 100644
--- a/include/boost/atomic/detail/caps_gcc_sparcv9.hpp
+++ b/include/boost/atomic/detail/caps_gcc_sparcv9.hpp
@@ -25,6 +25,7 @@
 #define BOOST_ATOMIC_INT8_LOCK_FREE 2
 #define BOOST_ATOMIC_INT16_LOCK_FREE 2
 #define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
 #define BOOST_ATOMIC_POINTER_LOCK_FREE 2
 
 #define BOOST_ATOMIC_THREAD_FENCE 2
diff --git a/include/boost/atomic/detail/ops_gcc_sparcv9.hpp b/include/boost/atomic/detail/ops_gcc_sparcv9.hpp
new file mode 100644
index 0000000..09c750e
--- /dev/null
+++ b/include/boost/atomic/detail/ops_gcc_sparcv9.hpp
@@ -0,0 +1,272 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2010 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_sparcv9.hpp
+ *
+ * This header contains implementation of the \c operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_cas_based.hpp>
+#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+struct gcc_sparcv9_cas_base
+{
+    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+    {
+        switch (order)
+        {
+        case memory_order_relaxed:
+        case memory_order_acquire:
+        case memory_order_consume:
+            break;
+        case memory_order_release:
+        case memory_order_acq_rel:
+            __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+            break;
+        case memory_order_seq_cst:
+            __asm__ __volatile__ ("membar #Sync" ::: "memory");
+            break;
+        default:;
+        }
+    }
+
+    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+    {
+        switch (order)
+        {
+        case memory_order_relaxed:
+        case memory_order_consume:
+        case memory_order_release:
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+            __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+            break;
+        case memory_order_seq_cst:
+            __asm__ __volatile__ ("membar #Sync" ::: "memory");
+            break;
+        default:;
+        }
+    }
+
+    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+    {
+        if (order == memory_order_seq_cst)
+            __asm__ __volatile__ ("membar #Sync" ::: "memory");
+    }
+
+    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+    {
+        fence_after(order);
+    }
+};
+
+template< bool Signed >
+struct gcc_sparcv9_cas32 :
+    public gcc_sparcv9_cas_base
+{
+    typedef typename make_storage_type< 4u, Signed >::type storage_type;
+
+    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        storage = v;
+        fence_after_store(order);
+    }
+
+    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type v = storage;
+        fence_after_load(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        fence_before(success_order);
+        storage_type previous = expected;
+        __asm__ __volatile__
+        (
+            "cas [%1], %2, %0"
+            : "+r" (desired)
+            : "r" (&storage), "r" (previous)
+            : "memory"
+        );
+        const bool success = (desired == previous);
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = desired;
+        return success;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_weak(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+template< bool Signed >
+struct operations< 4u, Signed > :
+    public cas_based_operations< gcc_sparcv9_cas32< Signed > >
+{
+    typedef cas_based_operations< gcc_sparcv9_cas32< Signed > > base_type;
+    typedef typename base_type::storage_type storage_type;
+
+    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        base_type::fence_before(order);
+        __asm__ __volatile__
+        (
+            "swap [%1], %0"
+            : "+r" (v)
+            : "r" (&storage)
+            : "memory"
+        );
+        base_type::fence_after(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        return exchange(storage, (storage_type)1, order) != (storage_type)0;
+    }
+};
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
+{
+};
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
+{
+};
+
+template< bool Signed >
+struct gcc_sparcv9_cas64 :
+    public gcc_sparcv9_cas_base
+{
+    typedef typename make_storage_type< 8u, Signed >::type storage_type;
+
+    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+    {
+        fence_before(order);
+        storage = v;
+        fence_after_store(order);
+    }
+
+    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+    {
+        storage_type v = storage;
+        fence_after_load(order);
+        return v;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_strong(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        fence_before(success_order);
+        storage_type previous = expected;
+        __asm__ __volatile__
+        (
+            "casx [%1], %2, %0"
+            : "+r" (desired)
+            : "r" (&storage), "r" (previous)
+            : "memory"
+        );
+        const bool success = (desired == previous);
+        if (success)
+            fence_after(success_order);
+        else
+            fence_after(failure_order);
+        expected = desired;
+        return success;
+    }
+
+    static BOOST_FORCEINLINE bool compare_exchange_weak(
+        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+    {
+        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+    }
+
+    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
+    {
+        return true;
+    }
+};
+
+template< bool Signed >
+struct operations< 8u, Signed > :
+    public cas_based_operations< gcc_sparcv9_cas64< Signed > >
+{
+};
+
+BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+{
+    switch (order)
+    {
+    case memory_order_relaxed:
+        break;
+    case memory_order_release:
+        __asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
+        break;
+    case memory_order_acquire:
+        __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
+        break;
+    case memory_order_acq_rel:
+        __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
+        break;
+    case memory_order_consume:
+        break;
+    case memory_order_seq_cst:
+        __asm__ __volatile__ ("membar #Sync" ::: "memory");
+        break;
+    default:;
+    }
+}
+
+BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+{
+    __asm__ __volatile__ ("" ::: "memory");
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SPARCV9_HPP_INCLUDED_
diff --git a/include/boost/atomic/detail/ops_gcc_sync.hpp b/include/boost/atomic/detail/ops_gcc_sync.hpp
index 78afd23..e5ef05f 100644
--- a/include/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/include/boost/atomic/detail/ops_gcc_sync.hpp
@@ -16,7 +16,6 @@
 #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
 #define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
 
-#include // UINT64_C
 #include
 #include
 #include
diff --git a/include/boost/atomic/detail/ops_linux_arm.hpp b/include/boost/atomic/detail/ops_linux_arm.hpp
index 0e83dc6..a8413d7 100644
--- a/include/boost/atomic/detail/ops_linux_arm.hpp
+++ b/include/boost/atomic/detail/ops_linux_arm.hpp
@@ -15,8 +15,8 @@
  * This header contains implementation of the \c operations template.
  */
 
-#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
 
 #include
 #include
@@ -57,6 +57,39 @@ namespace detail {
 
 struct linux_arm_cas_base
 {
+    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
+    {
+        switch (order)
+        {
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+            hardware_full_fence();
+            break;
+        case memory_order_consume:
+        default:;
+        }
+    }
+
+    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+    {
+        if (order == memory_order_seq_cst)
+            hardware_full_fence();
+    }
+
+    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
+    {
+        switch (order)
+        {
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+            hardware_full_fence();
+            break;
+        default:;
+        }
+    }
+
     static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
     {
         typedef void (*kernel_dmb_t)(void);
@@ -120,40 +153,6 @@ struct linux_arm_cas :
     {
         return true;
     }
-
-private:
-    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
-    {
-        switch (order)
-        {
-        case memory_order_release:
-        case memory_order_acq_rel:
-        case memory_order_seq_cst:
-            hardware_full_fence();
-            break;
-        case memory_order_consume:
-        default:;
-        }
-    }
-
-    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
-    {
-        if (order == memory_order_seq_cst)
-            hardware_full_fence();
-    }
-
-    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
-    {
-        switch (order)
-        {
-        case memory_order_acquire:
-        case memory_order_acq_rel:
-        case memory_order_seq_cst:
-            hardware_full_fence();
-            break;
-        default:;
-        }
-    }
 };
 
 template< bool Signed >
@@ -211,4 +210,4 @@
 } // namespace atomics
 } // namespace boost
 
-#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_LINUX_ARM_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
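
For reference: the new SPARCv9 backend only implements load, store, CAS (`cas`/`casx`) and, for 32-bit storage, `swap` directly; every other read-modify-write operation is generated by `cas_based_operations` on top of `compare_exchange_strong`. The following is a minimal illustrative sketch of that CAS-loop pattern, not the actual `cas_based_operations` code from the library; the free function name `cas_based_fetch_add` is invented here for the example.

#include <boost/memory_order.hpp>

// Sketch only: a fetch_add built purely from the CAS primitive defined in the
// patch. "Cas" is assumed to expose storage_type, load() and
// compare_exchange_strong() with the signatures shown above.
template< typename Cas >
inline typename Cas::storage_type cas_based_fetch_add(
    typename Cas::storage_type volatile& storage, typename Cas::storage_type v)
{
    typedef typename Cas::storage_type storage_type;
    storage_type old_val = Cas::load(storage, boost::memory_order_relaxed);
    // Retry until the value we computed the sum from is still the one in
    // memory; on failure compare_exchange_strong refreshes old_val with the
    // value the cas/casx instruction actually observed.
    while (!Cas::compare_exchange_strong(
        storage, old_val, static_cast< storage_type >(old_val + v),
        boost::memory_order_seq_cst, boost::memory_order_relaxed))
    {
    }
    return old_val;
}

The 32-bit `exchange` and `test_and_set` avoid such a retry loop entirely by using the native `swap` instruction instead.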