diff --git a/include/boost/atomic/detail/lockpool.hpp b/include/boost/atomic/detail/lockpool.hpp
index 4246486..6af9479 100644
--- a/include/boost/atomic/detail/lockpool.hpp
+++ b/include/boost/atomic/detail/lockpool.hpp
@@ -2,6 +2,7 @@
 #define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
 
 // Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2013 Andrey Semashev
 //
 // Distributed under the Boost Software License, Version 1.0.
 // See accompanying file LICENSE_1_0.txt or copy at
@@ -9,9 +10,6 @@
 
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/link.hpp>
-#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
-#include <boost/detail/lightweight_mutex.hpp>
-#endif
 
 #ifdef BOOST_HAS_PRAGMA_ONCE
 #pragma once
@@ -21,28 +19,22 @@ namespace boost {
 namespace atomics {
 namespace detail {
 
-#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
+#if !defined(BOOST_ATOMIC_FLAG_LOCK_FREE) || BOOST_ATOMIC_FLAG_LOCK_FREE != 2
 
 class lockpool
 {
 public:
-    typedef boost::detail::lightweight_mutex lock_type;
-    class scoped_lock :
-        public lock_type::scoped_lock
+    class scoped_lock
     {
-        typedef lock_type::scoped_lock base_type;
+        void* lock_;
 
     public:
-        explicit scoped_lock(const volatile void * addr) : base_type(get_lock_for(addr))
-        {
-        }
+        explicit BOOST_ATOMIC_DECL scoped_lock(const volatile void* addr);
+        BOOST_ATOMIC_DECL ~scoped_lock();
 
         BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
         BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
     };
-
-private:
-    static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
 };
 
 #else
@@ -55,13 +47,13 @@ public:
     class scoped_lock
     {
     private:
-        atomic_flag& flag_;
+        lock_type& flag_;
 
     public:
        explicit scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr))
        {
-            for (; flag_.test_and_set(memory_order_acquire);)
+            while (flag_.test_and_set(memory_order_acquire))
            {
#if defined(BOOST_ATOMIC_X86_PAUSE)
                BOOST_ATOMIC_X86_PAUSE();
diff --git a/src/lockpool.cpp b/src/lockpool.cpp
index 1f11cce..31c7d82 100644
--- a/src/lockpool.cpp
+++ b/src/lockpool.cpp
@@ -1,8 +1,18 @@
 #include <cstddef>
 #include <boost/config.hpp>
+#include <boost/assert.hpp>
 #include <boost/atomic.hpp>
 
+#if !defined(BOOST_ATOMIC_FLAG_LOCK_FREE) || BOOST_ATOMIC_FLAG_LOCK_FREE != 2
+#if !defined(BOOST_HAS_PTHREADS)
+#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
+#endif
+#include <pthread.h>
+#define BOOST_ATOMIC_USE_PTHREAD
+#endif
+
 // Copyright (c) 2011 Helge Bahmann
+// Copyright (c) 2013 Andrey Semashev
 //
 // Distributed under the Boost Software License, Version 1.0.
 // See accompanying file LICENSE_1_0.txt or copy at
@@ -15,7 +25,7 @@ namespace detail {
 namespace {
 
 // This seems to be the maximum across all modern CPUs
-// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or regular constants in alignment attributes
+// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
 #define BOOST_ATOMIC_CACHE_LINE_SIZE 64
 
 template< unsigned int N >
@@ -30,26 +40,64 @@ struct padding< 0 >
 
 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
 {
-    lockpool::lock_type lock;
+#if defined(BOOST_ATOMIC_USE_PTHREAD)
+    typedef pthread_mutex_t lock_type;
+#else
+    typedef lockpool::lock_type lock_type;
+#endif
+
+    lock_type lock;
     // The additional padding is needed to avoid false sharing between locks
-    enum { padding_size = (sizeof(lockpool::lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
-        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lockpool::lock_type)) :
-        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lockpool::lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE)) };
+    enum { padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
+        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
+        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE)) };
     padding< padding_size > pad;
 };
 
-static padded_lock lock_pool_[41];
+static padded_lock lock_pool_[41]
+#if defined(BOOST_ATOMIC_USE_PTHREAD)
+=
+{
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
+    PTHREAD_MUTEX_INITIALIZER
+}
+#endif
+;
 
 } // namespace
 
+#if !defined(BOOST_ATOMIC_USE_PTHREAD)
+
 // NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes.
 BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr)
 {
-    std::size_t index = reinterpret_cast<std::size_t>(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
+    std::size_t index = reinterpret_cast< std::size_t >(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
     return lock_pool_[index].lock;
 }
 
+#else // !defined(BOOST_ATOMIC_USE_PTHREAD)
+
+BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) :
+    lock_(&lock_pool_[reinterpret_cast< std::size_t >(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_))].lock)
+{
+    BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(lock_)) == 0);
+}
+
+BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock()
+{
+    BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(lock_)) == 0);
+}
+
+#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
+
-}
-}
-}
+} // namespace detail
+} // namespace atomics
+} // namespace boost
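
For reference, the sketch below illustrates the lock pool technique the patch implements: a fixed array of cache-line-padded mutexes, one of which is selected by hashing the address of the object being protected, wrapped in an RAII guard that locks in its constructor and unlocks in its destructor. It is a minimal illustration only, written against standard C++11 facilities (std::mutex, alignas) rather than the pthread_mutex_t / lightweight_mutex paths the patch actually selects; the names demo_lock_pool, padded_mutex, lock_for, scoped_guard and cache_line_size are hypothetical and are not part of Boost.Atomic.

// Illustrative sketch of the lock pool technique, not Boost.Atomic's real API.
#include <cstddef>
#include <cstdint>
#include <mutex>

namespace demo_lock_pool {

// Assumed cache line size; the patch hard-codes 64 for the same purpose.
constexpr std::size_t cache_line_size = 64;

// Each mutex occupies its own cache line so that contention on one pooled
// lock does not cause false sharing with its neighbours.
struct alignas(cache_line_size) padded_mutex
{
    std::mutex m;
};

// A small pool, mirroring the 41-entry array in lockpool.cpp.
static padded_mutex pool[41];

// Map an object's address to one of the pooled mutexes. Distinct objects may
// share a lock; that is acceptable because the lock only serializes the
// emulated atomic operation.
inline std::mutex& lock_for(const volatile void* addr)
{
    std::size_t index =
        reinterpret_cast<std::uintptr_t>(addr) % (sizeof(pool) / sizeof(pool[0]));
    return pool[index].m;
}

// RAII guard analogous to lockpool::scoped_lock in the patch:
// lock on construction, unlock on destruction, non-copyable.
class scoped_guard
{
    std::mutex& m_;

public:
    explicit scoped_guard(const volatile void* addr) : m_(lock_for(addr))
    {
        m_.lock();
    }

    ~scoped_guard()
    {
        m_.unlock();
    }

    scoped_guard(const scoped_guard&) = delete;
    scoped_guard& operator=(const scoped_guard&) = delete;
};

} // namespace demo_lock_pool

// Usage: emulate an atomic increment of a non-atomic counter.
int main()
{
    long counter = 0;
    {
        demo_lock_pool::scoped_guard guard(&counter);
        ++counter; // protected by the pooled mutex chosen from &counter
    }
    return 0;
}

The per-slot padding matches the patch's own comment that "additional padding is needed to avoid false sharing between locks", and the modulus over a fixed-size array is the same address-to-slot mapping performed by get_lock_for and by the pthread-based scoped_lock constructor in lockpool.cpp.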