Mirror of https://github.com/boostorg/atomic.git

Avoid the lock padding member altogether if padding size is 0.

Author: Andrey Semashev
Date:   2014-12-20 17:05:24 +03:00
Parent: fcff86225e
Commit: d6cc2c93c1
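
The change replaces the old padding<N> member with a padded_lock class template that is specialized for zero padding, because a zero-length array member (char padding[0]) would be ill-formed in standard C++. The following is a minimal, self-contained sketch of that technique; the names and the 64-byte cache-line constant mirror the diff below, but this is an illustration, not the actual Boost.Atomic source.

// Illustrative sketch only (not the Boost.Atomic source): shows why the
// zero-padding case needs its own specialization. A zero-length array member
// such as char padding[0] is ill-formed in standard C++, so padded_lock< 0u >
// simply omits the padding member.
#include <cstdio>

#define CACHE_LINE_SIZE 64u          // stand-in for BOOST_ATOMIC_CACHE_LINE_SIZE

typedef unsigned char lock_type;     // stand-in for the real lock storage type

enum
{
    // Pad each lock out to a full cache line so that neighbouring pool
    // entries do not share a line (false sharing); same formula as the diff.
    padding_size = (sizeof(lock_type) <= CACHE_LINE_SIZE ?
        (CACHE_LINE_SIZE - sizeof(lock_type)) :
        (CACHE_LINE_SIZE - sizeof(lock_type) % CACHE_LINE_SIZE))
};

template< unsigned int PaddingSize >
struct padded_lock
{
    lock_type lock;
    char padding[PaddingSize];       // only valid when PaddingSize > 0
};

template< >
struct padded_lock< 0u >
{
    lock_type lock;                  // no padding member at all
};

int main()
{
    std::printf("padding_size = %u, sizeof(padded_lock) = %u\n",
        static_cast< unsigned int >(padding_size),
        static_cast< unsigned int >(sizeof(padded_lock< padding_size >)));
    return 0;
}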


@@ -40,34 +40,37 @@ namespace {
 // NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
 #define BOOST_ATOMIC_CACHE_LINE_SIZE 64
 
-template< unsigned int N >
-struct padding
-{
-    char data[N];
-};
-template< >
-struct padding< 0 >
-{
-};
-
-struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
-{
 #if defined(BOOST_ATOMIC_USE_PTHREAD)
-    typedef pthread_mutex_t lock_type;
+typedef pthread_mutex_t lock_type;
 #else
-    typedef atomics::detail::operations< 1u, false > operations;
-    typedef operations::storage_type lock_type;
+typedef atomics::detail::operations< 1u, false > lock_operations;
+typedef lock_operations::storage_type lock_type;
 #endif
 
-    lock_type lock;
-    // The additional padding is needed to avoid false sharing between locks
-    enum { padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
+enum
+{
+    padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
         (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
-        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE)) };
-    padding< padding_size > pad;
+        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
 };
 
-static padded_lock g_lock_pool[41]
+template< unsigned int PaddingSize >
+struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
+{
+    lock_type lock;
+    // The additional padding is needed to avoid false sharing between locks
+    char padding[PaddingSize];
+};
+
+template< >
+struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
+{
+    lock_type lock;
+};
+
+typedef padded_lock< padding_size > padded_lock_t;
+
+static padded_lock_t g_lock_pool[41]
 #if defined(BOOST_ATOMIC_USE_PTHREAD)
 =
 {
@@ -93,7 +96,7 @@ static padded_lock g_lock_pool[41]
 BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
     m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
 {
-    while (padded_lock::operations::test_and_set(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_acquire))
+    while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire))
     {
         atomics::detail::pause();
     }
@@ -101,7 +104,7 @@ BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr)
 
 BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
 {
-    padded_lock::operations::clear(*static_cast< padded_lock::lock_type* >(m_lock), memory_order_release);
+    lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release);
 }
 
 BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
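
The two functions touched by the later hunks implement an address-hashed spinlock pool: the object's address selects one of the 41 padded locks, the scoped_lock constructor spins on test_and_set with acquire ordering, and the destructor clears the flag with release ordering. The sketch below reproduces that pattern with std::atomic_flag standing in for the Boost.Atomic lock operations; the pool size and the hashing expression are taken from the diff, everything else is illustrative.

// Sketch of the lock-pool pattern (illustrative, not the Boost.Atomic source).
// Requires C++20 so that default-constructed std::atomic_flag starts in the
// clear state; earlier standards would need ATOMIC_FLAG_INIT.
#include <atomic>
#include <cstddef>
#include <cstdio>

static std::atomic_flag g_lock_pool[41];

// Hash the object's address into one of the pool entries, as the
// scoped_lock constructor does in the diff above.
static std::atomic_flag& lock_for(const volatile void* addr)
{
    return g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))];
}

int main()
{
    int object = 0;
    std::atomic_flag& lock = lock_for(&object);

    // Acquire: spin until the flag was previously clear.
    while (lock.test_and_set(std::memory_order_acquire))
    {
        // A real implementation inserts a pause/yield hint here.
    }

    std::printf("lock %p acquired for object %p\n",
        static_cast< void* >(&lock), static_cast< void* >(&object));

    // Release: clear the flag so other threads can take the lock.
    lock.clear(std::memory_order_release);
    return 0;
}

Padding each pool entry out to a full cache line, as the commit's padded_lock does, keeps two unrelated objects that hash to adjacent slots from contending on the same cache line.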