
Added a generic atomic<> implementation based on the new __atomic intrinsics available in gcc 4.7 and clang 3.2. The new implementation adds support for 128-bit atomics. Other implementations were updated to use the new Boost.Config macros for defaulted and deleted functions (this is not complete yet). Fixed padding in the lock pool to avoid false sharing. The initializing constructor of atomic<> was made explicit to avoid ambiguities between different overloads of operator=.

[SVN r84800]
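
For context, a minimal sketch of the kind of code the new generic backend rests on: the GCC/Clang __atomic builtins available since gcc 4.7 / clang 3.2. This is an illustration only, assuming an invented type name gcc_atomic_sketch; it is not the actual boost::atomics::detail implementation.

// Illustration only: the __atomic builtins the new generic backend targets,
// not the real Boost.Atomic code. gcc_atomic_sketch is a made-up name.
template< typename T >
struct gcc_atomic_sketch
{
    T v;

    T load(int order = __ATOMIC_SEQ_CST) const
    {
        return __atomic_load_n(&v, order);
    }

    void store(T desired, int order = __ATOMIC_SEQ_CST)
    {
        __atomic_store_n(&v, desired, order);
    }

    bool compare_exchange_strong(T& expected, T desired, int order = __ATOMIC_SEQ_CST)
    {
        // false selects the strong variant; success and failure orders kept equal for brevity
        return __atomic_compare_exchange_n(&v, &expected, desired, false, order, order);
    }
};

// With a 16-byte type such as __int128 (and -mcx16 or libatomic on x86-64),
// the same builtins also cover the 128-bit atomics mentioned above.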
Andrey Semashev
2013-06-15 20:43:48 +00:00
parent 179e72cfd1
commit 9cab8a9ebf
23 changed files with 1640 additions and 79 deletions


@@ -1,3 +1,4 @@
+#include <boost/config.hpp>
 #include <boost/atomic.hpp>
 // Copyright (c) 2011 Helge Bahmann
@@ -10,13 +11,39 @@ namespace boost {
 namespace atomics {
 namespace detail {
-static lockpool::lock_type lock_pool_[41];
+namespace {
+
+// This seems to be the maximum across all modern CPUs
+enum { cache_line_size = 64 };
+
+template< unsigned int N >
+struct padding
+{
+    char data[N];
+};
+
+template< >
+struct padding< 0 >
+{
+};
+
+struct BOOST_ALIGNMENT(cache_line_size) padded_lock
+{
+    lockpool::lock_type lock;
+    // The additional padding is needed to avoid false sharing between locks
+    enum { padding_size = (sizeof(lockpool::lock_type) <= cache_line_size ? (cache_line_size - sizeof(lockpool::lock_type)) : (cache_line_size - sizeof(lockpool::lock_type) % cache_line_size)) };
+    padding< padding_size > pad;
+};
+
+static padded_lock lock_pool_[41];
+
+} // namespace
+
 // NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which result in crashes.
 BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr)
 {
     std::size_t index = reinterpret_cast<std::size_t>(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
-    return lock_pool_[index];
+    return lock_pool_[index].lock;
 }
 
 }
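
The padding arithmetic in padded_lock above rounds every pool entry up to a whole cache line, so two threads spinning on neighbouring locks do not invalidate each other's line. Below is a standalone sketch of the same idea, assuming a 4-byte lock type and C++11 alignas in place of BOOST_ALIGNMENT; the names are illustrative, not from the commit.

#include <cstddef>

enum { cache_line_size = 64 };   // same 64-byte assumption as the commit
typedef int lock_type_sketch;    // hypothetical 4-byte stand-in for lockpool::lock_type

struct alignas(cache_line_size) padded_lock_sketch
{
    lock_type_sketch lock;
    char pad[cache_line_size - sizeof(lock_type_sketch)];  // 64 - 4 = 60 bytes of padding
};

// Every array element occupies exactly one cache line, which is what the
// padded_lock change above achieves for the 41-entry lock pool.
static_assert(sizeof(padded_lock_sketch) == cache_line_size, "one lock per cache line");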