
Attempt to fix compilation on Windows CE. Restored the full fence in the platform_fence_after_load function on architectures other than x86 and x86_64, since the fence is unnecessary only on those architectures.

[SVN r82157]
Andrey Semashev
2012-12-21 21:30:41 +00:00
parent 442273fa49
commit 6aed631bd0
2 changed files with 17 additions and 9 deletions
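
For context, a minimal sketch (not from this commit; the function names and the use of MSVC intrinsics are illustrative assumptions) of the mapping the commit message relies on: a seq_cst store compiles to a lock-prefixed exchange on x86/x86_64, so the matching seq_cst load needs only a compiler barrier there.

    #include <intrin.h>

    void seq_cst_store(long volatile* p, long v)
    {
        _InterlockedExchange(p, v); // xchg has an implied lock prefix: full fence
    }

    long seq_cst_load(long const volatile* p)
    {
        long v = *p;         // a plain mov suffices on x86/x86_64
        _ReadWriteBarrier(); // compiler-only barrier; no mfence needed
        return v;
    }

On any other architecture the load side would additionally need a hardware fence, which is exactly what the second file below restores.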

View File

@@ -18,9 +18,9 @@
 #include <boost/detail/interlocked.hpp>
-#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare)
-#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) BOOST_INTERLOCKED_EXCHANGE(dest, newval)
-#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) BOOST_INTERLOCKED_EXCHANGE_ADD(dest, addend)
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) BOOST_INTERLOCKED_EXCHANGE((long*)(dest), newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), addend)
 #define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare)
 #define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) BOOST_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
 #define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
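
The added casts are presumably what unbreaks Windows CE: there the Interlocked functions behind BOOST_INTERLOCKED_* are declared to take a plain long* rather than long volatile*, so call sites holding volatile storage fail to compile without the (long*)(dest) coercion. A hedged usage sketch (the wrapper name and signature are illustrative, not the library's):

    bool compare_exchange(long volatile& storage, long& expected, long desired)
    {
        // Without the (long*) cast inside the macro, passing &storage
        // (a long volatile*) would not compile against the WinCE prototypes.
        long old = BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, expected);
        bool success = (old == expected);
        expected = old;
        return success;
    }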

View File

@@ -42,7 +42,7 @@ extern "C" void _mm_mfence(void);
 #pragma intrinsic(_mm_mfence)
 #endif
-BOOST_FORCEINLINE void x86_full_fence(void)
+BOOST_FORCEINLINE void hardware_full_fence(void)
 {
 #if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
     // Use mfence only if SSE2 is available
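
The hunk elides the rest of the function body; a plausible reconstruction (an assumption following the visible SSE2 guard, not the verbatim source) falls back to a lock-prefixed operation when mfence cannot be assumed:

    BOOST_FORCEINLINE void hardware_full_fence(void)
    {
    #if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
        // Use mfence only if SSE2 is available
        _mm_mfence();
    #else
        // Any lock-prefixed instruction acts as a full fence on x86
        long tmp = 0;
        BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
    #endif
    }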
@@ -54,13 +54,10 @@ BOOST_FORCEINLINE void x86_full_fence(void)
 }
 // Define compiler barriers
-#if defined(_MSC_VER) && _MSC_VER >= 1310
+#if defined(_MSC_VER) && _MSC_VER >= 1310 && !defined(_WIN32_WCE)
 extern "C" void _ReadWriteBarrier();
 #pragma intrinsic(_ReadWriteBarrier)
 #define BOOST_ATOMIC_READ_WRITE_BARRIER() _ReadWriteBarrier()
 #endif
 #ifndef BOOST_ATOMIC_READ_WRITE_BARRIER
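
What sits under this #ifndef is cut off by the hunk. One conventional fallback, offered here purely as an assumption and not necessarily the library's definition, is a GCC-style inline-asm compiler barrier for non-MSVC compilers:

    #ifndef BOOST_ATOMIC_READ_WRITE_BARRIER
    // Assumed fallback: an empty asm with a memory clobber stops the
    // compiler from reordering loads and stores across this point.
    #define BOOST_ATOMIC_READ_WRITE_BARRIER() __asm__ __volatile__ ("" ::: "memory")
    #endif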
@@ -95,6 +92,17 @@ BOOST_FORCEINLINE void
 platform_fence_after_load(memory_order order)
 {
     BOOST_ATOMIC_READ_WRITE_BARRIER();
+    // On x86 and x86_64 there is no need for a hardware barrier,
+    // even if seq_cst memory order is requested, because all
+    // seq_cst writes are implemented with lock-prefixed operations
+    // or xchg which has implied lock prefix. Therefore normal loads
+    // are already ordered with seq_cst stores on these architectures.
+#if !(defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_IX86)))
+    if (order == memory_order_seq_cst)
+        hardware_full_fence();
+#endif
 }
 } // namespace detail
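
A hedged sketch of how a caller would use this fence (the load function is illustrative, not the library's exact code): the value is read first, then the after-load fence runs, so non-x86 targets get their required hardware barrier for seq_cst while x86 pays only the compiler barrier.

    long load(long const volatile& storage, memory_order order)
    {
        long v = storage;                 // ordinary volatile read
        platform_fence_after_load(order); // hardware fence only where required
        return v;
    }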
@@ -106,7 +114,7 @@ atomic_thread_fence(memory_order order)
 {
     BOOST_ATOMIC_READ_WRITE_BARRIER();
     if (order == memory_order_seq_cst)
-        atomics::detail::x86_full_fence();
+        atomics::detail::hardware_full_fence();
 }
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
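
At the user level, both fence paths are reachable through boost::atomic. A small usage sketch, assuming the then-current Boost.Atomic public API:

    #include <boost/atomic.hpp>

    boost::atomic<int> flag(0);

    void publisher()
    {
        // seq_cst store: lock-prefixed on x86, so readers need no mfence
        flag.store(1, boost::memory_order_seq_cst);
    }

    int consumer()
    {
        // seq_cst load: plain load + compiler barrier on x86;
        // full hardware fence on other Windows targets (e.g. ARM on CE)
        return flag.load(boost::memory_order_seq_cst);
    }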