mirror of https://github.com/boostorg/atomic.git synced 2026-01-19 04:02:09 +00:00

Boost.Atomic and Boost.Lockfree merged from trunk.

[SVN r81976]
Andrey Semashev
2012-12-15 18:28:27 +00:00
commit b5da965c5b
36 changed files with 14998 additions and 0 deletions

.gitattributes vendored Normal file

@@ -0,0 +1,96 @@
* text=auto !eol svneol=native#text/plain
*.gitattributes text svneol=native#text/plain
# Scriptish formats
*.bat text svneol=native#text/plain
*.bsh text svneol=native#text/x-beanshell
*.cgi text svneol=native#text/plain
*.cmd text svneol=native#text/plain
*.js text svneol=native#text/javascript
*.php text svneol=native#text/x-php
*.pl text svneol=native#text/x-perl
*.pm text svneol=native#text/x-perl
*.py text svneol=native#text/x-python
*.sh eol=lf svneol=LF#text/x-sh
configure eol=lf svneol=LF#text/x-sh
# Image formats
*.bmp binary svneol=unset#image/bmp
*.gif binary svneol=unset#image/gif
*.ico binary svneol=unset#image/ico
*.jpeg binary svneol=unset#image/jpeg
*.jpg binary svneol=unset#image/jpeg
*.png binary svneol=unset#image/png
*.tif binary svneol=unset#image/tiff
*.tiff binary svneol=unset#image/tiff
*.svg text svneol=native#image/svg%2Bxml
# Data formats
*.pdf binary svneol=unset#application/pdf
*.avi binary svneol=unset#video/avi
*.doc binary svneol=unset#application/msword
*.dsp text svneol=crlf#text/plain
*.dsw text svneol=crlf#text/plain
*.eps binary svneol=unset#application/postscript
*.gz binary svneol=unset#application/gzip
*.mov binary svneol=unset#video/quicktime
*.mp3 binary svneol=unset#audio/mpeg
*.ppt binary svneol=unset#application/vnd.ms-powerpoint
*.ps binary svneol=unset#application/postscript
*.psd binary svneol=unset#application/photoshop
*.rdf binary svneol=unset#text/rdf
*.rss text svneol=unset#text/xml
*.rtf binary svneol=unset#text/rtf
*.sln text svneol=native#text/plain
*.swf binary svneol=unset#application/x-shockwave-flash
*.tgz binary svneol=unset#application/gzip
*.vcproj text svneol=native#text/xml
*.vcxproj text svneol=native#text/xml
*.vsprops text svneol=native#text/xml
*.wav binary svneol=unset#audio/wav
*.xls binary svneol=unset#application/vnd.ms-excel
*.zip binary svneol=unset#application/zip
# Text formats
.htaccess text svneol=native#text/plain
*.bbk text svneol=native#text/xml
*.cmake text svneol=native#text/plain
*.css text svneol=native#text/css
*.dtd text svneol=native#text/xml
*.htm text svneol=native#text/html
*.html text svneol=native#text/html
*.ini text svneol=native#text/plain
*.log text svneol=native#text/plain
*.mak text svneol=native#text/plain
*.qbk text svneol=native#text/plain
*.rst text svneol=native#text/plain
*.sql text svneol=native#text/x-sql
*.txt text svneol=native#text/plain
*.xhtml text svneol=native#text/xhtml%2Bxml
*.xml text svneol=native#text/xml
*.xsd text svneol=native#text/xml
*.xsl text svneol=native#text/xml
*.xslt text svneol=native#text/xml
*.xul text svneol=native#text/xul
*.yml text svneol=native#text/plain
boost-no-inspect text svneol=native#text/plain
CHANGES text svneol=native#text/plain
COPYING text svneol=native#text/plain
INSTALL text svneol=native#text/plain
Jamfile text svneol=native#text/plain
Jamroot text svneol=native#text/plain
Jamfile.v2 text svneol=native#text/plain
Jamrules text svneol=native#text/plain
Makefile* text svneol=native#text/plain
README text svneol=native#text/plain
TODO text svneol=native#text/plain
# Code formats
*.c text svneol=native#text/plain
*.cpp text svneol=native#text/plain
*.h text svneol=native#text/plain
*.hpp text svneol=native#text/plain
*.ipp text svneol=native#text/plain
*.tpp text svneol=native#text/plain
*.jam text svneol=native#text/plain
*.java text svneol=native#text/plain

build/Jamfile.v2 Normal file

@@ -0,0 +1,32 @@
# Boost.Atomic Library Jamfile
#
# Copyright Helge Bahmann 2011.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import common ;
project boost/atomic
: requirements
<threading>multi
<link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
<define>BOOST_ATOMIC_SOURCE
: usage-requirements
<link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
: source-location ../src
;
alias atomic_sources
: lockpool.cpp
;
explicit atomic_sources ;
lib boost_atomic
: atomic_sources
;
boost-install boost_atomic ;

doc/Jamfile.v2 Normal file

@@ -0,0 +1,26 @@
# Boost.Atomic library documentation Jamfile
#
# Copyright Helge Bahmann 2011.
# Copyright Tim Blechmann 2012.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import quickbook ;
import boostbook : boostbook ;
xml atomic : atomic.qbk ;
boostbook standalone
: atomic
: <xsl:param>boost.root=../../../..
<xsl:param>boost.libraries=../../../libraries.htm
<format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/atomic/doc/html
;
install css : [ glob $(BOOST_ROOT)/doc/src/*.css ]
: <location>html ;
install images : [ glob $(BOOST_ROOT)/doc/src/images/*.png ]
: <location>html/images ;
explicit css ;
explicit images ;

doc/atomic.hpp Normal file

@@ -0,0 +1,526 @@
/** \file boost/atomic.hpp */
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/* this is just a pseudo-header file fed to doxygen
to more easily generate the class documentation; will
be replaced by proper documentation down the road */
namespace boost {
/**
\brief Memory ordering constraints
This defines the relative order of one atomic operation
and other memory operations (loads, stores, other atomic operations)
executed by the same thread.
The order of operations specified by the programmer in the
source code ("program order") does not necessarily match
the order in which they are actually executed on the target system:
Both compiler as well as processor may reorder operations
quite arbitrarily. <B>Specifying the wrong ordering
constraint will therefore generally result in an incorrect program.</B>
*/
enum memory_order {
/**
\brief No constraint
Atomic operation and other memory operations may be reordered freely.
*/
memory_order_relaxed,
/**
\brief Data dependence constraint
Atomic operation must strictly precede any memory operation that
computationally depends on the outcome of the atomic operation.
*/
memory_order_consume,
/**
\brief Acquire memory
Atomic operation must strictly precede all memory operations that
follow in program order.
*/
memory_order_acquire,
/**
\brief Release memory
Atomic operation must strictly follow all memory operations that precede
in program order.
*/
memory_order_release,
/**
\brief Acquire and release memory
Combines the effects of \ref memory_order_acquire and \ref memory_order_release
*/
memory_order_acq_rel,
/**
\brief Sequentially consistent
Produces the same result as \ref memory_order_acq_rel, but additionally
enforces globally sequentially consistent execution
*/
memory_order_seq_cst
};
/**
\brief Atomic datatype
An atomic variable. Provides methods to modify this variable atomically.
Valid template parameters are:
- integral data types (char, short, int, ...)
- pointer data types
- any other data type that has a non-throwing default
constructor and that can be copied via <TT>memcpy</TT>
Unless specified otherwise, any memory ordering constraint can be used
with any of the atomic operations.
*/
template<typename Type>
class atomic {
public:
/**
\brief Create uninitialized atomic variable
Creates an atomic variable. Its initial value is undefined.
*/
atomic();
/**
\brief Create and initialize atomic variable
\param value Initial value
Creates and initializes an atomic variable.
*/
atomic(Type value);
/**
\brief Read the current value of the atomic variable
\param order Memory ordering constraint, see \ref memory_order
\return Current value of the variable
Valid memory ordering constraints are:
- @c memory_order_relaxed
- @c memory_order_consume
- @c memory_order_acquire
- @c memory_order_seq_cst
*/
Type load(memory_order order=memory_order_seq_cst) const;
/**
\brief Write new value to atomic variable
\param value New value
\param order Memory ordering constraint, see \ref memory_order
Valid memory ordering constraints are:
- @c memory_order_relaxed
- @c memory_order_release
- @c memory_order_seq_cst
*/
void store(Type value, memory_order order=memory_order_seq_cst);
/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param order Memory ordering constraint, see \ref memory_order
\return @c true if value was changed
Atomically performs the following operation
\code
if (variable==expected) {
variable=desired;
return true;
} else {
expected=variable;
return false;
}
\endcode
This operation may fail "spuriously", i.e. the state of the variable
is unchanged even though the expected value was found (this is the
case on architectures using "load-linked"/"store conditional" to
implement the operation).
The established memory order will be @c order if the operation
is successful. If the operation is unsuccessful, the
memory order will be
- @c memory_order_relaxed if @c order is @c memory_order_acquire ,
@c memory_order_relaxed or @c memory_order_consume
- @c memory_order_release if @c order is @c memory_order_acq_rel
or @c memory_order_release
- @c memory_order_seq_cst if @c order is @c memory_order_seq_cst
*/
bool compare_exchange_weak(
Type &expected,
Type desired,
memory_order order=memory_order_seq_cst);
/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param success_order Memory ordering constraint if operation
is successful
\param failure_order Memory ordering constraint if operation is unsuccessful
\return @c true if value was changed
Atomically performs the following operation
\code
if (variable==expected) {
variable=desired;
return true;
} else {
expected=variable;
return false;
}
\endcode
This operation may fail "spuriously", i.e. the state of the variable
is unchanged even though the expected value was found (this is the
case on architectures using "load-linked"/"store conditional" to
implement the operation).
The constraint imposed by @c success_order may not be
weaker than the constraint imposed by @c failure_order.
*/
bool compare_exchange_weak(
Type &expected,
Type desired,
memory_order success_order,
memory_order failure_order);
/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param order Memory ordering constraint, see \ref memory_order
\return @c true if value was changed
Atomically performs the following operation
\code
if (variable==expected) {
variable=desired;
return true;
} else {
expected=variable;
return false;
}
\endcode
In contrast to \ref compare_exchange_weak, this operation will never
fail spuriously. Since compare-and-swap must generally be retried
in a loop, implementors are advised to prefer \ref compare_exchange_weak
where feasible.
The established memory order will be @c order if the operation
is successful. If the operation is unsuccessful, the
memory order will be
- @c memory_order_relaxed if @c order is @c memory_order_acquire ,
@c memory_order_relaxed or @c memory_order_consume
- @c memory_order_release if @c order is @c memory_order_acq_rel
or @c memory_order_release
- @c memory_order_seq_cst if @c order is @c memory_order_seq_cst
*/
bool compare_exchange_strong(
Type &expected,
Type desired,
memory_order order=memory_order_seq_cst);
/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param success_order Memory ordering constraint if operation
is successful
\param failure_order Memory ordering constraint if operation is unsuccessful
\return @c true if value was changed
Atomically performs the following operation
\code
if (variable==expected) {
variable=desired;
return true;
} else {
expected=variable;
return false;
}
\endcode
In contrast to \ref compare_exchange_weak, this operation will never
fail spuriously. Since compare-and-swap must generally be retried
in a loop, implementors are advised to prefer \ref compare_exchange_weak
where feasible.
The constraint imposed by @c success_order may not be
weaker than the constraint imposed by @c failure_order.
*/
bool compare_exchange_strong(
Type &expected,
Type desired,
memory_order success_order,
memory_order failure_order);
/**
\brief Atomically exchange variable
\param value New value
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable
Atomically exchanges the value of the variable with the new
value and returns its old value.
*/
Type exchange(Type value, memory_order order=memory_order_seq_cst);
/**
\brief Atomically add and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable
Atomically adds operand to the variable and returns its
old value.
*/
Type fetch_add(Type operand, memory_order order=memory_order_seq_cst);
/**
\brief Atomically subtract and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable
Atomically subtracts operand from the variable and returns its
old value.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
@c operand is of type @c ptrdiff_t and the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type fetch_sub(Type operand, memory_order order=memory_order_seq_cst);
/**
\brief Atomically perform bitwise "AND" and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable
Atomically performs bitwise "AND" with the variable and returns its
old value.
This method is available only if \c Type is an integral type.
*/
Type fetch_and(Type operand, memory_order order=memory_order_seq_cst);
/**
\brief Atomically perform bitwise "OR" and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable
Atomically performs bitwise "OR" with the variable and returns its
old value.
This method is available only if \c Type is an integral type.
*/
Type fetch_or(Type operand, memory_order order=memory_order_seq_cst);
/**
\brief Atomically perform bitwise "XOR" and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable
Atomically performs bitwise "XOR" with the variable and returns its
old value.
This method is available only if \c Type is an integral type.
*/
Type fetch_xor(Type operand, memory_order order=memory_order_seq_cst);
/**
\brief Implicit load
\return Current value of the variable
The same as <tt>load(memory_order_seq_cst)</tt>. Avoid using
the implicit conversion operator, use \ref load with
an explicit memory ordering constraint.
*/
operator Type(void) const;
/**
\brief Implicit store
\param value New value
\return Copy of @c value
The same as <tt>store(value, memory_order_seq_cst)</tt>. Avoid using
the implicit conversion operator, use \ref store with
an explicit memory ordering constraint.
*/
Type operator=(Type v);
/**
\brief Atomically perform bitwise "AND" and return new value
\param operand Operand
\return New value of the variable
The same as <tt>fetch_and(operand, memory_order_seq_cst)&operand</tt>.
Avoid using the implicit bitwise "AND" operator, use \ref fetch_and
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type.
*/
Type operator&=(Type operand);
/**
\brief Atomically perform bitwise "OR" and return new value
\param operand Operand
\return New value of the variable
The same as <tt>fetch_or(operand, memory_order_seq_cst)|operand</tt>.
Avoid using the implicit bitwise "OR" operator, use \ref fetch_or
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type.
*/
Type operator|=(Type operand);
/**
\brief Atomically perform bitwise "XOR" and return new value
\param operand Operand
\return New value of the variable
The same as <tt>fetch_xor(operand, memory_order_seq_cst)^operand</tt>.
Avoid using the implicit bitwise "XOR" operator, use \ref fetch_xor
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type.
*/
Type operator^=(Type operand);
/**
\brief Atomically add and return new value
\param operand Operand
\return New value of the variable
The same as <tt>fetch_add(operand, memory_order_seq_cst)+operand</tt>.
Avoid using the implicit add operator, use \ref fetch_add
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
@c operand is of type @c ptrdiff_t and the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator+=(Type operand);
/**
\brief Atomically subtract and return new value
\param operand Operand
\return New value of the variable
The same as <tt>fetch_sub(operand, memory_order_seq_cst)-operand</tt>.
Avoid using the implicit subtract operator, use \ref fetch_sub
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
@c operand is of type @c ptrdiff_t and the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator-=(Type operand);
/**
\brief Atomically increment and return new value
\return New value of the variable
The same as <tt>fetch_add(1, memory_order_seq_cst)+1</tt>.
Avoid using the implicit increment operator, use \ref fetch_add
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator++(void);
/**
\brief Atomically increment and return old value
\return Old value of the variable
The same as <tt>fetch_add(1, memory_order_seq_cst)</tt>.
Avoid using the implicit increment operator, use \ref fetch_add
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator++(int);
/**
\brief Atomically decrement and return new value
\return New value of the variable
The same as <tt>fetch_sub(1, memory_order_seq_cst)-1</tt>.
Avoid using the implicit decrement operator, use \ref fetch_sub
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator--(void);
/**
\brief Atomically decrement and return old value
\return Old value of the variable
The same as <tt>fetch_sub(1, memory_order_seq_cst)</tt>.
Avoid using the implicit decrement operator, use \ref fetch_sub
with an explicit memory ordering constraint.
This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator--(int);
private:
/** \brief Deleted copy constructor */
atomic(const atomic &);
/** \brief Deleted copy assignment */
void operator=(const atomic &);
};
/**
\brief Insert explicit fence
\param order Memory ordering constraint
Inserts an explicit fence. The exact semantic depends on the
type of fence inserted:
- \c memory_order_relaxed: No operation
- \c memory_order_release: Performs a "release" operation
- \c memory_order_acquire or \c memory_order_consume: Performs an
"acquire" operation
- \c memory_order_acq_rel: Performs both an "acquire" and a "release"
operation
- \c memory_order_seq_cst: Performs both an "acquire" and a "release"
operation and in addition there exists a global total order of
all \c memory_order_seq_cst operations
*/
void atomic_thread_fence(memory_order order);
}

doc/atomic.qbk Normal file

@@ -0,0 +1,707 @@
[/
/ Copyright (c) 2009 Helge Bahmann
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]
[library Boost.Atomic
[quickbook 1.4]
[authors [Bahmann, Helge]]
[copyright 2011 Helge Bahmann]
[copyright 2012 Tim Blechmann]
[id atomic]
[dirname atomic]
[purpose Atomic operations]
[license
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
[@http://www.boost.org/LICENSE_1_0.txt])
]
]
[section:introduction Introduction]
[section:introduction_presenting Presenting Boost.Atomic]
[*Boost.Atomic] is a library that provides [^atomic]
data types and operations on these data types, as well as memory
ordering constraints required for coordinating multiple threads through
atomic variables. It implements the interface as defined by the C++11
standard, but makes this feature available for platforms lacking
system/compiler support for this particular C++11 feature.
Users of this library should already be familiar with concurrency
in general, as well as elementary concepts such as "mutual exclusion".
The implementation makes use of processor-specific instructions where
possible (via inline assembler, platform libraries or compiler
intrinsics), and falls back to "emulating" atomic operations through
locking.
[endsect]
[section:introduction_purpose Purpose]
Operations on "ordinary" variables are not guaranteed to be atomic.
This means that with [^int n=0] initially, two threads concurrently
executing
[c++]
void function()
{
n ++;
}
might result in [^n==1] instead of 2: Each thread will read the
old value into a processor register, increment it and write the result
back. Both threads may therefore write [^1], unaware that the other thread
is doing likewise.
Declaring [^atomic<int> n=0] instead, the same operation on
this variable will always result in [^n==2] as each operation on this
variable is ['atomic]: This means that each operation behaves as if it
were strictly sequentialized with respect to the other.
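For illustration, the increment above can be rewritten with an atomic
counter as follows (a sketch; the operation defaults to
[^memory_order_seq_cst]):
[c++]

#include <boost/atomic.hpp>

boost::atomic<int> n(0);

void function()
{
    n.fetch_add(1); // atomic read-modify-write; no update can be lost
}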
Atomic variables are useful for two purposes:
* as a means for coordinating multiple threads via custom
coordination protocols
* as faster alternatives to "locked" access to simple variables
Take a look at the [link atomic.usage_examples examples] section
for common patterns.
[endsect]
[endsect]
[section:thread_coordination Thread coordination using Boost.Atomic]
The most common use of [*Boost.Atomic] is to realize custom
thread synchronization protocols: The goal is to coordinate
accesses of threads to shared variables in order to avoid
"conflicts". The
programmer must be aware of the fact that
compilers, CPUs and the cache
hierarchies may generally reorder memory references at will.
As a consequence a program such as:
[c++]
int x = 0, y = 0;
thread1:
x = 1;
y = 1;
thread2
if (y == 1) {
assert(x == 1);
}
might indeed fail as there is no guarantee that the read of `x`
by thread2 "sees" the write by thread1.
[*Boost.Atomic] uses a synchronisation concept based on the
['happens-before] relation to describe the guarantees under
which situations such as the above one cannot occur.
The remainder of this section will discuss ['happens-before] in
a "hands-on" way instead of giving a fully formalized definition.
The reader is encouraged to additionally have a
look at the discussion of the correctness of a few of the
[link atomic.usage_examples examples] afterwards.
[section:mutex Enforcing ['happens-before] through mutual exclusion]
As an introductory example to understand how arguing using
['happens-before] works, consider two threads synchronizing
using a common mutex:
[c++]
mutex m;
thread1:
m.lock();
... /* A */
m.unlock();
thread2:
m.lock();
... /* B */
m.unlock();
The "lockset-based intuition" would be to argue that A and B
cannot be executed concurrently as the code paths require a
common lock to be held.
One can however also arrive at the same conclusion using
['happens-before]: Either thread1 or thread2 will succeed first
at [^m.lock()]. If this is thread1, then as a consequence,
thread2 cannot succeed at [^m.lock()] before thread1 has executed
[^m.unlock()], consequently A ['happens-before] B in this case.
By symmetry, if thread2 succeeds at [^m.lock()] first, we can
conclude B ['happens-before] A.
Since this already exhausts all options, we can conclude that
either A ['happens-before] B or B ['happens-before] A must
always hold. Obviously we cannot state ['which] of the two relationships
holds, but either one is sufficient to conclude that A and B
cannot conflict.
Compare the [link boost_atomic.usage_examples.example_spinlock spinlock]
implementation to see how the mutual exclusion concept can be
mapped to [*Boost.Atomic].
[endsect]
[section:release_acquire ['happens-before] through [^release] and [^acquire]]
The most basic pattern for coordinating threads via [*Boost.Atomic]
uses [^release] and [^acquire] on an atomic variable for coordination: If ...
* ... thread1 performs an operation A,
* ... thread1 subsequently writes (or atomically
modifies) an atomic variable with [^release] semantic,
* ... thread2 reads (or atomically reads-and-modifies)
this value from the same atomic variable with
[^acquire] semantic and
* ... thread2 subsequently performs an operation B,
... then A ['happens-before] B.
Consider the following example
[c++]
atomic<int> a(0);
thread1:
... /* A */
a.fetch_add(1, memory_order_release);
thread2:
int tmp = a.load(memory_order_acquire);
if (tmp == 1) {
... /* B */
} else {
... /* C */
}
In this example, two avenues for execution are possible:
* The [^store] operation by thread1 precedes the [^load] by thread2:
In this case thread2 will execute B and "A ['happens-before] B"
holds as all of the criteria above are satisfied.
* The [^load] operation by thread2 precedes the [^store] by thread1:
In this case, thread2 will execute C, but "A ['happens-before] C"
does ['not] hold: thread2 does not read the value written by
thread1 through [^a].
Therefore, A and B cannot conflict, but A and C ['can] conflict.
[endsect]
[section:fences Fences]
Ordering constraints are generally specified together with an access to
an atomic variable. It is however also possible to issue "fence"
operations in isolation; in this case, the fence operates in
conjunction with preceding (for `acquire`, `consume` or `seq_cst`
operations) or succeeding (for `release` or `seq_cst`) atomic
operations.
The example from the previous section could also be written in
the following way:
[c++]
atomic<int> a(0);
thread1:
... /* A */
atomic_thread_fence(memory_order_release);
a.fetch_add(1, memory_order_relaxed);
thread2:
int tmp = a.load(memory_order_relaxed);
if (tmp == 1) {
atomic_thread_fence(memory_order_acquire);
... /* B */
} else {
... /* C */
}
This provides the same ordering guarantees as previously, but
elides a (possibly expensive) memory ordering operation in
the case C is executed.
[endsect]
[section:release_consume ['happens-before] through [^release] and [^consume]]
The second pattern for coordinating threads via [*Boost.Atomic]
uses [^release] and [^consume] on an atomic variable for coordination: If ...
* ... thread1 performs an operation A,
* ... thread1 subsequently writes (or atomically modifies) an
atomic variable with [^release] semantic,
* ... thread2 reads (or atomically reads-and-modifies)
this value from the same atomic variable with [^consume] semantic and
* ... thread2 subsequently performs an operation B that is ['computationally
dependent on the value of the atomic variable],
... then A ['happens-before] B.
Consider the following example
[c++]
atomic<int> a(0);
complex_data_structure data[2];
thread1:
data[1] = ...; /* A */
a.store(1, memory_order_release);
thread2:
int index = a.load(memory_order_consume);
complex_data_structure tmp = data[index]; /* B */
In this example, two avenues for execution are possible:
* The [^store] operation by thread1 precedes the [^load] by thread2:
In this case thread2 will read [^data\[1\]] and "A ['happens-before] B"
holds as all of the criteria above are satisfied.
* The [^load] operation by thread2 precedes the [^store] by thread1:
In this case thread2 will read [^data\[0\]] and "A ['happens-before] B"
does ['not] hold: thread2 does not read the value written by
thread1 through [^a].
Here, the ['happens-before] relationship helps ensure that any
accesses (presumably writes) to [^data\[1\]] by thread1 happen before
the accesses (presumably reads) to [^data\[1\]] by thread2:
Lacking this relationship, thread2 might see stale/inconsistent
data.
Note that it is essential in this example that operation B is computationally
dependent on the value of the atomic variable; the following program,
which replaces the data dependency with a control dependency, would
therefore be erroneous:
[c++]
atomic<int> a(0);
complex_data_structure data[2];
thread1:
data[1] = ...; /* A */
a.store(1, memory_order_release);
thread2:
int index = a.load(memory_order_consume);
complex_data_structure tmp;
if (index == 0)
tmp = data[0];
else
tmp = data[1];
[^consume] is most commonly (and most safely! see
[link atomic.limitations limitations]) used with
pointers, compare for example the
[link boost_atomic.usage_examples.singleton singleton with double-checked locking].
[endsect]
[section:seq_cst Sequential consistency]
The third pattern for coordinating threads via [*Boost.Atomic]
uses [^seq_cst] for coordination: If ...
* ... thread1 performs an operation A,
* ... thread1 subsequently performs any operation with [^seq_cst],
* ... thread1 subsequently performs an operation B,
* ... thread2 performs an operation C,
* ... thread2 subsequently performs any operation with [^seq_cst],
* ... thread2 subsequently performs an operation D,
then either "A ['happens-before] D" or "C ['happens-before] B" holds.
In this case it does not matter whether thread1 and thread2 operate
on the same or different atomic variables, or use a "stand-alone"
[^atomic_thread_fence] operation.
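The classic "store buffering" litmus test illustrates this rule (a
sketch in the notation used above; [^x] and [^y] are hypothetical
atomic variables):
[c++]

atomic<int> x(0), y(0);
int r1, r2;

thread1:
x.store(1, memory_order_seq_cst); /* A */
r1 = y.load(memory_order_seq_cst); /* B */

thread2:
y.store(1, memory_order_seq_cst); /* C */
r2 = x.load(memory_order_seq_cst); /* D */

Since either "A ['happens-before] D" or "C ['happens-before] B" must
hold, at least one thread observes the other's store; the outcome
[^r1 == 0 && r2 == 0], which weaker orderings would permit, is
excluded.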
[endsect]
[endsect]
[section:interface Programming interfaces]
[section:interface_memory_order Memory order]
The enumeration [^boost::memory_order] defines the following
values to represent memory ordering constraints:
[table
[[Constant] [Description]]
[[`memory_order_relaxed`] [No ordering constraint.
Informally speaking, following operations may be reordered before,
preceding operations may be reordered after the atomic
operation. This constraint is suitable only when
either a) further operations do not depend on the outcome
of the atomic operation or b) ordering is enforced through
stand-alone `atomic_thread_fence` operations
]]
[[`memory_order_release`] [
Perform `release` operation. Informally speaking,
prevents all preceding memory operations from being reordered
past this point.
]]
[[`memory_order_acquire`] [
Perform `acquire` operation. Informally speaking,
prevents succeeding memory operations from being reordered
before this point.
]]
[[`memory_order_consume`] [
Perform `consume` operation. More restrictive (and
usually more efficient) than `memory_order_acquire`
as it only affects succeeding operations that are
computationally-dependent on the value retrieved from
an atomic variable.
]]
[[`memory_order_acq_rel`] [Perform both `release` and `acquire` operation]]
[[`memory_order_seq_cst`] [
Enforce sequential consistency. Implies `memory_order_acq_rel`, but
additionally enforces a total order on all operations so qualified.
]]
]
See section [link atomic.thread_coordination ['happens-before]] for explanation
of the various ordering constraints.
[endsect]
[section:interface_atomic_object Atomic objects]
[^boost::atomic<['T]>] provides methods for atomically accessing
variables of a suitable type [^['T]]. The type is suitable if
it satisfies one of the following constraints:
* it is an integer, boolean, enum or pointer type
* it is any other data-type ([^class] or [^struct]) that has
a non-throwing default constructor, that is copyable via
[^memcpy] and comparable via [^memcmp].
Note that all classes having a trivial default constructor,
no destructor and no virtual methods satisfy the second condition
according to C++98. On a given platform, other data-types ['may]
also satisfy this constraint, however you should exercise
caution as the behaviour becomes implementation-defined. Also be warned
that structures with "padding" between data members may compare
non-equal via [^memcmp] even though all members are equal.
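For example, a plain POD structure satisfies the second constraint
(a sketch; [^point] is a hypothetical type, and operations on it may
be emulated through locking on platforms lacking suitably sized
atomic instructions):
[c++]

#include <boost/atomic.hpp>

struct point {
    int x;
    int y; // no padding between members on typical platforms
};

boost::atomic<point> p; // copied via memcpy, compared via memcmp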
[section:interface_atomic_generic [^boost::atomic<['T]>] template class]
All atomic objects support the following operations:
[table
[[Syntax] [Description]]
[
[`atomic()`]
[Initialize to an unspecified value]
]
[
[`atomic(T initial_value)`]
[Initialize to [^initial_value]]
]
[
[`bool is_lock_free()`]
[Checks if the atomic object is lock-free]
]
[
[`T load(memory_order order)`]
[Return current value]
]
[
[`void store(T value, memory_order order)`]
[Write new value to atomic variable]
]
[
[`T exchange(T new_value, memory_order order)`]
[Exchange current value with `new_value`, returning current value]
]
[
[`bool compare_exchange_weak(T & expected, T desired, memory_order order)`]
[Compare current value with `expected`, change it to `desired` if matches.
Returns `true` if an exchange has been performed, and always writes the
previous value back in `expected`. May fail spuriously, so must generally be
retried in a loop.]
]
[
[`bool compare_exchange_weak(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
[Compare current value with `expected`, change it to `desired` if matches.
Returns `true` if an exchange has been performed, and always writes the
previous value back in `expected`. May fail spuriously, so must generally be
retried in a loop.]
]
[
[`bool compare_exchange_strong(T & expected, T desired, memory_order order)`]
[Compare current value with `expected`, change it to `desired` if matches.
Returns `true` if an exchange has been performed, and always writes the
previous value back in `expected`.]
]
[
[`bool compare_exchange_strong(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
[Compare current value with `expected`, change it to `desired` if matches.
Returns `true` if an exchange has been performed, and always writes the
previous value back in `expected`.]
]
]
`order` always defaults to `memory_order_seq_cst`.
The `compare_exchange_weak`/`compare_exchange_strong` variants
taking four parameters differ from the three parameter variants
in that they allow a different memory ordering constraint to
be specified in case the operation fails.
In addition to these explicit operations, each
[^atomic<['T]>] object also supports
implicit [^store] and [^load] through the use of "assignment"
and "conversion to [^T]" operators. Avoid using these operators,
as they do not allow explicit specification of a memory ordering
constraint.
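As an illustration, a typical [^compare_exchange_weak] retry loop
using the four-parameter variant (a sketch; the acquire/release
orderings chosen here are one common option, not a requirement):
[c++]

boost::atomic<int> a(1);

void atomically_double()
{
    int expected = a.load(boost::memory_order_relaxed);
    // On failure the current value is written back into 'expected',
    // so the loop retries with fresh data until the exchange succeeds.
    while (!a.compare_exchange_weak(expected, expected * 2,
                                    boost::memory_order_acq_rel,
                                    boost::memory_order_relaxed))
    {}
}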
[endsect]
[section:interface_atomic_integral [^boost::atomic<['integral]>] template class]
In addition to the operations listed in the previous section,
[^boost::atomic<['I]>] for integral
types [^['I]] supports the following operations:
[table
[[Syntax] [Description]]
[
[`T fetch_add(T v, memory_order order)`]
[Add `v` to variable, returning previous value]
]
[
[`T fetch_sub(T v, memory_order order)`]
[Subtract `v` from variable, returning previous value]
]
[
[`T fetch_and(T v, memory_order order)`]
[Apply bit-wise "and" with `v` to variable, returning previous value]
]
[
[`T fetch_or(T v, memory_order order)`]
[Apply bit-wise "or" with `v` to variable, returning previous value]
]
[
[`T fetch_xor(T v, memory_order order)`]
[Apply bit-wise "xor" with `v` to variable, returning previous value]
]
]
`order` always defaults to `memory_order_seq_cst`.
In addition to these explicit operations, each
[^boost::atomic<['I]>] object also
supports implicit pre-/post- increment/decrement, as well
as the operators `+=`, `-=`, `&=`, `|=` and `^=`.
Avoid using these operators,
as they do not allow explicit specification of a memory ordering
constraint.
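For example, [^fetch_or] can atomically set a flag bit and report
whether it was already set (a sketch; [^flags] and [^dirty_bit] are
hypothetical names):
[c++]

boost::atomic<unsigned int> flags(0);
const unsigned int dirty_bit = 1u << 0;

bool mark_dirty()
{
    unsigned int old = flags.fetch_or(dirty_bit, boost::memory_order_acq_rel);
    return (old & dirty_bit) == 0; // true if this call set the bit
}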
[endsect]
[section:interface_atomic_pointer [^boost::atomic<['pointer]>] template class]
In addition to the operations applicable to all atomic objects,
[^boost::atomic<['P]>] for pointer
types [^['P]] (other than [^void] pointers) supports the following operations:
[table
[[Syntax] [Description]]
[
[`T fetch_add(ptrdiff_t v, memory_order order)`]
[Add `v` to variable, returning previous value]
]
[
[`T fetch_sub(ptrdiff_t v, memory_order order)`]
[Subtract `v` from variable, returning previous value]
]
]
`order` always defaults to `memory_order_seq_cst`.
In addition to these explicit operations, each
[^boost::atomic<['P]>] object also
supports implicit pre-/post- increment/decrement, as well
as the operators `+=`, `-=`. Avoid using these operators,
as they do not allow explicit specification of a memory ordering
constraint.
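For example, since pointer arithmetic rules apply, [^fetch_add] on an
atomic pointer advances by whole elements, which lets threads claim
consecutive slots of a shared array (a sketch; [^buffer] and
[^cursor] are hypothetical, and no bounds checking is performed):
[c++]

int buffer[64];
boost::atomic<int *> cursor(buffer);

int * claim_slot()
{
    // advances the pointer by one int element, not by one byte
    return cursor.fetch_add(1, boost::memory_order_relaxed);
}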
[endsect]
[endsect]
[section:interface_fences Fences]
[table
[[Syntax] [Description]]
[
[`void atomic_thread_fence(memory_order order)`]
[Issue fence for coordination with other threads.]
]
[
[`void atomic_signal_fence(memory_order order)`]
[Issue fence for coordination with a signal handler (only within the same thread).]
]
]
[endsect]
[section:feature_macros Feature testing macros]
[*Boost.Atomic] defines a number of macros to allow compile-time
detection of whether an atomic data type is implemented using
"true" atomic operations, or whether an internal "lock" is
used to provide atomicity. The following macros will be
defined to `0` if operations on the data type always
require a lock, to `1` if operations on the data type may
sometimes require a lock, and to `2` if they are always lock-free:
[table
[[Macro] [Description]]
[
[`BOOST_ATOMIC_CHAR_LOCK_FREE`]
[Indicate whether `atomic<char>` (including signed/unsigned variants) is lock-free]
]
[
[`BOOST_ATOMIC_SHORT_LOCK_FREE`]
[Indicate whether `atomic<short>` (including signed/unsigned variants) is lock-free]
]
[
[`BOOST_ATOMIC_INT_LOCK_FREE`]
[Indicate whether `atomic<int>` (including signed/unsigned variants) is lock-free]
]
[
[`BOOST_ATOMIC_LONG_LOCK_FREE`]
[Indicate whether `atomic<long>` (including signed/unsigned variants) is lock-free]
]
[
[`BOOST_ATOMIC_LLONG_LOCK_FREE`]
[Indicate whether `atomic<long long>` (including signed/unsigned variants) is lock-free]
]
[
[`BOOST_ATOMIC_ADDRESS_LOCK_FREE`]
[Indicate whether `atomic<T *>` is lock-free]
]
]
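Code that must not silently fall back to locking (for example when
placing atomics in shared memory, see the
[link atomic.limitations limitations] section) can check these macros
at compile time; a sketch:
[c++]

#include <boost/atomic.hpp>

#if BOOST_ATOMIC_INT_LOCK_FREE != 2
#error "this code requires always-lock-free atomic<int>"
#endif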
[endsect]
[endsect]
[section:usage_examples Usage examples]
[include examples.qbk]
[endsect]
[/
[section:platform_support Implementing support for additional platforms]
[include platform.qbk]
[endsect]
]
[/ [xinclude autodoc.xml] ]
[section:limitations Limitations]
While [*Boost.Atomic] strives to implement the atomic operations
from C++11 as faithfully as possible, there are a few
limitations that cannot be lifted without compiler support:
* [*Using non-POD-classes as template parameter to `atomic<T>` results
in undefined behavior]: This means that any class containing a
constructor, destructor, virtual methods or access control
specifications is not a valid argument in C++98. C++11 relaxes
this slightly by allowing "trivial" classes containing only
empty constructors. [*Advice]: Use only POD types.
* [*C++98 compilers may transform computation- to control-dependency]:
Crucially, `memory_order_consume` only affects computationally-dependent
operations, but in general there is nothing preventing a compiler
from transforming a computation dependency into a control dependency.
A C++11 compiler would be forbidden from such a transformation.
[*Advice]: Use `memory_order_consume` only in conjunction with
pointer values, as the compiler cannot speculate and transform
these into control dependencies.
* [*Fence operations enforce "too strong" compiler ordering]:
Semantically, `memory_order_acquire`/`memory_order_consume`
and `memory_order_release` need to restrain reordering of
memory operations only in one direction. Since there is no
way to express this constraint to the compiler, these act
as "full compiler barriers" in this implementation. In corner
cases this may lead to worse code than a C++11 compiler
could generate.
* [*No interprocess fallback]: using `atomic<T>` in shared memory only works
correctly if `atomic<T>::is_lock_free == true`.
[endsect]
[section:porting Porting]
[section:unit_tests Unit tests]
[*Boost.Atomic] provides a unit test suite to verify that the
implementation behaves as expected:
* [*fallback_api.cpp] verifies that the fallback-to-locking aspect
of [*Boost.Atomic] compiles and has correct value semantics.
* [*native_api.cpp] verifies that all atomic operations have correct
value semantics (e.g. "fetch_add" really adds the desired value,
returing the previous). It is a rough "smoke-test" to help weed
out the most obvious mistakes (for example with overflow,
signed/unsigned extension, ...).
* [*lockfree.cpp] verifies that the [*BOOST_ATOMIC_*_LOCK_FREE] macros
are set properly according to the expectations for a given
platform, and that they match up with the [*is_lock_free] member
functions of the [*atomic] object instances.
* [*atomicity.cpp] lets two threads race against each other modifying
a shared variable, verifying that the operations behave atomically
as appropriate. By nature, this test is necessarily stochastic, and
the test self-calibrates to yield 99% confidence that a
positive result indicates absence of an error. This test is
useful even on uni-processor systems with preemption.
* [*ordering.cpp] lets two threads race against each other accessing
multiple shared variables, verifying that the operations
exhibit the expected ordering behavior. By nature, this test is
necessarily stochastic, and the test attempts to self-calibrate to
yield 99% confidence that a positive result indicates absence
of an error. This only works on true multi-processor (or multi-core)
systems. It does not yield any result on uni-processor systems
or emulators (since there is no observable reordering even in
the order=relaxed case) and will report that fact.
[endsect]
[section:tested_compilers Tested compilers]
[*Boost.Atomic] has been tested on and is known to work on
the following compilers/platforms:
* gcc 4.x: i386, x86_64, ppc32, ppc64, armv5, armv6, alpha
* Visual Studio Express 2008/Windows XP, i386
If you have an unsupported platform, contact me and I will
work to add support for it.
[endsect]
[endsect]

doc/examples.qbk Normal file

@@ -0,0 +1,398 @@
[/
/ Copyright (c) 2009 Helge Bahmann
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]
[section:example_reference_counters Reference counting]
The purpose of a ['reference counter] is to count the number
of pointers to an object. The object can be destroyed as
soon as the reference counter reaches zero.
[section Implementation]
[c++]
#include <boost/intrusive_ptr.hpp>
#include <boost/atomic.hpp>
class X {
public:
typedef boost::intrusive_ptr<X> pointer;
X() : refcount_(0) {}
private:
mutable boost::atomic<int> refcount_;
friend void intrusive_ptr_add_ref(const X * x)
{
x->refcount_.fetch_add(1, boost::memory_order_relaxed);
}
friend void intrusive_ptr_release(const X * x)
{
if (x->refcount_.fetch_sub(1, boost::memory_order_release) == 1) {
boost::atomic_thread_fence(boost::memory_order_acquire);
delete x;
}
}
};
[endsect]
[section Usage]
[c++]
X::pointer x = new X;
[endsect]
[section Discussion]
Increasing the reference counter can always be done with
[^memory_order_relaxed]: New references to an object can only
be formed from an existing reference, and passing an existing
reference from one thread to another must already provide any
required synchronization.
It is important to enforce any possible access to the object in
one thread (through an existing reference) to ['happen before]
deleting the object in a different thread. This is achieved
by a "release" operation after dropping a reference (any
access to the object through this reference must obviously
happened before), and an "acquire" operation before
deleting the object.
It would be possible to use [^memory_order_acq_rel] for the
[^fetch_sub] operation, but this results in unneeded "acquire"
operations when the reference counter has not yet reached zero
and may impose a performance penalty.
[endsect]
[endsect]
[section:example_spinlock Spinlock]
The purpose of a ['spin lock] is to prevent multiple threads
from concurrently accessing a shared data structure. In contrast
to a mutex, threads will busy-wait and waste CPU cycles instead
of yielding the CPU to another thread. ['Do not use spinlocks
unless you are certain that you understand the consequences.]
[section Implementation]
[c++]
#include <boost/atomic.hpp>
class spinlock {
private:
typedef enum {Locked, Unlocked} LockState;
boost::atomic<LockState> state_;
public:
spinlock() : state_(Unlocked) {}
void lock()
{
while (state_.exchange(Locked, boost::memory_order_acquire) == Locked) {
/* busy-wait */
}
}
void unlock()
{
state_.store(Unlocked, boost::memory_order_release);
}
};
[endsect]
[section Usage]
[c++]
spinlock s;
s.lock();
// access data structure here
s.unlock();
[endsect]
[section Discussion]
The purpose of the spinlock is to make sure that one access
to the shared data structure always strictly "happens before"
another. The usage of acquire/release in lock/unlock is required
and sufficient to guarantee this ordering.
It would be correct to write the "lock" operation in the following
way:
[c++]
void lock()
{
while (state_.exchange(Locked, boost::memory_order_relaxed) == Locked) {
/* busy-wait */
}
boost::atomic_thread_fence(boost::memory_order_acquire);
}
This "optimization" is however a) useless and b) may in fact hurt:
a) Since the thread will be busily spinning on a blocked spinlock,
it does not matter if it will waste the CPU cycles with just
"exchange" operations or with both useless "exchange" and "acquire"
operations. b) A tight "exchange" loop without any
memory-synchronizing instruction introduced through an "acquire"
operation will on some systems monopolize the memory subsystem
and degrade the performance of other system components.
[endsect]
[endsect]
[section:singleton Singleton with double-checked locking pattern]
The purpose of the ['Singleton with double-checked locking pattern] is to ensure
that at most one instance of a particular object is created.
If one instance has been created already, access to the existing
object should be as light-weight as possible.
[section Implementation]
[c++]
#include <boost/atomic.hpp>
#include <boost/thread/mutex.hpp>
class X {
public:
static X * instance()
{
X * tmp = instance_.load(boost::memory_order_consume);
if (!tmp) {
boost::mutex::scoped_lock guard(instantiation_mutex);
tmp = instance_.load(boost::memory_order_consume);
if (!tmp) {
tmp = new X;
instance_.store(tmp, boost::memory_order_release);
}
}
return tmp;
}
private:
static boost::atomic<X *> instance_;
static boost::mutex instantiation_mutex;
};
boost::atomic<X *> X::instance_(0);
boost::mutex X::instantiation_mutex;
[endsect]
[section Usage]
[c++]
X * x = X::instance();
// dereference x
[endsect]
[section Discussion]
The mutex makes sure that only one instance of the object is
ever created. The [^instance] method must make sure that any
dereference of the object strictly "happens after" creating
the instance in another thread. The use of [^memory_order_release]
after creating and initializing the object and [^memory_order_consume]
before dereferencing the object provides this guarantee.
It would be permissible to use [^memory_order_acquire] instead of
[^memory_order_consume], but this provides a stronger guarantee
than is required since only operations depending on the value of
the pointer need to be ordered.
[endsect]
[endsect]
[section:example_ringbuffer Wait-free ring buffer]
A ['wait-free ring buffer] provides a mechanism for relaying objects
from one single "producer" thread to one single "consumer" thread without
any locks. The operations on this data structure are "wait-free" which
means that each operation finishes within a constant number of steps.
This makes this data structure suitable for use in hard real-time systems
or for communication with interrupt/signal handlers.
[section Implementation]
[c++]
#include <boost/atomic.hpp>
template<typename T, size_t Size>
class ringbuffer {
public:
ringbuffer() : head_(0), tail_(0) {}
bool push(const T & value)
{
size_t head = head_.load(boost::memory_order_relaxed);
size_t next_head = next(head);
if (next_head == tail_.load(boost::memory_order_acquire))
return false;
ring_[head] = value;
head_.store(next_head, boost::memory_order_release);
return true;
}
bool pop(T & value)
{
size_t tail = tail_.load(boost::memory_order_relaxed);
if (tail == head_.load(boost::memory_order_acquire))
return false;
value = ring_[tail];
tail_.store(next(tail), boost::memory_order_release);
return true;
}
private:
size_t next(size_t current)
{
return (current + 1) % Size;
}
T ring_[Size];
boost::atomic<size_t> head_, tail_;
};
[endsect]
[section Usage]
[c++]
ringbuffer<int, 32> r;
// try to insert an element
if (r.push(42)) { /* succeeded */ }
else { /* buffer full */ }
// try to retrieve an element
int value;
if (r.pop(value)) { /* succeeded */ }
else { /* buffer empty */ }
[endsect]
[section Discussion]
The implementation makes sure that the ring indices do
not "lap-around" each other to ensure that no elements
are either lost or read twice.
Furthermore it must guarantee that read-access to a
particular object in [^pop] "happens after" it has been
written in [^push]. This is achieved by writing [^head_ ]
with "release" and reading it with "acquire". Conversely
the implementation also ensures that read access to
a particular ring element "happens before" before
rewriting this element with a new value by accessing [^tail_]
with appropriate ordering constraints.
[endsect]
[endsect]
[section:mp_queue Wait-free multi-producer queue]
The purpose of the ['wait-free multi-producer queue] is to allow
an arbitrary number of producers to enqueue objects which are
retrieved and processed in FIFO order by a single consumer.
[section Implementation]
[c++]
#include <boost/atomic.hpp>

template<typename T>
class waitfree_queue {
public:
struct node {
T data;
node * next;
};
void push(const T &data)
{
node * n = new node;
n->data = data;
node * stale_head = head_.load(boost::memory_order_relaxed);
do {
n->next = stale_head;
} while (!head_.compare_exchange_weak(stale_head, n, boost::memory_order_release));
}
node * pop_all(void)
{
node * last = pop_all_reverse(), * first = 0;
while(last) {
node * tmp = last;
last = last->next;
tmp->next = first;
first = tmp;
}
return first;
}
waitfree_queue() : head_(0) {}
// alternative interface if ordering is of no importance
node * pop_all_reverse(void)
{
return head_.exchange(0, boost::memory_order_consume);
}
private:
boost::atomic<node *> head_;
};
[endsect]
[section Usage]
[c++]
waitfree_queue<int> q;
// insert elements
q.push(42);
q.push(2);
// pop elements
waitfree_queue<int>::node * x = q.pop_all();
while(x) {
waitfree_queue<int>::node * tmp = x;
x = x->next;
// process tmp->data, probably delete it afterwards
delete tmp;
}
[endsect]
[section Discussion]
The implementation guarantees that all objects enqueued are
processed in the order they were enqueued by building a singly-linked
list of objects in reverse processing order. The queue is atomically
emptied by the consumer and brought into correct order.
It must be guaranteed that any access to an object to be enqueued
by the producer "happens before" any access by the consumer. This
is assured by inserting objects into the list with ['release] and
dequeuing them with ['consume] memory order. It is not
necessary to use ['acquire] memory order in [^waitfree_queue::pop_all]
because all operations involved depend on the value of
the atomic pointer through dereference.
[endsect]
[endsect]

doc/platform.qbk Normal file

@@ -0,0 +1,312 @@
[/
/ Copyright (c) 2009 Helge Bahmann
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]
[section:template_organization Organization of class template layers]
The implementation uses multiple layers of template classes that
inherit from the next lower level each and refine or adapt the respective
underlying class:
* [^boost::atomic<T>] is the topmost level, providing
the external interface. Implementation-wise, it does not add anything
(except for hiding copy constructor and assignment operator).
* [^boost::detail::atomic::internal_atomic<T,S=sizeof(T),I=is_integral_type<T> >]:
This layer is mainly responsible for providing the overloaded operators
mapping to API member functions (e.g. [^+=] to [^fetch_add]).
The defaulted template parameter [^I] makes it possible
to expose the correct API functions (via partial template
specialization): For non-integral types, it only
publishes the various [^exchange] functions
as well as load and store, for integral types it
additionally exports arithmetic and logic operations.
[br]
Depending on whether the given type is integral, it
inherits from either [^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
or [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>].
There is however some special-casing: for non-integral types
of size 1, 2, 4 or 8, it will coerce the datatype into an integer representation
and delegate to [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]
-- the rationale is that platform implementors only need to provide
integer-type operations.
* [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]
must provide the full set of operations for an integral type T
(i.e. [^load], [^store], [^exchange],
[^compare_exchange_weak], [^compare_exchange_strong],
[^fetch_add], [^fetch_sub], [^fetch_and],
[^fetch_or], [^fetch_xor], [^is_lock_free]).
The default implementation uses locking to emulate atomic operations, so
this is the level at which implementors should provide template specializations
to add support for platform-specific atomic operations.
[br]
The two separate template parameters allow separate specialization
on size and type (which, with fixed size, cannot
specify more than signedness/unsignedness). The rationale is that
most platform-specific atomic operations usually depend only on the
operand size, so that common implementations for signed/unsigned
types are possible. Signedness allows properly choosing sign-extending
instructions for the [^load] operation, avoiding later
conversion. The expectation is that in most implementations this will
be a normal assignment in C, possibly accompanied by memory
fences, so that the compiler can automatically choose the correct
instruction.
* At the lowest level, [^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
provides the most basic atomic operations ([^load], [^store],
[^exchange], [^compare_exchange_weak],
[^compare_exchange_strong]) for arbitrarily generic data types.
The default implementation uses locking as a fallback mechanism.
Implementors generally do not have to specialize at this level
(since these will not be used for the common integral type sizes
of 1, 2, 4 and 8 bytes), but may do so if they wish to
provide truly atomic operations for "odd" data type sizes.
Some amount of care must be taken as the "raw" data type
passed in from the user through [^boost::atomic<T>]
is visible here -- it thus needs to be type-punned or otherwise
manipulated byte-by-byte to avoid using overloaded assignment,
comparison operators and copy constructors.
[endsect]
[section:platform_atomic_implementation Implementing platform-specific atomic operations]
In principle implementors are responsible for providing the
full range of named member functions of an atomic object
(i.e. [^load], [^store], [^exchange],
[^compare_exchange_weak], [^compare_exchange_strong],
[^fetch_add], [^fetch_sub], [^fetch_and],
[^fetch_or], [^fetch_xor], [^is_lock_free]).
These must be implemented as partial template specializations for
[^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]:
[c++]
    template<typename T>
    class platform_atomic_integral<T, 4>
    {
    public:
        explicit platform_atomic_integral(T v) : i(v) {}
        platform_atomic_integral(void) {}

        T load(memory_order order=memory_order_seq_cst) const volatile
        {
            // platform-specific code
        }
        void store(T v, memory_order order=memory_order_seq_cst) volatile
        {
            // platform-specific code
        }

    private:
        volatile T i;
    };
As noted above, it will usually suffice to specialize on the second
template argument, indicating the size of the data type in bytes.
[section:automatic_buildup Templates for automatic build-up]
Often only a portion of the required operations can be
usefully mapped to machine instructions. Several helper template
classes are provided that can automatically synthesize missing methods to
complete an implementation.
At the minimum, an implementor must provide the
[^load], [^store],
[^compare_exchange_weak] and
[^is_lock_free] methods:
[c++]
    template<typename T>
    class my_atomic_32 {
    public:
        my_atomic_32() {}
        my_atomic_32(T initial_value) : value(initial_value) {}

        T load(memory_order order=memory_order_seq_cst) volatile const
        {
            // platform-specific code
        }
        void store(T new_value, memory_order order=memory_order_seq_cst) volatile
        {
            // platform-specific code
        }
        bool compare_exchange_weak(T &expected, T desired,
            memory_order success_order,
            memory_order failure_order) volatile
        {
            // platform-specific code
        }
        bool is_lock_free() const volatile {return true;}

    protected:
        // typedef is required for classes inheriting from this
        typedef T integral_type;

    private:
        T value;
    };
The template [^boost::detail::atomic::build_atomic_from_minimal]
can then take care of the rest:
[c++]
    template<typename T>
    class platform_atomic_integral<T, 4>
        : public boost::detail::atomic::build_atomic_from_minimal<my_atomic_32<T> >
    {
    public:
        typedef build_atomic_from_minimal<my_atomic_32<T> > super;

        explicit platform_atomic_integral(T v) : super(v) {}
        platform_atomic_integral(void) {}
    };
There are several helper classes to assist in building "complete"
atomic implementations from different starting points:
* [^build_atomic_from_minimal] requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^build_atomic_from_exchange] requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
* [^build_atomic_from_add] requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
* [^fetch_add]
* [^build_atomic_from_typical] (['supported on gcc only]) requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
* [^fetch_add_var] (protected method)
* [^fetch_inc] (protected method)
* [^fetch_dec] (protected method)
This will generate a [^fetch_add] method
that calls [^fetch_inc]/[^fetch_dec]
when the given parameter is a compile-time constant
equal to +1 or -1 respectively, and [^fetch_add_var]
in all other cases. This provides a mechanism for
optimizing the extremely common case of an atomic
variable being used as a counter.
The prototypes of the methods to be implemented are:
[c++]
    template<typename T>
    class my_atomic {
    public:
        T fetch_inc(memory_order order) volatile;
        T fetch_dec(memory_order order) volatile;
        T fetch_add_var(T counter, memory_order order) volatile;
    };
These helper templates are defined in [^boost/atomic/detail/builder.hpp].
[endsect]
[section:automatic_buildup_small Build sub-word-sized atomic data types]
There is one other helper template that can build sub-word-sized
atomic data types even though the underlying architecture allows
only word-sized atomic operations:
[c++]
    template<typename T>
    class platform_atomic_integral<T, 1> :
        public build_atomic_from_larger_type<my_atomic_32<uint32_t>, T>
    {
    public:
        typedef build_atomic_from_larger_type<my_atomic_32<uint32_t>, T> super;

        explicit platform_atomic_integral(T v) : super(v) {}
        platform_atomic_integral(void) {}
    };
The above creates an atomic data type of 1 byte in size, and
uses masking and shifting to map it to 32-bit atomic operations.
The base type must implement [^load], [^store]
and [^compare_exchange_weak] for this to work.
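To make the masking-and-shifting idea concrete, the following is a rough
sketch (not the actual library code) of how a one-byte store can be emulated
on top of a 32-bit compare-and-swap. The [^cas32] helper is a hypothetical
stand-in for the platform primitive, and a little-endian byte layout is
assumed:
[c++]

    #include <boost/cstdint.hpp>

    // Hypothetical stand-in for the platform's 32-bit strong CAS; a real
    // implementation would be a single atomic instruction.
    inline bool cas32(volatile boost::uint32_t *p,
        boost::uint32_t &expected, boost::uint32_t desired)
    {
        if (*p == expected) { *p = desired; return true; }
        expected = *p;
        return false;
    }

    // Store one byte embedded in a 32-bit cell: read the word, splice the
    // new byte in at the right offset and CAS the whole word back, leaving
    // the neighbouring bytes untouched.
    inline void store_byte(volatile boost::uint32_t *word,
        unsigned int offset, boost::uint8_t value)
    {
        const boost::uint32_t mask = boost::uint32_t(0xff) << (offset * 8);
        boost::uint32_t old_word = *word;
        for (;;) {
            boost::uint32_t new_word =
                (old_word & ~mask) | (boost::uint32_t(value) << (offset * 8));
            if (cas32(word, old_word, new_word))
                break;
            // on failure, cas32 refreshed old_word with the current contents
        }
    }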
[endsect]
[section:other_sizes Atomic data types for unusual object sizes]
In unusual circumstances, an implementor may also opt to specialize
[^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
to provide support for atomic objects that do not fit an integral size.
If you do that, keep the following things in mind:
* There is no reason to ever do this for object sizes
of 1, 2, 4 and 8.
* Only the following methods need to be implemented:
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
The type of the data to be stored in the atomic
variable (template parameter [^T])
is exposed to this class, and the type may have
overloaded assignment and comparison operators --
using these overloaded operators, however, would result
in an error. The implementor is responsible for
accessing the objects in a way that does not
invoke either of these operators (using e.g.
[^memcpy] or type-casts).
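As a concrete illustration, a lock-based [^compare_exchange_strong] for such
a type might look like the following sketch (this is not the library's code;
the [^lock]/[^unlock] helpers are placeholders for whatever locking
mechanism the implementation actually uses):
[c++]

    #include <cstring> // memcmp, memcpy

    template<typename T>
    class odd_sized_atomic {
    public:
        explicit odd_sized_atomic(const T &v)
        {
            std::memcpy(buffer_, &v, sizeof(T));
        }
        bool compare_exchange_strong(T &expected, const T &desired)
        {
            lock();
            // compare and copy raw bytes only -- never "==" or "=",
            // which would invoke T's overloaded operators
            bool equal = std::memcmp(buffer_, &expected, sizeof(T)) == 0;
            if (equal)
                std::memcpy(buffer_, &desired, sizeof(T));
            else
                std::memcpy(&expected, buffer_, sizeof(T));
            unlock();
            return equal;
        }
    private:
        void lock() {}   // placeholder for a real lock
        void unlock() {} // placeholder for a real unlock
        char buffer_[sizeof(T)];
    };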
[endsect]
[endsect]
[section:platform_atomic_fences Fences]
Platform implementors need to provide a function performing
the action required for [funcref boost::atomic_thread_fence atomic_thread_fence]
(the fallback implementation will just perform an atomic operation
on an integer object). This is achieved by specializing the
[^boost::detail::atomic::platform_atomic_thread_fence] template
function in the following way:
[c++]
    template<>
    void platform_atomic_thread_fence(memory_order order)
    {
        // platform-specific code here
    }
[endsect]
[section:platform_atomic_puttogether Putting it all together]
The template specializations should be put into a header file
in the [^boost/atomic/detail] directory, preferably
with the supported compiler and architecture indicated in its name.
The file [^boost/atomic/detail/platform.hpp] must
subsequently be modified to conditionally include the new
header.
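For example, the dispatch in [^platform.hpp] might gain a clause along the
following lines (the macros and file name for the new platform are purely
illustrative):
[c++]

    #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

    #include <boost/atomic/detail/gcc-x86.hpp>

    #elif defined(MY_COMPILER) && defined(MY_ARCH) // hypothetical new platform

    #include <boost/atomic/detail/my-compiler-my-arch.hpp>

    #else

    // no specialized implementation available -- fall back to the
    // lock-based emulation
    #include <boost/atomic/detail/base.hpp>

    #endif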
[endsect]

18
include/boost/atomic.hpp Normal file

@@ -0,0 +1,18 @@
#ifndef BOOST_ATOMIC_HPP
#define BOOST_ATOMIC_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// This header includes all Boost.Atomic public headers
#include <boost/atomic/atomic.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif

include/boost/atomic/atomic.hpp Normal file

@@ -0,0 +1,162 @@
#ifndef BOOST_ATOMIC_ATOMIC_HPP
#define BOOST_ATOMIC_ATOMIC_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#include <boost/atomic/detail/type-classification.hpp>
#include <boost/type_traits/is_signed.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
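// Each BOOST_ATOMIC_*_LOCK_FREE macro is defined by the platform header;
// any macro left undefined falls back to 0 here, meaning the corresponding
// type is never lock-free on this platform.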
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
#define BOOST_ATOMIC_CHAR_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
#define BOOST_ATOMIC_INT_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 0
inline void atomic_thread_fence(memory_order)
{
}
#endif
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 0
inline void atomic_signal_fence(memory_order order)
{
atomic_thread_fence(order);
}
#endif
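// boost::atomic<T> pulls its entire implementation from a base class
// selected by T's category (integral, pointer or generic), storage size
// and signedness.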
template<typename T>
class atomic :
public atomics::detail::base_atomic<T, typename atomics::detail::classify<T>::type, atomics::detail::storage_size_of<T>::value, boost::is_signed<T>::value >
{
private:
typedef T value_type;
typedef atomics::detail::base_atomic<T, typename atomics::detail::classify<T>::type, atomics::detail::storage_size_of<T>::value, boost::is_signed<T>::value > super;
public:
atomic(void) : super() {}
explicit atomic(const value_type & v) : super(v) {}
atomic & operator=(value_type v) volatile
{
super::operator=(v);
return *const_cast<atomic *>(this);
}
private:
atomic(const atomic &) /* =delete */ ;
atomic & operator=(const atomic &) /* =delete */ ;
};
typedef atomic<char> atomic_char;
typedef atomic<unsigned char> atomic_uchar;
typedef atomic<signed char> atomic_schar;
typedef atomic<uint8_t> atomic_uint8_t;
typedef atomic<int8_t> atomic_int8_t;
typedef atomic<unsigned short> atomic_ushort;
typedef atomic<short> atomic_short;
typedef atomic<uint16_t> atomic_uint16_t;
typedef atomic<int16_t> atomic_int16_t;
typedef atomic<unsigned int> atomic_uint;
typedef atomic<int> atomic_int;
typedef atomic<uint32_t> atomic_uint32_t;
typedef atomic<int32_t> atomic_int32_t;
typedef atomic<unsigned long> atomic_ulong;
typedef atomic<long> atomic_long;
typedef atomic<uint64_t> atomic_uint64_t;
typedef atomic<int64_t> atomic_int64_t;
#ifdef BOOST_HAS_LONG_LONG
typedef atomic<boost::ulong_long_type> atomic_ullong;
typedef atomic<boost::long_long_type> atomic_llong;
#endif
typedef atomic<void*> atomic_address;
typedef atomic<bool> atomic_bool;
typedef atomic<wchar_t> atomic_wchar_t;
#if !defined(BOOST_NO_CXX11_CHAR16_T)
typedef atomic<char16_t> atomic_char16_t;
#endif
#if !defined(BOOST_NO_CXX11_CHAR32_T)
typedef atomic<char32_t> atomic_char32_t;
#endif
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
#define BOOST_ATOMIC_FLAG_LOCK_FREE 0
class atomic_flag
{
public:
atomic_flag(void) : v_(false) {}
bool
test_and_set(memory_order order = memory_order_seq_cst)
{
return v_.exchange(true, order);
}
void
clear(memory_order order = memory_order_seq_cst) volatile
{
v_.store(false, order);
}
private:
atomic_flag(const atomic_flag &) /* = delete */ ;
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
atomic<bool> v_;
};
#endif
}
#endif

include/boost/atomic/detail/base.hpp Normal file

@@ -0,0 +1,519 @@
#ifndef BOOST_ATOMIC_DETAIL_BASE_HPP
#define BOOST_ATOMIC_DETAIL_BASE_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Base class definition and fallback implementation.
// To be overridden (through partial specialization) by
// platform implementations.
#include <string.h>
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/lockpool.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
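// Operations common to all atomic objects: implicit conversion to
// value_type, assignment from value_type, and the two-operand
// compare_exchange_* overloads that derive the failure order from
// the success order.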
#define BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
operator value_type(void) volatile const \
{ \
return load(memory_order_seq_cst); \
} \
\
this_type & \
operator=(value_type v) volatile \
{ \
store(v, memory_order_seq_cst); \
return *const_cast<this_type *>(this); \
} \
\
bool \
compare_exchange_strong( \
value_type & expected, \
value_type desired, \
memory_order order = memory_order_seq_cst) volatile \
{ \
return compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); \
} \
\
bool \
compare_exchange_weak( \
value_type & expected, \
value_type desired, \
memory_order order = memory_order_seq_cst) volatile \
{ \
return compare_exchange_weak(expected, desired, order, calculate_failure_order(order)); \
} \
\

#define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
value_type \
operator++(int) volatile \
{ \
return fetch_add(1); \
} \
\
value_type \
operator++(void) volatile \
{ \
return fetch_add(1) + 1; \
} \
\
value_type \
operator--(int) volatile \
{ \
return fetch_sub(1); \
} \
\
value_type \
operator--(void) volatile \
{ \
return fetch_sub(1) - 1; \
} \
\
value_type \
operator+=(difference_type v) volatile \
{ \
return fetch_add(v) + v; \
} \
\
value_type \
operator-=(difference_type v) volatile \
{ \
return fetch_sub(v) - v; \
} \

#define BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
value_type \
operator&=(difference_type v) volatile \
{ \
return fetch_and(v) & v; \
} \
\
value_type \
operator|=(difference_type v) volatile \
{ \
return fetch_or(v) | v; \
} \
\
value_type \
operator^=(difference_type v) volatile \
{ \
return fetch_xor(v) ^ v; \
} \

#define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \

#define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS \
BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
BOOST_ATOMIC_DECLARE_BIT_OPERATORS \

namespace boost {
namespace atomics {
namespace detail {
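// Derive the failure memory order for compare_exchange from the success
// order: the failure order must not contain a release component, so
// acq_rel degrades to acquire and release degrades to relaxed.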
inline memory_order
calculate_failure_order(memory_order order)
{
switch(order) {
case memory_order_acq_rel:
return memory_order_acquire;
case memory_order_release:
return memory_order_relaxed;
default:
return order;
}
}
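// Fallback for arbitrary value types: the value is kept as a raw byte
// buffer and every operation acquires a lock from the lock pool, hence
// is_lock_free() always returns false.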
template<typename T, typename C, unsigned int Size, bool Sign>
class base_atomic {
private:
typedef base_atomic this_type;
typedef T value_type;
typedef lockpool::scoped_lock guard_type;
public:
base_atomic(void) {}
explicit base_atomic(const value_type & v)
{
memcpy(&v_, &v, sizeof(value_type));
}
void
store(value_type const& v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<char *>(v_));
memcpy(const_cast<char *>(v_), &v, sizeof(value_type));
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) volatile const
{
guard_type guard(const_cast<const char *>(v_));
value_type v;
memcpy(&v, const_cast<const char *>(v_), sizeof(value_type));
return v;
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile
{
guard_type guard(const_cast<char *>(v_));
if (memcmp(const_cast<char *>(v_), &expected, sizeof(value_type)) == 0) {
memcpy(const_cast<char *>(v_), &desired, sizeof(value_type));
return true;
} else {
memcpy(&expected, const_cast<char *>(v_), sizeof(value_type));
return false;
}
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
value_type
exchange(value_type const& v, memory_order /*order*/=memory_order_seq_cst) volatile
{
guard_type guard(const_cast<char *>(v_));
value_type tmp;
memcpy(&tmp, const_cast<char *>(v_), sizeof(value_type));
memcpy(const_cast<char *>(v_), &v, sizeof(value_type));
return tmp;
}
bool
is_lock_free(void) const volatile
{
return false;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
char v_[sizeof(value_type)];
};
template<typename T, unsigned int Size, bool Sign>
class base_atomic<T, int, Size, Sign> {
private:
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef lockpool::scoped_lock guard_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
v_ = v;
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) const volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type v = const_cast<const volatile value_type &>(v_);
return v;
}
value_type
exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ = v;
return old;
}
bool
compare_exchange_strong(value_type & expected, value_type desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
if (v_ == expected) {
v_ = desired;
return true;
} else {
expected = v_;
return false;
}
}
bool
compare_exchange_weak(value_type & expected, value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
value_type
fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ += v;
return old;
}
value_type
fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ -= v;
return old;
}
value_type
fetch_and(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ &= v;
return old;
}
value_type
fetch_or(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ |= v;
return old;
}
value_type
fetch_xor(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ ^= v;
return old;
}
bool
is_lock_free(void) const volatile
{
return false;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<typename T, unsigned int Size, bool Sign>
class base_atomic<T *, void *, Size, Sign> {
private:
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
typedef lockpool::scoped_lock guard_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
v_ = v;
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) const volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type v = const_cast<const volatile value_type &>(v_);
return v;
}
value_type
exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ = v;
return old;
}
bool
compare_exchange_strong(value_type & expected, value_type desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
if (v_ == expected) {
v_ = desired;
return true;
} else {
expected = v_;
return false;
}
}
bool
compare_exchange_weak(value_type & expected, value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
value_type fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ += v;
return old;
}
value_type fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ -= v;
return old;
}
bool
is_lock_free(void) const volatile
{
return false;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<unsigned int Size, bool Sign>
class base_atomic<void *, void *, Size, Sign> {
private:
typedef base_atomic this_type;
typedef void * value_type;
typedef lockpool::scoped_lock guard_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
v_ = v;
}
value_type
load(memory_order /*order*/ = memory_order_seq_cst) const volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type v = const_cast<const volatile value_type &>(v_);
return v;
}
value_type
exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
value_type old = v_;
v_ = v;
return old;
}
bool
compare_exchange_strong(value_type & expected, value_type desired,
memory_order /*success_order*/,
memory_order /*failure_order*/) volatile
{
guard_type guard(const_cast<value_type *>(&v_));
if (v_ == expected) {
v_ = desired;
return true;
} else {
expected = v_;
return false;
}
}
bool
compare_exchange_weak(value_type & expected, value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
is_lock_free(void) const volatile
{
return false;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
}
}
}
#endif

include/boost/atomic/detail/cas32strong.hpp Normal file

@@ -0,0 +1,872 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP
#define BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Build 8-, 16- and 32-bit atomic operations from
// a platform_cmpxchg32_strong primitive.
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* integral types */
template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
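// No native exchange primitive is assumed: emulate it with a CAS loop,
// retrying until the value is swapped in atomically.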
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
storage_type expected_s = (storage_type) expected;
storage_type desired_s = (storage_type) desired;
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
expected = (value_type) expected_s;
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
storage_type expected_s = (storage_type) expected;
storage_type desired_s = (storage_type) desired;
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
expected = (value_type) expected_s;
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* pointer types */
template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
typedef base_atomic this_type;
typedef void * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* generic types */
template<typename T, bool Sign>
class base_atomic<T, void, 1, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
// zero-initialize the full 32-bit storage first: platform_cmpxchg32_strong
// compares the whole word, so uninitialized padding bits would make the
// compare-exchange operations fail even when the stored value matches
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
// zero-initialize the full 32-bit storage first: platform_cmpxchg32_strong
// compares the whole word, so uninitialized padding bits would make the
// compare-exchange operations fail even when the stored value matches
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
}
}
}
#endif

include/boost/atomic/detail/cas32weak.hpp Normal file

@@ -0,0 +1,916 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS32WEAK_HPP
#define BOOST_ATOMIC_DETAIL_CAS32WEAK_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
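// Build 8-, 16- and 32-bit atomic operations from
// a platform_cmpxchg32 (weak) primitive.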
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* integral types */
template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
storage_type expected_s = (storage_type) expected;
storage_type desired_s = (storage_type) desired;
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
expected = (value_type) expected_s;
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
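// Build the strong variant from the weak primitive: retry while the
// weak CAS fails spuriously (i.e. while the current value still equals
// "expected"); report failure only on a genuine mismatch.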
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
storage_type expected_s = (storage_type) expected;
storage_type desired_s = (storage_type) desired;
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
expected = (value_type) expected_s;
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* pointer types */
template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
typedef base_atomic this_type;
typedef void * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
const_cast<volatile value_type &>(v_) = v;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg32(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* generic types */
template<typename T, bool Sign>
class base_atomic<T, void, 1, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
platform_fence_before_store(order);
const_cast<volatile storage_type &>(v_) = tmp;
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp = const_cast<const volatile storage_type &>(v_);
platform_fence_after_load(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
for(;;) {
value_type tmp = expected;
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
return true;
if (tmp != expected) {
expected = tmp;
return false;
}
}
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
}
}
}
#endif

View File

@@ -0,0 +1,438 @@
#ifndef BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP
#define BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2011 Helge Bahmann
// Build 64-bit atomic operation from platform_cmpxchg64_strong
// primitive. It is assumed that 64-bit loads/stores are not
// atomic, so they are funnelled through cmpxchg as well.
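//
// For illustration only (not part of this header): given the assumed
// platform_cmpxchg64_strong primitive, a 64-bit load can itself be
// funnelled through CAS roughly as follows:
//
//   inline uint64_t load64_via_cas(volatile uint64_t * ptr)
//   {
//       uint64_t expected = 0;
//       // CAS with desired == expected never changes *ptr, but always
//       // writes the value found there back into "expected".
//       platform_cmpxchg64_strong(expected, expected, ptr);
//       return expected;
//   }
//
// The actual platform_load64/platform_store64 used below are supplied by
// the including platform header.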
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/* integral types */
template<typename T, bool Sign>
class base_atomic<T, int, 8, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
platform_store64(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = platform_load64(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* pointer types */
template<bool Sign>
class base_atomic<void *, void *, 8, Sign> {
typedef base_atomic this_type;
typedef void * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
platform_store64(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = platform_load64(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 8, Sign> {
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
platform_fence_before_store(order);
platform_store64(v, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = platform_load64(&v_);
platform_fence_after_load(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
}
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
/* generic types */
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint64_t storage_type;
public:
explicit base_atomic(value_type const& v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type const& value, memory_order order = memory_order_seq_cst) volatile
{
storage_type value_s = 0;
memcpy(&value_s, &value, sizeof(value_s));
platform_fence_before_store(order);
platform_store64(value_s, &v_);
platform_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type value_s = platform_load64(&v_);
platform_fence_after_load(order);
value_type value;
memcpy(&value, &value_s, sizeof(value_s));
return value;
}
value_type
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
{
value_type original = load(memory_order_relaxed);
do {
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
bool
compare_exchange_strong(
value_type & expected,
value_type const& desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
platform_fence_before(success_order);
bool success = platform_cmpxchg64_strong(expected_s, desired_s, &v_);
if (success) {
platform_fence_after(success_order);
} else {
platform_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
}
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
}
}
}
#endif

View File

@@ -0,0 +1,54 @@
#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP
#define BOOST_ATOMIC_DETAIL_CONFIG_HPP
// Copyright (c) 2012 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/config.hpp>
#if (defined(_MSC_VER) && (_MSC_VER >= 1020)) || defined(__GNUC__) || defined(BOOST_CLANG) || defined(BOOST_INTEL) || defined(__COMO__) || defined(__DMC__)
#define BOOST_ATOMIC_HAS_PRAGMA_ONCE
#endif
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
///////////////////////////////////////////////////////////////////////////////
// Set up dll import/export options
#if (defined(BOOST_ATOMIC_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && \
!defined(BOOST_ATOMIC_STATIC_LINK)
#if defined(BOOST_ATOMIC_SOURCE)
#define BOOST_ATOMIC_DECL BOOST_SYMBOL_EXPORT
#define BOOST_ATOMIC_BUILD_DLL
#else
#define BOOST_ATOMIC_DECL BOOST_SYMBOL_IMPORT
#endif
#endif // building a shared library
#ifndef BOOST_ATOMIC_DECL
#define BOOST_ATOMIC_DECL
#endif
///////////////////////////////////////////////////////////////////////////////
// Auto library naming
#if !defined(BOOST_ATOMIC_SOURCE) && !defined(BOOST_ALL_NO_LIB) && \
!defined(BOOST_ATOMIC_NO_LIB)
#define BOOST_LIB_NAME boost_atomic
// tell the auto-link code to select a dll when required:
#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_ATOMIC_DYN_LINK)
#define BOOST_DYN_LINK
#endif
#include <boost/config/auto_link.hpp>
#endif // auto-linking disabled
#endif

View File

@@ -0,0 +1,359 @@
#ifndef BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP
#define BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/builder.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
/*
Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
(HP OpenVMS systems documentation) and the Alpha reference manual.
*/
/*
NB: The most natural thing would be to write the increment/decrement
operators along the following lines:
__asm__ __volatile__(
"1: ldl_l %0,%1 \n"
"addl %0,1,%0 \n"
"stl_c %0,%1 \n"
"beq %0,1b\n"
: "=&b" (tmp)
: "m" (value)
: "cc"
);
However, according to the comments on the HP website and matching
comments in the Linux kernel sources, this defies branch prediction,
as the CPU assumes that backward branches are always taken; so we
instead copy the trick from the Linux kernel and introduce a forward
branch and back again.
I have, however, had a hard time measuring the difference between
the two versions in microbenchmarks -- I am leaving it in nevertheless
as it apparently does not hurt either.
*/
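/*
For reference, the transformed loop (matching fetch_inc below) reads:
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"addl %0, 1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (value)
:
);
The rare failure path jumps forward into a separate subsection and only
from there branches back, so the conditional branch is predicted
not-taken, which matches the common case.
*/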
namespace boost {
namespace atomics {
namespace detail {
inline void fence_before(memory_order order)
{
switch(order) {
case memory_order_consume:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("mb" ::: "memory");
default:;
}
}
inline void fence_after(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("mb" ::: "memory");
default:;
}
}
template<>
inline void platform_atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_consume:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("mb" ::: "memory");
default:;
}
}
template<typename T>
class atomic_alpha_32 {
public:
typedef T integral_type;
explicit atomic_alpha_32(T v) : i(v) {}
atomic_alpha_32() {}
T load(memory_order order=memory_order_seq_cst) const volatile
{
T v=*reinterpret_cast<volatile const int *>(&i);
fence_after(order);
return v;
}
void store(T v, memory_order order=memory_order_seq_cst) volatile
{
fence_before(order);
*reinterpret_cast<volatile int *>(&i)=(int)v;
}
bool compare_exchange_weak(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile
{
fence_before(success_order);
int current, success;
__asm__ __volatile__(
"1: ldl_l %2, %4\n"
"cmpeq %2, %0, %3\n"
"mov %2, %0\n"
"beq %3, 3f\n"
"stl_c %1, %4\n"
"2:\n"
".subsection 2\n"
"3: mov %3, %1\n"
"br 2b\n"
".previous\n"
: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
: "m" (i)
:
);
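// At this point stl_c has written its status into "desired" (1 on
// success, 0 on spurious failure); the failure path in subsection 2
// routes the cmpeq result (0) there as well, so "desired" now holds the
// overall success flag. The 64-bit variant below uses the same trick.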
if (desired) fence_after(success_order);
else fence_after(failure_order);
return desired;
}
bool is_lock_free(void) const volatile {return true;}
protected:
inline T fetch_add_var(T c, memory_order order) volatile
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"addl %0, %3, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i), "r" (c)
:
);
fence_after(order);
return original;
}
inline T fetch_inc(memory_order order) volatile
{
fence_before(order);
int original, modified;
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"addl %0, 1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
inline T fetch_dec(memory_order order) volatile
{
fence_before(order);
int original, modified;
__asm__ __volatile__(
"1: ldl_l %0, %2\n"
"subl %0, 1, %1\n"
"stl_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
private:
T i;
};
template<typename T>
class atomic_alpha_64 {
public:
typedef T integral_type;
explicit atomic_alpha_64(T v) : i(v) {}
atomic_alpha_64() {}
T load(memory_order order=memory_order_seq_cst) const volatile
{
T v=*reinterpret_cast<volatile const T *>(&i);
fence_after(order);
return v;
}
void store(T v, memory_order order=memory_order_seq_cst) volatile
{
fence_before(order);
*reinterpret_cast<volatile T *>(&i)=v;
}
bool compare_exchange_weak(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile
{
fence_before(success_order);
int current, success;
__asm__ __volatile__(
"1: ldq_l %2, %4\n"
"cmpeq %2, %0, %3\n"
"mov %2, %0\n"
"beq %3, 3f\n"
"stq_c %1, %4\n"
"2:\n"
".subsection 2\n"
"3: mov %3, %1\n"
"br 2b\n"
".previous\n"
: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
: "m" (i)
:
);
if (desired) fence_after(success_order);
else fence_after(failure_order);
return desired;
}
bool is_lock_free(void) const volatile {return true;}
protected:
inline T fetch_add_var(T c, memory_order order) volatile
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldq_l %0, %2\n"
"addq %0, %3, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i), "r" (c)
:
);
fence_after(order);
return original;
}
inline T fetch_inc(memory_order order) volatile
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldq_l %0, %2\n"
"addq %0, 1, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
inline T fetch_dec(memory_order order) volatile
{
fence_before(order);
T original, modified;
__asm__ __volatile__(
"1: ldq_l %0, %2\n"
"subq %0, 1, %1\n"
"stq_c %1, %2\n"
"beq %1, 2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous\n"
: "=&r" (original), "=&r" (modified)
: "m" (i)
:
);
fence_after(order);
return original;
}
private:
T i;
};
template<typename T>
class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > {
public:
typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > {
public:
typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
}
}
}
#endif

View File

@@ -0,0 +1,250 @@
#ifndef BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP
#define BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2009 Helge Bahmann
// Copyright (c) 2009 Phil Endecott
// ARM Code by Phil Endecott, based on other architectures.
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
// From the ARM Architecture Reference Manual for architecture v6:
//
// LDREX{<cond>} <Rd>, [<Rn>]
// <Rd> Specifies the destination register for the memory word addressed by <Rn>
// <Rn> Specifies the register containing the address.
//
// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
// <Rd> Specifies the destination register for the returned status value.
// 0 if the operation updates memory
// 1 if the operation fails to update memory
// <Rm> Specifies the register containing the word to be stored to memory.
// <Rn> Specifies the register containing the address.
// Rd must not be the same register as Rm or Rn.
//
// ARM v7 is like ARM v6 plus:
// There are half-word and byte versions of the LDREX and STREX instructions,
// LDREXH, LDREXB, STREXH and STREXB.
// There are also double-word versions, LDREXD and STREXD.
// (Actually it looks like these are available from version 6k onwards.)
// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
// I think you can supply an immediate offset to the address.
//
// A memory barrier is effected using a "co-processor 15" instruction,
// though a separate assembler mnemonic is available for it in v7.
namespace boost {
namespace atomics {
namespace detail {
// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
// doesn't include all instructions and in particular it doesn't include the co-processor
// instruction used for the memory barrier or the load-locked/store-conditional
// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
// asm blocks with code to temporarily change to ARM mode.
//
// You can only change between ARM and Thumb modes when branching using the bx instruction.
// bx takes an address specified in a register. The least significant bit of the address
// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
// A temporary register is needed for the address and is passed as an argument to these
// macros. It must be one of the "low" registers accessible to Thumb code, specified
// using the "l" attribute in the asm statement.
//
// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal
// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
// so they can always be present.
#if defined(__thumb__) && !defined(__ARM_ARCH_7A__)
// FIXME also other v7 variants.
#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: "
#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: "
#else
// The tmpreg is wasted in this case, which is non-optimal.
#define BOOST_ATOMIC_ARM_ASM_START(TMPREG)
#define BOOST_ATOMIC_ARM_ASM_END(TMPREG)
#endif
#if defined(__ARM_ARCH_7A__)
// FIXME ditto.
#define BOOST_ATOMIC_ARM_DMB "dmb\n"
#else
#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n"
#endif
inline void
arm_barrier(void)
{
int brtmp;
__asm__ __volatile__ (
BOOST_ATOMIC_ARM_ASM_START(%0)
BOOST_ATOMIC_ARM_DMB
BOOST_ATOMIC_ARM_ASM_END(%0)
: "=&l" (brtmp) :: "memory"
);
}
inline void
platform_fence_before(memory_order order)
{
switch(order) {
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
case memory_order_consume:
default:;
}
}
inline void
platform_fence_after(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
default:;
}
}
inline void
platform_fence_before_store(memory_order order)
{
platform_fence_before(order);
}
inline void
platform_fence_after_store(memory_order order)
{
if (order == memory_order_seq_cst)
arm_barrier();
}
inline void
platform_fence_after_load(memory_order order)
{
platform_fence_after(order);
}
template<typename T>
inline bool
platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
{
int success;
int tmp;
__asm__ (
BOOST_ATOMIC_ARM_ASM_START(%2)
"mov %1, #0\n" // success = 0
"ldrex %0, %3\n" // expected' = *(&i)
"teq %0, %4\n" // flags = expected'==expected
"ittt eq\n"
"strexeq %2, %5, %3\n" // if (flags.equal) *(&i) = desired, tmp = !OK
"teqeq %2, #0\n" // if (flags.equal) flags = tmp==0
"moveq %1, #1\n" // if (flags.equal) success = 1
BOOST_ATOMIC_ARM_ASM_END(%2)
: "=&r" (expected), // %0
"=&r" (success), // %1
"=&l" (tmp), // %2
"+Q" (*ptr) // %3
: "r" (expected), // %4
"r" (desired) // %5
: "cc"
);
return success;
}
}
}
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
atomics::detail::arm_barrier();
default:;
}
}
#define BOOST_ATOMIC_SIGNAL_FENCE 2
inline void
atomic_signal_fence(memory_order)
{
__asm__ __volatile__ ("" ::: "memory");
}
class atomic_flag {
private:
atomic_flag(const atomic_flag &) /* = delete */ ;
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
uint32_t v_;
public:
atomic_flag(void) : v_(false) {}
void
clear(memory_order order = memory_order_seq_cst) volatile
{
atomics::detail::platform_fence_before_store(order);
const_cast<volatile uint32_t &>(v_) = 0;
atomics::detail::platform_fence_after_store(order);
}
bool
test_and_set(memory_order order = memory_order_seq_cst) volatile
{
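// If the flag is observed set, return immediately; otherwise CAS it from
// 0 to 1. Either way "expected" ends up holding the previous value, which
// is exactly what test_and_set must return.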
atomics::detail::platform_fence_before(order);
uint32_t expected = v_;
do {
if (expected == 1)
break;
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
atomics::detail::platform_fence_after(order);
return expected;
}
};
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
}
#undef BOOST_ATOMIC_ARM_ASM_START
#undef BOOST_ATOMIC_ARM_ASM_END
#include <boost/atomic/detail/base.hpp>
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#include <boost/atomic/detail/cas32weak.hpp>
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
#endif

View File

@@ -0,0 +1,155 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Use the GNU builtin __sync_val_compare_and_swap to build
// atomic operations for 32-bit and smaller types.
#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_relaxed:
break;
case memory_order_release:
case memory_order_consume:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__sync_synchronize();
break;
}
}
namespace atomics {
namespace detail {
inline void
platform_fence_before(memory_order)
{
/* empty, as compare_and_swap is synchronizing already */
}
inline void
platform_fence_after(memory_order)
{
/* empty, as compare_and_swap is synchronizing already */
}
inline void
platform_fence_before_store(memory_order order)
{
switch(order) {
case memory_order_relaxed:
case memory_order_acquire:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__sync_synchronize();
break;
}
}
inline void
platform_fence_after_store(memory_order order)
{
if (order == memory_order_seq_cst)
__sync_synchronize();
}
inline void
platform_fence_after_load(memory_order order)
{
switch(order) {
case memory_order_relaxed:
case memory_order_release:
break;
case memory_order_consume:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__sync_synchronize();
break;
}
}
template<typename T>
inline bool
platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
{
T found = __sync_val_compare_and_swap(ptr, expected, desired);
bool success = (found == expected);
expected = found;
return success;
}
class atomic_flag {
private:
atomic_flag(const atomic_flag &) /* = delete */ ;
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
uint32_t v_;
public:
atomic_flag(void) : v_(false) {}
void
clear(memory_order order = memory_order_seq_cst) volatile
{
atomics::detail::platform_fence_before_store(order);
const_cast<volatile uint32_t &>(v_) = 0;
atomics::detail::platform_fence_after_store(order);
}
bool
test_and_set(memory_order order = memory_order_seq_cst) volatile
{
atomics::detail::platform_fence_before(order);
uint32_t expected = v_;
do {
if (expected == 1)
break;
} while (!atomics::detail::platform_cmpxchg32_strong(expected, (uint32_t)1, &v_));
atomics::detail::platform_fence_after(order);
return expected;
}
};
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
}
}
}
#include <boost/atomic/detail/base.hpp>
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE (sizeof(long) <= 4 ? 2 : 0)
#define BOOST_ATOMIC_LLONG_LOCK_FREE (sizeof(long long) <= 4 ? 2 : 0)
#define BOOST_ATOMIC_POINTER_LOCK_FREE (sizeof(void *) <= 4 ? 2 : 0)
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#include <boost/atomic/detail/cas32strong.hpp>
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,199 @@
#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/builder.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
/* fallback implementation for various compilation targets;
this is *not* efficient, particularly because all operations
are fully fenced (full memory barriers before and after
each operation) */
#if defined(__GNUC__)
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired)
{
return __sync_val_compare_and_swap_4(ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
#if defined(__amd64__) || defined(__i686__)
inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
return __sync_val_compare_and_swap_8(ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#endif
}}}
#elif defined(__ICL) || defined(_MSC_VER)
#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#endif
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired)
{
return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
#if defined(_WIN64)
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
return _InterlockedCompareExchange64(ptr, desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#endif
}}}
#elif (defined(__ICC) || defined(__ECC))
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
return _InterlockedCompareExchange((void*)ptr, desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
#if defined(__x86_64)
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
return cas64<int>(ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#elif defined(__ECC) //IA-64 version
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
return _InterlockedCompareExchange64((void*)ptr, desired, expected);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
#endif
}}}
#elif (defined(__SUNPRO_CC) && defined(__sparc))
#include <sys/atomic.h>
namespace boost { namespace atomics { namespace detail {
inline int32_t
fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
return atomic_cas_32((volatile unsigned int*)ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS32 1
/* FIXME: check for 64 bit mode */
inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired);
}
#define BOOST_ATOMIC_HAVE_CAS64 1
}}}
#endif
namespace boost {
namespace atomics {
namespace detail {
#ifdef BOOST_ATOMIC_HAVE_CAS32
template<typename T>
class atomic_generic_cas32 {
private:
typedef atomic_generic_cas32 this_type;
public:
explicit atomic_generic_cas32(T v) : i((int32_t)v) {}
atomic_generic_cas32() {}
T load(memory_order order=memory_order_seq_cst) const volatile
{
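// A CAS of "expected" against itself never changes the stored value but
// refreshes "expected" with the value actually found, which turns the
// fully fenced compare-and-swap into a usable load.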
T expected=(T)i;
do { } while(!const_cast<this_type *>(this)->compare_exchange_weak(expected, expected, order, memory_order_relaxed));
return expected;
}
void store(T v, memory_order order=memory_order_seq_cst) volatile
{
exchange(v);
}
bool compare_exchange_strong(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile
{
T found;
found=(T)fenced_compare_exchange_strong_32(&i, (int32_t)expected, (int32_t)desired);
bool success=(found==expected);
expected=found;
return success;
}
bool compare_exchange_weak(
T &expected,
T desired,
memory_order success_order,
memory_order failure_order) volatile
{
return compare_exchange_strong(expected, desired, success_order, failure_order);
}
T exchange(T r, memory_order order=memory_order_seq_cst) volatile
{
T expected=(T)i;
do { } while(!compare_exchange_weak(expected, r, order, memory_order_relaxed));
return expected;
}
bool is_lock_free(void) const volatile {return true;}
typedef T integral_type;
private:
mutable int32_t i;
};
template<typename T>
class platform_atomic_integral<T, 4> : public build_atomic_from_exchange<atomic_generic_cas32<T> > {
public:
typedef build_atomic_from_exchange<atomic_generic_cas32<T> > super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> {
public:
typedef build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
template<typename T>
class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> {
public:
typedef build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
#endif
} } }
#endif

View File

@@ -0,0 +1,206 @@
#ifndef BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
// Copyright (c) 2009 Helge Bahmann
// Copyright (c) 2012 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(_WIN32_WCE)
#include <boost/detail/interlocked.hpp>
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) BOOST_INTERLOCKED_EXCHANGE(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) BOOST_INTERLOCKED_EXCHANGE_ADD(dest, addend)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) BOOST_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
#elif defined(_MSC_VER)
#include <intrin.h>
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedExchange)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
#if _MSC_VER >= 1400
#pragma intrinsic(_InterlockedAnd)
#pragma intrinsic(_InterlockedOr)
#pragma intrinsic(_InterlockedXor)
#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
#endif // _MSC_VER >= 1400
#if _MSC_VER >= 1600
// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.
// Note that for each bit count these macros must be either all defined or all not defined.
// Otherwise atomic<> operations will be implemented inconsistently.
#pragma intrinsic(_InterlockedCompareExchange8)
#pragma intrinsic(_InterlockedExchangeAdd8)
#pragma intrinsic(_InterlockedExchange8)
#pragma intrinsic(_InterlockedAnd8)
#pragma intrinsic(_InterlockedOr8)
#pragma intrinsic(_InterlockedXor8)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))
#pragma intrinsic(_InterlockedCompareExchange16)
#pragma intrinsic(_InterlockedExchangeAdd16)
#pragma intrinsic(_InterlockedExchange16)
#pragma intrinsic(_InterlockedAnd16)
#pragma intrinsic(_InterlockedOr16)
#pragma intrinsic(_InterlockedXor16)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))
#endif // _MSC_VER >= 1600
#if defined(_M_AMD64) || defined(_M_IA64)
#pragma intrinsic(_InterlockedCompareExchange64)
#pragma intrinsic(_InterlockedExchangeAdd64)
#pragma intrinsic(_InterlockedExchange64)
#pragma intrinsic(_InterlockedAnd64)
#pragma intrinsic(_InterlockedOr64)
#pragma intrinsic(_InterlockedXor64)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
#pragma intrinsic(_InterlockedCompareExchangePointer)
#pragma intrinsic(_InterlockedExchangePointer)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset))
#else // defined(_M_AMD64)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
#endif // defined(_M_AMD64)
#else // defined(_MSC_VER)
#if defined(BOOST_USE_WINDOWS_H)
#include <windows.h>
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
#if defined(_WIN64)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
#else // defined(_WIN64)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
#endif // defined(_WIN64)
#else // defined(BOOST_USE_WINDOWS_H)
#if defined(__MINGW64__)
#define BOOST_ATOMIC_INTERLOCKED_IMPORT
#else
#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)
#endif
namespace boost {
namespace atomics {
namespace detail {
extern "C" {
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
#if defined(_WIN64)
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);
BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
#else // defined(_WIN64)
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
#endif // defined(_WIN64)
} // extern "C"
} // namespace detail
} // namespace atomics
} // namespace boost
#undef BOOST_ATOMIC_INTERLOCKED_IMPORT
#endif // defined(BOOST_USE_WINDOWS_H)
#endif // defined(_MSC_VER)
#endif

View File

@@ -0,0 +1,187 @@
#ifndef BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP
#define BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Copyright (c) 2009, 2011 Helge Bahmann
// Copyright (c) 2009 Phil Endecott
// Linux-specific code by Phil Endecott
// Different ARM processors have different atomic instructions. In particular,
// architecture versions before v6 (which are still in widespread use, e.g. the
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
// On Linux the kernel provides some support that lets us abstract away from
// these differences: it provides emulated CAS and barrier functions at special
// addresses that are guaranteed not to be interrupted by the kernel. Using
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
// return the found value on failure, and we have to manually load this value
// after the emulated CAS reports failure. This in turn introduces a race
// between the CAS failing (due to the "wrong" value being found) and subsequently
// loading (which might turn up the "right" value). From an application's
// point of view this looks like "spurious failure", and therefore the
// emulated CAS is only good enough to provide compare_exchange_weak
// semantics.
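//
// In practice this is sufficient: every caller of the weak CAS retries in
// a loop anyway, following the pattern used throughout this library, e.g.
// (with v some atomic<uint32_t>):
//
//   uint32_t expected = v.load(memory_order_relaxed);
//   do {
//   } while (!v.compare_exchange_weak(expected, expected + 1,
//            memory_order_seq_cst, memory_order_relaxed));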
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
inline void
arm_barrier(void)
{
void (*kernel_dmb)(void) = (void (*)(void)) 0xffff0fa0;
kernel_dmb();
}
inline void
platform_fence_before(memory_order order)
{
switch(order) {
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
case memory_order_consume:
default:;
}
}
inline void
platform_fence_after(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
arm_barrier();
default:;
}
}
inline void
platform_fence_before_store(memory_order order)
{
platform_fence_before(order);
}
inline void
platform_fence_after_store(memory_order order)
{
if (order == memory_order_seq_cst)
arm_barrier();
}
inline void
platform_fence_after_load(memory_order order)
{
platform_fence_after(order);
}
template<typename T>
inline bool
platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
{
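// 0xffff0fc0 is the fixed address of the kernel-provided __kernel_cmpxchg
// helper; it returns zero iff *ptr contained the expected value and was
// updated to the desired one.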
typedef T (*kernel_cmpxchg32_t)(T oldval, T newval, volatile T * ptr);
if (((kernel_cmpxchg32_t) 0xffff0fc0)(expected, desired, ptr) == 0) {
return true;
} else {
expected = *ptr;
return false;
}
}
}
}
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
atomics::detail::arm_barrier();
default:;
}
}
#define BOOST_ATOMIC_SIGNAL_FENCE 2
inline void
atomic_signal_fence(memory_order)
{
__asm__ __volatile__ ("" ::: "memory");
}
class atomic_flag {
private:
atomic_flag(const atomic_flag &) /* = delete */ ;
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
uint32_t v_;
public:
atomic_flag(void) : v_(false) {}
void
clear(memory_order order = memory_order_seq_cst) volatile
{
atomics::detail::platform_fence_before_store(order);
const_cast<volatile uint32_t &>(v_) = 0;
atomics::detail::platform_fence_after_store(order);
}
bool
test_and_set(memory_order order = memory_order_seq_cst) volatile
{
atomics::detail::platform_fence_before(order);
uint32_t expected = v_;
do {
if (expected == 1)
break;
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
atomics::detail::platform_fence_after(order);
return expected;
}
};
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
}
#include <boost/atomic/detail/base.hpp>
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
#include <boost/atomic/detail/cas32weak.hpp>
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
#endif

View File

@@ -0,0 +1,92 @@
#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/config.hpp>
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
#include <boost/thread/mutex.hpp>
#endif
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
class lockpool
{
public:
typedef mutex lock_type;
class scoped_lock
{
private:
lock_type& mtx_;
scoped_lock(scoped_lock const&) /* = delete */;
scoped_lock& operator=(scoped_lock const&) /* = delete */;
public:
explicit
scoped_lock(const volatile void * addr) : mtx_(get_lock_for(addr))
{
mtx_.lock();
}
~scoped_lock()
{
mtx_.unlock();
}
};
private:
static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
};
#else
class lockpool
{
public:
typedef atomic_flag lock_type;
class scoped_lock
{
private:
atomic_flag& flag_;
scoped_lock(const scoped_lock &) /* = delete */;
scoped_lock& operator=(const scoped_lock &) /* = delete */;
public:
explicit
scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr))
{
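// simple test-and-set spinlock: loop until the flag was previously clear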
do {
} while (flag_.test_and_set(memory_order_acquire));
}
~scoped_lock(void)
{
flag_.clear(memory_order_release);
}
};
private:
static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
};
#endif
}
}
}
#endif


@@ -0,0 +1,62 @@
#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP
#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Platform selection file
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#include <boost/atomic/detail/gcc-x86.hpp>
#elif 0 && defined(__GNUC__) && defined(__alpha__) /* currently does not work correctly */
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/gcc-alpha.hpp>
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
#include <boost/atomic/detail/gcc-ppc.hpp>
// This list of ARM architecture versions comes from Apple's arm/arch.h header.
// I don't know how complete it is.
#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_7A__))
#include <boost/atomic/detail/gcc-armv6plus.hpp>
#elif defined(__linux__) && defined(__arm__)
#include <boost/atomic/detail/linux-arm.hpp>
#elif defined(__GNUC__) && defined(__sparc_v9__)
#include <boost/atomic/detail/gcc-sparcv9.hpp>
#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
#include <boost/atomic/detail/windows.hpp>
#elif 0 && defined(__GNUC__) /* currently does not work correctly */
#include <boost/atomic/detail/base.hpp>
#include <boost/atomic/detail/gcc-cas.hpp>
#else
#include <boost/atomic/detail/base.hpp>
#endif
#endif


@@ -0,0 +1,45 @@
#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/config.hpp>
#include <boost/type_traits/is_integral.hpp>
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template<typename T, bool IsInt = boost::is_integral<T>::value>
struct classify
{
typedef void type;
};
template<typename T>
struct classify<T, true> {typedef int type;};
template<typename T>
struct classify<T*, false> {typedef void* type;};
template<typename T>
struct storage_size_of
{
enum _
{
size = sizeof(T),
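// round 3 up to 4 and 5..7 up to 8 so that the storage used for T
// matches a native integer width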
value = (size == 3 ? 4 : (size == 5 || size == 6 || size == 7 ? 8 : size))
};
};
}}}
#endif

File diff suppressed because it is too large

13
index.html Normal file

@@ -0,0 +1,13 @@
<html>
<head>
<meta http-equiv="refresh" content="0; URL=../../doc/html/atomic.html">
</head>
<body>
Automatic redirection failed, please go to
<a href="../../doc/html/atomic.html">../../doc/html/atomic.html</a> &nbsp;<hr>
<p>&copy; Copyright Beman Dawes, 2001</p>
<p>Distributed under the Boost Software License, Version 1.0. (See accompanying
file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy
at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
</body>
</html>

24
src/lockpool.cpp Normal file

@@ -0,0 +1,24 @@
#include <boost/atomic.hpp>
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
namespace boost {
namespace atomics {
namespace detail {
static lockpool::lock_type lock_pool_[41];
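// The pool size 41 is prime, so the modulus below spreads objects laid
// out at power-of-two strides across different locks.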
// NOTE: This function must NOT be inlined. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr)
{
std::size_t index = reinterpret_cast<std::size_t>(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
return lock_pool_[index];
}
}
}
}

25
test/Jamfile.v2 Normal file

@@ -0,0 +1,25 @@
# Boost.Atomic Library test Jamfile
#
# Copyright (c) 2011 Helge Bahmann
# Copyright (c) 2012 Tim Blechmann
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import testing ;
project boost/atomic/test
: requirements
<threading>multi
<library>../../thread/build//boost_thread
<library>/boost/atomic//boost_atomic/<link>static
;
test-suite atomic
: [ run native_api.cpp ]
[ run fallback_api.cpp ]
[ run atomicity.cpp ]
[ run ordering.cpp ]
[ run lockfree.cpp ]
;

325
test/api_test_helpers.hpp Normal file

@@ -0,0 +1,325 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_API_TEST_HELPERS_HPP
#define BOOST_ATOMIC_API_TEST_HELPERS_HPP
#include <string.h> // memcmp, used by large_struct below
/* provide helpers that exercise whether the API
functions of "boost::atomic" provide the correct
operational semantics in the case of sequential
execution */
static void
test_flag_api(void)
{
boost::atomic_flag f;
BOOST_CHECK( !f.test_and_set() );
BOOST_CHECK( f.test_and_set() );
f.clear();
BOOST_CHECK( !f.test_and_set() );
}
template<typename T>
void test_base_operators(T value1, T value2, T value3)
{
/* explicit load/store */
{
boost::atomic<T> a(value1);
BOOST_CHECK( a.load() == value1 );
}
{
boost::atomic<T> a(value1);
a.store(value2);
BOOST_CHECK( a.load() == value2 );
}
/* overloaded assignment/conversion */
{
boost::atomic<T> a(value1);
BOOST_CHECK( value1 == a );
}
{
boost::atomic<T> a;
a = value2;
BOOST_CHECK( value2 == a );
}
/* exchange-type operators */
{
boost::atomic<T> a(value1);
T n = a.exchange(value2);
BOOST_CHECK( a.load() == value2 && n == value1 );
}
{
boost::atomic<T> a(value1);
T expected = value1;
bool success = a.compare_exchange_strong(expected, value3);
BOOST_CHECK( success );
BOOST_CHECK( a.load() == value3 && expected == value1 );
}
{
boost::atomic<T> a(value1);
T expected = value2;
bool success = a.compare_exchange_strong(expected, value3);
BOOST_CHECK( !success );
BOOST_CHECK( a.load() == value1 && expected == value1 );
}
{
boost::atomic<T> a(value1);
T expected;
bool success;
do {
expected = value1;
success = a.compare_exchange_weak(expected, value3);
} while(!success);
BOOST_CHECK( success );
BOOST_CHECK( a.load() == value3 && expected == value1 );
}
{
boost::atomic<T> a(value1);
T expected;
bool success;
do {
expected = value2;
success = a.compare_exchange_weak(expected, value3);
if (expected != value2)
break;
} while(!success);
BOOST_CHECK( !success );
BOOST_CHECK( a.load() == value1 && expected == value1 );
}
}
template<typename T, typename D>
void test_additive_operators(T value, D delta)
{
/* note: the tests explicitly cast the result of any addition
to the type to be tested to force truncation of the result to
the correct range in case of overflow */
/* explicit add/sub */
{
boost::atomic<T> a(value);
T n = a.fetch_add(delta);
BOOST_CHECK( a.load() == T(value + delta) );
BOOST_CHECK( n == value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_sub(delta);
BOOST_CHECK( a.load() == T(value - delta) );
BOOST_CHECK( n == value );
}
/* overloaded modify/assign */
{
boost::atomic<T> a(value);
T n = (a += delta);
BOOST_CHECK( a.load() == T(value + delta) );
BOOST_CHECK( n == T(value + delta) );
}
{
boost::atomic<T> a(value);
T n = (a -= delta);
BOOST_CHECK( a.load() == T(value - delta) );
BOOST_CHECK( n == T(value - delta) );
}
/* overloaded increment/decrement */
{
boost::atomic<T> a(value);
T n = a++;
BOOST_CHECK( a.load() == T(value + 1) );
BOOST_CHECK( n == value );
}
{
boost::atomic<T> a(value);
T n = ++a;
BOOST_CHECK( a.load() == T(value + 1) );
BOOST_CHECK( n == T(value + 1) );
}
{
boost::atomic<T> a(value);
T n = a--;
BOOST_CHECK( a.load() == T(value - 1) );
BOOST_CHECK( n == value );
}
{
boost::atomic<T> a(value);
T n = --a;
BOOST_CHECK( a.load() == T(value - 1) );
BOOST_CHECK( n == T(value - 1) );
}
}
template<typename T>
void test_additive_wrap(T value)
{
{
boost::atomic<T> a(value);
T n = a.fetch_add(1) + 1;
BOOST_CHECK( a.compare_exchange_strong(n, n) );
}
{
boost::atomic<T> a(value);
T n = a.fetch_sub(1) - 1;
BOOST_CHECK( a.compare_exchange_strong(n, n) );
}
}
template<typename T>
void test_bit_operators(T value, T delta)
{
/* explicit and/or/xor */
{
boost::atomic<T> a(value);
T n = a.fetch_and(delta);
BOOST_CHECK( a.load() == T(value & delta) );
BOOST_CHECK( n == value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_or(delta);
BOOST_CHECK( a.load() == T(value | delta) );
BOOST_CHECK( n == value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_xor(delta);
BOOST_CHECK( a.load() == T(value ^ delta) );
BOOST_CHECK( n == value );
}
/* overloaded modify/assign */
{
boost::atomic<T> a(value);
T n = (a &= delta);
BOOST_CHECK( a.load() == T(value & delta) );
BOOST_CHECK( n == T(value & delta) );
}
{
boost::atomic<T> a(value);
T n = (a |= delta);
BOOST_CHECK( a.load() == T(value | delta) );
BOOST_CHECK( n == T(value | delta) );
}
{
boost::atomic<T> a(value);
T n = (a ^= delta);
BOOST_CHECK( a.load() == T(value ^ delta) );
BOOST_CHECK( n == T(value ^ delta) );
}
}
template<typename T>
void test_integral_api(void)
{
BOOST_CHECK( sizeof(boost::atomic<T>) >= sizeof(T));
test_base_operators<T>(42, 43, 44);
test_additive_operators<T, T>(42, 17);
test_bit_operators<T>((T)0x5f5f5f5f5f5f5f5fULL, (T)0xf5f5f5f5f5f5f5f5ULL);
/* test for unsigned overflow/underflow */
test_additive_operators<T, T>((T)-1, 1);
test_additive_operators<T, T>(0, 1);
/* test for signed overflow/underflow */
test_additive_operators<T, T>(((T)-1) >> (sizeof(T) * 8 - 1), 1);
test_additive_operators<T, T>(1 + (((T)-1) >> (sizeof(T) * 8 - 1)), 1);
test_additive_wrap<T>(0);
test_additive_wrap<T>((T) -1);
test_additive_wrap<T>(-1LL << (sizeof(T) * 8 - 1));
test_additive_wrap<T>(~ (-1LL << (sizeof(T) * 8 - 1)));
}
template<typename T>
void test_pointer_api(void)
{
BOOST_CHECK( sizeof(boost::atomic<T *>) >= sizeof(T *));
BOOST_CHECK( sizeof(boost::atomic<void *>) >= sizeof(T *));
T values[3];
test_base_operators<T*>(&values[0], &values[1], &values[2]);
test_additive_operators<T*>(&values[1], 1);
test_base_operators<void*>(&values[0], &values[1], &values[2]);
boost::atomic<void *> ptr;
boost::atomic<intptr_t> integral;
BOOST_CHECK( ptr.is_lock_free() == integral.is_lock_free() );
}
enum test_enum {
foo, bar, baz
};
static void
test_enum_api(void)
{
test_base_operators(foo, bar, baz);
}
template<typename T>
struct test_struct {
typedef T value_type;
value_type i;
inline bool operator==(const test_struct & c) const {return i == c.i;}
inline bool operator!=(const test_struct & c) const {return i != c.i;}
};
template<typename T>
void
test_struct_api(void)
{
T a = {1}, b = {2}, c = {3};
test_base_operators(a, b, c);
{
boost::atomic<T> sa;
boost::atomic<typename T::value_type> si;
BOOST_CHECK( sa.is_lock_free() == si.is_lock_free() );
}
}
struct large_struct {
long data[64];
inline bool operator==(const large_struct & c) const
{
return memcmp(data, &c.data, sizeof(data)) == 0;
}
inline bool operator!=(const large_struct & c) const
{
return memcmp(data, &c.data, sizeof(data)) != 0;
}
};
static void
test_large_struct_api(void)
{
large_struct a = {{1}}, b = {{2}}, c = {{3}};
test_base_operators(a, b, c);
}
#endif

275
test/atomicity.cpp Normal file

@@ -0,0 +1,275 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Attempt to determine whether the operations on atomic variables
// do in fact behave atomically: Let multiple threads race modifying
// a shared atomic variable and verify that it behaves as expected.
//
// We assume that "observable race condition" events are exponentially
// distributed, with unknown "average time between observable races"
// (which is just the reciprocal of the exponential distribution's rate parameter lambda).
// Use a non-atomic implementation that intentionally exhibits a
// (hopefully tight) race to compute the maximum-likelihood estimate
// for this time. From this, compute an estimate that covers the
// unknown value with 0.995 confidence (using chi square quantile).
//
// Use this estimate to pick a timeout for the race tests of the
// atomic implementations such that under the assumed distribution
// we get 0.995 probability to detect a race (if there is one).
//
// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
// operations truly behave atomically if this test program does not
// report an error.
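// In symbols: with n = 10 exponential samples t_1..t_n, the maximum-
// likelihood estimate of the mean is the sample mean
//     t_mle = (t_1 + ... + t_n) / n,
// and 2*n*t_mle / t_true follows a chi-square distribution with 2n
// degrees of freedom, so
//     t_995 = 2 * n * t_mle / chi2(0.005, 2n)   (chi2(0.005, 20) ~= 7.44)
// covers the unknown mean t_true with 0.995 confidence; this is the
// "* 2 * 10 / 7.44" computation in estimate_avg_race_time() below.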
#include <algorithm>
#include <boost/atomic.hpp>
#include <boost/bind.hpp>
#include <boost/date_time/posix_time/time_formatters.hpp>
#include <boost/test/test_tools.hpp>
#include <boost/test/included/test_exec_monitor.hpp>
#include <boost/thread.hpp>
/* helper class to let two instances of a function race against each
other, with configurable timeout and early abort on detection of error */
class concurrent_runner {
public:
/* concurrently run the function in two threads until either the timeout
expires or one of the functions returns "false"; returns true if the
timeout was reached, or false on early abort, and updates timeout to the
actual elapsed time */
static bool
execute(
const boost::function<bool(size_t)> & fn,
boost::posix_time::time_duration & timeout)
{
concurrent_runner runner(fn);
runner.wait_finish(timeout);
return !runner.failure();
}
concurrent_runner(
const boost::function<bool(size_t)> & fn)
: finished_(false), failure_(false),
first_thread_(boost::bind(&concurrent_runner::thread_function, this, fn, 0)),
second_thread_(boost::bind(&concurrent_runner::thread_function, this, fn, 1))
{
}
void
wait_finish(boost::posix_time::time_duration & timeout)
{
boost::system_time start = boost::get_system_time();
boost::system_time end = start + timeout;
{
boost::mutex::scoped_lock guard(m_);
while (boost::get_system_time() < end && !finished())
c_.timed_wait(guard, end);
}
finished_.store(true, boost::memory_order_relaxed);
first_thread_.join();
second_thread_.join();
boost::posix_time::time_duration duration = boost::get_system_time() - start;
if (duration < timeout)
timeout = duration;
}
bool
finished(void) const throw() {
return finished_.load(boost::memory_order_relaxed);
}
bool
failure(void) const throw() {
return failure_;
}
private:
void
thread_function(boost::function<bool(size_t)> function, size_t instance)
{
while (!finished()) {
if (!function(instance)) {
boost::mutex::scoped_lock guard(m_);
failure_ = true;
finished_.store(true, boost::memory_order_relaxed);
c_.notify_all();
break;
}
}
}
boost::mutex m_;
boost::condition_variable c_;
boost::atomic<bool> finished_;
bool failure_;
boost::thread first_thread_;
boost::thread second_thread_;
};
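/* deliberately racy counter: each instance owns one byte lane of the
shared value and advances it 255 times with a plain, non-atomic
read-modify-write; an update lost to the other instance shows up as an
unexpected lane value and makes the function return false */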
bool
racy_add(volatile unsigned int & value, size_t instance)
{
size_t shift = instance * 8;
unsigned int mask = 0xff << shift;
for (size_t n = 0; n < 255; n++) {
unsigned int tmp = value;
value = tmp + (1 << shift);
if ((tmp & mask) != (n << shift))
return false;
}
unsigned int tmp = value;
value = tmp & ~mask;
if ((tmp & mask) != mask)
return false;
return true;
}
/* compute estimate for average time between races being observable, in usecs */
static double
estimate_avg_race_time(void)
{
double sum = 0.0;
/* take 10 samples */
for (size_t n = 0; n < 10; n++) {
boost::posix_time::time_duration timeout(0, 0, 10);
volatile unsigned int value(0);
bool success = concurrent_runner::execute(
boost::bind(racy_add, boost::ref(value), _1),
timeout
);
if (success) {
BOOST_FAIL("Failed to establish baseline time for reproducing race condition");
}
sum = sum + timeout.total_microseconds();
}
/* determine maximum likelihood estimate for average time between
race observations */
double avg_race_time_mle = (sum / 10);
/* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
return avg_race_time_995;
}
template<typename value_type, size_t shift_>
bool
test_arithmetic(boost::atomic<value_type> & shared_value, size_t instance)
{
size_t shift = instance * 8;
value_type mask = 0xff << shift;
value_type increment = 1 << shift;
value_type expected = 0;
for (size_t n = 0; n < 255; n++) {
value_type tmp = shared_value.fetch_add(increment, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift) )
return false;
expected ++;
}
for (size_t n = 0; n < 255; n++) {
value_type tmp = shared_value.fetch_sub(increment, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift) )
return false;
expected --;
}
return true;
}
template<typename value_type, size_t shift_>
bool
test_bitops(boost::atomic<value_type> & shared_value, size_t instance)
{
size_t shift = instance * 8;
value_type mask = 0xff << shift;
value_type expected = 0;
for (size_t k = 0; k < 8; k++) {
value_type mod = 1 << k;
value_type tmp = shared_value.fetch_or(mod << shift, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift))
return false;
expected = expected | mod;
}
for (size_t k = 0; k < 8; k++) {
value_type tmp = shared_value.fetch_and( ~ (1 << (shift + k)), boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift))
return false;
expected = expected & ~(1<<k);
}
for (size_t k = 0; k < 8; k++) {
value_type mod = 255 ^ (1 << k);
value_type tmp = shared_value.fetch_xor(mod << shift, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift))
return false;
expected = expected ^ mod;
}
value_type tmp = shared_value.fetch_and( ~mask, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift) )
return false;
return true;
}
int test_main(int, char *[])
{
boost::posix_time::time_duration reciprocal_lambda;
double avg_race_time = estimate_avg_race_time();
/* 5.298 = 0.995 quantile of exponential distribution */
const boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time));
{
boost::atomic<unsigned int> value(0);
/* testing two different operations in this loop, therefore
enlarge timeout */
boost::posix_time::time_duration tmp(timeout * 2);
bool success = concurrent_runner::execute(
boost::bind(test_arithmetic<unsigned int, 0>, boost::ref(value), _1),
tmp
);
BOOST_CHECK_MESSAGE(success, "concurrent arithmetic");
}
{
boost::atomic<unsigned int> value(0);
/* testing three different operations in this loop, therefore
enlarge timeout */
boost::posix_time::time_duration tmp(timeout * 3);
bool success = concurrent_runner::execute(
boost::bind(test_bitops<unsigned int, 0>, boost::ref(value), _1),
tmp
);
BOOST_CHECK_MESSAGE(success, "concurrent bitops");
}
return 0;
}

52
test/fallback_api.cpp Normal file

@@ -0,0 +1,52 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/* force fallback implementation using locks */
#define BOOST_ATOMIC_FORCE_FALLBACK 1
#include <boost/atomic.hpp>
#include <boost/cstdint.hpp>
#include <boost/test/minimal.hpp>
#include "api_test_helpers.hpp"
int test_main(int, char *[])
{
test_flag_api();
test_integral_api<char>();
test_integral_api<signed char>();
test_integral_api<unsigned char>();
test_integral_api<boost::uint8_t>();
test_integral_api<boost::int8_t>();
test_integral_api<short>();
test_integral_api<unsigned short>();
test_integral_api<boost::uint16_t>();
test_integral_api<boost::int16_t>();
test_integral_api<int>();
test_integral_api<unsigned int>();
test_integral_api<boost::uint32_t>();
test_integral_api<boost::int32_t>();
test_integral_api<long>();
test_integral_api<unsigned long>();
test_integral_api<boost::uint64_t>();
test_integral_api<boost::int64_t>();
test_integral_api<long long>();
test_integral_api<unsigned long long>();
test_pointer_api<int>();
test_enum_api();
test_struct_api<test_struct<boost::uint8_t> >();
test_struct_api<test_struct<boost::uint16_t> >();
test_struct_api<test_struct<boost::uint32_t> >();
test_struct_api<test_struct<boost::uint64_t> >();
test_large_struct_api();
return 0;
}

181
test/lockfree.cpp Normal file

@@ -0,0 +1,181 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Verify that the definitions of the "LOCK_FREE" macros and the
// "is_lock_free" members are consistent and match expectations.
// Also, if any operation is lock-free, then the platform
// implementation must provide its own fence implementations.
#include <iostream>
#include <boost/atomic.hpp>
#include <boost/test/minimal.hpp>
static const char * lock_free_level[] = {
"never",
"sometimes",
"always"
};
template<typename T>
void
verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_expect)
{
BOOST_CHECK(lock_free_macro_val >= 0 && lock_free_macro_val <= 2);
BOOST_CHECK(lock_free_macro_val == lock_free_expect);
boost::atomic<T> value;
if (lock_free_macro_val == 0)
BOOST_CHECK(!value.is_lock_free());
if (lock_free_macro_val == 2)
BOOST_CHECK(value.is_lock_free());
std::cout << "atomic<" << type_name << "> is " << lock_free_level[lock_free_macro_val] << " lock free\n";
}
#if defined(__GNUC__) && defined(__i386__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 1
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && defined(__x86_64__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 2
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_CHAR16_T_LOCK_FREE 2
#define EXPECT_CHAR32_T_LOCK_FREE 2
#define EXPECT_WCHAR_T_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(__powerpc64__)
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
#endif
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && defined(__alpha__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_CHAR16_T_LOCK_FREE 2
#define EXPECT_CHAR32_T_LOCK_FREE 2
#define EXPECT_WCHAR_T_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 2
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_7A__))
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__linux__) && defined(__arm__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && defined(__sparc_v9__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(_WIN64)
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
#endif
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif 0 && defined(__GNUC__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE (sizeof(long) <= 4 ? 2 : 0)
#define EXPECT_LLONG_LOCK_FREE (sizeof(long long) <= 4 ? 2 : 0)
#define EXPECT_POINTER_LOCK_FREE (sizeof(void *) <= 4 ? 2 : 0)
#define EXPECT_BOOL_LOCK_FREE 2
#else
#define EXPECT_CHAR_LOCK_FREE 0
#define EXPECT_SHORT_LOCK_FREE 0
#define EXPECT_INT_LOCK_FREE 0
#define EXPECT_LONG_LOCK_FREE 0
#define EXPECT_LLONG_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 0
#define EXPECT_BOOL_LOCK_FREE 0
#endif
int test_main(int, char *[])
{
verify_lock_free<char>("char", BOOST_ATOMIC_CHAR_LOCK_FREE, EXPECT_CHAR_LOCK_FREE);
verify_lock_free<short>("short", BOOST_ATOMIC_SHORT_LOCK_FREE, EXPECT_SHORT_LOCK_FREE);
verify_lock_free<int>("int", BOOST_ATOMIC_INT_LOCK_FREE, EXPECT_INT_LOCK_FREE);
verify_lock_free<long>("long", BOOST_ATOMIC_LONG_LOCK_FREE, EXPECT_LONG_LOCK_FREE);
#ifdef BOOST_HAS_LONG_LONG
verify_lock_free<long long>("long long", BOOST_ATOMIC_LLONG_LOCK_FREE, EXPECT_LLONG_LOCK_FREE);
#endif
verify_lock_free<void *>("void *", BOOST_ATOMIC_POINTER_LOCK_FREE, EXPECT_POINTER_LOCK_FREE);
verify_lock_free<bool>("bool", BOOST_ATOMIC_BOOL_LOCK_FREE, EXPECT_BOOL_LOCK_FREE);
bool any_lock_free =
BOOST_ATOMIC_CHAR_LOCK_FREE ||
BOOST_ATOMIC_SHORT_LOCK_FREE ||
BOOST_ATOMIC_INT_LOCK_FREE ||
BOOST_ATOMIC_LONG_LOCK_FREE ||
BOOST_ATOMIC_LLONG_LOCK_FREE ||
BOOST_ATOMIC_BOOL_LOCK_FREE;
BOOST_CHECK(!any_lock_free || BOOST_ATOMIC_THREAD_FENCE);
return 0;
}

49
test/native_api.cpp Normal file

@@ -0,0 +1,49 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic.hpp>
#include <boost/cstdint.hpp>
#include <boost/test/minimal.hpp>
#include "api_test_helpers.hpp"
int test_main(int, char *[])
{
test_flag_api();
test_integral_api<char>();
test_integral_api<signed char>();
test_integral_api<unsigned char>();
test_integral_api<boost::uint8_t>();
test_integral_api<boost::int8_t>();
test_integral_api<short>();
test_integral_api<unsigned short>();
test_integral_api<boost::uint16_t>();
test_integral_api<boost::int16_t>();
test_integral_api<int>();
test_integral_api<unsigned int>();
test_integral_api<boost::uint32_t>();
test_integral_api<boost::int32_t>();
test_integral_api<long>();
test_integral_api<unsigned long>();
test_integral_api<boost::uint64_t>();
test_integral_api<boost::int64_t>();
test_integral_api<long long>();
test_integral_api<unsigned long long>();
test_pointer_api<int>();
test_enum_api();
test_struct_api<test_struct<boost::uint8_t> >();
test_struct_api<test_struct<boost::uint16_t> >();
test_struct_api<test_struct<boost::uint32_t> >();
test_struct_api<test_struct<boost::uint64_t> >();
test_large_struct_api();
return 0;
}

252
test/ordering.cpp Normal file

@@ -0,0 +1,252 @@
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2012 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Attempt to determine whether the memory ordering/fence operations
// work as expected:
// Let two threads race accessing multiple shared variables and
// verify that "observable" order of operations matches with the
// ordering constraints specified.
//
// We assume that "memory ordering violation" events are exponentially
// distributed, with unknown "average time between violations"
// (which is just the reciprocal of the exponential distribution's rate parameter lambda).
// Use a "relaxed ordering" implementation that intentionally exhibits
// such a violation (hopefully observably) to compute the maximum-likelihood
// estimate for this time. From this, compute an estimate that covers the
// unknown value with 0.995 confidence (using chi square quantile).
//
// Use this estimate to pick a timeout for the race tests of the
// atomic implementations such that under the assumed distribution
// we get 0.995 probability to detect a race (if there is one).
//
// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
// fences work as expected if this test program does not
// report an error.
#include <cstdlib>  // rand()
#include <iostream> // std::cout
#include <boost/atomic.hpp>
#include <boost/date_time/posix_time/time_formatters.hpp>
#include <boost/test/test_tools.hpp>
#include <boost/test/included/test_exec_monitor.hpp>
#include <boost/thread.hpp>
// Two threads perform the following operations:
//
// thread # 1 thread # 2
// store(a, 1) store(b, 1)
// read(a) read(b)
// x = read(b) y = read(a)
//
// Under relaxed memory ordering, the case (x, y) == (0, 0) is
// possible. Under sequential consistency, this case is impossible.
//
// This "problem" is reproducible on all platforms, even x86.
template<boost::memory_order store_order, boost::memory_order load_order>
class total_store_order_test {
public:
total_store_order_test(void);
void run(boost::posix_time::time_duration & timeout);
bool detected_conflict(void) const { return detected_conflict_; }
private:
void thread1fn(void);
void thread2fn(void);
void check_conflict(void);
boost::atomic<int> a_;
/* insert a bit of padding to push the two variables into
different cache lines and increase the likelihood of detecting
a conflict */
char pad_[512];
boost::atomic<int> b_;
boost::barrier barrier_;
int vrfya1_, vrfyb1_, vrfya2_, vrfyb2_;
boost::atomic<bool> terminate_threads_;
boost::atomic<int> termination_consensus_;
bool detected_conflict_;
boost::mutex m_;
boost::condition_variable c_;
};
template<boost::memory_order store_order, boost::memory_order load_order>
total_store_order_test<store_order, load_order>::total_store_order_test(void)
: a_(0), b_(0), barrier_(2),
terminate_threads_(false), termination_consensus_(0),
detected_conflict_(false)
{
}
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::run(boost::posix_time::time_duration & timeout)
{
boost::system_time start = boost::get_system_time();
boost::system_time end = start + timeout;
boost::thread t1(boost::bind(&total_store_order_test::thread1fn, this));
boost::thread t2(boost::bind(&total_store_order_test::thread2fn, this));
{
boost::mutex::scoped_lock guard(m_);
while (boost::get_system_time() < end && !detected_conflict_)
c_.timed_wait(guard, end);
}
terminate_threads_.store(true, boost::memory_order_relaxed);
t2.join();
t1.join();
boost::posix_time::time_duration duration = boost::get_system_time() - start;
if (duration < timeout)
timeout = duration;
}
volatile int backoff_dummy;
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::thread1fn(void)
{
for (;;) {
a_.store(1, store_order);
int a = a_.load(load_order);
int b = b_.load(load_order);
barrier_.wait();
vrfya1_ = a;
vrfyb1_ = b;
barrier_.wait();
check_conflict();
/* both threads synchronize via barriers, so either
both threads must exit here, or they must both do
another round, otherwise one of them will wait forever */
if (terminate_threads_.load(boost::memory_order_relaxed)) for (;;) {
int tmp = termination_consensus_.fetch_or(1, boost::memory_order_relaxed);
if (tmp == 3)
return;
if (tmp & 4)
break;
}
termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
unsigned int delay = rand() % 10000;
a_.store(0, boost::memory_order_relaxed);
barrier_.wait();
while(delay--) { backoff_dummy = delay; }
}
}
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::thread2fn(void)
{
for (;;) {
b_.store(1, store_order);
int b = b_.load(load_order);
int a = a_.load(load_order);
barrier_.wait();
vrfya2_ = a;
vrfyb2_ = b;
barrier_.wait();
check_conflict();
/* both threads synchronize via barriers, so either
both threads must exit here, or they must both do
another round, otherwise one of them will wait forever */
if (terminate_threads_.load(boost::memory_order_relaxed)) for (;;) {
int tmp = termination_consensus_.fetch_or(2, boost::memory_order_relaxed);
if (tmp == 3)
return;
if (tmp & 4)
break;
}
termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
unsigned int delay = rand() % 10000;
b_.store(0, boost::memory_order_relaxed);
barrier_.wait();
while(delay--) { backoff_dummy = delay; }
}
}
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::check_conflict(void)
{
if (vrfyb1_ == 0 && vrfya2_ == 0) {
boost::mutex::scoped_lock guard(m_);
detected_conflict_ = true;
terminate_threads_.store(true, boost::memory_order_relaxed);
c_.notify_all();
}
}
void
test_seq_cst(void)
{
double sum = 0.0;
/* take 10 samples */
for (size_t n = 0; n < 10; n++) {
boost::posix_time::time_duration timeout(0, 0, 10);
total_store_order_test<boost::memory_order_relaxed, boost::memory_order_relaxed> test;
test.run(timeout);
if (!test.detected_conflict()) {
BOOST_WARN_MESSAGE(false, "Failed to detect order=seq_cst violation with order=relaxed -- intrinsic ordering too strong for this test");
return;
}
std::cout << "seq_cst violation with order=relaxed after " << boost::posix_time::to_simple_string(timeout) << "\n";
sum = sum + timeout.total_microseconds();
}
/* determine maximum likelihood estimate for average time between
race observations */
double avg_race_time_mle = (sum / 10);
/* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
/* 5.298 = 0.995 quantile of exponential distribution */
boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time_995));
std::cout << "run seq_cst for " << boost::posix_time::to_simple_string(timeout) << "\n";
total_store_order_test<boost::memory_order_seq_cst, boost::memory_order_relaxed> test;
test.run(timeout);
BOOST_CHECK_MESSAGE(!test.detected_conflict(), "sequential consistency");
}
int test_main(int, char *[])
{
test_seq_cst();
return 0;
}