mirror of https://github.com/boostorg/thread.git synced 2026-02-03 21:52:07 +00:00

Compare commits


45 Commits

Author SHA1 Message Date
William E. Kempf
9b8c62d45a Merged from RC_1_29_0
[SVN r16126]
2002-11-05 22:43:42 +00:00
nobody
7d438e9f25 This commit was manufactured by cvs2svn to create branch
'thread_development'.

[SVN r16006]
2002-10-28 14:10:15 +00:00
William E. Kempf
3699cc97a6 Added new tutorial files
[SVN r16005]
2002-10-28 14:10:14 +00:00
Vladimir Prus
5a7377acda Made Boost.Thread compile with V2.
[SVN r15969]
2002-10-23 13:22:56 +00:00
Beman Dawes
6aaee629b5 Fix acknowledgements and copyright
[SVN r15759]
2002-10-07 00:07:46 +00:00
Björn Karlsson
b465fe569c Merged from branch to trunk
[SVN r15613]
2002-10-01 15:00:37 +00:00
Björn Karlsson
5e6f72a688 Merged from branch to trunk
[SVN r15524]
2002-09-26 09:17:19 +00:00
John D. Moore
d525388fb2 Simple tests for shared memory objects. Need multiple process tests as well.
[SVN r14428]
2002-07-12 19:33:02 +00:00
John D. Moore
1029d0d2bb Implementations for win32 + POSIX. Win32 tested.
[SVN r14427]
2002-07-12 19:30:30 +00:00
John D. Moore
00038f78e5 Moved to lower level class
[SVN r14426]
2002-07-12 19:25:32 +00:00
William E. Kempf
2dc67ea9ad Removed lock from get/set in TSS.
[SVN r14356]
2002-07-08 21:08:51 +00:00
William E. Kempf
602faacb35 Changes to tss for proper ordering of destructors
[SVN r14354]
2002-07-08 17:37:39 +00:00
William E. Kempf
0f078f33cb POSIX bug fixes.
[SVN r14316]
2002-07-05 17:52:56 +00:00
William E. Kempf
cf77472dfe POSIX bug fixes.
[SVN r14315]
2002-07-05 17:50:20 +00:00
William E. Kempf
8cc40e5e07 POSIX bug fixes.
[SVN r14314]
2002-07-05 17:45:13 +00:00
William E. Kempf
28d675b4c9 POSIX bug fixes.
[SVN r14313]
2002-07-05 17:38:43 +00:00
William E. Kempf
27b2b14c17 Added generation tracking to tss.
[SVN r14312]
2002-07-05 17:30:36 +00:00
William E. Kempf
cc5dd8a978 Fixed order of thread exit routines.
[SVN r14294]
2002-07-03 18:06:30 +00:00
William E. Kempf
81ee94ce3c Modified TSS to use defined destruction ordering.
[SVN r14291]
2002-07-03 15:54:10 +00:00
William E. Kempf
184e604287 'Fixed' tss bug.
[SVN r14252]
2002-06-27 22:18:05 +00:00
William E. Kempf
50dccc1f66 Removed CS and most 'process_detach' stuff from threadmon
[SVN r14250]
2002-06-27 19:40:41 +00:00
John D. Moore
30b766a02e Initial version for discussion
[SVN r14236]
2002-06-25 12:35:44 +00:00
William E. Kempf
48e8588af0 First go at refactoring tss and next version of thread class.
[SVN r14221]
2002-06-21 18:33:59 +00:00
William E. Kempf
a40b3657f6 Fixed pthread_detach bug
[SVN r13989]
2002-05-20 16:33:01 +00:00
William E. Kempf
1000f77180 Refactored thread
[SVN r13988]
2002-05-20 16:26:57 +00:00
William E. Kempf
518c5c8215 Added thread_group tests
[SVN r13952]
2002-05-16 14:34:03 +00:00
William E. Kempf
d30279342a Flushed output
[SVN r13943]
2002-05-15 22:40:00 +00:00
William E. Kempf
4c9ca0edae Fixed member initialization order warnings
[SVN r13942]
2002-05-15 22:36:35 +00:00
William E. Kempf
c163f90071 Added missing typename keywords
[SVN r13941]
2002-05-15 22:34:16 +00:00
William E. Kempf
1404d8558b Removed id parameters from thread_pool tests
[SVN r13940]
2002-05-15 22:32:38 +00:00
William E. Kempf
c8762d5c91 Removed id parameters from thread_pool tests
[SVN r13939]
2002-05-15 22:27:48 +00:00
William E. Kempf
11d3f9feb4 Changed <limits> to <boost/limits.hpp>
[SVN r13938]
2002-05-15 22:16:30 +00:00
William E. Kempf
3130149dfd Fixed bugs in rw_mutex.cpp caused by missing typename keywords
[SVN r13937]
2002-05-15 22:13:20 +00:00
William E. Kempf
4808ccb316 Refactoring tests
[SVN r13936]
2002-05-15 22:01:36 +00:00
William E. Kempf
7ae3687ed8 Bug fixes for rw_mutex and refactoring of tests
[SVN r13925]
2002-05-15 17:46:53 +00:00
William E. Kempf
bb5f17bf04 Bug fixes for rw_mutex and refactoring of tests
[SVN r13924]
2002-05-15 17:43:01 +00:00
William E. Kempf
a32df1aad9 Merged trunk changes
[SVN r13911]
2002-05-15 15:27:18 +00:00
William E. Kempf
da86b991e5 Updated thread_pool.html
[SVN r13907]
2002-05-15 14:54:18 +00:00
William E. Kempf
d12508b97f Refactored thread_pool implementation
[SVN r13627]
2002-05-02 21:42:22 +00:00
William E. Kempf
98af8c50c4 Updated documentation for barrier
[SVN r13598]
2002-04-30 21:46:53 +00:00
William E. Kempf
7bb5c1b4e2 Modified barrier implementation
[SVN r13597]
2002-04-30 21:27:21 +00:00
William E. Kempf
07ecf15f4c Added rw_mutex
[SVN r13593]
2002-04-30 19:10:14 +00:00
William E. Kempf
d5006255fd Added barrier.
[SVN r13581]
2002-04-29 16:19:27 +00:00
William E. Kempf
d7d8b4cedd Added thread_pool.
[SVN r13580]
2002-04-29 12:53:02 +00:00
nobody
c9f3478a21 This commit was manufactured by cvs2svn to create branch
'thread_development'.

[SVN r13571]
2002-04-26 21:15:41 +00:00
49 changed files with 5617 additions and 285 deletions

View File

@@ -52,7 +52,8 @@ if $(NT) && ! $(PTW32)
# Declare the Boost.Threads static link library libboost_thread.
# Base names of the source files for libboost_thread.
CPP_SOURCES = condition mutex recursive_mutex thread tss xtime once exceptions ;
CPP_SOURCES = condition mutex recursive_mutex thread tss xtime once exceptions thread_pool barrier rw_mutex ;
lib boost_thread : <template>thread_libs ../src/$(CPP_SOURCES).cpp ;

View File

@@ -22,12 +22,16 @@
designer, and implementor of <b>Boost.Threads</b>.</p>
<p>Mac OS Carbon implementation written by <a href="../../../people/mac_murrett.htm">Mac
Murrett</a>.</p>
<p>Important contributions were also made by Jeremy Siek (lots of input on the
design and on the implementation), Alexander Terekhov (lots of input on the
Win32 implementation, especially in regards to boost::condition, as well as
a lot of explanation of POSIX behavior), Greg Colvin (lots of input on the design),
Paul Mclachlan, Thomas Matelich and Iain Hanson (for help in trying to get the
build to work on other platforms), and Kevin S. Van Horn (for several updates/corrections
<p><a href="mailto:jdmoore99@comcast.net">Dave Moore</a> provided initial submissions
and further comments on the <code>barrier</code>, <code>thread_pool</code>,
<code>rw_mutex</code>, <code>rw_try_mutex</code> and <code>rw_timed_mutex</code>
classes.</p>
<p>Important contributions were also made by Jeremy Siek (lots of input on the
design and on the implementation), Alexander Terekhov (lots of input on the
Win32 implementation, especially in regards to boost::condition, as well as
a lot of explanation of POSIX behavior), Greg Colvin (lots of input on the design),
Paul Mclachlan, Thomas Matelich and Iain Hanson (for help in trying to get the
build to work on other platforms), and Kevin S. Van Horn (for several updates/corrections
to the documentation).</p>
<p>The documentation was written by William E. Kempf. Beman Dawes provided additional
documentation material and editing.</p>

doc/barrier.html (new file, 168 lines)
View File

@@ -0,0 +1,168 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link rel="stylesheet" type="text/css" href="../../../boost.css">
<title>Boost.Threads - Header &lt;boost/thread/barrier.hpp&gt;</title>
</head>
<body link="#0000ff" vlink="#800080">
<table border="0" cellpadding="7" cellspacing="0" width="100%" summary=
"header">
<tr>
<td valign="top" width="300">
<h3><a href="../../../index.htm"><img height="86" width="277" alt="C++ Boost" src="../../../c++boost.gif" border="0"></a></h3>
</td>
<td valign="top">
<h1 align="center">Boost.Threads</h1>
<h2 align="center">Header &lt;<a href="../../../boost/thread/barrier.hpp">boost/thread/barrier.hpp</a>&gt;</h2>
</td>
</tr>
</table>
<hr>
<h2>Contents</h2>
<dl class="page-index">
<dt><a href="#introduction">Introduction</a></dt>
<dt><a href="#classes">Classes</a></dt>
<dl class="page-index">
<dt><a href="#class-barrier">Class <code>barrier</code></a></dt>
<dl class="page-index">
<dt><a href="#class-barrier-synopsis">Class <code>barrier</code> synopsis</a></dt>
<dt><a href="#class-barrier-ctors">Class <code>barrier</code> constructors
and destructor</a></dt>
<dt><a href="#class-barrier-modifiers">Class <code>barrier</code> modifier
functions</a></dt>
</dl>
</dl>
<dt><a href="#examples">Example(s)</a></dt>
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/barrier.hpp">boost/thread/barrier.hpp</a>&gt;
to define the class <code>boost::barrier</code>.</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-barrier"></a>Class <code>barrier</code></h3>
<p>An object of class <code>barrier</code> is a synchronization primitive used
to cause a set of threads to wait until they each perform a certain function
or each reach a particular point in their execution. When a barrier is created,
it is initialized with a thread count "N". The first N-1 calls to wait() will
all cause their threads to be blocked. The Nth call to wait() will allow all
of the waiting threads, including the Nth thread, to be placed in a ready state.
Should an additional thread make an N+1th call to wait() on the barrier, it
will be as though this was the first call to wait(), and the process will be
repeated until another N threads call wait(). This functionality allows the
same set of N threads to re-use a barrier object to synchronize their execution
at multiple points during their execution.</p>
<p>See <A href="definitions.html">Formal Definitions</A> for definitions of thread
states <A href="definitions.html#state">blocked</A> and <A href="definitions.html#state">
ready</A>. Note that "waiting" is a synonym for blocked.</p>
<h4><a name="class-barrier-synopsis"></a>Class <code>barrier</code> synopsis</h4>
<pre>
namespace boost
{
class barrier : private <A href="../../utility/utility.htm#Class noncopyable">boost::noncopyable</A> // Exposition only.
// Class barrier meets the <A href="overview.html#NonCopyable">NonCopyable</A> requirement.
{
public:
barrier(size_t count);
~barrier();
bool wait();
};
};
</pre>
<h4><a name="class-barrier-ctors"></a>Class <code>barrier</code> constructors
and destructor</h4>
<pre>
barrier(size_t count);
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Constructs a <code>barrier</code> object that will cause
count threads to block on a call to <code>wait()</code>.</dt>
</dl>
<pre>
~barrier();
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Destroys <code>*this</code>. If threads are still executing
their <code>wait()</code> operations, the behavior for these threads is undefined.</dt>
</dl>
<h4><a name="class-barrier-modifiers"></a>Class <code>barrier</code> modifier
functions</h4>
<pre>
bool wait();
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Wait until N threads call wait(), where N equals the count
provided to the constructor for the barrier object.</dt>
<dt><b>Returns:</b> Exactly one of the N threads will receive a return value
of <code>true</code>, the others will receive a value of <code>false</code>.
Precisely which thread receives the return value of <code>true</code> will
be implementation defined. Applications can use this value to designate one
thread as a leader that will take a certain action, and the other threads
emerging from the barrier can wait for that action to take place.</dt>
<dt><b>Danger:</b> If the barrier is destroyed before <code>wait()</code> can
return, the behavior is undefined.</dt>
</dl>
<h2><a name="examples"></a>Example(s)</h2>
<pre>
#include <a href="../../../boost/thread/barrier.hpp">&lt;boost/thread/barrier.hpp&gt;</a>
#include <a href="../../../boost/thread/thread.hpp">&lt;boost/thread/thread.hpp&gt;</a>
#include <a href="../../../boost/thread/mutex.hpp">&lt;boost/thread/mutex.hpp&gt;</a>
#include &lt;iostream&gt;
const int N_THREADS = 10;
boost::barrier gen_barrier(N_THREADS);
int global_parameter = 0;
boost::mutex mutex;
static void worker()
{
for (int i = 0; i &lt; 5; ++i)
{
// Simulate 5 cycles of computation...
if (gen_barrier.wait())
{
boost::mutex::scoped_lock lock(mutex);
global_parameter++;
}
}
// Let one worker "report" the results
if (gen_barrier.wait())
{
boost::mutex::scoped_lock lock(mutex);
std::cout &lt;&lt; "Global Parameter=" &lt;&lt; global_parameter &lt;&lt; "\n";
}
}
int main(int, char*[])
{
boost::thread_group g;
global_parameter = 0;
for (int i = 0; i &lt; N_THREADS; ++i)
g.create_thread(&amp;worker);
g.join_all();
}
</pre>
<p>The output is:</p>
<pre>
Global Parameter=5
</pre>
<hr>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%d %B, %Y" startspan -->
05 November, 2001
<!--webbot bot="Timestamp" endspan i-checksum="39359" -->
</p>
<p><i>&copy; Copyright <a href="mailto:jdmoore99@comcast.net">Dave Moore</a> and <a href="mailto:wekempf@cox.net">William E. Kempf</a> 2001-2002.
All Rights Reserved.</i></p>
<p>Permission to use, copy, modify, distribute and sell this software and its
documentation for any purpose is hereby granted without fee, provided that the
above copyright notice appear in all copies and that both that copyright notice
and this permission notice appear in supporting documentation. William E. Kempf
makes no representations about the suitability of this software for any purpose.
It is provided &quot;as is&quot; without express or implied warranty.</p>
</body>
</html>

View File

@@ -36,7 +36,7 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/condition.hpp">boost/thread/condition.hpp</a>&gt;
<p>Include the header &lt;<a href="../../../boost/thread/condition.hpp">boost/thread/condition.hpp</a>&gt;
to define the class condition.</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-condition"></a>Class <code>condition</code></h3>

View File

@@ -40,7 +40,7 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/exceptions.hpp">boost/thread/exceptions.hpp</a>&gt;
<p>Include the header &lt;<a href="../../../boost/thread/exceptions.hpp">boost/thread/exceptions.hpp</a>&gt;
to define the exception types that may be thrown by <b>Boost.Threads</b> classes.</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-lock_error"></a>Class <code>lock_error</code></h3>

View File

@@ -36,6 +36,13 @@
</dl>
<dt>Reference</dt>
<dl class="index">
<dt><a href="barrier.html"><code>&lt;boost/thread/barrier.hpp&gt;</code></a></dt>
<dl class="index">
<dt><a href="barrier.html#classes">Classes</a></dt>
<dl class="index">
<dt><a href="barrier.html#class-barrier"><code>barrier</code></a></dt>
</dl>
</dl>
<dt><a href="condition.html"><code>&lt;boost/thread/condition.hpp&gt;</code></a></dt>
<dl class="index">
<dt><a href="condition.html#classes">Classes</a></dt>

View File

@@ -51,7 +51,7 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/mutex.hpp">boost/thread/mutex.hpp</a>&gt;
<p>Include the header &lt;<a href="../../../boost/thread/mutex.hpp">boost/thread/mutex.hpp</a>&gt;
to define the <code><a href="#mutex Synopsis">mutex</a></code>, <code><a href=
"#try_mutex Synopsis">try_mutex</a></code> and <code><a href=
"#timed_mutex Synopsis">timed_mutex</a></code> classes.</p>

View File

@@ -37,12 +37,12 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/once.hpp">boost/thread/once.hpp</a>&gt;
to define the <code>call_once</code> function, <code>once_flag</code> type and
<p>Include the header &lt;<a href="../../../boost/thread/once.hpp">boost/thread/once.hpp</a>&gt;
to define the <code>call_once</code> function, <code>once_flag</code> type and
<code>BOOST_ONCE_INIT</code> constant.</p>
<p>The <code>call_once</code> function and <code>once_flag</code> type (statically
initialized to <code>BOOST_ONCE_INIT</code>) can be used to run a routine exactly
once. This can be used to initialize data in a <a href="definitions.html#Thread-safe">
<p>The <code>call_once</code> function and <code>once_flag</code> type (statically
initialized to <code>BOOST_ONCE_INIT</code>) can be used to run a routine exactly
once. This can be used to initialize data in a <a href="definitions.html#Thread-safe">
thread-safe</a> manner.</p>
<h2><a name="macros"></a>Macros</h2>
<pre>
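The once.html hunk above documents call_once and once_flag for running an initialization routine exactly once in a thread-safe way. A minimal sketch of that usage, assuming the call_once(func, flag) argument order used by Boost.Threads at the time; init_resource and worker are illustrative names only:

#include &lt;boost/thread/once.hpp&gt;
#include &lt;boost/thread/thread.hpp&gt;
#include &lt;iostream&gt;

boost::once_flag resource_flag = BOOST_ONCE_INIT;  // statically initialized flag

void init_resource()   // runs exactly once, no matter how many threads request it
{
    std::cout &lt;&lt; "resource initialized" &lt;&lt; std::endl;
}

void worker()
{
    boost::call_once(&amp;init_resource, resource_flag);
    // ... use the initialized resource ...
}

int main()
{
    boost::thread_group threads;
    for (int i = 0; i &lt; 4; ++i)
        threads.create_thread(&amp;worker);
    threads.join_all();
    return 0;
}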

View File

@@ -48,21 +48,21 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/recursive_mutex.hpp">boost/thread/recursive_mutex.hpp</a>&gt;
to define the <a href="#class-recursive_mutex">recursive_mutex</a>, <a href="#class-recursive_try_mutex">recursive_try_mutex</a>
<p>Include the header &lt;<a href="../../../boost/thread/recursive_mutex.hpp">boost/thread/recursive_mutex.hpp</a>&gt;
to define the <a href="#class-recursive_mutex">recursive_mutex</a>, <a href="#class-recursive_try_mutex">recursive_try_mutex</a>
and <a href="#class-recursive_timed_mutex">recursive_timed_mutex</a> classes.</p>
<p>The <a href="#class-recursive_mutex">recursive_mutex</a>, <a href="#class-recursive_try_mutex">recursive_try_mutex</a>
and <a href="#class-recursive_timed_mutex">recursive_timed_mutex</a> classes
are models of <a href="mutex_concept.html#Mutex-concept">Mutex</a>, <a href="mutex_concept.html#TryMutex-concept">TryMutex</a>,
and <a href="mutex_concept.html#TimedMutex-concept">TimedMutex</a> respectively.
These types should be used to synchronize access to shared resources when recursive
locking by a single thread is likely to occur. A good example for this is when
a class supplies &quot;internal synchronization&quot; to ensure <a href="definitions.html#Thread-safe">
thread-safety</a> and a function of the class may have to call other functions
of the class which also attempt to lock the mutex. For recursive locking mechanics,
<p>The <a href="#class-recursive_mutex">recursive_mutex</a>, <a href="#class-recursive_try_mutex">recursive_try_mutex</a>
and <a href="#class-recursive_timed_mutex">recursive_timed_mutex</a> classes
are models of <a href="mutex_concept.html#Mutex-concept">Mutex</a>, <a href="mutex_concept.html#TryMutex-concept">TryMutex</a>,
and <a href="mutex_concept.html#TimedMutex-concept">TimedMutex</a> respectively.
These types should be used to synchronize access to shared resources when recursive
locking by a single thread is likely to occur. A good example for this is when
a class supplies &quot;internal synchronization&quot; to ensure <a href="definitions.html#Thread-safe">
thread-safety</a> and a function of the class may have to call other functions
of the class which also attempt to lock the mutex. For recursive locking mechanics,
see <a href="mutex.html">mutexes</a>.</p>
<p>Each class supplies one or more typedefs for lock types which model matching
lock concepts. For the best possible performance you should use the mutex class
<p>Each class supplies one or more typedefs for lock types which model matching
lock concepts. For the best possible performance you should use the mutex class
that supports the minimum set of lock types that you need.</p>
<table summary="lock types" border="1" cellpadding="5">
<tr>
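The recursive_mutex.html hunk above motivates recursive locking for classes that provide "internal synchronization", where one locking member function calls another that locks the same mutex. A minimal sketch of that pattern using boost::recursive_mutex and its scoped_lock typedef; the cache class and its members are illustrative only:

#include &lt;boost/thread/recursive_mutex.hpp&gt;

class cache
{
public:
    cache() : m_size(0) { }
    void clear()
    {
        boost::recursive_mutex::scoped_lock lock(m_guard); // may be a second lock by the same thread
        m_size = 0;
    }
    void reset()
    {
        boost::recursive_mutex::scoped_lock lock(m_guard); // first lock
        clear();  // clear() locks m_guard again; a recursive_mutex permits this
    }
private:
    boost::recursive_mutex m_guard;
    int m_size;
};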

doc/rw_lock_concept.html (new file, 247 lines)
View File

@@ -0,0 +1,247 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link rel="stylesheet" type="text/css" href="../../../boost.css">
<title>Boost.Threads - RWLock Concepts</title>
</head>
<body link="#0000ff" vlink="#800080">
<table border="0" cellpadding="7" cellspacing="0" width="100%" summary=
"header">
<tr>
<td valign="top" width="300">
<h3><a href="../../../index.htm"><img height="86" width="277" alt="C++ Boost" src="../../../c++boost.gif" border="0"></a></h3>
</td>
<td valign="top">
<h1 align="center">Boost.Threads</h1>
<h2 align="center">RWLock Concepts</h2>
</td>
</tr>
</table>
<hr>
<dl class="index">
<dt><a href="#introduction">Introduction</a></dt>
<dt><a href="#requirements">Concept Requirements</a></dt>
<dl class="index">
<dt><a href="#lock-state-enumeration">Lock State Enumeration</a></dt>
<dt><a href="#RWLock-concept">RWLock Concept</a></dt>
<dt><a href="#ScopedRWLock-concept">ScopedRWLock Concept</a></dt>
<dt><a href="#ScopedTryRWLock-concept">ScopedTryRWLock Concept</a></dt>
<dt><a href="#ScopedTimedRWLock-concept">ScopedTimedRWLock Concept</a></dt>
</dl>
<dt><a href="#models">Models</a></h2>
<dt><a href="#footnotes">Footnotes</a></dt>
</dl>
<h2><a name="introduction"></a>Introduction</h2>
<p>The lock concepts provide exception safe means for locking and unlocking a
<a href="rw_mutex_concept.html"> rw_mutex model</a>. In other words they are
an implementation of the <i>Scoped Locking</i> <a href="bibliography.html#Schmidt 00">[Schmidt
00]</a> pattern. The <a href="#ScopedRWLock">ScopedRWLock</a> concept, with
<a href="#ScopedTryRWLock"> ScopedTryRWLock</a> and <a href="#ScopedTimedRWLock">ScopedTimedRWLock</a>
refinements, formalizes the requirements.</p>
<p>Lock models are constructed with a reference to a <a href="mutex_concept.html">mutex
model</a> and typically acquire ownership of the <a href="mutex_concept.html">mutex
model</a> by setting its state to locked. They also ensure ownership is relinquished
in the destructor. Lock models also expose functions to query the lock status
and to manually lock and unlock the <a href="mutex_concept.html">mutex model</a>.</p>
<p>Instances of lock models are meant to be short lived, expected to be used at
block scope only. The lock models are not <a href="definitions.html#Thread-safe">thread-safe</a>.
Lock models must maintain state to indicate whether or not they've been locked
and this state is not protected by any synchronization concepts. For this reason
an instance of a lock model should never be shared between multiple threads.</p>
<h2><a name="requirements"></a>Concept Requirements</h2>
<p>[For documentation purposes, portions of the concept requirements are repeated
in the documentation for specific lock classes. Those copies need to be kept
in sync with the requirements here.]</p>
<h3><a name="lock-state-enumeration"></a>Lock State Enumeration</h3>
<p>An enumerated value that can be one of three possible values - {NO_LOCK, SHARED_LOCK,
or EXCL_LOCK}.&nbsp; Each class modeling the Lock Concept will maintain this
state as its view of the lock-state of the controlled rw_mutex.</p>
<h3><a name="RWLock-concept"></a>RWLock Concept</h3>
<p>For a <a href="#ScopedRWLock">ScopedRWLock</a>, <a href="#ScopedTryRWLock">ScopedTryRWLock</a>,
or <a href="#ScopedTimedRWLock">ScopedTimedRWLock</a> type <code>L</code> and
an object <code>lk</code> and const object <code>clk</code> of that type, the
following expressions must be well-formed and have the indicated effects.</p>
<p>The Lock concept is used as a base for the <a href="#ScopedRWLock">ScopedRWLock</a>,
<a href="#ScopedTryRWLock">ScopedTryRWLock</a>, and <a href="#ScopedTimedRWLock">ScopedTimedRWLock</a>
refinements. The associated rw_mutex type is as specified for each of those
refinements respectively.</p>
<table summary="Lock expressions" border="1" cellpadding="5">
<tr>
<td><b>Expression</b></td>
<td><b>Effects</b></td>
</tr>
<tr>
<td valign="top"><code>(&amp;lk)-&gt;~L();</code></td>
<td><code>if (locked()) unlock();</code></td>
</tr>
<tr>
<td valign="top"><code>(&amp;clk)-&gt;operator const void*()</code></td>
<td>Returns type void*, non-zero if the associated rw_mutex has been locked
by <code> clk</code>, otherwise 0.</td>
</tr>
<tr>
<td valign="top"><code>clk.locked()</code></td>
<td>Returns a <code>bool</code>, <code>(&amp;clk)-&gt;operator const void*()
!= 0</code></td>
</tr>
<tr>
<td valign="top"><code>clk.lockstate()</code></td>
<td>Returns an enumeration of the lock state: <code>NO_LOCK</code>, <code>EXCL_LOCK</code>
or <code>SHARED_LOCK</code>.</td>
</tr>
<tr>
<td valign="top"><code>lk.wrlock()</code></td>
<td>Throws lock_error if locked(). If the associated rw_mutex is already locked
by some other thread, places the current thread in the <a href="definitions.html#State">
Blocked</a> state until the associated rw_mutex is unlocked, after which
the current thread is placed in the <a href="definitions.html#State">Ready</a>
state, eventually to be returned to the <a href="definitions.html#State">Running</a>
state.<br>
Postcondition: locked() and lockstate() == EXCL_LOCK</td>
</tr>
<tr>
<td valign="top"><code>lk.rdlock()</code></td>
<td>Throws lock_error if locked().&nbsp; If the associated rw_mutex cannot
immediately grant the shared lock, places the current thread in the <a href="definitions.html#State">
Blocked</a> state until the associated rw_mutex can grant a shared lock,
after which the current thread is placed in the <a href="definitions.html#State">Ready</a>
state, eventually to be returned to the <a href="definitions.html#State">Running</a>
state.&nbsp;<br>
Postcondition: locked() and lockstate() == SHARED_LOCK</td>
</tr>
<tr>
<td valign="top"><code>lk.unlock()</code></td>
<td>If !locked(), throws lock_error, otherwise unlocks the associated rw_mutex.<br>
Postcondition: !locked()</td>
</tr>
</table>
<h3><a name="ScopedRWLock-concept"></a>ScopedRWLock Concept</h3>
<p>A ScopedRWLock must meet the <a href="#Lock">RWLock</a> requirements. For a
ScopedRWLock type <code>L</code> and an object <code>lk</code> of that type,
and an object <code>m</code> of a type meeting the <a href="mutex_concept.html#Mutex">
RWMutex</a> requirements, and an object <code>s</code> of type <code>lock_state</code>,
the following expressions must be well-formed and have the indicated effects.</p>
<table summary="ScopedRWLock expressions" border="1" cellpadding="5" width="732">
<tr>
<td width="91"><b>Expression</b></td>
<td width="609"><b>Effects</b></td>
</tr>
<tr>
<td valign="top" width="91"><code>L lk(m);</code></td>
<td width="609">Constructs an object <code>lk</code>, and associates rw_mutex
<code>m</code> with it, then calls <code>wrlock()</code></td>
</tr>
<tr>
<td valign="top" width="91"><code>L lk(m,s);</code></td>
<td width="609">Constructs an object <code>lk</code>, and associates rw_mutex
<code>m</code> with it, then if <code>s==SHARED_LOCK</code> calls <code>rdlock()</code>,
or if <code>s==EXCL_LOCK</code> calls <code>wrlock()</code></td>
</tr>
</table>
<h3><a name="ScopedTryRWLock-concept"></a>ScopedTryRWLock Concept</h3>
<p>A ScopedTryRWLock must meet the <a href="#Lock">RWLock</a> requirements. For
a ScopedTryRWLock type <code>L</code> and an object <code>lk</code> of that
type, and an object <code>m</code> of a type meeting the <a href="mutex_concept.html#TryMutex">
TryRWMutex</a> requirements, and an object <code>s</code> of type <code>lock_state</code>,
the following expressions must be well-formed and have the indicated effects.</p>
<table summary="ScopedTryRWLock expressions" border="1" cellpadding="5">
<tr>
<td width="157"><b>Expression</b></td>
<td><b>Effects</b></td>
</tr>
<tr>
<td valign="top" width="157"><code>L lk(m);</code></td>
<td>Constructs an object <code>lk</code>, and associates rw_mutex <code>m</code>
with it, then calls <code>try_wrlock()</code></td>
</tr>
<tr>
<td valign="top" width="157"><code>L lk(m,s);</code></td>
<td>Constructs an object <code>lk</code>, and associates rw_mutex <code>m</code>
with it, then if <code>s==SHARED_LOCK</code> calls <code>rdlock()</code>,
or if <code>s==EXCL_LOCK</code> calls <code>wrlock()</code></td>
</tr>
<tr>
<td valign="top" width="157"><code>lk.try_wrlock()</code></td>
<td>If locked(), throws <code>lock_error</code>. Makes a non-blocking attempt
to exclusive-lock the associated rw_mutex, returning <code>true</code> if
the lock attempt is successful, otherwise <code>false</code>.</td>
</tr>
<tr>
<td valign="top" width="157"><code>lk.try_rdlock()</code></td>
<td>If locked(), throws <code>lock_error</code>. Makes a non-blocking attempt
to shared-lock the associated rw_mutex, returning <code>true</code> if the
lock attempt is successful, otherwise <code>false</code>.</td>
</tr>
</table>
<h3><a name="ScopedTimedRWLock-concept"></a>ScopedTimedRWLock Concept</h3>
<p>A ScopedTimedRWLock must meet the <a href="#Lock">RWLock</a> requirements.
For a ScopedTimedRWLock type <code>L</code> and an object <code>lk</code> of
that type, and an object <code>m</code> of a type meeting the <a href="mutex_concept.html#TimedMutex">
TimedRWMutex</a> requirements, and an object <code>s</code> of type <code>lock_state</code>,
and an object <code>t</code> of type <code><a href="xtime.html">xtime</a></code>,
the following expressions must be well-formed and have the indicated effects.</p>
<table summary="ScopedTimedRWLock expressions" border="1" cellpadding="5">
<tr>
<td width="164"><b>Expression</b></td>
<td><b>Effects</b></td>
</tr>
<tr>
<td valign="top" width="164"><code>L lk(m,t);</code></td>
<td>Constructs an object <code>lk</code>, and associates rw_mutex <code>m</code>
with it, then calls <code>timed_wrlock(t)</code></td>
</tr>
<tr>
<td valign="top" width="164"><code>L lk(m,s);</code></td>
<td>Constructs an object <code>lk</code>, and associates rw_mutex <code>m</code>
with it, then if <code>s==SHARED_LOCK</code> calls <code>rdlock()</code>,
or if <code>s==EXCL_LOCK</code> calls <code>wrlock()</code></td>
</tr>
<tr>
<td valign="top" width="164"><code>lk.timed_wrlock(t)</code></td>
<td>If locked(), throws lock_error. Makes a blocking attempt to exclusive-lock
the associated rw_mutex, and returns <code>true</code> if successful within
the specified time <code>t</code>, otherwise <code>false</code>.</td>
</tr>
<tr>
<td valign="top" width="164"><code>lk.timed_rdlock(t)</code></td>
<td>If locked(), throws lock_error. Makes a blocking attempt to shared-lock
the associated rw_mutex, and returns <code>true</code> if successful within
the specified time <code>t</code>, otherwise <code>false</code>.</td>
</tr>
</table>
<h2><a name="models"></a>Models</h2>
<p><b>Boost.Threads</b> currently supplies three classes which model lock concepts.</p>
<p>These classes are normally accessed via typedefs of the same name supplied
by a <a href="mutex_concept.html"> mutex model</a>.</p>
<table summary="Lock concept classes" border="1" cellpadding="5">
<tr>
<td><b>Concept</b></td>
<td><b>Refines</b></td>
<td><b>Classes Modeling the Concept</b></td>
</tr>
<tr>
<td><a href="#ScopedRWLock">ScopedRWLock</a></td>
<td>&nbsp;</td>
<td><a href="scoped_rw_lock.html">scoped_rw_lock</a></td>
</tr>
<tr>
<td><a href="#ScopedTryRWLock">ScopedTryRWLock</a></td>
<td><a href="#ScopedRWLock">ScopedRWLock</a></td>
<td><a href="scoped_try_rw_lock.html">scoped_try_rw_lock</a> </td>
</tr>
<tr>
<td><a href="#ScopedTimedRWLock">ScopedTimedRWLock</a></td>
<td><a href="#ScopedRWLock">ScopedRWLock</a></td>
<td><a href="scoped_timed_rw_lock.html">scoped_timed_rw_lock</a></td>
</tr>
</table>
<hr>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%d %B, %Y" startspan -->
05 November, 2001
<!--webbot bot="Timestamp" endspan i-checksum="39359" -->
</p>
<p><i>&copy; Copyright <a href="mailto:{{address}}">{{author}}</a> 2002. All Rights
Reserved.</i></p>
</body>
</html>
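Putting the RWLock and ScopedRWLock expressions tabulated above into code: a minimal sketch assuming the rw_mutex class, the scoped_rw_lock typedef and the boost::SHARED_LOCK lock-state value described in the accompanying rw_mutex.html. The exact spellings were still in flux in this development branch, so treat the names as provisional:

#include &lt;boost/thread/rw_mutex.hpp&gt;

boost::rw_mutex guard(boost::sp_writer_priority);
int shared_value = 0;

void writer()
{
    // L lk(m): associates the rw_mutex and acquires an exclusive (write) lock.
    boost::rw_mutex::scoped_rw_lock lk(guard);
    ++shared_value;
}   // ~L(): if (locked()) unlock()

int reader()
{
    // L lk(m,s): with s == SHARED_LOCK this acquires a shared (read) lock.
    boost::rw_mutex::scoped_rw_lock lk(guard, boost::SHARED_LOCK);
    return shared_value;
}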

doc/rw_mutex.html (new file, 353 lines)
View File

@@ -0,0 +1,353 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link rel="stylesheet" type="text/css" href="../../../boost.css">
<title>Boost.Threads - Header &lt;boost/thread/rw_mutex.hpp&gt;</title>
</head>
<body link="#0000ff" vlink="#800080">
<table border="0" cellpadding="7" cellspacing="0" width="100%" summary=
"header">
<tr>
<td valign="top" width="300">
<h3><a href="../../../index.htm"><img height="86" width="277" alt="C++ Boost" src="../../../c++boost.gif" border="0"></a></h3>
</td>
<td valign="top">
<h1 align="center">Boost.Threads</h1>
<h2 align="center">Header &lt;<a href="../../../boost/thread/rw_mutex.hpp">boost/thread/rw_mutex.hpp</a>&gt;</h2>
</td>
</tr>
</table>
<hr>
<h2>Contents</h2>
<dl class="page-index">
<dt><a href="#introduction">Introduction</a></dt>
<dt><a href="#macros">Macros</a></dt>
<dl class="page-index">
<dt><a href="#macro-spec">{{macro name}}</a></dt>
</dl>
<dt><a href="#values">Values</a></dt>
<dl class="page-index">
<dt><a href="#value-spec">{{value name}}</a></dt>
</dl>
<dt><a href="#types">Types</a></dt>
<dl class="page-index">
<dt><a href="#type-spec">{{type name}}</a></dt>
</dl>
<dt><a href="#classes">Classes</a></dt>
<dl class="page-index">
<dt><a href="#class-spec">Class <code>{{class name}}</code></a></dt>
<dl class="page-index">
<dt><a href="#class-spec-synopsis">Class <code>{{class name}}</code> synopsis</a></dt>
<dt><a href="#class-spec-ctors">Class <code>{{class name}}</code> constructors
and destructor</a></dt>
<dt><a href="#class-spec-comparisons">Class <code>{{class name}}</code>
comparison functions</a></dt>
<dt><a href="#class-spec-modifiers">Class <code>{{class name}}</code> modifier
functions</a></dt>
<dt><a href="#class-spec-observers">Class <code>{{class name}}</code> observer
functions</a></dt>
<dt><a href="#class-spec-statics">Class <code>{{class name}}</code> static
functions</a></dt>
</dl>
</dl>
<dt><a href="#functions">Functions</a></dt>
<dl class="page-index">
<dt><a href="#function-spec">{{function name}}</a></dt>
</dl>
<dt><a href="#objects">Objects</a></dt>
<dl class="page-index">
<dt><a href="#object-spec">{{object name}}</a></dt>
</dl>
<dt><a href="#examples">Example(s)</a></dt>
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>The <tt><a href="#rw_mutex Synopsis">rw_mutex</a></tt>, <tt><a href="#try_rw_mutex Synopsis">
try_rw_mutex</a></tt> and <tt><a href="#timed_rw_mutex Synopsis">timed_rw_mutex</a></tt>
classes define full featured models of the <a href="rw_mutex_concept.html#rw_mutex">
RWMutex</a>, <a href="rw_mutex_concept.html#TryMutex">TryRWMutex</a>, and <a href="rw_mutex_concept.html#TimedMutex">
TimedRWMutex</a> concepts. These types should be used to synchronize access
to shared resources.&nbsp; Recursive or non-recursive locking mechanics are
achieved by supplying the appropriate Mutex type as a parameter.</p>
<p>Each class supplies one or more typedefs for lock types which model matching
lock concepts. For the best possible performance you should use the rw_mutex
class that supports the minimum set of lock types that you need.</p>
<table summary="lock types" border="1" cellpadding="5">
<tr>
<td><b>rw_mutex Class</b></td>
<td><b>Lock name</b></td>
<td><b>Implementation defined Lock Type</b></td>
<td><b>Lock Concept</b></td>
</tr>
<tr>
<td valign="top"><a href="#rw_mutex Synopsis"><code> rw_mutex</code></a></td>
<td valign="center"><code> scoped_rw_lock</code></td>
<td valign="center"><code><a href="scoped_lock.html">boost::</a></code><a href="scoped_lock.html"><code>detail::thread::scoped_rw_lock&lt;rw_mutex&gt;</code></a></td>
<td valign="center">ScopedRWLock</td>
</tr>
<tr>
<td valign="top"><tt><a href="#try_rw_mutex Synopsis"> try_rw_mutex</a></tt>
</td>
<td valign="center"><code> scoped_rw_lock<br>
scoped_try_rw_lock</code></td>
<td valign="center"><code><a href="scoped_lock.html">boost::</a></code><a href="scoped_lock.html"><code>detail::thread::scoped_rw_lock&lt;try_rw_mutex&gt;</code></a>
<code><a href="scoped_try_lock.html"> <br>
</a><a href="scoped_lock.html">boost::</a></code><a href="scoped_lock.html"><code>detail::thread::scoped_try_rw_lock&lt;try_rw_mutex&gt;</code></a></td>
<td valign="center">ScopedRWLock<br>
ScopedTryRWLock</td>
</tr>
<tr>
<td valign="top"><code><a href="#timed_rw_mutex Synopsis"> timed_rw_mutex</a></code>
</td>
<td valign="center"><code> scoped_rw_lock<br>
scoped_try_rw_lock<br>
scoped_timed_rw_lock</code></td>
<td valign="center"><code><a href="scoped_lock.html">boost::</a></code><a href="scoped_lock.html"><code>detail::thread::scoped_rw_lock&lt;timed_rw_mutex&gt;</code></a>
<code><a href="scoped_try_lock.html"> <br>
</a><a href="scoped_lock.html">boost::</a></code><a href="scoped_lock.html"><code>detail::thread::scoped_try_rw_lock&lt;timed_rw_mutex&gt;</code></a><a href="scoped_timed_lock.html"><code><br>
</code></a><code><a href="scoped_lock.html">boost::</a></code><a href="scoped_lock.html"><code>detail::thread::scoped_timed_rw_lock&lt;timed_rw_mutex&gt;</code></a></td>
<td valign="center">ScopedRWLock<br>
ScopedTryRWLock<br>
ScopedTimedRWLock</td>
</tr>
</table>
<p>The <tt>rw_mutex</tt>, <tt>try_rw_mutex</tt> and <tt>timed_rw_mutex</tt> classes
leave the locking strategy as Unspecified.&nbsp; Programmers should assume that
threads that lock a rw_mutex, try_rw_mutex, or timed_rw_mutex multiple times
will deadlock, unless all of the lock requests are for read-locks.&nbsp;&nbsp;</p>
<p>The <tt>rw_mutex</tt>, <tt>try_rw_mutex</tt> and <tt>timed_rw_mutex</tt> allow
the programmer to explicitly choose&nbsp;the <a href="rw_mutex_concept.html#SchedulingPolicies">
scheduling policy</a> for the lock.&nbsp; This scheduling policy will dictate
how competing readers and writers will acquire the lock.&nbsp; It does not,
however, dictate the order that individual read or write requests will be granted,
in comparison to other requests of the same type.&nbsp;&nbsp;Programmers should
assume that threads waiting for a lock on objects of these types acquire the
lock in a random order, even though the specific behavior for a given platform
may be different.</p>
<H2>Release Notes/Caveats</H2>
<UL>
<LI> Self-deadlock is virtually guaranteed if a thread tries to lock the same
rw_mutex multiple times, unless all locks are read-locks (but see below)</LI>
<LI> This implementation does not protect against reader overflow.&nbsp; If
more than INT_MAX readers obtain or try to obtain a lock simultaneously, the
behavior is undefined.&nbsp; This will be addressed in a future release, but
it seems that detecting this condition &amp; reporting an error or throwing
an exception should suffice for realistic uses.&nbsp; Having readers beyond
INT_MAX wait for the count to decrease only pushes the overflow problem onto
another variable...&nbsp; Suggestions?</LI>
<LI> See the comments at the head of rw_mutex.cpp for a description of the implementation
itself.</LI>
</UL>
<h2><a name="macros"></a>Macros</h2>
<p><a name="macro-spec"></a>{{Macro specifications}}</p>
<h2><a name="values"></a>Values</h2>
<pre>
namespace boost {
typedef enum
{
sp_writer_priority,
sp_reader_priority,
sp_alternating_many_reads,
sp_alternating_single_reads
} rw_scheduling_policy;
typedef enum
{
NO_LOCK,
SHARED_LOCK,
EXCL_LOCK
} lockstate;
}
</pre>
<h2><a name="types"></a>Types</h2>
<p><a name="type-spec"></a>{{Type specifications}}</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-rw_mutex"></a>Class <code>rw_mutex</code></h3>
<p>{{text}}</p>
<h4><a name="class-rw_mutex-synopsis"></a>Class <code>rw_mutex</code> synopsis</h4>
<pre>
namespace boost
{
class rw_mutex : private <a href="../../utility/utility.htm">boost::noncopyable</a> // Exposition only.
// Class rw_mutex meets the <a href="overview.html#non-copyable">NonCopyable</a> requirement.
{
public:
typedef <i>[implementation defined; see <a href="#introduction">Introduction</a>]</i> scoped_rw_lock;
rw_mutex(rw_scheduling_policy sp=sp_writer_priority);
~rw_mutex();
};
};
</pre>
<h4><a name="class-rw_mutex-ctors"></a>Class <code>rw_mutex</code> constructors
and destructor</h4>
<pre>
rw_mutex(rw_scheduling_policy sp=sp_writer_priority);
</pre>
<dl class="function-semantics">
<dt><b>Postconditions:</b> <code>*this</code> is in the NO_LOCK state.</dt>
</dl>
<pre>
~rw_mutex();
</pre>
<dl class="function-semantics">
<dt><b>Requires:</b> <code>*this</code> is in the NO_LOCK state.</dt>
<dt><b>Effects:</b> Destroys <code>*this</code>.</dt>
<dt><b>Danger:</b> Destruction of a locked rw_mutex is a serious programming
error resulting in undefined behavior such as a program crash.</dt>
</dl>
<h3><a name="class-rw_try_mutex"></a>Class <code>rw_try_mutex</code></h3>
<p>{{text}}</p>
<h4><a name="class-rw_try_mutex-synopsis"></a>Class <code>rw_try_mutex</code>
synopsis</h4>
<pre>
namespace boost
{
class rw_try_mutex : private <a href="../../utility/utility.htm">boost::noncopyable</a> // Exposition only.
// Class rw_try_mutex meets the <a href="overview.html#non-copyable">NonCopyable</a> requirement.
{
public:
typedef <i>[implementation defined; see <a href="#introduction">Introduction</a>]</i> scoped_rw_lock;
typedef <i>[implementation defined; see <a href="#introduction">Introduction</a>]</i> scoped_rw_try_lock;
rw_try_mutex(rw_scheduling_policy sp=sp_writer_priority);
~rw_try_mutex();
};
};
</pre>
<h4><a name="class-rw_try_mutex-ctors"></a>Class <code>rw_try_mutex</code> constructors
and destructor</h4>
<pre>
rw_try_mutex(rw_scheduling_policy sp=sp_writer_priority);
</pre>
<dl class="function-semantics">
<dt><b>Postconditions:</b> <code>*this</code> is in the NO_LOCK state.</dt>
</dl>
<pre>
~rw_try_mutex();
</pre>
<dl class="function-semantics">
<dt><b>Requires:</b> <code>*this</code> is in the NO_LOCK state.</dt>
<dt><b>Effects:</b> Destroys <code>*this</code>.</dt>
<dt><b>Danger:</b> Destruction of a locked rw_mutex is a serious programming
error resulting in undefined behavior such as a program crash.</dt>
</dl>
<h3><a name="class-rw_timed_mutex"></a>Class <code>rw_timed_mutex</code></h3>
<p>{{text}}</p>
<h4><a name="class-rw_timed_mutex-synopsis"></a>Class <code>rw_timed_mutex</code>
synopsis</h4>
<pre>
namespace boost
{
class rw_timed_mutex : private <a href="../../utility/utility.htm">boost::noncopyable</a> // Exposition only.
// Class rw_timed_mutex meets the <a href="overview.html#non-copyable">NonCopyable</a> requirement.
{
public:
typedef <i>[implementation defined; see <a href="#introduction">Introduction</a>]</i> scoped_rw_lock;
typedef <i>[implementation defined; see <a href="#introduction">Introduction</a>]</i> scoped_rw_try_lock;
typedef <i>[implementation defined; see <a href="#introduction">Introduction</a>]</i> scoped_rw_timed_lock;
rw_timed_mutex(rw_scheduling_policy sp=sp_writer_priority);
~rw_timed_mutex();
};
};
</pre>
<h4><a name="class-rw_timed_mutex-ctors"></a>Class <code>rw_timed_mutex</code>
constructors and destructor</h4>
<pre>
rw_timed_mutex(rw_scheduling_policy sp=sp_writer_priority);
</pre>
<dl class="function-semantics">
<dt><b>Postconditions:</b> <code>*this</code> is in the NO_LOCK state.</dt>
</dl>
<pre>
~rw_timed_mutex();
</pre>
<dl class="function-semantics">
<dt><b>Requires:</b> <code>*this</code> is in the NO_LOCK state.</dt>
<dt><b>Effects:</b> Destroys <code>*this</code>.</dt>
<dt><b>Danger:</b> Destruction of a locked rw_mutex is a serious programming
error resulting in undefined behavior such as a program crash.</dt>
</dl>
<h2><a name="examples"></a>Example(s)</h2>
<pre>
#include <a href="../../../boost/thread/rw_mutex.hpp">&lt;boost/thread/rw_mutex.hpp&gt;</a>
#include &lt;boost/thread/mutex.hpp&gt;
#include <a href="../../../boost/thread/thread.hpp">&lt;boost/thread/thread.hpp&gt;</a>
#include &lt;iostream&gt;
boost::mutex io_mutex; // The iostreams are not guaranteed to be <a href="definitions.html#Thread-safe">thread-safe</a>!
class counter
{
public:
counter() : rwm(boost::sp_writer_priority), count(0) { }
int increment() {
boost::rw_mutex::scoped_rw_lock lock(rwm); // exclusive (write) lock
return ++count;
}
int get() {
boost::rw_mutex::scoped_rw_lock lock(rwm, boost::SHARED_LOCK); // shared (read) lock
return count;
}
private:
boost::rw_mutex rwm;
int count;
};
counter c;
void change_count()
{
int i = c.increment();
boost::mutex::scoped_lock lock(io_mutex);
std::cout &lt;&lt; "count == " &lt;&lt; i &lt;&lt; std::endl;
}
void get_count()
{
int i = c.get();
boost::mutex::scoped_lock lock(io_mutex);
std::cout &lt;&lt; "get_count == " &lt;&lt; i &lt;&lt; std::endl;
}
int main(int, char*[])
{
const int num_threads = 4;
boost::thread_group thrds;
for (int i=0; i &lt; num_threads; ++i)
{
thrds.create_thread(&amp;change_count);
thrds.create_thread(&amp;get_count);
}
thrds.join_all();
return 0;
}
</pre>
<p>Typical output might be:</p>
<pre>
count == 1
get_count == 1
get_count == 1
count == 2
count == 3
get_count == 3
count == 4
get_count == 4
</pre>
<p>Of course, the exact output is platform dependent, since the order in which
competing readers and writers acquire the lock is unspecified.</p>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%d %B, %Y" startspan -->
05 November, 2001
<!--webbot bot="Timestamp" endspan i-checksum="39359" -->
</p>
<p><i>&copy; Copyright <a href="mailto:{{address}}">{{author}}</a> 2002. All Rights
Reserved.</i></p>
</body>
</html>

doc/rw_mutex_concept.html (new file, 611 lines)
View File

@@ -0,0 +1,611 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link rel="stylesheet" type="text/css" href="../../../boost.css">
<title>Boost.Threads - RWMutex Concept</title>
</head>
<body link="#0000ff" vlink="#800080">
<table border="0" cellpadding="7" cellspacing="0" width="100%" summary=
"header">
<tr>
<td valign="top" width="300">
<h3><a href="../../../index.htm"><img height="86" width="277" alt="C++ Boost" src="../../../c++boost.gif" border="0"></a></h3>
</td>
<td valign="top">
<h1 align="center">Boost.Threads</h1>
<h2 align="center">RWMutex Concept</h2>
</td>
</tr>
</table>
<hr>
<dl class="index">
<dt><a href="#introduction">Introduction</a></dt>
<dt><a href="#locking-strategies">Locking Strategies</a></dt>
<dl class="page-index">
<dt><a href="#locking-strategy-recursive">Recursive</a></dt>
<dt><a href="#locking-strategy-checked">Checked</a></dt>
<dt><a href="#locking-strategy-unchecked">Unchecked</a></dt>
<dt><a href="#locking-strategy-unspecified">Unspecified</a></dt>
</dl>
<dt><a href="#scheduling-policies">Scheduling Policies</a></dt>
<dt><a href="#requirements">Concept Requirements</a></dt>
<dt><a href="#models">Models</a></dt>
</dl>
<h2><a name="introduction"></a>Introduction</h2>
<p>A rw_mutex (short for reader-writer mutual-exclusion) concept serializes access
to a resource shared between multiple threads, where multiple readers can share
simultaneous access, but writers require exclusive access.&nbsp; The <a href="#Mutex">
RWMutex</a> concept, with <a href="#TryMutex">TryRWMutex</a> and <a href="#TimedMutex">
TimedRWMutex</a> refinements, formalizes the requirements. A model that implements
RWMutex and its refinements has three states: <b>shared-locked</b>, <b>exclusive-locked</b>
and <b>unlocked</b>. Before reading from a shared resource, a thread <b>shared-locks</b>
a Boost.Threads rw_mutex model object, ensuring <a href="definitions.html#Thread-safe">
thread-safe</a> access for reading from the shared resource. Before writing
to a shared resource, a thread <b>exclusive-locks</b> a Boost.Threads rw_mutex
model object, ensuring <a href="definitions.html#Thread-safe">thread-safe</a>
access for altering the shared resource.&nbsp; When use of the shared resource
is complete, the thread unlocks the mutex model object, allowing another thread
to acquire the lock and use the shared resource.</p>
<p> Some traditional C thread APIs like Pthreads provide implementations for rw_mutex
(also known as reader-writer locks).&nbsp; Others like Windows thread APIs do
not provide a rw_mutex primitive.&nbsp; Some of those APIs expose functions
to lock and unlock a rw_mutex model. This is dangerous since it's easy to forget
to unlock a locked rw_mutex. When the flow of control is complex, with multiple
return points, the likelihood of forgetting to unlock a rw_mutex model would
become even greater. When exceptions are thrown, it becomes nearly impossible
to ensure that the rw_mutex is unlocked properly when using these traditional
API's. The result is <a href="definitions.html#Deadlock">deadlock</a>.</p>
<p>Many C++ threading libraries use a pattern known as <i>Scoped Locking</i> <a href="bibliography.html#Schmidt 00">
[Schmidt 00]</a> to free the programmer from the need to explicitly lock and
unlock rw_mutexes. With this pattern, a <a href="lock_concept.html">lock concept</a>
is employed where the lock model's constructor locks the associated rw_mutex
model and the destructor automatically does the unlocking. The <b>Boost.Threads</b>
library takes this pattern to the extreme in that lock concepts are the only
way to lock and unlock a rw_mutex model: lock and unlock functions are not exposed
by any <b>Boost.Threads</b> rw_mutex models. This helps to ensure safe usage
patterns, especially when code throws exceptions.</p>
<h2><a name="locking-strategies"></a>Locking Strategies</h2>
<P>Every rw_mutex model follows one of several locking strategies. These strategies
define the semantics for the locking operation when the calling thread already
owns a lock on the rw_mutex model.</P>
<h3><a name="locking-strategy-recursive"></a>Recursive</h3>
<P>With a recursive locking strategy when a thread attempts to acquire an additional&nbsp;lock
on the rw_mutex model for which it already owns a lock, the operation is successful,
except&nbsp;possibly in the case where a shared-lock holding thread attempts
to&nbsp;obtain an exclusive lock.&nbsp; </P>
<P>
<TABLE id="Table9" width="100%" border="1">
<TR>
<TD width="22%">Lock Type Held</TD>
<TD width="18%">Lock Request Type</TD>
<TD width="60%">Action</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grant the shared lock immediately</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">
<P>If this thread is the only holder of the shared-lock, grants the exclusive
lock immediately.&nbsp; Otherwise throws lock_error() exception.</P>
</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grants the (additional) shared lock immediately.</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%"> Grant the exclusive lock immediately</TD>
</TR>
</TABLE>
<P>Internally a lock count is maintained and the owning thread must unlock the
mutex model the same number of times that it's locked it before the mutex model's
state returns to unlocked. Since mutex models in <B>Boost.Threads</B> expose
locking functionality only through lock concepts, a thread will always unlock
a mutex model the same number of times that it locked it. This helps to eliminate
a whole set of errors typically found in traditional C style thread APIs.</P>
<P>Classes <A href="recursive_mutex.html">recursive_rw_mutex</A>, <A href="recursive_mutex.html">
recursive_try_rw_mutex</A> and <A href="recursive_mutex.html">recursive_timed_rw_mutex</A>
will use this locking strategy.&nbsp; Successful implementation of this locking
strategy may require thread identification (see below).</P>
<h3><a name="locking-strategy-checked"></a>Checked</h3>
<P>With a checked locking strategy when a thread attempts to acquire a lock on
the mutex model for which the thread already owns a lock, the operation will
fail with some sort of error indication, except in the case of multiple&nbsp;shared-lock
acquisition which is&nbsp;a normal operation for ANY RWMutex. &nbsp;Further,
attempts by a thread to unlock a mutex that was not locked by the thread will
also return some sort of error indication. In <B>Boost.Threads</B>, an exception
of type <A href="lock_error.html">lock_error</A> would be thrown in these cases.</P>
<B>
<P>
<TABLE id="Table10" width="100%" border="1">
<TR>
<TD width="22%">Lock Type Held</TD>
<TD width="18%">Lock Request Type</TD>
<TD width="60%">Action</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grant the shared lock immediately</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">
<P>Throw lock_error()</P>
</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Throw lock_error()</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%"> Throw lock_error()</TD>
</TR>
</TABLE>
<p></P>
</B>
<P><B> Boost.Threads</b> does not currently provide any rw_mutex models that use
this strategy.&nbsp; A successful implementation of this locking strategy would
likely require thread identification.</P>
<h3><a name="locking-strategy-unchecked"></a>Unchecked</h3>
<P>With an unchecked locking strategy when a thread attempts to acquire a lock
on the rw_mutex model for which the thread already owns a lock the operation
will <A href="definitions.html#Deadlock"> deadlock</A>. In general this locking
strategy is less safe than a checked or recursive strategy, but it can be&nbsp;a
faster strategy and so is employed by many libraries.</P>
<P>
<TABLE id="Table11" width="100%" border="1">
<TR>
<TD width="22%">Lock Type Held</TD>
<TD width="18%">Lock Request Type</TD>
<TD width="60%">Action</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grant the shared lock immediately</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">
<P>Deadlock</P>
</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Deadlock</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%"> Deadlock</TD>
</TR>
</TABLE>
<p></P>
<P><B>Boost.Threads</B> does not currently provide any mutex models that use this
strategy.&nbsp; For RWMutexes on platforms that contain natively recursive synchronization
primitives, implementing a guaranteed-deadlock can actually involve extra work,
and would likely require thread identification.</P>
<h3><a name="locking-strategy-unspecified"></a>Unspecified</h3>
<P>With an unspecified locking strategy, when a thread attempts to acquire a lock
on a rw_mutex model for which the thread already owns a lock the operation results
in <B>undefined behavior</B>. When a rw_mutex model has an unspecified locking
strategy the programmer must assume that the rw_mutex model instead uses an
unchecked strategy as the worst case, although some platforms may exhibit a
mix of unchecked and recursive behavior.</P>
<P>
<TABLE id="Table12" width="100%" border="1">
<TR>
<TD width="22%">Lock Type Held</TD>
<TD width="18%">Lock Request Type</TD>
<TD width="60%">Action</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grant the shared lock immediately</TD>
</TR>
<TR>
<TD width="22%">shared-lock</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">
<P>Undefined, but generally deadlock</P>
</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Undefined, but generally deadlock</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%"> Undefined, but generally deadlock</TD>
</TR>
</TABLE>
<p></P>
<P>In general a rw_mutex model with an unspecified locking strategy is unsafe,
and it requires programmer discipline to use the rw_mutex model properly. However,
this strategy allows an implementation to be as fast as possible with no restrictions
on its implementation. This is especially true for portable implementations
that wrap the native threading support of a platform. For this reason, the classes
<A href="rw_mutex.html">rw_mutex</A>, <A href="rw_mutex.html">try_rw_mutex</A>
and <A href="rw_mutex.html">timed_rw_mutex</A> use this locking strategy despite
the lack of safety.</P>
<h3>An Aside - Thread Identification</h3>
<P>RWMutexes can support specific Locking Strategies (recursive and checked) which
help to detect and protect against self-deadlock.&nbsp; Self-deadlock can occur
when a holder of a locked RWMutex attempts to obtain another lock.&nbsp; Given
an implemention "I" which is susceptible to self-deadlock but otherwise correct
and efficient, a recursive or checked implementation "Ir" or "Ic" can use the
same basic implementation, but make special checks against self-deadlock by
tracking the identities of thread(s) currently holding locks.&nbsp; This approach
makes deadlock detection orthogonal to the basic RWMutex implementation.&nbsp;
</P>
<P>Alternatively, a different basic implementation for RWMutex concepts,
I' (I-Prime) may exist which uses recursive or checked versions of synchronization
primitives to produce a recursive or checked RWMutex while still providing flexibility
in terms of Scheduling Policies.</P>
<P>Please refer to the <b>Boost.Threads</b> <a href="mutex_concept.html#LockingStrategies">
mutex concept</a> documentation for a discussion of locking strategies.&nbsp;
The rw_mutex supports both the <a href="mutex_concept.html#Recursive"> recursive</a>
and <a href="mutex_concept.html#Unspecified">unspecified</a> locking strategies.&nbsp;
RWMutexes are parameterized on a Mutex type which they use to control exclusive-locking
and access to internal state.
<H3>Another Aside - <A name="LockingPromotion">Lock Promotion</A></H3>
<P>RWMutexes can support lock promotion, where a mutex which is in the shared-locked
    state transitions to an exclusive-locked state without releasing the lock.&nbsp;
    If this functionality is supported at all by Boost.Threads, it will only be
    through an explicit promote() operation.&nbsp; Extra care must be taken to
    ensure that only one thread holding a shared lock can block awaiting promotion
    at any given time.&nbsp; If more than one shared-lock holder were allowed to
    block while waiting to be promoted, deadlock would result, since each would be
    waiting for the others to release their shared locks.</P>
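<p>The following is a hedged sketch of the promotion pattern, not a definitive
    interface. The <code>unlock()</code> and <code>wrlock()</code> members exist on the
    proposed scoped_rw_lock type (see the rw_lock.hpp listing later on this page), but
    <code>try_promote()</code>, <code>needs_update()</code> and <code>do_update()</code>
    are hypothetical, standing in for whatever promote() operation and application
    logic might actually be present:</p>
<pre>
#include &lt;boost/thread/rw_mutex.hpp&gt;

bool needs_update();   // hypothetical predicate
void do_update();      // hypothetical mutator

boost::rw_mutex rwm(boost::sp_writer_priority);

void update_if_stale()
{
    boost::rw_mutex::scoped_rw_lock lk(rwm, boost::SHARED_LOCK);
    if (!needs_update())        // checked under the shared lock
        return;

    if (!lk.try_promote())      // hypothetical: fails if another holder already waits
    {
        // Fall back to release-and-reacquire.  Another writer may have run
        // in the window, so the condition must be re-checked.
        lk.unlock();
        lk.wrlock();
        if (!needs_update())
            return;
    }
    do_update();                // performed under the exclusive lock
}
</pre>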
<h2><a name="scheduling-policies"></a>Scheduling Policies</h2>
<p>Every rw_mutex model follows one of several scheduling policies. These policies
define the semantics when the mutex model is unlocked and there is more than
one thread waiting to acquire a lock. In other words, the policy defines which
waiting thread shall acquire the lock.&nbsp; For rw_mutex, it is particularly
important to define the behavior when threads are requesting both shared and
exclusive access simultaneously.&nbsp; This will be referred to as "inter-class
scheduling".&nbsp;&nbsp;</p>
<p>For some types of inter-class scheduling, an intra-class scheduling policy
    can also be defined that describes the order in which waiting threads of the
    same class will acquire the lock.</p>
<h3><a name="ReaderPriority">ReaderPriority</a></h3>
<p>With ReaderPriority, any pending request for a shared lock will have priority
over a pending request for an exclusive lock, irrespective of the current lock
state of the rw_mutex, and irrespective of the relative order that the pending
requests arrive.</p>
<table border="1" width="100%" id="Table1">
<tr>
<td width="22%">Current rw_mutex state</td>
<td width="18%">Request Type</td>
<td width="60%">Action</td>
</tr>
<tr>
<td width="22%">unlocked</td>
<td width="18%">shared-lock</td>
<td width="60%">Grant the shared lock immediately</td>
</tr>
<tr>
<td width="22%">shared-locked</td>
<td width="18%">shared-lock</td>
<td width="60%">Grant the additional shared lock immediately.</td>
</tr>
<tr>
<td width="22%">exclusive-locked</td>
<td width="18%">shared-lock</td>
<td width="60%">Wait to acquire the lock until the thread holding the exclusive-lock
releases its lock.&nbsp; A shared lock will be granted to all pending readers
before&nbsp;any other thread can acquire an exclusive lock.</td>
</tr>
<tr>
<td width="22%">unlocked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Grant the exclusive lock immediately, if and only if there
are no pending shared-lock requests.</td>
</tr>
<tr>
<td width="22%">shared-locked</td>
<td width="18%">exclusive-lock</td>
<td width="60%"> Wait to acquire the lock until all threads holding shared
locks release their locks -AND- no requests for shared locks exist.&nbsp;
If other exclusive-lock requests exist, the lock is granted in accordance
with the intra-request scheduling policy.</td>
</tr>
<tr>
<td width="22%">exclusive-locked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Wait to acquire the lock until the thread holding the exclusive
lock releases its lock -AND- no requests for shared locks exist.&nbsp; If
other exclusive-lock requests exist, the lock is granted in accordance with
the intra-request scheduling policy.</td>
</tr>
</table>
<h3><a name="WriterPriority">WriterPriority</a></h3>
<p>With WriterPriority, any pending request for an exclusive lock will have priority
over a pending request for a shared lock, irrespective of the current lock state
of the rw_mutex, and irrespective of the relative order that the pending requests
arrive.</p>
<table border="1" width="100%" id="Table2">
<tr>
<td width="22%">Current rw_mutex state</td>
<td width="18%">Request Type</td>
<td width="60%">Action</td>
</tr>
<tr>
<td width="22%">unlocked</td>
<td width="18%">shared-lock</td>
<td width="60%">Grant the shared lock immediately.</td>
</tr>
<tr>
<td width="22%">shared-locked</td>
<td width="18%">shared-lock</td>
<td width="60%">Grant the additional shared lock immediately, -IF- no outstanding
requests for an exclusive lock exist.</td>
</tr>
<tr>
<td width="22%">exclusive-locked</td>
<td width="18%">shared-lock</td>
<td width="60%"> Wait to acquire the lock until the thread holding the exclusive-lock
releases its lock.&nbsp; The shared lock will be granted once&nbsp;no other
outstanding exclusive-lock requests exist.</td>
</tr>
<tr>
<td width="22%">unlocked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Grant the exclusive lock immediately.</td>
</tr>
<tr>
<td width="22%">shared-locked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Wait to acquire the lock until all threads holding shared
locks release their locks.&nbsp; If other exclusive-lock requests exist,
the lock is granted in accordance with the intra-request scheduling policy.&nbsp;
This request will be granted before any new shared-lock requests are granted.</td>
</tr>
<tr>
<td width="22%">exclusive-locked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Wait to acquire the lock until the thread holding the exclusive
lock releases its lock.&nbsp; If other exclusive-lock requests exist, the
lock is granted in accordance with the intra-request scheduling policy.&nbsp;
This request will be granted before any new shared-lock requests are granted.</td>
</tr>
</table>
<h3><a name="AlteratingManyPriority">AlternatingPriority</a>/ManyReads</h3>
<p>With AlternatingPriority/ManyReads, reader or writer starvation is avoided
    by alternately granting shared or exclusive access when pending requests exist
    for both types of locks.&nbsp; Outstanding shared-lock requests are treated
    as a group when it is the "readers' turn".</p>
<table border="1" width="100%" id="Table3">
<tr>
<td width="22%">Current rw_mutex state</td>
<td width="18%">Request Type</td>
<td width="60%">Action</td>
</tr>
<tr>
<td width="22%">unlocked</td>
<td width="18%">shared-lock</td>
<td width="60%">Grant the shared lock immediately.</td>
</tr>
<tr>
<td width="22%">shared-locked</td>
<td width="18%">shared-lock</td>
<td width="60%">Grant the additional shared lock immediately, -IF- no outstanding
requests for an exclusive lock exist.&nbsp; If outstanding exclusive-lock
requests exist, this lock will not be granted until at least one of the
exclusive locks is granted and released. If other shared-lock requests exist,
all shared-locks will be granted as a group.</td>
</tr>
<tr>
<td width="22%">exclusive-locked</td>
<td width="18%">shared-lock</td>
<td width="60%"> Wait to acquire the lock until the thread holding the exclusive-lock
releases its lock.&nbsp; If other outstanding exclusive-lock requests exist,
they will have to wait until all current shared-lock requests are serviced.</td>
</tr>
<tr>
<td width="22%">unlocked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Grant the exclusive lock immediately.</td>
</tr>
<tr>
<td width="22%">shared-locked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">
<P>Wait to acquire the lock until all threads holding shared locks release
their locks.&nbsp; </P>
<P>If other&nbsp;exclusive-lock requests exist, this lock will be granted
to one of them in accordance with the intra-request scheduling policy.</P>
</td>
</tr>
<tr>
<td width="22%">exclusive-locked</td>
<td width="18%">exclusive-lock</td>
<td width="60%">Wait to acquire the lock until the thread holding the exclusive
lock releases its lock.&nbsp;&nbsp; If other outstanding shared-lock requests
exist, this lock will not be granted until all of the currently waiting
shared locks&nbsp;are granted and released.&nbsp; If other exclusive-lock
requests exist, this lock will be granted in accordance with the intra-request
scheduling policy.</td>
</tr>
</table>
<H3><A name="AlteratingSinglePriority">AlternatingPriority</A>/SingleReads</H3>
<P>With AlternatingPriority/SingleReads, reader or writer starvation is avoided
    by alternately granting shared or exclusive access when pending requests exist
    for both types of locks.&nbsp; Outstanding shared-lock requests are serviced
    one at a time&nbsp;when it is the "reader's turn".</P>
<TABLE id="Table13" width="100%" border="1">
<TR>
<TD width="22%">Current rw_mutex state</TD>
<TD width="18%">Request Type</TD>
<TD width="60%">Action</TD>
</TR>
<TR>
<TD width="22%">unlocked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grant the shared lock immediately.</TD>
</TR>
<TR>
<TD width="22%">shared-locked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">Grant the additional shared lock immediately, -IF- no outstanding
requests for an exclusive lock exist.&nbsp; If outstanding exclusive-lock
requests exist, this lock will not be granted until at least one of the
exclusive locks is granted and released. </TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">shared-lock</TD>
<TD width="60%">
<P>Wait to acquire the lock until the thread holding the exclusive-lock
releases its lock.</P>
<P>If other outstanding exclusive-lock requests exist, exactly one shared-lock
request will be granted before the next exclusive lock is granted.</P>
</TD>
</TR>
<TR>
<TD width="22%">unlocked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">Grant the exclusive lock immediately.</TD>
</TR>
<TR>
<TD width="22%">shared-locked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">
<P>Wait to acquire the lock until all threads holding shared locks release
their locks.&nbsp; </P>
<P>If other&nbsp;exclusive-lock requests exist, this lock will be granted
to one of them in accordance with the intra-request scheduling policy.</P>
</TD>
</TR>
<TR>
<TD width="22%">exclusive-locked</TD>
<TD width="18%">exclusive-lock</TD>
<TD width="60%">Wait to acquire the lock until the thread holding the exclusive
lock releases its lock.&nbsp;&nbsp; If other outstanding shared-lock requests
exist, this lock can not be granted until exactly one shared-lock request
is granted and released.&nbsp; If other exclusive-lock requests exist,
this lock will be granted in accordance with the intra-request scheduling
policy.</TD>
</TR>
</TABLE>
<h3>Intra-Request Scheduling Policy</h3>
<p>Please refer to the <b>Boost.Threads</b> <a href="mutex_concept.html#SchedulingPolicies">
mutex concept</a> documentation for a discussion of mutex scheduling policies,
which are identical to RWMutex Intra-Request scheduling policies.&nbsp; The
rw_mutex supports only the <a href="mutex_concept.html#UnspecifiedScheduling">
Unspecified</a> intra-request scheduling policy.&nbsp; That is, given a set
of threads waiting for exclusive locks, the order (amongst themselves) in which
they receive the lock is unspecified.</p>
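<p>As a brief sketch, the proposed rw_mutex classes listed later on this page fix
    their inter-class scheduling policy at construction time through the
    <code>rw_scheduling_policy</code> enumeration:</p>
<pre>
#include &lt;boost/thread/rw_mutex.hpp&gt;

boost::rw_mutex reader_first(boost::sp_reader_priority);
boost::rw_mutex writer_first(boost::sp_writer_priority);
boost::rw_mutex fair_many(boost::sp_alternating_many_reads);
boost::rw_mutex fair_single(boost::sp_alternating_single_reads);
</pre>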
<h2><a name="requirements"></a>Concept Requirements</h2>
<h3>RW<a name="Mutex">Mutex</a> Concept</h3>
<p>A RWMutex object has three states: shared-locked, exclusive-locked, and unlocked.
RWMutex object state can only be determined by an object meeting the <a href="rw_lock_concept.html#ScopedLock">
ScopedRWLock</a> requirements and constructed for the RWMutex object.</p>
<p>A RWMutex is <a href="../../utility/utility.htm#Class noncopyable">noncopyable</a>.</p>
<p>For a RWMutex type RWM and an object m of that type, the following expressions
    must be well-formed and have the indicated effects.</p>
<table summary="Mutex expressions" border="1" cellpadding="5" id="Table5">
<tr>
<td><b>Expression</b></td>
<td><b>Effects</b></td>
</tr>
<tr>
<td><code>RWM m;</code></td>
<td>Constructs a rw_mutex object m. Post-condition: m is unlocked.</td>
</tr>
<tr>
<td><code>(&amp;m)-&gt;~RWM();</code></td>
<td>Precondition: m is unlocked. Destroys a rw_mutex object m.</td>
</tr>
<tr>
<td><code>RWM::scoped_rw_lock</code></td>
<td>A type meeting the <a href="rw_lock_concept.html#ScopedRWLock">ScopedRWLock</a>
requirements.&nbsp;&nbsp;</td>
</tr>
</table>
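<p>The following is a minimal sketch of generic code written against the RWMutex
    concept. Any model supplying the nested <code>scoped_rw_lock</code> type can be
    substituted for RWM. The <code>shared_data</code> class is illustrative only, and
    because the proposed models listed later on this page take a scheduling policy at
    construction, the sketch passes one explicitly rather than default-constructing
    the mutex:</p>
<pre>
#include &lt;boost/thread/rw_mutex.hpp&gt;

template &lt;typename RWM&gt;
class shared_data
{
public:
    explicit shared_data(int v)
        : m_value(v), m_rwm(boost::sp_writer_priority) { }

    int read() const
    {
        // Shared lock held for the duration of the read.
        typename RWM::scoped_rw_lock lk(m_rwm, boost::SHARED_LOCK);
        return m_value;
    }

    void write(int v)
    {
        // Exclusive lock held for the duration of the write.
        typename RWM::scoped_rw_lock lk(m_rwm, boost::EXCL_LOCK);
        m_value = v;
    }

private:
    int m_value;
    mutable RWM m_rwm;
};

// Usage: shared_data&lt;boost::rw_mutex&gt; d(0); d.write(42); int x = d.read();
</pre>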
<h3><a name="TryMutex">TryRWMutex</a> Concept</h3>
<p>A TryRWMutex must meet the <a href="#Mutex">RWMutex</a> requirements. In
addition, for a TryRWMutex type RWM and an object m of that type, the following
expressions must be well-formed and have the indicated effects.</p>
<table summary="TryMutex expressions" border="1" cellpadding="5" id="Table6">
<tr>
<td><b>Expression</b></td>
<td><b>Effects</b></td>
</tr>
<tr>
<td><code>RWM::scoped_try_rw_lock</code></td>
<td>A type meeting the <a href="rw_lock_concept.html#ScopedTryRWLock">ScopedTryRWLock</a>
requirements.</td>
</tr>
</table>
<h3><a name="TimedMutex">TimedRWMutex</a> Concept</h3>
<p>A TimedRWMutex must meet the <a href="#TryMutex">TryRWMutex</a> requirements.
In addition, for a TimedRWMutex type RWM and an object m of that type, the following
expressions must be well-formed and have the indicated effects.</p>
<table summary="TimedMutex expressions" border="1" cellpadding="5" id="Table7">
<tr>
<td><b>Expression</b></td>
<td><b>Effects</b></td>
</tr>
<tr>
<td><code>RWM::scoped_timed_rw_lock</code></td>
<td>A type meeting the <a href="rw_lock_concept.html#ScopedTimedRWLock">ScopedTimedRWLock</a>
requirements.</td>
</tr>
</table>
<h2><a name="models"></a>Models</h2>
<p><b>Boost.Threads</b> currently supplies three classes which model rw_mutex
concepts.</p>
<table summary="Mutex concept classes" border="1" cellpadding="5" id="Table8">
<tr>
<td><b>Concept</b></td>
<td><b>Refines</b></td>
<td><b>Classes Modeling the Concept</b></td>
</tr>
<tr>
<td valign="top"><a href="#Mutex">RWMutex</a></td>
<td valign="top">&nbsp;</td>
<td><a href="rw_mutex.html">rw_mutex&lt;Mutex&gt;</a></td>
</tr>
<tr>
<td valign="top"><a href="#TryMutex">TryRWMutex</a></td>
<td valign="top"><a href="#Mutex">RWMutex</a></td>
<td><a href="rw_mutex.html">try_rw_mutex&lt;TryMutex&gt; </a> </td>
</tr>
<tr>
<td valign="top"><a href="#TimedMutex">TimedRWMutex</a></td>
<td valign="top"><a href="#TryMutex">TryRWMutex</a></td>
<td><a href="rw_mutex.html">timed_rw_mutex&lt;TimedMutex&gt; </a> </td>
</tr>
</table>
<hr>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%d %B, %Y" startspan -->
05 November, 2001
<!--webbot bot="Timestamp" endspan i-checksum="39359" -->
</p>
<p><i>&copy; Copyright <a href="mailto:{{address}}">{{author}}</a> 2002. All Rights
Reserved.</i></p>
</body>
</html>


@@ -52,8 +52,8 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>The header &lt;<a href="../../../boost/thread/thread.hpp">boost/thread.hpp</a>&gt;
defines the classes <a href="#class-thread">thread</a> and <a href="#class-thread_group">thread_group</a>
<p>The header &lt;<a href="../../../boost/thread/thread.hpp">boost/thread.hpp</a>&gt;
defines the classes <a href="#class-thread">thread</a> and <a href="#class-thread_group">thread_group</a>
which are used to create, observe and manage threads and groups of threads.</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-thread"></a>Class <code>thread</code></h3>

207
doc/thread_pool.html Normal file

@@ -0,0 +1,207 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link rel="stylesheet" type="text/css" href="../../../boost.css">
<title>Boost.Threads - Header &lt;boost/thread/thread_pool.hpp&gt;</title>
</head>
<body link="#0000ff" vlink="#800080">
<table border="0" cellpadding="7" cellspacing="0" width="100%" summary=
"header">
<tr>
<td valign="top" width="300">
<h3><a href="../../../index.htm"><img height="86" width="277" alt="C++ Boost" src="../../../c++boost.gif" border="0"></a></h3>
</td>
<td valign="top">
<h1 align="center">Boost.Threads</h1>
<h2 align="center">Header &lt;<a href="../../../boost/thread/thread_pool.hpp">boost/thread/thread_pool.hpp</a>&gt;</h2>
</td>
</tr>
</table>
<hr>
<h2>Contents</h2>
<dl class="page-index">
<dt><a href="#introduction">Introduction</a></dt>
<dt><a href="#classes">Classes</a></dt>
<dl class="page-index">
<dt><a href="#class-thread_pool">Class <code>thread_pool</code></a></dt>
<dl class="page-index">
<dt><a href="#class-thread_pool-synopsis">Class <code>thread_pool</code> synopsis</a></dt>
<dt><a href="#class-thread_pool-ctors">Class <code>thread_pool</code> constructors and destructor</a></dt>
<dt><a href="#class-thread_pool-modifiers">Class <code>thread_pool</code> modifier functions</a></dt>
</dl>
</dl>
<dt><a href="#examples">Example(s)</a></dt>
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>Include the header &lt;<a href="../../../boost/thread/thread_pool.hpp">boost/thread/thread_pool.hpp</a>&gt;
to define the <a href="#class-thread_pool">thread_pool</a> class.</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-thread_pool"></a>Class <code>thread_pool</code></h3>
<p>The <tt>thread_pool</tt> class provides&nbsp;an interface for&nbsp;running
jobs on a dynamically managed set&nbsp;of worker threads called a pool.&nbsp;
When a job is added, it can execute on any&nbsp;available thread in the pool.&nbsp;
This class controls&nbsp;both the maximum and minimum number of threads&nbsp;in
the pool.&nbsp; If a thread in the pool sits idle for a period of time, it
will exit unless doing so would drop the number of threads below
the minimum. Thread pools provide an optimization over creating a new thread
for each job since the pool can often remove the overhead of thread creation.</p>
<h4><a name="class-thread_pool-synopsis"></a>Class <code>thread_pool</code> synopsis</h4>
<pre>
namespace boost
{
class thread_pool : <a href="../../utility/utility.htm#Class noncopyable">boost::noncopyable</a> // Exposition only.
    // Class thread_pool meets the <a href="overview.html#non-copyable">NonCopyable</a> requirement.
{
public:
thread_pool(int max_threads=std::numeric_limits&lt;int&gt;::max(),
int min_threads=0,
int timeout_secs=5);
~thread_pool();
void add(const boost::function0&lt;void&gt; &amp;job);
void join();
void cancel();
void detach();
};
};
</pre>
<h4><a name="class-spec-ctors"></a>Class <code>thread_pool</code> constructors and destructor</h4>
<pre>
thread_pool(int max_threads=std::numeric_limits&lt;int&gt;::max(),
int min_threads=0,
int timeout_secs=5);
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Constructs a thread pool object and starts min_threads threads
running in the pool.</dt>
</dl>
<pre>
~thread_pool();
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Calls join() if neither join() nor detach() were called
previously for this thread_pool.&nbsp; If detach() was not called, destroys all
resources associated with the threads in the pool and with the queue of jobs
still waiting to be executed.</dt>
</dl>
<h4><a name="class-spec-modifiers"></a>Class <code>thread_pool</code> modifier
functions</h4>
<pre>
void add(const boost::function0&lt;void&gt;&amp; job);
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Adds <tt>job</tt> to the <tt>thread_pool</tt> object's list of
jobs waiting to be executed.&nbsp; If any threads in the pool are idle, the job
will be executed as soon as an idle thread is scheduled by the operating
system.&nbsp; If no threads are idle and the number of threads in the pool is
less than the maximum number provided to the constructor, an additional thread
is created and added to the pool.&nbsp; That new thread will execute this job
as soon as it is scheduled by the operating system.&nbsp; If no threads are
idle and&nbsp;the thread count is at the maximum, this job will be queued until
a thread becomes available.&nbsp; Currently, queued jobs are processed in FIFO
order.</dt>
<dt><b>Throws:</b> std::runtime_error if join() or detach() have
previously been called for this thread_pool object.</dt>
</dl>
<pre>
void detach();
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Relinquishes control of the pool of threads by this thread_pool
object.&nbsp; Any threads in the pool will continue to run and continue to
process any queued jobs, but no new threads will be created, and any subsequent
attempts to add new jobs will result in an exception.</dt>
<dt><b>Throws:</b> std::runtime_error if join()&nbsp;has previously
been called for this thread_pool object.</dt>
</dl>
<pre>
void cancel();
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Removes all queued jobs from the thread_pool's internal queue,
and calls cancel() on all boost::thread objects in the pool.&nbsp; The specific
behavior of those threads will be dictated by their cancellation behavior - the
pool threads may be executing a user's job that defers cancellation, for
example.</dt>
<dt><b>Throws:</b> std::runtime_error if join() or detach() have
previously been called for this thread_pool object.</dt>
<dt><b>Note:</b> for the current version (1.27.0) of Boost.Threads, thread::cancel() is
not provided.&nbsp; This function -will- clear out all queued jobs, but any
currently executing jobs will not be cancelled.</dt>
</dl>
<pre>
void join();
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> Waits until all queued jobs are completed by the thread pool,
and then join()s with all of the threads in the pool.&nbsp; When join()
returns, no running threads will remain in the pool, and this object is invalid
for anything except destruction.&nbsp; Any calls to cancel(), join(), detach(),
or add() will result in an exception.</dt>
</dl>
<h2><a name="examples"></a>Example(s)</h2>
<pre>
#include &lt;boost/thread/thread_pool.hpp&gt;
#include &lt;boost/thread/mutex.hpp&gt;
#include &lt;iostream&gt;
boost::mutex io_mutex;
class job_adapter {
public:
job_adapter(void (*func)(int), int param) :
_func(func), _param(param){ }
void operator()() const { _func(_param); }
private:
void (*_func)(int);
int _param;
};
void simple_job(int param)
{
boost::mutex::scoped_lock l(io_mutex);
std::cout &lt;&lt; param &lt;&lt; " squared is " &lt;&lt; (param*param) &lt;&lt; "\n";
}
int main(int argc, char* argv[])
{
boost::thread_pool tp;
for (int i = 1; i &lt;= 10; ++i)
tp.add(job_adapter(&simple_job, i));
tp.join();
return 0;
}
</pre>
<p>Typical output would be:</p>
<pre>
1 squared is 1
2 squared is 4
3 squared is 9
4 squared is 16
5 squared is 25
7 squared is 49
6 squared is 36
8 squared is 64
10 squared is 100
9 squared is 81
</pre>
<P>While the jobs are dispatched in the order they are received, the scheduling of
the individual threads in the pool is platform-dependent.</P>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%d %B, %Y" startspan -->
05 November, 2001
<!--webbot bot="Timestamp" endspan i-checksum="39359" -->
</p>
<p><i>&copy; Copyright <a href="mailto:wekempf@cox.net">William E. Kempf</a>, David Moore 2001-2002.
All Rights Reserved.</i></p>
<p>Permission to use, copy, modify, distribute and sell this software and its
documentation for any purpose is hereby granted without fee, provided that the
above copyright notice appear in all copies and that both that copyright notice
and this permission notice appear in supporting documentation. William E. Kempf
makes no representations about the suitability of this software for any purpose.
It is provided &quot;as is&quot; without express or implied warranty.</p>
</body>
</html>


@@ -39,8 +39,8 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>The header &lt;<a href="../../../boost/thread/tss.hpp">boost/thread/tss.hpp</a>&gt;
defines the class <a href="#class-thread_specific_ptr">thread_specific_ptr</a>
<p>The header &lt;<a href="../../../boost/thread/tss.hpp">boost/thread/tss.hpp</a>&gt;
defines the class <a href="#class-thread_specific_ptr">thread_specific_ptr</a>
which is used to manage data associated with specific thread instances.</p>
<h2><a name="classes"></a>Classes</h2>
<h3><a name="class-thread_specific_ptr"></a>Class <code>thread_specific_ptr</code></h3>
@@ -48,10 +48,13 @@
specific storage. Thread specific storage is data associated with individual
threads and is often used to make operations <a href="definitions.html#Thread-safe">thread-safe</a>
that rely on global data.</p>
<p>Template <code>thread_specific_ptr</code> stores a pointer to an object obtained
via <code>new</code> on a thread-by-thread basis and calls delete on the contained
pointer when the thread terminates. Each thread initially stores the null pointer
in each <code> thread_specific_ptr</code> instance.</p>
<p>Template <code>thread_specific_ptr</code> stores a pointer to an object obtained
via <code>new</code> on a thread-by-thread basis and calls a specified cleanup
handler on the contained pointer when the thread terminates. The cleanup handlers
are called in the reverse order of construction of the <code>thread_specific_ptr</code>s,
and for the initial thread are called by the destructor, providing the same
ordering guarantees as for normal declarations. Each thread initially stores
the null pointer in each <code> thread_specific_ptr</code> instance.</p>
<p>The template <code>thread_specific_ptr</code> is useful in the following cases:</p>
<ul>
<li>An interface was originally written assuming a single thread of control
@@ -88,23 +91,72 @@ thread_specific_ptr();
</pre>
<dl class="function-semantics">
<dt><b>Requires:</b> The expression <code>delete get()</code> is well formed.</dt>
<dt><b>Postconditions:</b> A thread specific storage has been reserved for use
by <code>*this</code> in all threads, with each thread initially storing a
null pointer.</dt>
<dt><b>Throws:</b> <code>boost::thread_resource_error</code> if the necessary
<dt><b>Effects:</b> A thread-specific data key is allocated and visible to all
threads in the process. Upon creation, the value <code>NULL</code> will be
associated with the new key in all active threads. Upon thread creation, the
value <code>NULL</code> will be associated with all defined keys in the new
thread. A cleanup method is registered with the key that will call <code>delete</code>
on the value associated with the key for a thread when it exits. When a thread
exits, if a key has a registered cleanup method and the thread has a non-<code>NULL</code>
value associated with that key, the value of the key is set to <code>NULL</code>
and then the cleanup method is called with the previously associated value
as its sole argument. The order in which registered cleanup methods are called
when a thread exits is undefined. If after all the cleanup methods have been
called for all non-<code>NULL</code> values, there are still some non-<code>NULL</code>
values with associated cleanup handlers the result is undefined behavior.</dt>
<dt><b>Throws:</b> <code>boost::thread_resource_error</code> if the necessary
resources can not be obtained.</dt>
<dt><b>Note:</b> There is an implementation specific limit to the number of
thread specific storage objects that can be created, and this limit may be
small.</dt>
<dt><b>Note:</b> There may be an implementation specific limit to the number
of thread specific storage objects that can be created, and this limit may
be small.</dt>
<dt><b>Rationale:</b> The most common need for cleanup will be to call <code>delete</code>
on the associated value. If other forms of cleanup are required the overloaded
constructor should be called instead.</dt>
</dl>
<pre>
thread_specific_ptr(void (*cleanup)(void*));
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> A thread-specific data key is allocated and visible to all
threads in the process. Upon creation, the value <code>NULL</code> will be
associated with the new key in all active threads. Upon thread creation, the
value <code>NULL</code> will be associated with all defined keys in the new
thread. The <code>cleanup</code> method is registered with the key and will
be called for a thread with the value associated with the key for that thread
when it exits. When a thread exits, if a key has a registered cleanup method
and the thread has a non-<code>NULL</code> value associated with that key,
the value of the key is set to <code>NULL</code> and then the cleanup method
is called with the previously associated value as its sole argument. The order
in which registered cleanup methods are called when a thread exits is undefined.
If after all the cleanup methods have been called for all non-<code>NULL</code>
values, there are still some non-<code>NULL</code> values with associated
cleanup handlers the result is undefined behavior.</dt>
<dt><b>Throws:</b> <code>boost::thread_resource_error</code> if the necessary
resources can not be obtained.</dt>
<dt><b>Note:</b> There may be an implementation specific limit to the number
of thread specific storage objects that can be created, and this limit may
be small.</dt>
<dt><b>Rationale:</b> There is the occasional need to register specialized cleanup
methods, or to register no cleanup method at all (done by passing <code>NULL</code>
to this constructor).</dt>
</dl>
<pre>
~thread_specific_ptr();
</pre>
<dl class="function-semantics">
<dt><b>Note:</b> Does not destroy any data that may be stored in any thread&#39;s
thread specific storage. For this reason you should not destroy a <code>thread_specific_ptr</code>
object until you are certain there are no threads running that have made use
<dl class="function-semantics">
<dt><b>Effects:</b> Deletes the thread-specific data key allocated by the constructor.
The thread-specific data values associated with the key need not be <code>NULL</code>.
It is the responsibility of the application to perform any cleanup actions
for data associated with the key.</dt>
<dt><b>Note:</b> Does not destroy any data that may be stored in any thread&#39;s
thread specific storage. For this reason you should not destroy a <code>thread_specific_ptr</code>
object until you are certain there are no threads running that have made use
of its thread specific storage.</dt>
<dt><b>Rationale:</b> Associated data is not cleaned up because registered cleanup
methods need to be run in the thread that allocated the associated data to
be guaranteed to work correctly. There's no safe way to inject the call into
another thread's execution path, making it impossible to call the cleanup
methods safely.</dt>
</dl>
<h4><a name="class-thread_specific_ptr-modifiers"></a>Class <code>thread_specific_ptr</code>
modifier functions</h4>
@@ -115,16 +167,17 @@ T* release();
<dt><b>Postconditions:</b> <code>*this</code> holds the null pointer for the
current thread.</dt>
<dt><b>Returns:</b> <code>this-&gt;get()</code> prior to the call.</dt>
<dt><b>Rationale:</b> This method provides a mechanism for the user to relinquish
control of the data associated with the thread-specific key.</dt>
</dl>
<pre>
void reset(T* p=0);
</pre>
<dl class="function-semantics">
<dt><b>Effects:</b> If <code>this-&gt;get()!= p</code> then <code>delete this-&gt;get()</code>.
</dt>
<dt><b>Postconditions:</b> <code>*this</code> holds the pointer <code> p</code>
<dl class="function-semantics">
<dt><b>Effects:</b> If <code>this-&gt;get()!= p &amp;&amp; p != NULL</code>
then call the associated cleanup function. </dt>
<dt><b>Postconditions:</b> <code>*this</code> holds the pointer <code> p</code>
for the current thread.</dt>
<dt><b>Note:</b> The pointer will be deleted when the thread terminates.</dt>
</dl>
<h4><a name="class-thread_specific_ptr-observers"></a>Class <code>thread_specific_ptr</code>
observer functions</h4>
@@ -139,15 +192,15 @@ T* get() const;
<pre>
T* operator-&gt;() const;
</pre>
<dl class="function-semantics">
<dt><b>Returns:</b> <code>this-&lt;get()</code>.</dt>
<dl class="function-semantics">
<dt><b>Returns:</b> <code>this-&gt;get()</code>.</dt>
</dl>
<pre>
T& operator*() const;
</pre>
<dl class="function-semantics">
<dt><b>Requires:</b> <code>this-&lt;get() != 0</code></dt>
<dt><b>Returns:</b> <code>this-&lt;get()</code>.</dt>
<dl class="function-semantics">
<dt><b>Requires:</b> <code>this-&gt;get() != 0</code></dt>
<dt><b>Returns:</b> <code>this-&gt;get()</code>.</dt>
</dl>
<h2><a name="examples"></a>Example(s)</h2>
<p><a href="../example/tss.cpp">libs/thread/example/tss.cpp</a></p>


@@ -40,9 +40,9 @@
</dl>
<hr>
<h2><a name="introduction"></a>Introduction</h2>
<p>The header &lt;<a href="../../../boost/thread/xtime.hpp">boost/thread/xtime.hpp</a>&gt;
defines functions and data types used to perform high-resolution time operations.
This is a temporary solution that will be replaced by a more robust time library
<p>The header &lt;<a href="../../../boost/thread/xtime.hpp">boost/thread/xtime.hpp</a>&gt;
defines functions and data types used to perform high-resolution time operations.
This is a temporary solution that will be replaced by a more robust time library
once available in Boost.</p>
<h2><a name="values"></a>Values</h2>
<pre><a name="value-spec"></a>

1
example/monitor/Carbon.r Normal file

@@ -0,0 +1 @@
/*

106
example/monitor/monitor.cpp Normal file

@@ -0,0 +1,106 @@
#include <vector>
#include <iostream>
#include <boost/thread/condition.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/recursive_mutex.hpp>
#include <boost/thread/thread.hpp>
namespace {
const int ITERS = 100;
boost::mutex io_mutex;
}
template <typename M>
class buffer_t
{
public:
typedef typename M::scoped_lock scoped_lock;
buffer_t(int n)
: p(0), c(0), full(0), buf(n)
{
}
void send(int m)
{
scoped_lock lk(mutex);
while (full == buf.size())
cond.wait(lk);
buf[p] = m;
p = (p+1) % buf.size();
++full;
cond.notify_one();
}
int receive()
{
scoped_lock lk(mutex);
while (full == 0)
cond.wait(lk);
int i = buf[c];
c = (c+1) % buf.size();
--full;
cond.notify_one();
return i;
}
static buffer_t& get_buffer()
{
static buffer_t buf(2);
return buf;
}
static void do_sender_thread()
{
for (int n = 0; n < ITERS; ++n)
{
{
boost::mutex::scoped_lock lock(io_mutex);
std::cout << "sending: " << n << std::endl;
}
get_buffer().send(n);
}
}
static void do_receiver_thread()
{
for (int x=0; x < (ITERS/2); ++x)
{
int n = get_buffer().receive();
{
boost::mutex::scoped_lock lock(io_mutex);
std::cout << "received: " << n << std::endl;
}
}
}
private:
M mutex;
boost::condition cond;
unsigned int p, c, full;
std::vector<int> buf;
};
template <typename M>
void do_test(M* dummy=0)
{
typedef buffer_t<M> buffer_type;
buffer_type::get_buffer();
boost::thread thrd1(&buffer_type::do_receiver_thread);
boost::thread thrd2(&buffer_type::do_receiver_thread);
boost::thread thrd3(&buffer_type::do_sender_thread);
thrd1.join();
thrd2.join();
thrd3.join();
}
void test_buffer()
{
do_test<boost::mutex>();
do_test<boost::recursive_mutex>();
}
int main()
{
test_buffer();
return 0;
}

BIN
example/monitor/monitor.mcp Normal file

Binary file not shown.


@@ -0,0 +1 @@
/*


@@ -0,0 +1,171 @@
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <iostream>
#include <time.h>
namespace
{
boost::mutex iomx;
}
class canteen
{
public:
canteen() : m_chickens(0) { }
void get(int id)
{
boost::mutex::scoped_lock lock(m_mutex);
while (m_chickens == 0)
{
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Phil" << id <<
": wot, no chickens? I'll WAIT ..." << std::endl;
}
m_condition.wait(lock);
}
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Phil" << id <<
": those chickens look good ... one please ..." << std::endl;
}
m_chickens--;
}
void put(int value)
{
boost::mutex::scoped_lock lock(m_mutex);
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() <<
") Chef: ouch ... make room ... this dish is very hot ..." << std::endl;
}
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += 3;
boost::thread::sleep(xt);
m_chickens += value;
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() <<
") Chef: more chickens ... " << m_chickens <<
" now available ... NOTIFYING ..." << std::endl;
}
m_condition.notify_all();
}
private:
boost::mutex m_mutex;
boost::condition m_condition;
int m_chickens;
};
canteen g_canteen;
void chef()
{
const int chickens = 4;
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Chef: starting ..." << std::endl;
}
for (;;)
{
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Chef: cooking ..." << std::endl;
}
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += 2;
boost::thread::sleep(xt);
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Chef: " << chickens
<< " chickens, ready-to-go ..." << std::endl;
}
g_canteen.put(chickens);
}
}
struct phil
{
phil(int id) : m_id(id) { }
void run() {
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Phil" << m_id << ": starting ..." << std::endl;
}
for (;;)
{
if (m_id > 0)
{
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += 3;
boost::thread::sleep(xt);
}
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Phil" << m_id
<< ": gotta eat ..." << std::endl;
}
g_canteen.get(m_id);
{
boost::mutex::scoped_lock lock(iomx);
std::cout << "(" << clock() << ") Phil" << m_id
<< ": mmm ... that's good ..." << std::endl;
}
}
}
static void do_thread(void* param) {
static_cast<phil*>(param)->run();
}
int m_id;
};
struct thread_adapt
{
thread_adapt(void (*func)(void*), void* param) : _func(func), _param(param) { }
int operator()() const
{
_func(_param);
return 0;
}
void (*_func)(void*);
void* _param;
};
class thread_adapter
{
public:
thread_adapter(void (*func)(void*), void* param) : _func(func), _param(param) { }
void operator()() const { _func(_param); }
private:
void (*_func)(void*);
void* _param;
};
int main(int argc, char* argv[])
{
boost::thread thrd_chef(&chef);
phil p[] = { phil(0), phil(1), phil(2), phil(3), phil(4) };
boost::thread thrd_phil0(thread_adapter(&phil::do_thread, &p[0]));
boost::thread thrd_phil1(thread_adapter(&phil::do_thread, &p[1]));
boost::thread thrd_phil2(thread_adapter(&phil::do_thread, &p[2]));
boost::thread thrd_phil3(thread_adapter(&phil::do_thread, &p[3]));
boost::thread thrd_phil4(thread_adapter(&phil::do_thread, &p[4]));
thrd_chef.join();
thrd_phil0.join();
thrd_phil1.join();
thrd_phil2.join();
thrd_phil3.join();
thrd_phil4.join();
return 0;
}

Binary file not shown.

1
example/tennis/Carbon.r Normal file

@@ -0,0 +1 @@
/*

120
example/tennis/tennis.cpp Normal file

@@ -0,0 +1,120 @@
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <iostream>
#if defined(BOOST_HAS_WINTHREADS)
# include <windows.h>
# include <process.h>
#endif
enum game_state
{
START,
PLAYER_A,
PLAYER_B,
GAME_OVER,
ONE_PLAYER_GONE,
BOTH_PLAYERS_GONE
};
int state;
boost::mutex mutex;
boost::condition cond;
char* player_name(int state)
{
if (state == PLAYER_A)
return "PLAYER-A";
if (state == PLAYER_B)
return "PLAYER-B";
throw "bad player";
return 0;
}
void player(void* param)
{
boost::mutex::scoped_lock lock(mutex);
int active = (int)param;
int other = active == PLAYER_A ? PLAYER_B : PLAYER_A;
while (state < GAME_OVER)
{
std::cout << player_name(active) << ": Play." << std::endl;
state = other;
cond.notify_all();
do
{
cond.wait(lock);
if (state == other)
std::cout << "---" << player_name(active) << ": Spurious wakeup!" << std::endl;
} while (state == other);
}
++state;
std::cout << player_name(active) << ": Gone." << std::endl;
cond.notify_all();
}
struct thread_adapt
{
thread_adapt(void (*func)(void*), void* param) : _func(func), _param(param) { }
int operator()() const
{
_func(_param);
return 0;
}
void (*_func)(void*);
void* _param;
};
class thread_adapter
{
public:
thread_adapter(void (*func)(void*), void* param) : _func(func), _param(param) { }
void operator()() const { _func(_param); }
private:
void (*_func)(void*);
void* _param;
};
int main(int argc, char* argv[])
{
state = START;
boost::thread thrda(thread_adapter(&player, (void*)PLAYER_A));
boost::thread thrdb(thread_adapter(&player, (void*)PLAYER_B));
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += 1;
boost::thread::sleep(xt);
{
boost::mutex::scoped_lock lock(mutex);
std::cout << "---Noise ON..." << std::endl;
}
for (int i = 0; i < 1000000; ++i)
cond.notify_all();
{
boost::mutex::scoped_lock lock(mutex);
std::cout << "---Noise OFF..." << std::endl;
state = GAME_OVER;
cond.notify_all();
do
{
cond.wait(lock);
} while (state != BOTH_PLAYERS_GONE);
}
std::cout << "GAME OVER" << std::endl;
thrda.join();
thrdb.join();
return 0;
}

BIN
example/tennis/tennis.mcp Normal file

Binary file not shown.


@@ -0,0 +1,38 @@
// Copyright (C) 2002
// David Moore, William E. Kempf
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#ifndef BOOST_BARRIER_JDM030602_HPP
#define BOOST_BARRIER_JDM030602_HPP
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
namespace boost {
class barrier
{
public:
barrier(unsigned int count);
~barrier();
bool wait();
private:
mutex m_mutex;
condition m_cond;
unsigned int m_threshold;
unsigned int m_count;
unsigned int m_generation;
};
} // namespace boost
#endif
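
// A hedged usage sketch for the boost::barrier class declared above (the header
// path matches the #include used by src/barrier.cpp below; the worker logic is
// illustrative only).  Three worker threads rendezvous at the barrier, and
// exactly one wait() call per generation returns true, which is handy for
// electing a single thread to perform some serial work.
#include <boost/thread/barrier.hpp>
#include <boost/thread/thread.hpp>
#include <iostream>

boost::barrier rendezvous(3);

void worker()
{
    // ... per-thread setup work ...
    if (rendezvous.wait())
        std::cout << "all threads reached the barrier" << std::endl;
    // ... all threads proceed together from here ...
}

int main()
{
    boost::thread t1(&worker);
    boost::thread t2(&worker);
    boost::thread t3(&worker);
    t1.join(); t2.join(); t3.join();
    return 0;
}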


@@ -0,0 +1,334 @@
// Copyright (C) 2002
// David Moore
//
// Original scoped_lock implementation
// Copyright (C) 2001
// William E. Kempf
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. David Moore makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#ifndef BOOST_XRWLOCK_JDM031002_HPP
#define BOOST_XRWLOCK_JDM031002_HPP
#include <boost/utility.hpp>
#include <boost/thread/exceptions.hpp>
namespace boost {
class condition;
struct xtime;
typedef enum
{
NO_LOCK=0,
SHARED_LOCK=1,
EXCL_LOCK=2
} rw_lock_state;
namespace detail { namespace thread {
template <typename Mutex>
class rw_lock_ops : private noncopyable
{
private:
rw_lock_ops() { }
public:
static void wrlock(Mutex& m)
{
m.do_wrlock();
}
static void rdlock(Mutex& m)
{
m.do_rdlock();
}
static void wrunlock(Mutex& m)
{
m.do_wrunlock();
}
static void rdunlock(Mutex &m)
{
m.do_rdunlock();
}
static bool try_wrlock(Mutex &m)
{
return m.do_try_wrlock();
}
static bool try_rdlock(Mutex &m)
{
return m.do_try_rdlock();
}
static bool timed_wrlock(Mutex &m,const xtime &xt)
{
return m.do_timed_wrlock(xt);
}
static bool timed_rdlock(Mutex &m,const xtime &xt)
{
return m.do_timed_rdlock(xt);
}
};
template <typename RWMutex>
class scoped_rw_lock : private noncopyable
{
public:
typedef RWMutex mutex_type;
explicit scoped_rw_lock(RWMutex& mx, rw_lock_state initial_state=SHARED_LOCK)
: m_mutex(mx), m_locked(NO_LOCK)
{
if(initial_state == SHARED_LOCK)
rdlock();
else if(initial_state == EXCL_LOCK)
wrlock();
}
~scoped_rw_lock()
{
if(m_locked != NO_LOCK)
unlock();
}
void rdlock()
{
if (m_locked != NO_LOCK) throw lock_error();
rw_lock_ops<RWMutex>::rdlock(m_mutex);
m_locked = SHARED_LOCK;
}
void wrlock()
{
if(m_locked != NO_LOCK) throw lock_error();
rw_lock_ops<RWMutex>::wrlock(m_mutex);
m_locked = EXCL_LOCK;
}
void unlock()
{
if (m_locked == NO_LOCK) throw lock_error();
if(m_locked == SHARED_LOCK)
rw_lock_ops<RWMutex>::rdunlock(m_mutex);
else
rw_lock_ops<RWMutex>::wrunlock(m_mutex);
m_locked = NO_LOCK;
}
bool locked() const
{
return m_locked != NO_LOCK;
}
operator const void*() const
{
return (m_locked != NO_LOCK) ? this : 0;
}
rw_lock_state state() const
{
return m_locked;
}
private:
RWMutex& m_mutex;
rw_lock_state m_locked;
};
template <typename TryRWMutex>
class scoped_try_rw_lock : private noncopyable
{
public:
typedef TryRWMutex mutex_type;
explicit scoped_try_rw_lock(TryRWMutex& mx)
: m_mutex(mx), m_locked(NO_LOCK)
{
try_rdlock();
}
scoped_try_rw_lock(TryRWMutex& mx, rw_lock_state initial_state)
: m_mutex(mx), m_locked(NO_LOCK)
{
if(initial_state == SHARED_LOCK)
rdlock();
else if(initial_state == EXCL_LOCK)
wrlock();
}
~scoped_try_rw_lock()
{
if(m_locked != NO_LOCK)
unlock();
}
void rdlock()
{
if (m_locked != NO_LOCK) throw lock_error();
rw_lock_ops<TryRWMutex>::rdlock(m_mutex);
m_locked = SHARED_LOCK;
}
bool try_rdlock()
{
if (m_locked != NO_LOCK) throw lock_error();
if(rw_lock_ops<TryRWMutex>::try_rdlock(m_mutex))
{
m_locked = SHARED_LOCK;
return true;
}
return false;
}
void wrlock()
{
if(m_locked != NO_LOCK) throw lock_error();
rw_lock_ops<TryRWMutex>::wrlock(m_mutex);
m_locked = EXCL_LOCK;
}
bool try_wrlock()
{
if (m_locked != NO_LOCK) throw lock_error();
if(rw_lock_ops<TryRWMutex>::try_wrlock(m_mutex))
{
m_locked = EXCL_LOCK;
return true;
}
return false;
}
void unlock()
{
if (m_locked == NO_LOCK) throw lock_error();
if(m_locked == SHARED_LOCK)
rw_lock_ops<TryRWMutex>::rdunlock(m_mutex);
else
rw_lock_ops<TryRWMutex>::wrunlock(m_mutex);
m_locked = NO_LOCK;
}
bool locked() const
{
return m_locked != NO_LOCK;
}
operator const void*() const
{
return (m_locked != NO_LOCK) ? this : 0;
}
rw_lock_state state() const
{
return m_locked;
}
private:
TryRWMutex& m_mutex;
rw_lock_state m_locked;
};
template <typename TimedRWMutex>
class scoped_timed_rw_lock : private noncopyable
{
public:
typedef TimedRWMutex mutex_type;
explicit scoped_timed_rw_lock(TimedRWMutex& mx, const xtime &xt)
: m_mutex(mx), m_locked(NO_LOCK)
{
timed_rdlock(xt);
}
scoped_timed_rw_lock(TimedRWMutex& mx, rw_lock_state initial_state)
: m_mutex(mx), m_locked(NO_LOCK)
{
if(initial_state == SHARED_LOCK)
rdlock();
else if(initial_state == EXCL_LOCK)
wrlock();
}
~scoped_timed_rw_lock()
{
if(m_locked != NO_LOCK)
unlock();
}
void rdlock()
{
if (m_locked != NO_LOCK) throw lock_error();
rw_lock_ops<TimedRWMutex>::rdlock(m_mutex);
m_locked = SHARED_LOCK;
}
bool timed_rdlock(const xtime &xt)
{
if (m_locked != NO_LOCK) throw lock_error();
if(rw_lock_ops<TimedRWMutex>::timed_rdlock(m_mutex,xt))
{
m_locked = SHARED_LOCK;
return true;
}
return false;
}
void wrlock()
{
if(m_locked != NO_LOCK) throw lock_error();
rw_lock_ops<TimedRWMutex>::wrlock(m_mutex);
m_locked = EXCL_LOCK;
}
bool timed_wrlock(const xtime &xt)
{
if (m_locked != NO_LOCK) throw lock_error();
if(rw_lock_ops<TimedRWMutex>::timed_wrlock(m_mutex,xt))
{
m_locked = EXCL_LOCK;
return true;
}
return false;
}
void unlock()
{
if (m_locked == NO_LOCK) throw lock_error();
if(m_locked == SHARED_LOCK)
rw_lock_ops<TimedRWMutex>::rdunlock(m_mutex);
else
rw_lock_ops<TimedRWMutex>::wrunlock(m_mutex);
m_locked = NO_LOCK;
}
bool locked() const
{
return m_locked != NO_LOCK;
}
operator const void*() const
{
return (m_locked != NO_LOCK) ? this : 0;
}
rw_lock_state state() const
{
return m_locked;
}
private:
TimedRWMutex& m_mutex;
rw_lock_state m_locked;
};
} // namespace thread
} // namespace detail
} // namespace boost
// Change Log:
// 03/10/02 Initial version
#endif


@@ -0,0 +1,214 @@
// Copyright (C) 2002
// David Moore
//
// Original mutex design and implementation
// Copyright (C) 2001
// William E. Kempf
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. David Moore makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
//
//
// A Boost::threads implementation of a synchronization
// primitive which can allow multiple readers or a single
// writer to have access to a shared resource.
#ifndef BOOST_RW_MUTEX_JDM030602_HPP
#define BOOST_RW_MUTEX_JDM030602_HPP
#include <boost/config.hpp>
#ifndef BOOST_HAS_THREADS
# error Thread support is unavailable!
#endif
#include <boost/utility.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/detail/lock.hpp>
#include <boost/thread/detail/rw_lock.hpp>
#include <boost/thread/condition.hpp>
namespace boost {
typedef enum
{
sp_writer_priority,
sp_reader_priority,
sp_alternating_many_reads,
sp_alternating_single_reads
} rw_scheduling_policy;
namespace detail { namespace thread {
// Shared implementation construct for explicit Scheduling Policies
//
// This implementation is susceptible to self-deadlock, though....
template<typename Mutex>
struct rw_mutex_impl
{
typedef detail::thread::scoped_lock<Mutex> scoped_lock;
typedef detail::thread::scoped_try_lock<Mutex> scoped_try_lock;
typedef detail::thread::scoped_timed_lock<Mutex> scoped_timed_lock;
rw_mutex_impl(rw_scheduling_policy sp) :
m_num_waiting_writers(0),
m_num_waiting_readers(0),
m_num_waiting_promotion(0),
m_state(0),
m_sp(sp),
m_readers_next(1)
{}
Mutex m_prot;
condition m_waiting_writers;
condition m_waiting_readers;
int m_num_waiting_writers;
int m_num_waiting_readers;
condition m_waiting_promotion;
int m_num_waiting_promotion;
int m_state; // -1 = excl locked
// 0 = unlocked
// 1-> INT_MAX - shared locked
rw_scheduling_policy m_sp;
// For alternating priority policies, who goes next?
int m_readers_next;
void do_rdlock();
void do_wrlock();
void do_wrunlock();
void do_rdunlock();
bool do_try_wrlock();
bool do_try_rdlock();
bool do_timed_wrlock(const xtime &xt);
bool do_timed_rdlock(const xtime &xt);
bool do_try_promote_rdlock();
void do_wakeups();
};
}// namespace thread
}// namespace detail
class rw_mutex : private noncopyable
{
public:
rw_mutex(rw_scheduling_policy sp) :
m_impl(sp)
{
}
~rw_mutex(){}
rw_scheduling_policy policy() const {return m_impl.m_sp;}
friend class detail::thread::rw_lock_ops<rw_mutex>;
typedef detail::thread::scoped_rw_lock<rw_mutex> scoped_rw_lock;
typedef detail::thread::scoped_try_rw_lock<rw_mutex> scoped_try_rw_lock;
private:
// Operations that will eventually be done only
// via lock types
void do_wrlock();
void do_rdlock();
void do_wrunlock();
void do_rdunlock();
detail::thread::rw_mutex_impl<mutex> m_impl;
};
class try_rw_mutex : private noncopyable
{
public:
try_rw_mutex(rw_scheduling_policy sp) :
m_impl(sp)
{
}
~try_rw_mutex(){}
rw_scheduling_policy policy() const {return m_impl.m_sp;}
friend class detail::thread::rw_lock_ops<try_rw_mutex>;
typedef detail::thread::scoped_rw_lock<try_rw_mutex> scoped_rw_lock;
typedef detail::thread::scoped_try_rw_lock<try_rw_mutex> scoped_try_rw_lock;
private:
// Operations that will eventually be done only
// via lock types
void do_wrlock();
void do_rdlock();
void do_wrunlock();
void do_rdunlock();
bool do_try_wrlock();
bool do_try_rdlock();
detail::thread::rw_mutex_impl<try_mutex> m_impl;
};
class timed_rw_mutex : private noncopyable
{
public:
timed_rw_mutex(rw_scheduling_policy sp) :
m_impl(sp)
{
}
~timed_rw_mutex(){}
rw_scheduling_policy policy() const {return m_impl.m_sp;}
friend class detail::thread::rw_lock_ops<timed_rw_mutex>;
typedef detail::thread::scoped_rw_lock<timed_rw_mutex> scoped_rw_lock;
typedef detail::thread::scoped_try_rw_lock<timed_rw_mutex> scoped_try_rw_lock;
typedef detail::thread::scoped_timed_rw_lock<timed_rw_mutex> scoped_timed_rw_lock;
private:
// Operations that will eventually be done only
// via lock types
void do_wrlock();
void do_rdlock();
void do_wrunlock();
void do_rdunlock();
bool do_try_wrlock();
bool do_try_rdlock();
bool do_timed_wrlock(const xtime &xt);
bool do_timed_rdlock(const xtime &xt);
detail::thread::rw_mutex_impl<timed_mutex> m_impl;
};
} // namespace boost
#endif


@@ -0,0 +1,56 @@
// Copyright (C) 2002
// William E. Kempf, David Moore
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#ifndef BOOST_MUTEX_JDM062402_HPP
#define BOOST_MUTEX_JDM062402_HPP
#include <boost/config.hpp>
#ifndef BOOST_HAS_THREADS
# error Thread support is unavailable!
#endif
#include <boost/utility.hpp>
#include <boost/function.hpp>
#include <boost/thread/exceptions.hpp>
#include <string>
namespace boost {
class shared_memory
{
public:
// Obtain a shared memory block len bytes long, zero initialized
shared_memory(const char *name,size_t len);
// Obtain a shared memory block and initialize it with initfunc
shared_memory(const char *name,size_t len,const boost::function2<void,void *,size_t> &initfunc);
~shared_memory();
void *get(){return m_ptr;}
private:
void create(const char *name,
size_t len);
void *m_ptr; // Pointer to shared memory block
int m_mem_obj; // Platform specific handle to shared memory block
void *m_h_event; // Platform specific handle to event saying block initialized.
size_t m_len;
boost::function2<void,void *,size_t> m_initfunc;
};
} // namespace boost
#endif
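
// A hedged usage sketch for the boost::shared_memory class declared above.  The
// header path, block name and initialization logic are illustrative assumptions;
// per the constructor comments above, initfunc runs only when the block is
// first created.
#include <boost/thread/shared_memory.hpp>   // assumed header path
#include <cstddef>
#include <cstring>

void init_block(void* p, std::size_t len)
{
    std::memset(p, 0, len);   // runs once, when the block is first created
}

int main()
{
    boost::shared_memory shm("boost_demo_block", 4096, &init_block);
    char* data = static_cast<char*>(shm.get());
    data[0] = 42;   // visible to any other process that maps "boost_demo_block"
    return 0;
}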


@@ -34,9 +34,17 @@ namespace boost {
struct xtime;
class thread_cancel
{
public:
thread_cancel() { }
};
class thread : private noncopyable
{
public:
enum category_type { boost, native, adopted };
thread();
explicit thread(const function0<void>& threadfunc);
~thread();
@@ -44,23 +52,16 @@ public:
bool operator==(const thread& other) const;
bool operator!=(const thread& other) const;
category_type category() const;
void join();
void cancel();
static void test_cancel();
static void sleep(const xtime& xt);
static void yield();
private:
#if defined(BOOST_HAS_WINTHREADS)
void* m_thread;
unsigned int m_id;
#elif defined(BOOST_HAS_PTHREADS)
private:
pthread_t m_thread;
#elif defined(BOOST_HAS_MPTASKS)
MPQueueID m_pJoinQueueID;
MPTaskID m_pTaskID;
#endif
bool m_joinable;
void* m_handle;
};
class thread_group : private noncopyable
@@ -72,6 +73,7 @@ public:
thread* create_thread(const function0<void>& threadfunc);
void add_thread(thread* thrd);
void remove_thread(thread* thrd);
thread* find(thread& thrd);
void join_all();
private:


@@ -0,0 +1,52 @@
// Copyright (C) 2002 David Moore
//
// Based on Boost.Threads
// Copyright (C) 2001
// William E. Kempf
//
// Derived loosely from work queue manager in "Programming POSIX Threads"
// by David Butenhof.
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#ifndef BOOST_THREAD_POOL_JDM031802_HPP
#define BOOST_THREAD_POOL_JDM031802_HPP
#include <boost/config.hpp>
#ifndef BOOST_HAS_THREADS
# error Thread support is unavailable!
#endif
#include <boost/function.hpp>
#include <boost/limits.hpp>
namespace boost {
class thread_pool
{
public:
thread_pool(int max_threads=std::numeric_limits<int>::max(),
int min_threads=0,
int timeout_secs=5,
int timeout_nsecs=0);
~thread_pool();
void add(const boost::function0<void> &job);
void join();
void cancel();
void detach();
private:
class impl;
impl* m_pimpl;
};
} // namespace boost
#endif


@@ -18,6 +18,7 @@
#endif
#include <boost/utility.hpp>
#include <boost/function.hpp>
#if defined(BOOST_HAS_PTHREADS)
# include <pthread.h>
@@ -26,56 +27,71 @@
#endif
namespace boost {
namespace detail {
class tss_ref
{
public:
tss_ref();
};
class tss : private noncopyable
{
public:
tss(void (*cleanup)(void*)=0);
tss(boost::function1<void, void*> cleanup);
~tss();
void* get() const;
bool set(void* value);
void set(void* value);
void cleanup(void* p);
private:
#if defined(BOOST_HAS_WINTHREADS)
unsigned long m_key;
void (*m_cleanup)(void*);
#elif defined(BOOST_HAS_PTHREADS)
pthread_key_t m_key;
#elif defined(BOOST_HAS_MPTASKS)
TaskStorageIndex m_key;
void (*m_cleanup)(void*);
#endif
int m_slot;
int m_generation;
};
#if defined(BOOST_HAS_MPTASKS)
void thread_cleanup();
#endif
}
struct tss_adapter
{
tss_adapter(boost::function1<void, void*> cleanup) : m_cleanup(cleanup) { }
void operator()(void* p) { m_cleanup(p); }
boost::function1<void, void*> m_cleanup;
};
} // namespace detail
template <typename T>
class thread_specific_ptr : private noncopyable
{
public:
thread_specific_ptr() : m_tss(&thread_specific_ptr<T>::cleanup) { }
thread_specific_ptr() : m_tss(detail::tss_adapter(&thread_specific_ptr<T>::cleanup)) { }
thread_specific_ptr(void (*clean)(void*)) : m_tss(detail::tss_adapter(clean)) { }
~thread_specific_ptr() { reset(); }
T* get() const { return static_cast<T*>(m_tss.get()); }
T* operator->() const { return get(); }
T& operator*() const { return *get(); }
T* release() { T* temp = get(); m_tss.set(0); return temp; }
void reset(T* p=0) { T* cur = get(); if (cur == p) return; delete cur; m_tss.set(p); }
void reset(T* p=0) { T* cur = get(); if (cur == p) return; m_tss.set(p); if (cur) m_tss.cleanup(cur); }
private:
static void cleanup(void* p) { delete static_cast<T*>(p); }
mutable detail::tss m_tss;
detail::tss m_tss;
};
} // namespace boost
namespace {
// This injects a tss_ref into every translation unit that includes this header and
// helps to ensure we get a proper value for the "main" thread
boost::detail::tss_ref _tss_ref__7BAFF4714CFC42ae9C425F60CE3714D8;
}
// Change Log:
// 6 Jun 01 WEKEMPF Initial version.
// 30 May 02 WEKEMPF Added interface to set specific cleanup handlers. Removed TLS slot limits
// from most implementations.
#endif // BOOST_TSS_WEK070601_HPP
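
For reference, a short sketch of how the thread_specific_ptr template above is used with the new custom-cleanup constructor (illustrative only; the counter type and cleanup function are invented for the example):

#include <boost/thread/thread.hpp>
#include <boost/thread/tss.hpp>

struct counter { int value; counter() : value(0) { } };

void destroy_counter(void* p)                   // custom cleanup handler
{
    delete static_cast<counter*>(p);
}

boost::thread_specific_ptr<counter> per_thread(&destroy_counter);

void work()
{
    if (per_thread.get() == 0)
        per_thread.reset(new counter);          // first use on this thread
    per_thread->value++;                        // each thread mutates its own copy
}

int main()
{
    boost::thread t1(&work);
    boost::thread t2(&work);
    t1.join();
    t2.join();
    return 0;
}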

45
src/barrier.cpp Normal file

@@ -0,0 +1,45 @@
// Copyright (C) 2002
// David Moore, William E. Kempf
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#include <boost/thread/barrier.hpp>
#include <stdexcept>
namespace boost {
barrier::barrier(unsigned int count)
: m_threshold(count), m_count(count), m_generation(0)
{
if (count == 0)
throw std::invalid_argument("count cannot be zero.");
}
barrier::~barrier()
{
}
bool barrier::wait()
{
boost::mutex::scoped_lock lock(m_mutex);
unsigned int gen = m_generation;
if (--m_count == 0)
{
m_generation++;
m_count = m_threshold;
m_cond.notify_all();
return true;
}
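// The generation count distinguishes barrier cycles: a waiter leaves this loop
// only when its own cycle completes, which also guards against spurious wakeups
// of the condition variable.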
while (gen == m_generation)
m_cond.wait(lock);
return false;
}
} // namespace boost

599
src/rw_mutex.cpp Normal file

@@ -0,0 +1,599 @@
// rw_mutex.cpp
//
// Implementation for Reader/Writer lock
//
// Notes:
//
// This implementation is based on ACE's rw_lock_t implementation, with the added
// functionality of supporting different Scheduling Policies.
//
// The underlying implementation is shared by rw_mutex, try_rw_mutex, and
// timed_rw_mutex.
//
// The basic implementation strategy involves a mutex, m_prot, which is locked during
// ANY rw_mutex operation, locking or unlocking. m_prot protects the invariants
// of the implementation.
//
// The variable m_state takes the following values:
// -1 - Exclusive locked
// 0 - Unlocked
// 1 -> INT_MAX, shared locked, m_state == # of shared locks.
//
// Should a thread need to block for a shared or exclusive lock, two
// member condition variables, m_waiting_readers and m_waiting_writers
// are available for waiting. m_prot is used as the controlling mutex
// when waiting in these cases.
//
// The number of waiting readers and writers are tracked via member variables
// m_num_waiting_readers and m_num_waiting_writers.
//
//
// This particular implementation cannot prevent self-deadlock w/o adding some means
// of identifying the thread(s) holding locks.
// A recursive_try_mutex used "under the hood" could be used to detect and prevent
// exclusive->exclusive self-deadlock since only the same thread would be able to
// obtain a second lock on this recursive mutex....
/*
// for example, if rw_mutex_impl has an additional member:
//
// struct rw_mutex_impl {
// // ...
// recursive_try_mutex m_self_detect;
// }
//
template<typename Mutex>
void
rw_mutex_impl<Mutex>::
do_wrlock()
{
// Lock our exclusive access. This protects internal state
Mutex::scoped_lock l(m_prot);
if(m_state == -1)
{
recursive_try_mutex sl(m_self_detect);
if(sl.locked())
{
// It is us that already held the lock!
// Do something to hold the m_self_detect lock
// and bail out
}
else
{
// It is someone else. fall back to normal waiting.
}
}
// Wait until no exclusive lock is held.
//
// Note: Scheduling priorities are enforced in the unlock()
// call. unlock will wake the proper thread.
while(m_state != 0)
{
m_num_waiting_writers++;
m_waiting_writers.wait(l);
m_num_waiting_writers--;
}
m_state = -1;
}
// Unfortunately, the above doesn't work to detect shared->exclusive deadlock where
// a shared lock holder tries for an exclusive lock.
*/
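//
// A minimal usage sketch (illustrative only; the scoped lock type and the
// SHARED_LOCK/EXCL_LOCK constants follow the usage in test/test_rw_mutex.cpp):
//
//   boost::rw_mutex rw(boost::sp_writer_priority);
//
//   void reader()
//   {
//       boost::rw_mutex::scoped_rw_lock l(rw, boost::SHARED_LOCK); // shared: m_state > 0
//       // ... read the protected data ...
//   }
//
//   void writer()
//   {
//       boost::rw_mutex::scoped_rw_lock l(rw, boost::EXCL_LOCK);   // exclusive: m_state == -1
//       // ... modify the protected data ...
//   }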
#include <boost/thread/rw_mutex.hpp>
#include <cassert>
namespace boost {
namespace detail { namespace thread {
template<typename Mutex>
void
rw_mutex_impl<Mutex>::
do_rdlock()
{
// Lock our exclusive access. This protects internal state
typename Mutex::scoped_lock l(m_prot);
// Wait until no exclusive lock is held.
//
// Note: Scheduling priorities are enforced in the unlock()
// call. unlock will wake the proper thread.
while(m_state < 0)
{
m_num_waiting_readers++;
m_waiting_readers.wait(l);
m_num_waiting_readers--;
}
// Increase the reader count
m_state++;
}
template<typename Mutex>
void
rw_mutex_impl<Mutex>::
do_wrlock()
{
// Lock our exclusive access. This protects internal state
typename Mutex::scoped_lock l(m_prot);
// Wait until no exclusive lock is held.
//
// Note: Scheduling priorities are enforced in the unlock()
// call. unlock will wake the proper thread.
while(m_state != 0)
{
m_num_waiting_writers++;
m_waiting_writers.wait(l);
m_num_waiting_writers--;
}
m_state = -1;
}
template<typename Mutex>
bool
rw_mutex_impl<Mutex>::
do_try_rdlock()
{
bool ret;
// Lock our exclusive access. This protects internal state
typename Mutex::scoped_lock l(m_prot);
if(!l.locked())
return false;
if(m_state == -1)
{
// We are already locked exclusively. A try_rdlock always returns
// immediately in this case
ret = false;
}
else if(m_num_waiting_writers > 0)
{
// There are also waiting writers. Use scheduling policy.
if(m_sp == sp_reader_priority)
{
m_state++;
ret = true;
}
else if(m_sp == sp_writer_priority)
{
// A writer is waiting - don't grant this try lock, and
// return immediately (don't increase waiting_readers count)
ret = false;
}
else
{
// For alternating scheduling priority,
// I don't think that try_ locks should step in front of others
// who have already indicated that they are waiting.
// It seems that this could "game" the system and defeat
// the alternating mechanism.
ret = false;
}
}
else
{
// No waiting writers. Grant (additional) read lock regardless of
// scheduling policy.
m_state++;
ret = true;
}
return ret;
}
/*
*
* try_promote_rdlock - not yet in production....
*
*
*
template<typename Mutex>
bool
rw_mutex_impl<Mutex>::
do_try_promote_rdlock()
{
RWMutexImpl::scoped_lock l(m_prot);
if(m_state == -1)
{
// Attempting to promote while the mutex is held exclusively is a serious error.
throw lock_error();
}
else if(m_num_waiting_promotion > 0)
{
// Someone else is already trying to upgrade. Avoid deadlock by
// returning false.
return false;
}
else
{
while(m_state > 1) // While there are other readers
{
m_num_waiting_writers++;
m_num_waiting_promotion = 1;
m_waiting_promotion.wait(l);
m_num_waiting_promotion = 0;
m_num_waiting_writers--;
}
// We got the exclusive lock!
m_state = -1;
return true;
}
}
*/
template<typename Mutex>
bool
rw_mutex_impl<Mutex>::
do_try_wrlock()
{
bool ret;
typename Mutex::scoped_lock l(m_prot);
if(!l.locked())
return false;
if(m_state != 0)
{
// We are already busy and locked.
// Scheduling priority doesn't matter here.
ret = false;
}
else
{
m_state = -1;
ret = true;
}
return ret;
}
template<typename Mutex>
bool
rw_mutex_impl<Mutex>::
do_timed_rdlock(const boost::xtime &xt)
{
// Lock our exclusive access. This protects internal state
typename Mutex::scoped_timed_lock l(m_prot,xt);
if(!l.locked())
return false;
// Wait until no exclusive lock is held.
//
// Note: Scheduling priorities are enforced in the unlock()
// call. unlock will wake the proper thread.
while(m_state < 0)
{
m_num_waiting_readers++;
if(!m_waiting_readers.timed_wait(l,xt))
{
m_num_waiting_readers--;
return false;
}
m_num_waiting_readers--;
}
// Increase the reader count
m_state++;
return true;
}
template<typename Mutex>
bool
rw_mutex_impl<Mutex>::
do_timed_wrlock(const boost::xtime &xt)
{
typename Mutex::scoped_timed_lock l(m_prot,xt);
if(!l.locked())
return false;
// Wait until no exclusive lock is held.
//
// Note: Scheduling priorities are enforced in the unlock()
// call. unlock will wake the proper thread.
while(m_state != 0)
{
m_num_waiting_writers++;
if(!m_waiting_writers.timed_wait(l,xt))
{
m_num_waiting_writers--;
return false;
}
m_num_waiting_writers--;
}
m_state = -1;
return true;
}
template<typename Mutex>
void
rw_mutex_impl<Mutex>::
do_rdunlock()
{
// Protect internal state.
typename Mutex::scoped_lock l(m_prot);
if(m_state > 0) // Release a reader.
m_state--;
else
throw lock_error(); // Trying to release a writer???
// If we have someone waiting to be promoted....
if(m_num_waiting_promotion == 1 && m_state == 1)
{
m_waiting_promotion.notify_one();
}
else if(m_state == 0)
{
do_wakeups();
}
}
template<typename Mutex>
void
rw_mutex_impl<Mutex>::
do_wakeups()
{
if( m_num_waiting_writers > 0 &&
m_num_waiting_readers > 0)
{
// We have both types waiting, and -either- could proceed.
// Choose which to release based on scheduling policy.
if(m_sp == sp_reader_priority)
{
m_waiting_readers.notify_all();
}
else if(m_sp == sp_writer_priority)
{
m_waiting_writers.notify_one();
}
else // one of the alternating mechanisms
{
if(m_readers_next == 1)
{
m_readers_next = 0;
if(m_sp == sp_alternating_many_reads)
{
m_waiting_readers.notify_all();
}
else
{
// sp_alternating_single_reads
m_waiting_readers.notify_one();
}
}
else
{
m_waiting_writers.notify_one();
m_readers_next = 1;
}
}
}
else if(m_num_waiting_writers > 0)
{
// Only writers - scheduling doesn't matter
m_waiting_writers.notify_one();
}
else if(m_num_waiting_readers > 0)
{
// Only readers - scheduling doesn't matter
m_waiting_readers.notify_all();
}
}
template<typename Mutex>
void
rw_mutex_impl<Mutex>::
do_wrunlock()
{
// Protect internal state.
typename Mutex::scoped_lock l(m_prot);
if(m_state == -1)
m_state = 0;
else
throw lock_error();
// After a writer is unlocked, we are always back in the unlocked state.
//
do_wakeups();
}
} // namespace thread
} // namespace detail
void
rw_mutex::
do_rdlock()
{
m_impl.do_rdlock();
}
void
rw_mutex::
do_wrlock()
{
m_impl.do_wrlock();
}
void
rw_mutex::
do_rdunlock()
{
m_impl.do_rdunlock();
}
void
rw_mutex::
do_wrunlock()
{
m_impl.do_wrunlock();
}
void
try_rw_mutex::
do_rdlock()
{
m_impl.do_rdlock();
}
void
try_rw_mutex::
do_wrlock()
{
m_impl.do_wrlock();
}
void
try_rw_mutex::
do_wrunlock()
{
m_impl.do_wrunlock();
}
void
try_rw_mutex::
do_rdunlock()
{
m_impl.do_rdunlock();
}
bool
try_rw_mutex::
do_try_rdlock()
{
return m_impl.do_try_rdlock();
}
bool
try_rw_mutex::
do_try_wrlock()
{
return m_impl.do_try_wrlock();
}
void
timed_rw_mutex::
do_rdlock()
{
m_impl.do_rdlock();
}
void
timed_rw_mutex::
do_wrlock()
{
m_impl.do_wrlock();
}
void
timed_rw_mutex::
do_rdunlock()
{
m_impl.do_rdunlock();
}
void
timed_rw_mutex::
do_wrunlock()
{
m_impl.do_wrunlock();
}
bool
timed_rw_mutex::
do_try_rdlock()
{
return m_impl.do_try_rdlock();
}
bool
timed_rw_mutex::
do_try_wrlock()
{
return m_impl.do_try_wrlock();
}
bool
timed_rw_mutex::
do_timed_rdlock(const xtime &xt)
{
return m_impl.do_timed_rdlock(xt);
}
bool
timed_rw_mutex::
do_timed_wrlock(const xtime &xt)
{
return m_impl.do_timed_wrlock(xt);
}
} // namespace boost

262
src/shared_memory.cpp Normal file

@@ -0,0 +1,262 @@
// Copyright (C) 2002
// William E. Kempf, David Moore
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#include <boost/thread/shared_memory.hpp>
#include <cstring>   // memset
#include <string>    // std::string (Win32 object name)
#if defined(BOOST_HAS_WINTHREADS)
#include <windows.h>
// Next line should really be BOOST_HAS_POSIX_xxx
#elif defined(BOOST_HAS_PTHREADS)
#include <sys/shm.h>
#include <sys/mman.h>
#include <fcntl.h>   // shm_open flags
#include <unistd.h>  // ftruncate, close
#include <cerrno>    // errno, EEXIST
// Need to busy-wait on POSIX
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#endif
namespace
{
const int HEADER_ALIGN=16;
struct hdr
{
size_t len;
unsigned int count;
};
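// Layout of a shared block: the first HEADER_ALIGN (16) bytes hold this hdr
// bookkeeping (user length and attach count); the user-visible region handed
// back to callers starts immediately after the header.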
void fillzero(void *ptr, size_t len)
{
memset(ptr,0,len);
}
void noinit(void *,size_t)
{
}
};
namespace boost
{
shared_memory::
shared_memory(const char *name,size_t len) : m_ptr(NULL),m_mem_obj(0),m_h_event(NULL),
m_len(len),m_initfunc(&noinit)
{
create(name,len);
}
// Obtain a shared memory block and initialize it with initfunc
shared_memory::
shared_memory(const char *name,size_t len,const boost::function2<void,void *,size_t> &initfunc) :
m_ptr(NULL),m_mem_obj(0),m_h_event(NULL),m_len(len),m_initfunc(initfunc)
{
create(name,len);
}
shared_memory::
~shared_memory()
{
if(m_ptr)
{
m_ptr = ((char *) m_ptr - HEADER_ALIGN);
hdr *p_hdr = (hdr *)m_ptr;
if(p_hdr)
{
p_hdr->count--;
}
#if defined(BOOST_HAS_WINTHREADS)
UnmapViewOfFile(m_ptr);
if(m_mem_obj)
{
CloseHandle(reinterpret_cast<HANDLE>(m_mem_obj));
}
if(m_h_event)
{
CloseHandle(reinterpret_cast<HANDLE>(m_h_event));
}
#elif defined (BOOST_HAS_PTHREADS)
if(p_hdr->count == 0)
{
shm_unlink(m_name);
}
munmap(m_ptr,m_len);
#endif
}
}
void
shared_memory::
create(const char *name,size_t len)
{
#if defined(BOOST_HAS_WINTHREADS)
HANDLE h_map = NULL;
HANDLE h_event = NULL;
DWORD ret;
bool b_creator = false;
std::string obj_name = "_EVT_";
obj_name += name;
h_event = CreateEvent(NULL,TRUE,FALSE,obj_name.c_str());
if(h_event == NULL)
{
throw boost::thread_resource_error();
}
b_creator = (GetLastError() != ERROR_ALREADY_EXISTS);
h_map = CreateFileMapping(INVALID_HANDLE_VALUE,NULL,PAGE_READWRITE,0,len+HEADER_ALIGN,name);
if(h_map)
{
m_ptr = (char *)MapViewOfFile(h_map,FILE_MAP_WRITE,0,0,0);
if(m_ptr)
{
// Get a pointer to our header, and move our real ptr past this.
hdr *p_hdr = (hdr *)m_ptr;
m_ptr = ((char *)m_ptr + HEADER_ALIGN);
if(b_creator)
{
// Call the initialization function for the user area.
m_initfunc(m_ptr,len);
p_hdr->len = len;
p_hdr->count = 1;
SetEvent(h_event);
}
else
{
ret = WaitForSingleObject(h_event,INFINITE);
if(ret != WAIT_OBJECT_0)
{
CloseHandle(h_event);
CloseHandle(h_map);
throw boost::thread_resource_error();
}
// We've got a previously constructed object.
(p_hdr->count)++;
}
}
else
{
CloseHandle(h_event);
throw boost::thread_resource_error();
}
}
m_mem_obj = reinterpret_cast<int>(h_map);
m_h_event = reinterpret_cast<void *>(h_event);
#elif defined (BOOST_HAS_PTHREADS)
int fd_smo; // descriptor to shared memory object
bool b_creator = true;
fd_smo = shm_open(name,O_RDWR|O_CREAT|O_EXCL,SHM_MODE);
if(fd_smo == -1)
{
if(errno == EEXIST)
{
// We lost the race. We should just re-open with shared access
// below.
fd_smo = shm_open(name,O_RDWR,SHM_MODE);
b_creator = false;
}
else
{
throw boost::thread_resource_error();
}
}
else
{
// We're the creator. Use ftruncate to set the size.
b_creator = true;
//
// Add error check on ftruncate.
ftruncate(fd_smo,len+HEADER_ALIGN);
}
m_ptr = (char *)mmap(NULL,len + HEADER_ALIGN,
PROT_READ|PROT_WRITE,
MAP_SHARED,
fd_smo,
0);
if(m_ptr == (char *)MAP_FAILED)  // mmap signals failure with MAP_FAILED, not NULL
m_ptr = NULL;
if(m_ptr)
{
// Get a pointer to our header, and move our real ptr past this.
hdr *p_hdr = (hdr *)m_ptr;
m_ptr = ((char *)m_ptr + HEADER_ALIGN);
if(b_creator)
{
// Call the initialization function for the user area.
//flock(fd_smo);
m_initfunc(m_ptr,len);
p_hdr->len = len;
p_hdr->count = 1;
//funlock(fd_sm0);
}
else
{
// Need an event here. For now, busy wait.
while(p_hdr->len == 0)
{
//flock(fd_smo);
//funlock(fd_smo);
boost::xtime xt;
boost::xtime_get(&xt,boost::TIME_UTC);
xt.sec++;
boost::thread::sleep(xt);
}
// We've got a previously constructed object.
(p_hdr->count)++;
}
}
close(fd_smo);
m_mem_obj = 0;      // no separate mapping handle kept on POSIX
m_h_event = NULL;   // no event object needed on POSIX
#endif
}
} // namespace boost

View File

@@ -12,44 +12,195 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/tss.hpp>
#include <new>
#include <memory>
#include <cassert>
#if defined(BOOST_HAS_WINTHREADS)
# include <windows.h>
# include <process.h>
#elif defined(BOOST_HAS_MPTASKS)
# include <DriverServices.h>
# include "init.hpp"
# include "safe.hpp"
# include <boost/thread/tss.hpp>
# include <DriverServices.h>
# include "init.hpp"
# include "safe.hpp"
#endif
#include "timeconv.inl"
namespace {
class thread_param
class thread_data
{
public:
thread_param(const boost::function0<void>& threadfunc) : m_threadfunc(threadfunc), m_started(false) { }
void wait()
{
boost::mutex::scoped_lock scoped_lock(m_mutex);
while (!m_started)
m_condition.wait(scoped_lock);
}
void started()
{
boost::mutex::scoped_lock scoped_lock(m_mutex);
m_started = true;
m_condition.notify_one();
}
enum
{
creating,
running,
joining,
joined
};
thread_data(const boost::function0<void>& threadfunc);
thread_data();
~thread_data();
void addref();
bool release();
boost::thread::category_type category();
void join();
void cancel();
void test_cancel();
void run();
private:
boost::mutex m_mutex;
boost::condition m_condition;
const boost::function0<void>& m_threadfunc;
bool m_started;
boost::condition m_cond;
boost::function0<void> m_threadfunc;
unsigned int m_refcount;
int m_state;
#if defined(BOOST_HAS_WINTHREADS)
HANDLE m_thread;
#elif defined(BOOST_HAS_PTHREADS)
pthread_t m_thread;
#elif defined(BOOST_HAS_MPTASKS)
MPQueueID m_pJoinQueueID;
MPTaskID m_pTaskID;
#endif
boost::thread::category_type m_category;
bool m_canceled;
};
void release_tss_data(void* pdata)
{
thread_data* tdata = (thread_data*)pdata;
assert(tdata);
if (tdata->release())
delete tdata;
}
boost::thread_specific_ptr<thread_data> tss_thread_data(&release_tss_data);
thread_data::thread_data(const boost::function0<void>& threadfunc)
: m_threadfunc(threadfunc), m_refcount(2), m_state(creating), m_category(boost::thread::boost), m_canceled(false)
{
}
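// Note: the count starts at 2 because two owners eventually release a
// thread_data -- presumably the boost::thread object holding it via m_handle
// and the per-thread tss_thread_data entry cleaned up by release_tss_data above.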
thread_data::thread_data()
: m_refcount(2), m_state(running), m_category(boost::thread::native), m_canceled(false)
{
#if defined(BOOST_HAS_WINTHREADS)
DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
&m_thread, 0, FALSE, DUPLICATE_SAME_ACCESS);
#elif defined(BOOST_HAS_PTHREADS)
m_thread = pthread_self();
#endif
}
thread_data::~thread_data()
{
if (m_category == boost::thread::native || m_state != joined)
{
int res = 0;
#if defined(BOOST_HAS_WINTHREADS)
res = CloseHandle(m_thread);
assert(res);
#elif defined(BOOST_HAS_PTHREADS)
res = pthread_detach(m_thread);
assert(res == 0);
#elif defined(BOOST_HAS_MPTASKS)
OSStatus lStatus = threads::mac::detail::safe_wait_on_queue(m_pJoinQueueID, NULL, NULL, NULL, kDurationForever);
assert(lStatus == noErr);
#endif
}
}
void thread_data::addref()
{
boost::mutex::scoped_lock lock(m_mutex);
++m_refcount;
}
bool thread_data::release()
{
boost::mutex::scoped_lock lock(m_mutex);
return (--m_refcount == 0);
}
boost::thread::category_type thread_data::category()
{
boost::mutex::scoped_lock lock(m_mutex);
return m_category;
}
void thread_data::join()
{
boost::mutex::scoped_lock lock(m_mutex);
while (m_state == creating || m_state == joining)
m_cond.wait(lock);
if (m_state != joined)
{
m_state = joining;
m_cond.notify_all();
lock.unlock();
int res = 0;
#if defined(BOOST_HAS_WINTHREADS)
res = WaitForSingleObject(m_thread, INFINITE);
assert(res == WAIT_OBJECT_0);
res = CloseHandle(m_thread);
assert(res);
#elif defined(BOOST_HAS_PTHREADS)
res = pthread_join(m_thread, 0);
assert(res == 0);
#elif defined(BOOST_HAS_MPTASKS)
OSStatus lStatus = threads::mac::detail::safe_wait_on_queue(m_pJoinQueueID, NULL, NULL, NULL, kDurationForever);
assert(lStatus == noErr);
#endif
lock.lock();
m_state = joined;
}
}
void thread_data::cancel()
{
boost::mutex::scoped_lock lock(m_mutex);
m_canceled = true;
}
void thread_data::test_cancel()
{
boost::mutex::scoped_lock lock(m_mutex);
if (m_canceled)
throw boost::thread_cancel();
}
void thread_data::run()
{
{
boost::mutex::scoped_lock lock(m_mutex);
#if defined(BOOST_HAS_WINTHREADS)
DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
&m_thread, 0, FALSE, DUPLICATE_SAME_ACCESS);
#elif defined(BOOST_HAS_PTHREADS)
m_thread = pthread_self();
#endif
m_state = thread_data::running;
m_cond.notify_all();
}
m_threadfunc();
}
struct thread_equals
{
thread_equals(boost::thread& thrd) : m_thrd(thrd) { }
bool operator()(boost::thread* thrd) { return *thrd == m_thrd; }
boost::thread& m_thrd;
};
} // unnamed namespace
@@ -65,13 +216,16 @@ static OSStatus thread_proxy(void* param)
{
try
{
thread_param* p = static_cast<thread_param*>(param);
boost::function0<void> threadfunc = p->m_threadfunc;
p->started();
threadfunc();
thread_data* tdata = static_cast<thread_data*>(param);
tss_thread_data.reset(tdata);
tdata->run();
}
catch (boost::thread_cancel)
{
}
catch (...)
{
terminate();
}
#if defined(BOOST_HAS_MPTASKS)
::boost::detail::thread_cleanup();
@@ -84,32 +238,42 @@ static OSStatus thread_proxy(void* param)
namespace boost {
thread::thread()
: m_joinable(false)
: m_handle(0)
{
#if defined(BOOST_HAS_WINTHREADS)
m_thread = reinterpret_cast<void*>(GetCurrentThread());
m_id = GetCurrentThreadId();
#elif defined(BOOST_HAS_PTHREADS)
m_thread = pthread_self();
#elif defined(BOOST_HAS_MPTASKS)
#if defined(BOOST_HAS_MPTASKS)
threads::mac::detail::thread_init();
threads::mac::detail::create_singletons();
m_pTaskID = MPCurrentTaskID();
m_pJoinQueueID = kInvalidID;
#endif
thread_data* tdata = tss_thread_data.get();
if (tdata == 0)
{
tdata = new(std::nothrow) thread_data;
if (!tdata)
throw thread_resource_error();
tss_thread_data.reset(tdata);
}
else
tdata->addref();
m_handle = tdata;
}
thread::thread(const function0<void>& threadfunc)
: m_joinable(true)
: m_handle(0)
{
thread_param param(threadfunc);
std::auto_ptr<thread_data> param(new(std::nothrow) thread_data(threadfunc));
if (param.get() == 0)
throw thread_resource_error();
#if defined(BOOST_HAS_WINTHREADS)
m_thread = reinterpret_cast<void*>(_beginthreadex(0, 0, &thread_proxy, &param, 0, &m_id));
if (!m_thread)
unsigned int id;
HANDLE h = (HANDLE)_beginthreadex(0, 0, &thread_proxy, param.get(), 0, &id);
if (!h)
throw thread_resource_error();
#elif defined(BOOST_HAS_PTHREADS)
int res = 0;
res = pthread_create(&m_thread, 0, &thread_proxy, &param);
pthread_t t;
res = pthread_create(&t, 0, &thread_proxy, param.get());
if (res != 0)
throw thread_resource_error();
#elif defined(BOOST_HAS_MPTASKS)
@@ -123,7 +287,7 @@ thread::thread(const function0<void>& threadfunc)
lStatus = MPCreateQueue(&m_pJoinQueueID);
if(lStatus != noErr) throw thread_resource_error();
lStatus = MPCreateTask(&thread_proxy, &param, 0UL, m_pJoinQueueID, NULL, NULL,
lStatus = MPCreateTask(&thread_proxy, param.get(), 0UL, m_pJoinQueueID, NULL, NULL,
0UL, &m_pTaskID);
if(lStatus != noErr)
{
@@ -132,36 +296,19 @@ thread::thread(const function0<void>& threadfunc)
throw thread_resource_error();
}
#endif
param.wait();
m_handle = param.release();
}
thread::~thread()
{
if (m_joinable)
{
#if defined(BOOST_HAS_WINTHREADS)
int res = 0;
res = CloseHandle(reinterpret_cast<HANDLE>(m_thread));
assert(res);
#elif defined(BOOST_HAS_PTHREADS)
pthread_detach(m_thread);
#elif defined(BOOST_HAS_MPTASKS)
assert(m_pJoinQueueID != kInvalidID);
OSStatus lStatus = MPDeleteQueue(m_pJoinQueueID);
assert(lStatus == noErr);
#endif
}
// thread_data* tdata = static_cast<thread_data*>(m_handle);
// if (tdata && tdata->release())
// delete tdata;
}
bool thread::operator==(const thread& other) const
{
#if defined(BOOST_HAS_WINTHREADS)
return other.m_id == m_id;
#elif defined(BOOST_HAS_PTHREADS)
return pthread_equal(m_thread, other.m_thread) != 0;
#elif defined(BOOST_HAS_MPTASKS)
return other.m_pTaskID == m_pTaskID;
#endif
return m_handle == other.m_handle;
}
bool thread::operator!=(const thread& other) const
@@ -169,24 +316,29 @@ bool thread::operator!=(const thread& other) const
return !operator==(other);
}
thread::category_type thread::category() const
{
thread_data* tdata = static_cast<thread_data*>(m_handle);
return tdata->category();
}
void thread::join()
{
int res = 0;
#if defined(BOOST_HAS_WINTHREADS)
res = WaitForSingleObject(reinterpret_cast<HANDLE>(m_thread), INFINITE);
assert(res == WAIT_OBJECT_0);
res = CloseHandle(reinterpret_cast<HANDLE>(m_thread));
assert(res);
#elif defined(BOOST_HAS_PTHREADS)
res = pthread_join(m_thread, 0);
assert(res == 0);
#elif defined(BOOST_HAS_MPTASKS)
OSStatus lStatus = threads::mac::detail::safe_wait_on_queue(m_pJoinQueueID, NULL, NULL, NULL, kDurationForever);
assert(lStatus == noErr);
#endif
// This isn't a race condition since any race that could occur would
// have us in undefined behavior territory any way.
m_joinable = false;
thread_data* tdata = static_cast<thread_data*>(m_handle);
tdata->join();
}
void thread::cancel()
{
thread_data* tdata = static_cast<thread_data*>(m_handle);
tdata->cancel();
}
void thread::test_cancel()
{
thread self;
thread_data* tdata = static_cast<thread_data*>(self.m_handle);
tdata->test_cancel();
}
void thread::sleep(const xtime& xt)
@@ -295,11 +447,25 @@ void thread_group::remove_thread(thread* thrd)
// For now we'll simply ignore requests to remove a thread object that's not in the group.
// Should we consider this an error and either throw or return an error value?
std::list<thread*>::iterator it = std::find(m_threads.begin(), m_threads.end(), thrd);
assert(it != m_threads.end());
if (it != m_threads.end())
m_threads.erase(it);
}
thread* thread_group::find(thread& thrd)
{
mutex::scoped_lock scoped_lock(m_mutex);
// Return 0 if no thread object in the group has the same identity as thrd,
// mirroring the lenient behavior of remove_thread above.
std::list<thread*>::iterator it = std::find_if(m_threads.begin(), m_threads.end(), thread_equals(thrd));
if (it != m_threads.end())
return *it;
return 0;
}
void thread_group::join_all()
{
mutex::scoped_lock scoped_lock(m_mutex);

361
src/thread_pool.cpp Normal file

@@ -0,0 +1,361 @@
// Copyright (C) 2002 David Moore
//
// Based on Boost.Threads
// Copyright (C) 2001
// William E. Kempf
//
// Derived loosely from work queue manager in "Programming POSIX Threads"
// by David Butenhof.
//
// Permission to use, copy, modify, distribute and sell this software
// and its documentation for any purpose is hereby granted without fee,
// provided that the above copyright notice appear in all copies and
// that both that copyright notice and this permission notice appear
// in supporting documentation. William E. Kempf makes no representations
// about the suitability of this software for any purpose.
// It is provided "as is" without express or implied warranty.
#include <boost/thread/thread_pool.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/xtime.hpp>
#include <boost/bind.hpp>
#include <list>
#include <queue>
#include <stdexcept>
#include <cassert>
namespace boost
{
class thread_pool::impl
{
public:
impl(int max_threads, int min_threads, int timeout_secs, int timeout_nsecs);
~impl();
void add(const boost::function0<void> &job);
void join();
void cancel();
void detach();
void worker_harness();
private:
typedef enum
{
RUNNING,
CANCELLING,
JOINING,
JOINED,
DETACHED
} thread_pool_state;
typedef std::queue<boost::function0<void> > job_q;
condition m_more_work;
condition m_done;
mutex m_prot;
job_q m_jobs;
thread_group m_workers;
thread_pool_state m_state;
int m_max_threads; // Max threads allowed
int m_min_threads;
int m_thread_count; // Current number of threads
int m_idle_count; // Number of idle threads
int m_timeout_secs; // How long to keep idle threads
int m_timeout_nsecs;
};
thread_pool::impl::impl(int max_threads, int min_threads, int timeout_secs, int timeout_nsecs)
: m_state(RUNNING), m_max_threads(max_threads), m_min_threads(min_threads),
m_thread_count(0), m_idle_count(0), m_timeout_secs(timeout_secs), m_timeout_nsecs(timeout_nsecs)
{
// Immediately launch some worker threads.
//
// Not an exception safe implementation, yet.
while (min_threads-- > 0)
{
m_workers.create_thread(bind(&thread_pool::impl::worker_harness, this));
m_thread_count++;
}
}
thread_pool::impl::~impl()
{
// Join in the destructor, unless they have already
// joined or detached.
mutex::scoped_lock lock(m_prot);
if (m_state == RUNNING)
{
lock.unlock();
join();
}
}
void thread_pool::impl::add(const boost::function0<void> &job)
{
mutex::scoped_lock lock(m_prot);
// Note - can never reach this point if m_state == CANCELLING
// because the m_prot is held during the entire cancel operation.
assert(m_state == RUNNING);
m_jobs.push(job);
if (m_idle_count > 0)
m_more_work.notify_one();
else if (m_thread_count < m_max_threads)
{
// No idle threads, and we're below our limit. Spawn a new
// worker.
// What we really need is thread::detach(), or "create suspended"
m_workers.create_thread(bind(&thread_pool::impl::worker_harness, this));
m_thread_count++;
}
}
void thread_pool::impl::join()
{
mutex::scoped_lock lock(m_prot);
assert(m_state == RUNNING);
if (m_thread_count > 0)
{
m_state = JOINING;
// if any threads are idling, wake them.
if (m_idle_count > 0)
m_more_work.notify_all();
// Track the shutdown progress of the threads.
while (m_thread_count > 0)
m_done.wait(lock);
}
m_workers.join_all();
m_state = JOINED;
}
// This is a "weak" form of cancel which empties out the job queue and takes
// the thread count down to zero.
//
// Upon receiving more work, the thread count would grow back up to min_threads.
//
// Cancel will be much stronger once full thread cancellation is in place!
void thread_pool::impl::cancel()
{
mutex::scoped_lock lock(m_prot);
assert(m_state == RUNNING);
if (m_thread_count > 0)
{
m_state = CANCELLING;
// Cancelling kills any unexecuted jobs.
while (!m_jobs.empty())
m_jobs.pop();
/* If we had cancel, this would be something like....
m_workers.cancel_all();
while(m_cancel_count > 0)
m_all_cancelled.wait(lock);
*/
}
m_state = RUNNING; // Go back to accepting work.
}
void thread_pool::impl::detach()
{
mutex::scoped_lock lock(m_prot);
if (m_state == RUNNING)
{
m_min_threads = 0;
m_state = DETACHED;
}
else
{
// detach during/after a join has no effect - the join will
// complete.
}
}
void thread_pool::impl::worker_harness()
{
boost::thread me;
xtime timeout;
int timedout;
mutex::scoped_lock lock(m_prot);
for (;;)
{
timedout = 0;
xtime_get(&timeout, boost::TIME_UTC);
timeout.sec += m_timeout_secs;
timeout.nsec += m_timeout_nsecs;
while (m_jobs.empty() && (m_state == RUNNING))
{
m_idle_count++;
bool status = m_more_work.timed_wait(lock, timeout);
m_idle_count--;
if (!status)
{
// The wait timed out; fall through so the timeout handling below can
// decide whether this worker should shut itself down.
timedout = 1;
break;
}
}
if (!m_jobs.empty() && m_state != CANCELLING)
{
boost::function0<void> jobfunc = m_jobs.front();
m_jobs.pop();
lock.unlock();
jobfunc();
lock.lock();
}
else if (m_jobs.empty() && m_state == JOINING)
{
m_thread_count--;
// If we are the last worker exiting, let everyone know about it!
if (m_thread_count == 0)
m_done.notify_all();
return;
}
else if (m_jobs.empty() && m_state == DETACHED)
{
m_thread_count--;
// If we are the last worker exiting, let everyone know about it!
if (m_thread_count == 0)
{
lock.unlock();
delete this;
}
return;
}
/*
* If there's no more work, and we wait for as long as
* we're allowed, then terminate this server thread.
*/
if (m_jobs.empty() && timedout)
{
if (m_thread_count > m_min_threads)
{
m_thread_count--;
if (m_state == DETACHED &&
m_thread_count == 0)
{
lock.unlock();
delete this;
return;
}
// We aren't in a JOINING or CANCELLING state, so trim
// down our resource usage and clean ourselves up.
thread* thrd = m_workers.find(me);
m_workers.remove_thread(thrd);
delete thrd;
return;
}
}
}
}
thread_pool::thread_pool(int max_threads, int min_threads, int timeout_secs, int timeout_nsecs)
: m_pimpl(new impl(max_threads, min_threads, timeout_secs, timeout_nsecs))
{
}
thread_pool::~thread_pool()
{
if (m_pimpl != NULL)
delete m_pimpl;
}
void thread_pool::add(const boost::function0<void> &job)
{
assert(m_pimpl);
m_pimpl->add(job);
}
void thread_pool::join()
{
assert(m_pimpl);
m_pimpl->join();
}
void thread_pool::cancel()
{
assert(m_pimpl);
m_pimpl->cancel();
}
void thread_pool::detach()
{
assert(m_pimpl);
// Tell our implementation it is running detached.
m_pimpl->detach();
m_pimpl = NULL;
}
/*thread_grp::thread_grp()
{
}
thread_grp::~thread_grp()
{
for (std::list<boost::thread*>::iterator it = m_threads.begin(); it != m_threads.end(); ++it)
delete (*it);
}
boost::thread* thread_grp::create_thread(const function0<void>& threadfunc)
{
std::auto_ptr<boost::thread> thrd(new boost::thread(threadfunc));
m_threads.push_back(thrd.get());
return thrd.release();
}
// Delete the thread in the group with identity equal to thrd
void thread_grp::delete_thread_equal_to(boost::thread *thrd)
{
std::list<boost::thread*>::iterator it = m_threads.begin();
for ( ;it != m_threads.end(); ++it)
{
if (**it == *thrd)
break;
}
assert(it != m_threads.end());
if (it != m_threads.end())
{
boost::thread *pthread = *it;
m_threads.erase(it);
delete pthread;
}
}
void thread_grp::join_all()
{
for (std::list<boost::thread*>::iterator it =
m_threads.begin(); it != m_threads.end(); ++it)
{
(*it)->join();
}
}*/
} // namespace boost

View File

@@ -19,13 +19,10 @@
typedef void (__cdecl * handler)(void);
typedef std::list<handler> exit_handlers;
typedef std::set<exit_handlers*> registered_handlers;
namespace
{
CRITICAL_SECTION cs;
DWORD key;
registered_handlers registry;
}
#if defined(__BORLANDC__)
@@ -38,7 +35,6 @@ BOOL WINAPI DllMain(HANDLE module, DWORD reason, LPVOID)
switch (reason)
{
case DLL_PROCESS_ATTACH:
InitializeCriticalSection(&cs);
key = TlsAlloc();
break;
case DLL_THREAD_ATTACH:
@@ -49,39 +45,14 @@ BOOL WINAPI DllMain(HANDLE module, DWORD reason, LPVOID)
exit_handlers* handlers = static_cast<exit_handlers*>(TlsGetValue(key));
if (handlers)
{
for (exit_handlers::iterator it = handlers->begin(); it != handlers->end(); ++it)
for (exit_handlers::reverse_iterator it = handlers->rbegin(); it != handlers->rend(); ++it)
(*it)();
// Remove the exit handler list from the registered lists and then destroy it.
EnterCriticalSection(&cs);
registry.erase(handlers);
LeaveCriticalSection(&cs);
delete handlers;
}
}
break;
case DLL_PROCESS_DETACH:
{
// Assume the main thread is ending (call its handlers) and all other threads
// have already ended. If this DLL is loaded and unloaded dynamically at run time
// this is a bad assumption, but this is the best we can do.
exit_handlers* handlers = static_cast<exit_handlers*>(TlsGetValue(key));
if (handlers)
{
for (exit_handlers::iterator it = handlers->begin(); it != handlers->end(); ++it)
(*it)();
}
// Destroy any remaining exit handlers. Above we assumed there'd only be the main
// thread left, but to insure we don't get memory leaks we won't make that assumption
// here.
EnterCriticalSection(&cs);
for (registered_handlers::iterator it = registry.begin(); it != registry.end(); ++it)
delete (*it);
LeaveCriticalSection(&cs);
DeleteCriticalSection(&cs);
TlsFree(key);
}
break;
}
return TRUE;
@@ -112,25 +83,9 @@ int on_thread_exit(void (__cdecl * func)(void))
delete handlers;
return -1;
}
// Attempt to register this new handler so that memory can be properly
// cleaned up.
try
{
EnterCriticalSection(&cs);
registry.insert(handlers);
LeaveCriticalSection(&cs);
}
catch (...)
{
LeaveCriticalSection(&cs);
delete handlers;
return -1;
}
}
// Attempt to add the handler to the list of exit handlers. If it's been previously
// added just report success and exit.
// Attempt to add the handler to the list of exit handlers.
try
{
handlers->push_front(func);

View File

@@ -11,66 +11,302 @@
#include <boost/thread/tss.hpp>
#include <boost/thread/once.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/exceptions.hpp>
#include <vector>
#include <memory>    // std::auto_ptr
#include <stdexcept>
#include <cassert>
#if defined(BOOST_HAS_WINTHREADS)
# include <windows.h>
# include "threadmon.hpp"
#endif
#if defined(BOOST_HAS_WINTHREADS)
#include "threadmon.hpp"
#include <map>
namespace {
typedef std::pair<void(*)(void*), void*> cleanup_info;
typedef std::map<int, cleanup_info> cleanup_handlers;
typedef std::vector<std::pair<int, void*> > tss_slots;
DWORD key;
boost::once_flag once = BOOST_ONCE_INIT;
struct tss_slot_info
{
boost::function1<void, void*> cleanup;
int generation;
int next_free;
};
typedef std::vector<tss_slot_info> tss_slot_info_vector;
void init_cleanup_key()
{
key = TlsAlloc();
assert(key != 0xFFFFFFFF);
}
struct tss_data_t
{
boost::mutex mutex;
tss_slot_info_vector slot_info;
#if defined(BOOST_HAS_WINTHREADS)
DWORD native_key;
#elif defined(BOOST_HAS_PTHREADS)
pthread_key_t native_key;
#endif
int next_free;
};
void __cdecl cleanup()
{
cleanup_handlers* handlers = static_cast<cleanup_handlers*>(TlsGetValue(key));
for (cleanup_handlers::iterator it = handlers->begin(); it != handlers->end(); ++it)
{
cleanup_info info = it->second;
if (info.second)
info.first(info.second);
}
delete handlers;
}
tss_data_t* tss_data = 0;
boost::once_flag tss_once = BOOST_ONCE_INIT;
cleanup_handlers* get_handlers()
{
boost::call_once(&init_cleanup_key, once);
extern "C" void cleanup_slots(void* p)
{
tss_slots* slots = static_cast<tss_slots*>(p);
boost::mutex::scoped_lock lock(tss_data->mutex);
for (tss_slot_info_vector::size_type i = 0; i < tss_data->slot_info.size(); ++i)
{
int generation = (*slots)[i].first;
void *& data = (*slots)[i].second;
if (generation == tss_data->slot_info[i].generation && data != 0)
{
tss_data->slot_info[i].cleanup(data);
data = 0;
}
}
}
cleanup_handlers* handlers = static_cast<cleanup_handlers*>(TlsGetValue(key));
if (!handlers)
{
try
{
handlers = new cleanup_handlers;
}
catch (...)
{
return 0;
}
int res = 0;
res = TlsSetValue(key, handlers);
assert(res);
res = on_thread_exit(&cleanup);
assert(res == 0);
}
#if defined(BOOST_HAS_WINTHREADS)
void __cdecl tss_thread_exit()
{
tss_slots* slots = static_cast<tss_slots*>(TlsGetValue(tss_data->native_key));
if (slots)
cleanup_slots(slots);
}
#endif
return handlers;
}
void init_tss_data()
{
// Intentional memory "leak"
// This is the only way to ensure the mutex in the global data structure
// is available when cleanup handlers are run, since the execution order
// of cleanup handlers is unspecified on any platform with regard to
// C++ destructor ordering rules.
tss_data = new tss_data_t;
#if defined(BOOST_HAS_WINTHREADS)
tss_data->native_key = TlsAlloc();
assert(tss_data->native_key != 0xFFFFFFFF);
#elif defined(BOOST_HAS_PTHREADS)
int res = 0;
res = pthread_key_create(&tss_data->native_key, &cleanup_slots);
assert(res == 0);
#endif
tss_data->next_free = -1;
}
tss_slots* get_slots(bool alloc)
{
tss_slots* slots = 0;
#if defined(BOOST_HAS_WINTHREADS)
slots = static_cast<tss_slots*>(TlsGetValue(tss_data->native_key));
#elif defined(BOOST_HAS_PTHREADS)
slots = static_cast<tss_slots*>(pthread_getspecific(tss_data->native_key));
#endif
if (slots == 0 && alloc)
{
std::auto_ptr<tss_slots> temp(new tss_slots);
#if defined(BOOST_HAS_WINTHREADS)
if (!TlsSetValue(tss_data->native_key, temp.get()))
return 0;
on_thread_exit(&tss_thread_exit);
#elif defined(BOOST_HAS_PTHREADS)
if (pthread_setspecific(tss_data->native_key, temp.get()) != 0)
return 0;
#endif
slots = temp.release();
}
return slots;
}
} // namespace
namespace boost {
namespace detail {
tss_ref::tss_ref()
{
boost::call_once(&init_tss_data, tss_once);
}
tss::tss(boost::function1<void, void*> cleanup)
{
boost::mutex::scoped_lock lock(tss_data->mutex);
m_slot = tss_data->next_free;
if (m_slot == -1)
{
tss_slot_info info;
info.generation = 0;
info.next_free = -1;
try
{
tss_data->slot_info.push_back(info);
}
catch (...)
{
throw boost::thread_resource_error();
}
m_slot = tss_data->slot_info.size() - 1;
}
tss_data->next_free = tss_data->slot_info[m_slot].next_free;
tss_data->slot_info[m_slot].next_free = -1;
tss_data->slot_info[m_slot].cleanup = cleanup;
// Record the current slot's "generation", which is used to ensure
// we don't access a pointer that was set in a previous incarnation
// of a reused slot that has since been deallocated. Recording this here
// is an optimization that allows us to have lock-free access to
// TSS data.
m_generation = tss_data->slot_info[m_slot].generation;
}
tss::~tss()
{
boost::mutex::scoped_lock lock(tss_data->mutex);
// Increment the "generation" here. We do this in the destructor
// instead of the constructor specifically so that data stored in
// a thread's TSS is "leaked" immediately, rather than later, which
// might otherwise give users a false sense that their code isn't flawed.
tss_data->slot_info[m_slot].generation++;
tss_data->slot_info[m_slot].next_free = tss_data->next_free;
tss_data->next_free = m_slot;
}
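// Illustrative scenario for the generation counter: thread A stores a value in
// slot 3 while it is at generation 5; destroying the owning tss object bumps the
// slot to generation 6. If the slot is later reused by a new tss object, thread
// A's stale per-thread entry still records generation 5, so get()/set() in the
// new object ignore it instead of returning a dangling pointer.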
void* tss::get() const
{
tss_slots* slots = get_slots(false);
if (!slots)
return 0;
if (m_slot >= slots->size())
return 0;
if ((*slots)[m_slot].first == m_generation)
return (*slots)[m_slot].second;
return 0;
}
void tss::set(void* value)
{
tss_slots* slots = get_slots(true);
if (!slots)
throw boost::thread_resource_error();
if (m_slot >= slots->size())
{
try
{
slots->resize(m_slot + 1);
}
catch (...)
{
throw boost::thread_resource_error();
}
}
(*slots)[m_slot].first = m_generation;
(*slots)[m_slot].second = value;
}
void tss::cleanup(void* value)
{
boost::mutex::scoped_lock lock(tss_data->mutex);
tss_data->slot_info[m_slot].cleanup(value);
}
} // namespace detail
} // namespace boost
/*
#if defined(BOOST_HAS_WINTHREADS)
namespace {
typedef std::vector<std::pair<boost::detail::tss*, int> > key_type;
typedef std::vector<void*> slots_type;
DWORD key;
boost::once_flag once = BOOST_ONCE_INIT;
boost::mutex* pmutex;
key_type* pkeys;
int next_key;
void __cdecl cleanup_tss_data();
void init_tss()
{
// static boost::mutex mutex;
// static key_type keys;
// pmutex = &mutex;
// pkeys = &keys;
pmutex = new boost::mutex;
pkeys = new key_type;
key = TlsAlloc();
assert(key != 0xFFFFFFFF);
next_key = 0;
}
int alloc_key(boost::detail::tss* ptss)
{
boost::call_once(&init_tss, once);
boost::mutex::scoped_lock lock(*pmutex);
int key = next_key;
if (key >= pkeys->size())
{
pkeys->resize(key+1);
(*pkeys)[key].second = pkeys->size();
}
next_key = (*pkeys)[key].second;
(*pkeys)[key].first = ptss;
return key;
}
void free_key(int key)
{
boost::call_once(&init_tss, once);
boost::mutex::scoped_lock lock(*pmutex);
assert(key >= 0 && key < pkeys->size());
(*pkeys)[key].first = 0;
(*pkeys)[key].second = next_key;
next_key = key;
}
slots_type* get_tss_data()
{
boost::call_once(&init_tss, once);
if (key == 0xFFFFFFFF)
return 0;
slots_type* pdata = (slots_type*)TlsGetValue(key);
if (pdata == 0)
{
std::auto_ptr<slots_type> slots(new(std::nothrow) slots_type);
if (!TlsSetValue(key, slots.get()))
return 0;
on_thread_exit(&cleanup_tss_data);
pdata = slots.release();
}
return pdata;
}
void __cdecl cleanup_tss_data()
{
slots_type* pdata = get_tss_data();
if (pdata)
{
boost::mutex::scoped_lock lock(*pmutex);
for (int key = 0; key < pdata->size(); ++key)
{
void* pvalue = (*pdata)[key];
boost::detail::tss* ptss = pkeys && key < pkeys->size() ? (*pkeys)[key].first : 0;
if (ptss && pvalue)
ptss->cleanup(pvalue);
}
delete pdata;
}
}
}
#elif defined(BOOST_HAS_MPTASKS)
#include <map>
@@ -117,7 +353,6 @@ namespace boost {
namespace detail {
void thread_cleanup()
{
cleanup_handlers* handlers = reinterpret_cast<cleanup_handlers*>(MPGetTaskStorageValue(key));
@@ -143,39 +378,53 @@ void thread_cleanup()
namespace boost { namespace detail {
#if defined(BOOST_HAS_WINTHREADS)
tss::tss(void (*cleanup)(void*))
tss::tss(boost::function1<void, void*> cleanup)
{
m_key = TlsAlloc();
if (m_key == 0xFFFFFFFF)
throw thread_resource_error();
m_cleanup = cleanup;
m_key = alloc_key(this);
m_clean = cleanup;
m_module = (void*)LoadLibrary("boostthreadmon.dll");
}
tss::~tss()
{
int res = 0;
res = TlsFree(m_key);
assert(res);
free_key(m_key);
FreeLibrary((HMODULE)m_module);
}
void* tss::get() const
{
return TlsGetValue(m_key);
slots_type* pdata = get_tss_data();
if (pdata)
{
if (m_key >= pdata->size())
return 0;
return (*pdata)[m_key];
}
return 0;
}
bool tss::set(void* value)
void tss::set(void* value)
{
if (value && m_cleanup)
{
cleanup_handlers* handlers = get_handlers();
assert(handlers);
if (!handlers)
return false;
cleanup_info info(m_cleanup, value);
(*handlers)[m_key] = info;
}
return !!TlsSetValue(m_key, value);
slots_type* pdata = get_tss_data();
if (!pdata)
throw thread_resource_error();
if (m_key >= pdata->size())
{
try
{
pdata->resize(m_key+1);
}
catch (...)
{
throw thread_resource_error();
}
}
(*pdata)[m_key] = value;
}
void tss::cleanup(void* value)
{
m_clean(value);
}
#elif defined(BOOST_HAS_PTHREADS)
tss::tss(void (*cleanup)(void*))
@@ -198,9 +447,12 @@ void* tss::get() const
return pthread_getspecific(m_key);
}
bool tss::set(void* value)
void tss::set(void* value)
{
return pthread_setspecific(m_key, value) == 0;
int res = pthread_setspecific(m_key, value);
assert(res == 0 || res == ENOMEM);
if (res == ENOMEM)
throw thread_resource_error();
}
#elif defined(BOOST_HAS_MPTASKS)
tss::tss(void (*cleanup)(void*))
@@ -224,9 +476,9 @@ void* tss::get() const
return(reinterpret_cast<void *>(ulValue));
}
bool tss::set(void* value)
void tss::set(void* value)
{
if (value && m_cleanup)
if (m_cleanup)
{
cleanup_handlers* handlers = get_handlers();
assert(handlers);
@@ -236,12 +488,15 @@ bool tss::set(void* value)
(*handlers)[m_key] = info;
}
OSStatus lStatus = MPSetTaskStorageValue(m_key, reinterpret_cast<TaskStorageValue>(value));
return(lStatus == noErr);
// return(lStatus == noErr);
}
#endif
} // namespace detail
} // namespace boost
*/
// Change Log:
// 6 Jun 01 WEKEMPF Initial version.
// 30 May 02 WEKEMPF Added interface to set specific cleanup handlers. Removed TLS slot limits
// from most implementations.

39
test/test_barrier.cpp Normal file

@@ -0,0 +1,39 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/barrier.hpp>
#include <boost/test/test_tools.hpp>
namespace {
// Shared variables for generation barrier test
const int N_THREADS=10;
boost::barrier gen_barrier(N_THREADS);
boost::mutex mutex;
long global_parameter;
void barrier_thread()
{
for (int i = 0; i < 5; ++i)
{
if (gen_barrier.wait())
{
boost::mutex::scoped_lock lock(mutex);
global_parameter++;
}
}
}
} // namespace
void test_barrier()
{
boost::thread_group g;
global_parameter = 0;
for (int i = 0; i < N_THREADS; ++i)
g.create_thread(&barrier_thread);
g.join_all();
BOOST_TEST(global_parameter == 5);
}

30
test/test_call_once.cpp Normal file

@@ -0,0 +1,30 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/once.hpp>
#include <boost/test/test_tools.hpp>
namespace {
int once_value = 0;
boost::once_flag once = BOOST_ONCE_INIT;
void init_once_value()
{
once_value++;
}
void test_once_thread()
{
boost::call_once(&init_once_value, once);
}
} // namespace
void test_call_once()
{
const int NUMTHREADS=5;
boost::thread_group threads;
for (int i=0; i<NUMTHREADS; ++i)
threads.create_thread(&test_once_thread);
threads.join_all();
BOOST_TEST(once_value == 1);
}

53
test/test_harness.cpp Normal file

@@ -0,0 +1,53 @@
#define BOOST_INCLUDE_MAIN
#include <boost/test/test_tools.hpp>
#include <iostream>
#include <process.h>
extern void test_xtime_get();
extern void test_thread();
extern void test_thread_group();
extern void test_mutex();
extern void test_try_mutex();
extern void test_timed_mutex();
extern void test_recursive_mutex();
extern void test_recursive_try_mutex();
extern void test_recursive_timed_mutex();
extern void test_condition();
extern void test_thread_specific_ptr();
extern void test_call_once();
extern void test_barrier();
extern void test_thread_pool();
extern void test_rw_mutex();
namespace {
void run_test(void (*func)())
{
// Indicate testing progress...
std::cout << '.' << std::flush;
(*func)();
}
} // namespace
int test_main(int, char*[])
{
run_test(&test_xtime_get);
run_test(&test_thread);
run_test(&test_thread_group);
run_test(&test_mutex);
run_test(&test_try_mutex);
run_test(&test_timed_mutex);
run_test(&test_recursive_mutex);
run_test(&test_recursive_try_mutex);
run_test(&test_recursive_timed_mutex);
run_test(&test_condition);
run_test(&test_thread_specific_ptr);
run_test(&test_call_once);
run_test(&test_barrier);
run_test(&test_thread_pool);
// run_test(&test_rw_mutex);
// _endthreadex(0);
return 0;
}

309
test/test_rw_mutex.cpp Normal file

@@ -0,0 +1,309 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <boost/thread/rw_mutex.hpp>
#include <boost/test/test_tools.hpp>
#include <iostream>
namespace {
int shared_val = 0;
boost::xtime xsecs(int secs)
{
boost::xtime ret;
BOOST_TEST(boost::TIME_UTC == boost::xtime_get(&ret, boost::TIME_UTC));
ret.sec += secs;
return ret;
}
template <typename RW>
class thread_adapter
{
public:
thread_adapter(void (*func)(void*,RW &), void* param1,RW &param2)
: _func(func), _param1(param1) ,_param2(param2){ }
void operator()() const { _func(_param1, _param2); }
private:
void (*_func)(void*, RW &);
void* _param1;
RW& _param2;
};
template <typename RW>
struct data
{
data(int id, RW &m, int secs=0) : m_id(id), m_value(-1), m_secs(secs), m_rw_mutex(m) { }
int m_id;
int m_value;
int m_secs;
RW& m_rw_mutex; // Reader/Writer mutex
};
// plain_writer exercises the "infinite" lock for each
// RW_mutex type.
template<typename RW>
void plain_writer(void *arg,RW &rw)
{
data<RW> *pdata = (data<RW> *) arg;
// std::cout << "-->W" << pdata->m_id << "\n";
typename RW::scoped_rw_lock l(rw,boost::EXCL_LOCK);
boost::thread::sleep(xsecs(3));
shared_val += 10;
pdata->m_value = shared_val;
}
template<typename RW>
void plain_reader(void *arg,RW &rw)
{
data<RW> *pdata = (data<RW> *) arg;
typename RW::scoped_rw_lock l(rw,boost::SHARED_LOCK);
pdata->m_value = shared_val;
}
template<typename RW>
void try_writer(void *arg,RW &rw)
{
data<RW> *pdata = (data<RW> *) arg;
// std::cout << "-->W" << pdata->m_id << "\n";
typename RW::scoped_try_rw_lock l(rw,boost::NO_LOCK);
if(l.try_wrlock())
{
boost::thread::sleep(xsecs(3));
shared_val += 10;
pdata->m_value = shared_val;
}
}
template<typename RW>
void try_reader(void *arg,RW &rw)
{
data<RW> *pdata = (data<RW> *) arg;
typename RW::scoped_try_rw_lock l(rw);
if(l.try_rdlock())
{
pdata->m_value = shared_val;
}
}
template<typename RW>
void timed_writer(void *arg,RW &rw)
{
data<RW> *pdata = (data<RW> *) arg;
boost::xtime xt;
xt = xsecs(pdata->m_secs);
typename RW::scoped_timed_rw_lock l(rw,boost::NO_LOCK);
if(l.timed_wrlock(xt))
{
boost::thread::sleep(xsecs(3));
shared_val += 10;
pdata->m_value = shared_val;
}
}
template<typename RW>
void timed_reader(void *arg,RW &rw)
{
data<RW> *pdata = (data<RW> *) arg;
boost::xtime xt;
xt = xsecs(pdata->m_secs);
typename RW::scoped_timed_rw_lock l(rw,boost::NO_LOCK);
if(l.timed_rdlock(xt))
{
pdata->m_value = shared_val;
}
}
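// Note: dump_times below is never called in this file and refers to timing
// members (m_start, m_holding, m_end) that data<RW> does not define; it appears
// to be leftover from a timing harness and only compiles because the template
// is never instantiated.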
template<typename RW>
void dump_times(const char *prefix,data<RW> *pdata)
{
std::cout << " " << prefix << pdata->m_id <<
" In:" << pdata->m_start.LowPart <<
" Holding:" << pdata->m_holding.LowPart <<
" Out: " << pdata->m_end.LowPart << std::endl;
}
template<typename RW>
void test_plain_rw_mutex(RW &rw_mutex)
{
shared_val = 0;
data<RW> r1(1,rw_mutex);
data<RW> r2(2,rw_mutex);
data<RW> w1(1,rw_mutex);
data<RW> w2(2,rw_mutex);
// Writer one launches, holds the lock for 3 seconds.
boost::thread tw1(thread_adapter<RW>(plain_writer,&w1,rw_mutex));
// Writer two launches, tries to grab the lock, "clearly"
// after Writer one will already be holding it.
boost::thread::sleep(xsecs(1));
boost::thread tw2(thread_adapter<RW>(plain_writer,&w2,rw_mutex));
// Reader one launches, "clearly" after writer two, and "clearly"
// while writer 1 still holds the lock
boost::thread::sleep(xsecs(1));
boost::thread tr1(thread_adapter<RW>(plain_reader,&r1,rw_mutex));
boost::thread tr2(thread_adapter<RW>(plain_reader,&r2,rw_mutex));
tr2.join();
tr1.join();
tw2.join();
tw1.join();
if(rw_mutex.policy() == boost::sp_writer_priority)
{
BOOST_TEST(w1.m_value == 10);
BOOST_TEST(w2.m_value == 20);
BOOST_TEST(r1.m_value == 20); // Readers get in after 2nd writer
BOOST_TEST(r2.m_value == 20);
}
else if(rw_mutex.policy() == boost::sp_reader_priority)
{
BOOST_TEST(w1.m_value == 10);
BOOST_TEST(w2.m_value == 20);
BOOST_TEST(r1.m_value == 10); // Readers get in before 2nd writer
BOOST_TEST(r2.m_value == 10);
}
else if(rw_mutex.policy() == boost::sp_alternating_many_reads)
{
BOOST_TEST(w1.m_value == 10);
BOOST_TEST(w2.m_value == 20);
BOOST_TEST(r1.m_value == 10); // Readers get in before 2nd writer
BOOST_TEST(r2.m_value == 10);
}
else if(rw_mutex.policy() == boost::sp_alternating_single_reads)
{
BOOST_TEST(w1.m_value == 10);
BOOST_TEST(w2.m_value == 20);
// One Reader gets in before 2nd writer, but we can't tell
// which reader will "win", so just check their sum.
BOOST_TEST((r1.m_value + r2.m_value == 30));
}
}
template<typename RW>
void test_try_rw_mutex(RW &rw_mutex)
{
data<RW> r1(1,rw_mutex);
data<RW> w1(2,rw_mutex);
data<RW> w2(3,rw_mutex);
// We start with some specialized tests for "try" behavior
shared_val = 0;
// Writer one launches, holds the lock for 3 seconds.
boost::thread tw1(thread_adapter<RW>(try_writer,&w1,rw_mutex));
// Reader one launches, "clearly" after writer #1 holds the lock
// and before it releases the lock.
boost::thread::sleep(xsecs(1));
boost::thread tr1(thread_adapter<RW>(try_reader,&r1,rw_mutex));
// Writer two launches in the same timeframe.
boost::thread tw2(thread_adapter<RW>(try_writer,&w2,rw_mutex));
tw2.join();
tr1.join();
tw1.join();
BOOST_TEST(w1.m_value == 10);
BOOST_TEST(r1.m_value == -1); // Try would return w/o waiting
BOOST_TEST(w2.m_value == -1); // Try would return w/o waiting
// We finish by repeating the plain tests with the try lock
// This is important to verify that try locks are proper rw_mutexes as
// well.
test_plain_rw_mutex(rw_mutex);
}
template<typename RW>
void test_timed_rw_mutex(RW &rw_mutex)
{
data<RW> r1(1,rw_mutex,1);
data<RW> r2(2,rw_mutex,3);
data<RW> w1(3,rw_mutex,3);
data<RW> w2(4,rw_mutex,1);
// We begin with some specialized tests for "timed" behavior
shared_val = 0;
// Writer one will hold the lock for 3 seconds.
boost::thread tw1(thread_adapter<RW>(timed_writer,&w1,rw_mutex));
boost::thread::sleep(xsecs(1));
// Writer two will "clearly" try for the lock after the readers
// have tried for it. Writer two will wait up to 1 second for the lock. This write will fail.
boost::thread tw2(thread_adapter<RW>(timed_writer,&w2,rw_mutex));
// Readers one and two will "clearly" try for the lock after writer
// one already holds it. 1st reader will wait 1 second, and will fail
// to get the lock. 2nd reader will wait 3 seconds, and will get
// the lock.
boost::thread tr1(thread_adapter<RW>(timed_reader,&r1,rw_mutex));
boost::thread tr2(thread_adapter<RW>(timed_reader,&r2,rw_mutex));
tw1.join();
tr1.join();
tr2.join();
tw2.join();
BOOST_TEST(w1.m_value == 10);
BOOST_TEST(r1.m_value == -1);
BOOST_TEST(r2.m_value == 10);
BOOST_TEST(w2.m_value == -1);
// We follow by repeating the try tests with the timed lock.
// This is important to verify that timed locks are proper try locks as well
test_try_rw_mutex(rw_mutex);
}
} // namespace
void test_rw_mutex()
{
int i;
for(i = (int) boost::sp_writer_priority;
i <= (int) boost::sp_alternating_single_reads;
i++)
{
boost::rw_mutex plain_rw((boost::rw_scheduling_policy) i);
boost::try_rw_mutex try_rw((boost::rw_scheduling_policy) i);
boost::timed_rw_mutex timed_rw((boost::rw_scheduling_policy) i);
std::cout << "plain test, sp=" << i << "\n";
test_plain_rw_mutex(plain_rw);
std::cout << "try test, sp=" << i << "\n";
test_try_rw_mutex(try_rw);
std::cout << "timed test, sp=" << i << "\n";
test_timed_rw_mutex(timed_rw);
}
}

154
test/test_shared_memory.cpp Normal file

@@ -0,0 +1,154 @@
#include <boost/thread/shared_memory.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <boost/test/test_tools.hpp>
#include <boost/bind.hpp>
#include <cstring>   // strcpy, strcmp
#include <new>       // placement new
namespace
{
struct test_struct
{
char buf[128];
double val;
test_struct(const char *msg="",double v=0.0)
{
strcpy(buf,msg);
val = v;
}
};
};
struct CreateDefault
{
test_struct *operator()(void *place,size_t)
{
return new(place) test_struct;
}
};
struct CreateSlow
{
test_struct *operator()(void *place,size_t)
{
boost::xtime xt;
boost::xtime_get(&xt,boost::TIME_UTC);
xt.sec++;
boost::thread::sleep(xt);
return new(place) test_struct;
}
};
struct CreateWithParams
{
CreateWithParams(const char *msg, double v) : m_p_msg(msg),m_v(v)
{}
test_struct *operator()(void *place,size_t)
{
return new(place) test_struct(m_p_msg,m_v);
}
const char *m_p_msg;
double m_v;
};
struct CreateSlowWithParams
{
CreateSlowWithParams(const char *msg, double v) : m_p_msg(msg),m_v(v)
{}
test_struct *operator()(void *place,size_t)
{
boost::xtime xt;
boost::xtime_get(&xt,boost::TIME_UTC);
xt.sec++;
boost::thread::sleep(xt);
return new(place) test_struct(m_p_msg,m_v);
}
const char *m_p_msg;
double m_v;
};
void test_default_ctor()
{
const char *shared_name = "TestStruct";
typedef boost::shared_memory so;
so creator(shared_name,sizeof(test_struct),CreateDefault());
test_struct *pts = (test_struct *)creator.get();
BOOST_TEST(pts->buf[0] == 0);
BOOST_TEST(pts->val == 0.0);
strcpy(pts->buf,shared_name);
pts->val = 7.0;
so user(shared_name,sizeof(test_struct));
test_struct *pts2 = (test_struct *)user.get();
BOOST_TEST(strcmp(pts2->buf,shared_name)==0);
BOOST_TEST(pts2->val == 7.0);
}
void test_slow_create_thread()
{
const char *shared_name = "SlowStruct";
boost::shared_memory creator(shared_name,sizeof(test_struct),CreateSlowWithParams(shared_name,8.0));
test_struct *pts = (test_struct *)creator.get();
BOOST_TEST(strcmp(pts->buf,shared_name)==0);
BOOST_TEST(pts->val == 8.0);
}
void test_slow_user_thread()
{
const char *shared_name = "SlowStruct";
boost::shared_memory user(shared_name,sizeof(test_struct));
test_struct *pts2 = (test_struct *)user.get();
BOOST_TEST(strcmp(pts2->buf,shared_name)==0);
BOOST_TEST(pts2->val == 8.0);
}
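// Starts the slow creator thread, waits 250 ms, then starts a user thread;
// the user is expected to block until the creator has finished initializing
// the shared segment.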
void test_slow_ctor()
{
boost::thread t1(&test_slow_create_thread);
// Give the creator a chance to get moving.
boost::xtime xt;
boost::xtime_get(&xt,boost::TIME_UTC);
xt.nsec += 250000000;
boost::thread::sleep(xt);
boost::thread t2(&test_slow_user_thread);
t2.join();
t1.join();
}
void test_shared_memory()
{
test_default_ctor();
test_slow_ctor();
}


@@ -1,6 +1,6 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
//#include <boost/test/test_tools.hpp>
#include <boost/test/unit_test.hpp>
namespace


@@ -0,0 +1,44 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <boost/test/test_tools.hpp>
#include <utils.inl>
namespace {
int test_value;
void simple_thread()
{
test_value = 999;
}
void test_create_thread()
{
test_value = 0;
boost::thread_group thrds;
BOOST_TEST(thrds.create_thread(&simple_thread) != 0);
thrds.join_all();
BOOST_TEST(test_value == 999);
}
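// Exercises explicit add_thread/find/remove_thread bookkeeping; the thread is
// joined directly since it has been removed from the group.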
void test_add_find_remove()
{
test_value = 0;
boost::thread_group thrds;
boost::thread* pthread = new boost::thread(&simple_thread);
thrds.add_thread(pthread);
BOOST_TEST(thrds.find(*pthread) == pthread);
thrds.remove_thread(pthread);
BOOST_TEST(thrds.find(*pthread) == 0);
pthread->join();
BOOST_TEST(test_value == 999);
}
} // namespace
void test_thread_group()
{
test_create_thread();
test_add_find_remove();
}

258
test/test_thread_pool.cpp Normal file

@@ -0,0 +1,258 @@
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/thread_pool.hpp>
#include <boost/test/test_tools.hpp>
#include <string.h> // memset, memcpy (called unqualified below)
#include <math.h>   // sqrt
#include <stddef.h> // ptrdiff_t
const int MAX_POOL_THREADS=8;
const int MIN_POOL_THREADS=2;
const int POOL_TIMEOUT = 2; // seconds
const int ITERATIONS=25;
boost::mutex detach_prot;
boost::condition detached;
boost::condition waiting_for_detach;
int at_detach=0;
bool pool_detached=false;
const int DETACH_THREADS=2;
// Constant to cause the cpubound thread to take approx 0.5 seconds
// to complete. Doesn't have to be exact, but should take "a while"
const double SQRT_PER_SECOND=3000000.0;
enum
{
CHATTY_WORKER,
FAST_WORKER,
SLOW_WORKER,
CPUBOUND_WORKER,
WORKER_TYPE_COUNT
};
int work_counts[WORKER_TYPE_COUNT];
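// Adapts a C-style function pointer plus void* argument into a nullary
// functor that can be handed to thread_pool::add().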
class job_adapter
{
public:
job_adapter(void (*func)(void*), void* param)
: _func(func), _param(param){ }
void operator()() const { _func(_param); }
private:
void (*_func)(void*);
void* _param;
};
void chatty_worker()
{
work_counts[CHATTY_WORKER]++;
}
void fast_worker()
{
work_counts[FAST_WORKER]++;
}
void slow_worker()
{
boost::xtime xt;
boost::xtime_get(&xt,boost::TIME_UTC);
xt.sec++;
boost::thread::sleep(xt);
work_counts[SLOW_WORKER]++;
}
void cpubound_worker()
{
double d;
double limit = SQRT_PER_SECOND/2.0;
for(d = 1.0; d < limit; d+=1.0)
{
sqrt(d);
}
work_counts[CPUBOUND_WORKER]++;
}
struct recursive_args
{
boost::thread_pool *ptp;
int count;
};
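// Decrements the shared count and, while it remains positive, queues another
// copy of itself on the owning pool.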
void recursive_worker(void *arg)
{
recursive_args *pargs = static_cast<recursive_args *>(arg);
if(--pargs->count > 0)
pargs->ptp->add(job_adapter(recursive_worker,pargs));
}
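// Blocks until the pool has been detached, then runs slow_worker once; used by
// test_detach to show that detached threads keep doing work.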
void detach_worker(void *arg)
{
// The requested thread count is passed through the void* argument.
int detach_threads = static_cast<int>(reinterpret_cast<ptrdiff_t>(arg));
boost::mutex::scoped_lock l(detach_prot);
// If we are the Nth thread to reach this, notify
// our caller that everyone is ready to detach!
if(++at_detach==detach_threads)
waiting_for_detach.notify_all();
while(!pool_detached)
detached.wait(l);
// Call slow worker to do a bit of work after this...
slow_worker();
}
// Test a thread_pool with all different sorts of workers
void test_heterogeneous()
{
memset(work_counts,0,sizeof(work_counts));
boost::thread_pool tp(MAX_POOL_THREADS,MIN_POOL_THREADS,POOL_TIMEOUT);
for(int i = 0; i < ITERATIONS; i++)
{
tp.add(&chatty_worker);
tp.add(&fast_worker);
tp.add(&slow_worker);
tp.add(&cpubound_worker);
}
tp.join();
BOOST_TEST(work_counts[CHATTY_WORKER] == ITERATIONS);
BOOST_TEST(work_counts[FAST_WORKER] == ITERATIONS);
BOOST_TEST(work_counts[SLOW_WORKER] == ITERATIONS);
BOOST_TEST(work_counts[CPUBOUND_WORKER] == ITERATIONS);
}
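// Verify that a worker can safely queue additional jobs on its own pool.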
void test_recursive()
{
recursive_args ra;
boost::thread_pool tp;
ra.ptp = &tp;
ra.count = ITERATIONS;
// Recursive_worker will add another job to the queue before returning
tp.add(job_adapter(recursive_worker,static_cast<void *>(&ra)));
// busy wait for bottom to be reached.
while(ra.count > 0)
boost::thread::yield();
tp.join();
BOOST_TEST(ra.count == 0);
}
// Test cancellation of thread_pool operations.
void test_cancel()
{
int wc_after_cancel[WORKER_TYPE_COUNT];
memset(work_counts,0,sizeof(work_counts));
boost::thread_pool tp(MAX_POOL_THREADS,MIN_POOL_THREADS,POOL_TIMEOUT);
for(int i = 0; i < ITERATIONS; i++)
{
tp.add(&chatty_worker);
tp.add(&fast_worker);
tp.add(&slow_worker);
tp.add(&cpubound_worker);
}
tp.cancel();
// Save our worker counts
memcpy(wc_after_cancel,work_counts,sizeof(wc_after_cancel));
// Do a bit more work to prove we can continue after a cancel
tp.add(&chatty_worker);
tp.add(&fast_worker);
tp.add(&slow_worker);
tp.add(&cpubound_worker);
tp.join();
// Check our counts
// As long as ITERATIONS is decently sized, there is no way
// these tasks could have completed before the cancel...
BOOST_TEST(wc_after_cancel[SLOW_WORKER] < ITERATIONS);
BOOST_TEST(wc_after_cancel[CPUBOUND_WORKER] < ITERATIONS);
// Since they could not have completed, if we are processing jobs
// in a FIFO order, the others can't have completed either.
BOOST_TEST(wc_after_cancel[CHATTY_WORKER] < ITERATIONS);
BOOST_TEST(wc_after_cancel[FAST_WORKER] < ITERATIONS);
// Check to see that more work was accomplished after the cancel.
BOOST_TEST(wc_after_cancel[SLOW_WORKER] < work_counts[SLOW_WORKER]);
BOOST_TEST(wc_after_cancel[CPUBOUND_WORKER] < work_counts[CPUBOUND_WORKER]);
BOOST_TEST(wc_after_cancel[CHATTY_WORKER] < work_counts[CHATTY_WORKER]);
BOOST_TEST(wc_after_cancel[FAST_WORKER] < work_counts[FAST_WORKER]);
}
void test_detach()
{
int wc_after_detach;
memset(work_counts,0,sizeof(work_counts));
{
boost::mutex::scoped_lock l(detach_prot);
// For detach testing, we want a known size thread pool so that we can make a better guess
// at when the detached process will finish
boost::thread_pool tp(DETACH_THREADS,0);
for(int i = 0; i < DETACH_THREADS; i++)
{
tp.add(job_adapter(detach_worker,reinterpret_cast<void *>(DETACH_THREADS)));
}
// Wait for all of the threads to reach a known point
// (loop to guard against spurious wakeups).
while(at_detach < DETACH_THREADS)
waiting_for_detach.wait(l);
tp.detach();
wc_after_detach = work_counts[SLOW_WORKER];
// Let our threads know we've detached.
pool_detached = true;
detached.notify_all();
}
// Our detached threads should finish approx 1 sec after this.
// We could reliably sync. with the exit of detach_worker, but we
// can't reliably sync. with the cleanup of the thread_pool harness,
// so for the purposes of this test, we'll sleep 3 secs, and check some values.
boost::xtime xt;
boost::xtime_get(&xt,boost::TIME_UTC);
xt.sec += 3;
boost::thread::sleep(xt);
// Work should still complete after detach
BOOST_TEST(work_counts[SLOW_WORKER] == DETACH_THREADS);
// None of the work should have completed before the detach.
BOOST_TEST(0 == wc_after_detach);
}
void test_thread_pool()
{
test_heterogeneous();
test_recursive();
test_cancel();
test_detach();
}

37
test/utils.inl Normal file

@@ -0,0 +1,37 @@
#include <boost/thread/xtime.hpp>
#include <boost/cstdint.hpp> // boost::int_fast32_t, boost::int_fast64_t
namespace {
#if defined(BOOST_NO_INT64_T)
typedef boost::int_fast32_t sec_type;
#else
typedef boost::int_fast64_t sec_type;
#endif
typedef boost::int_fast32_t nsec_type;
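// Fills xt with the current UTC time advanced by secs seconds and nsecs
// nanoseconds (the nsec field is not normalized).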
static void xtime_get(boost::xtime& xt, sec_type secs, nsec_type nsecs=0)
{
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += secs;
xt.nsec += nsecs;
}
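// Three-way comparison of two xtime values: negative, zero, or positive, like strcmp.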
static int xtime_cmp(const boost::xtime& xt1, const boost::xtime& xt2)
{
int cmp = (int)(xt1.sec - xt2.sec);
if (cmp == 0)
cmp = (int)(xt1.nsec - xt2.nsec);
return cmp;
}
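// Returns true if xt lies between (now + min) and (now + max) seconds, inclusive.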
static bool xtime_in_range(const boost::xtime& xt, sec_type min, sec_type max)
{
boost::xtime xt_min, xt_max;
boost::xtime_get(&xt_min, boost::TIME_UTC);
xt_max = xt_min;
xt_min.sec += min;
xt_max.sec += max;
return (xtime_cmp(xt, xt_min) >= 0) && (xtime_cmp(xt, xt_max) <= 0);
}
} // namespace
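For reference, a minimal sketch of how a test might use these helpers; the helper names (xtime_get, xtime_in_range) come from the file above, while the surrounding statements are purely illustrative and only assume the boost::thread::sleep and BOOST_TEST facilities already used in these tests.
// Illustrative sketch only: sleep for roughly two seconds, then verify the wake-up time.
boost::xtime xt;
xtime_get(xt, 2);                      // target time = now + 2 seconds
boost::thread::sleep(xt);              // should return at or shortly after the target
BOOST_TEST(xtime_in_range(xt, -2, 0)); // the target now lies 0..2 seconds in the past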

2
tutorial/.cvsignore Normal file

@@ -0,0 +1,2 @@
bin
*.pdb