mirror of
https://github.com/boostorg/thread.git
synced 2026-02-03 09:42:16 +00:00
Compare commits
6 Commits
boost-1.34
...
boost-1.34
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1215b371c9 | ||
|
|
8f61694057 | ||
|
|
67f7de5305 | ||
|
|
6faecefb73 | ||
|
|
68c5bd44e8 | ||
|
|
3656277053 |
@@ -37,6 +37,16 @@
|
||||
<purpose>
|
||||
<para>The <classname>read_write_mutex</classname> class is a model of the
|
||||
<link linkend="thread.concepts.ReadWriteMutex">ReadWriteMutex</link> concept.</para>
|
||||
<note> Unfortunately it turned out that the current implementation of Read/Write Mutex has
|
||||
some serious problems. So it was decided not to put this implementation into
|
||||
release grade code. Also discussions on the mailing list led to the
|
||||
conclusion that the current concepts need to be rethought. In particular
|
||||
the schedulings <link linkend="thread.concepts.read-write-scheduling-policies.inter-class">
|
||||
Inter-Class Scheduling Policies</link> are deemed unnecessary.
|
||||
There seems to be common belief that a fair scheme suffices.
|
||||
The following documentation has been retained however, to give
|
||||
readers of this document the opportunity to study the original design.
|
||||
</note>
|
||||
</purpose>
|
||||
|
||||
<description>
|
||||
@@ -160,6 +170,16 @@
|
||||
<purpose>
|
||||
<para>The <classname>try_read_write_mutex</classname> class is a model of the
|
||||
<link linkend="thread.concepts.TryReadWriteMutex">TryReadWriteMutex</link> concept.</para>
|
||||
<note> Unfortunately it turned out that the current implementation of Read/Write Mutex has
|
||||
some serious problems. So it was decided not to put this implementation into
|
||||
release grade code. Also discussions on the mailing list led to the
|
||||
conclusion that the current concepts need to be rethought. In particular
|
||||
the schedulings <link linkend="thread.concepts.read-write-scheduling-policies.inter-class">
|
||||
Inter-Class Scheduling Policies</link> are deemed unnecessary.
|
||||
There seems to be common belief that a fair scheme suffices.
|
||||
The following documentation has been retained however, to give
|
||||
readers of this document the opportunity to study the original design.
|
||||
</note>
|
||||
</purpose>
|
||||
|
||||
<description>
|
||||
@@ -302,6 +322,16 @@
|
||||
<purpose>
|
||||
<para>The <classname>timed_read_write_mutex</classname> class is a model of the
|
||||
<link linkend="thread.concepts.TimedReadWriteMutex">TimedReadWriteMutex</link> concept.</para>
|
||||
<note> Unfortunately it turned out that the current implementation of Read/Write Mutex has
|
||||
some serious problems. So it was decided not to put this implementation into
|
||||
release grade code. Also discussions on the mailing list led to the
|
||||
conclusion that the current concepts need to be rethought. In particular
|
||||
the schedulings <link linkend="thread.concepts.read-write-scheduling-policies.inter-class">
|
||||
Inter-Class Scheduling Policies</link> are deemed unnecessary.
|
||||
There seems to be common belief that a fair scheme suffices.
|
||||
The following documentation has been retained however, to give
|
||||
readers of this document the opportunity to study the original design.
|
||||
</note>
|
||||
</purpose>
|
||||
|
||||
<description>
|
||||
|
||||
@@ -1,287 +0,0 @@
|
||||
// Copyright (C) 2002-2003
|
||||
// David Moore, William E. Kempf, Michael Glassford
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
// A Boost::threads implementation of a synchronization
|
||||
// primitive which can allow multiple readers or a single
|
||||
// writer to have access to a shared resource.
|
||||
|
||||
#ifndef BOOST_READ_WRITE_MUTEX_JDM030602_HPP
|
||||
#define BOOST_READ_WRITE_MUTEX_JDM030602_HPP
|
||||
|
||||
#error Read Write Mutex is broken, do not include this header
|
||||
|
||||
#include <boost/thread/detail/config.hpp>
|
||||
|
||||
#include <boost/utility.hpp>
|
||||
#include <boost/detail/workaround.hpp>
|
||||
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <boost/thread/detail/lock.hpp>
|
||||
#include <boost/thread/detail/read_write_lock.hpp>
|
||||
#include <boost/thread/condition.hpp>
|
||||
|
||||
namespace boost {
|
||||
// disable warnings about non dll import
|
||||
// see: http://www.boost.org/more/separate_compilation.html#dlls
|
||||
#ifdef BOOST_MSVC
|
||||
# pragma warning(push)
|
||||
# pragma warning(disable: 4251 4231 4660 4275)
|
||||
#endif
|
||||
namespace read_write_scheduling_policy {
|
||||
enum read_write_scheduling_policy_enum
|
||||
{
|
||||
writer_priority, //Prefer writers; can starve readers
|
||||
reader_priority, //Prefer readers; can starve writers
|
||||
alternating_many_reads, //Alternate readers and writers; before a writer, release all queued readers
|
||||
alternating_single_read //Alternate readers and writers; before a writer, release only one queued reader
|
||||
};
|
||||
} // namespace read_write_scheduling_policy
|
||||
|
||||
namespace detail {
|
||||
|
||||
namespace thread {
|
||||
|
||||
// Shared implementation construct for explicit Scheduling Policies
|
||||
// This implementation is susceptible to self-deadlock, though....
|
||||
template<typename Mutex>
|
||||
struct read_write_mutex_impl
|
||||
{
|
||||
typedef Mutex mutex_type;
|
||||
typedef detail::thread::scoped_lock<Mutex> scoped_lock;
|
||||
typedef detail::thread::scoped_try_lock<Mutex> scoped_try_lock;
|
||||
typedef detail::thread::scoped_timed_lock<Mutex> scoped_timed_lock;
|
||||
|
||||
read_write_mutex_impl(read_write_scheduling_policy::read_write_scheduling_policy_enum sp);
|
||||
#if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(582))
|
||||
~read_write_mutex_impl();
|
||||
#endif
|
||||
|
||||
Mutex m_prot;
|
||||
|
||||
const read_write_scheduling_policy::read_write_scheduling_policy_enum m_sp;
|
||||
int m_state; //-1 = write lock; 0 = unlocked; >0 = read locked
|
||||
|
||||
boost::condition m_waiting_writers;
|
||||
boost::condition m_waiting_readers;
|
||||
boost::condition m_waiting_promotion;
|
||||
int m_num_waiting_writers;
|
||||
int m_num_waiting_readers;
|
||||
bool m_state_waiting_promotion;
|
||||
|
||||
int m_num_waking_writers;
|
||||
int m_num_waking_readers;
|
||||
int m_num_max_waking_writers; //Debug only
|
||||
int m_num_max_waking_readers; //Debug only
|
||||
|
||||
bool m_readers_next;
|
||||
|
||||
void do_read_lock();
|
||||
void do_write_lock();
|
||||
void do_write_unlock();
|
||||
void do_read_unlock();
|
||||
bool do_try_write_lock();
|
||||
bool do_try_read_lock();
|
||||
bool do_timed_write_lock(const xtime &xt);
|
||||
bool do_timed_read_lock(const xtime &xt);
|
||||
|
||||
void do_demote_to_read_lock();
|
||||
bool do_try_demote_to_read_lock();
|
||||
bool do_timed_demote_to_read_lock(const xtime &xt);
|
||||
|
||||
void do_promote_to_write_lock();
|
||||
bool do_try_promote_to_write_lock();
|
||||
bool do_timed_promote_to_write_lock(const xtime &xt);
|
||||
|
||||
bool locked();
|
||||
read_write_lock_state::read_write_lock_state_enum state();
|
||||
|
||||
private:
|
||||
|
||||
bool do_demote_to_read_lock_impl();
|
||||
|
||||
enum scheduling_reason
|
||||
{
|
||||
scheduling_reason_unlock,
|
||||
scheduling_reason_timeout,
|
||||
scheduling_reason_demote
|
||||
};
|
||||
|
||||
void do_scheduling_impl(const scheduling_reason reason);
|
||||
bool do_wake_one_reader(void);
|
||||
bool do_wake_all_readers(void);
|
||||
bool do_wake_writer(void);
|
||||
bool waker_exists(void);
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
} // namespace thread
|
||||
|
||||
class BOOST_THREAD_DECL read_write_mutex : private noncopyable
|
||||
{
|
||||
public:
|
||||
|
||||
read_write_mutex(read_write_scheduling_policy::read_write_scheduling_policy_enum sp);
|
||||
~read_write_mutex();
|
||||
|
||||
read_write_scheduling_policy::read_write_scheduling_policy_enum policy() const { return m_impl.m_sp; }
|
||||
|
||||
friend class detail::thread::read_write_lock_ops<read_write_mutex>;
|
||||
|
||||
typedef detail::thread::scoped_read_write_lock<
|
||||
read_write_mutex> scoped_read_write_lock;
|
||||
|
||||
typedef detail::thread::scoped_read_lock<
|
||||
read_write_mutex> scoped_read_lock;
|
||||
|
||||
typedef detail::thread::scoped_write_lock<
|
||||
read_write_mutex> scoped_write_lock;
|
||||
|
||||
private:
|
||||
|
||||
// Operations that will eventually be done only
|
||||
// via lock types
|
||||
void do_write_lock();
|
||||
void do_read_lock();
|
||||
void do_write_unlock();
|
||||
void do_read_unlock();
|
||||
|
||||
void do_demote_to_read_lock();
|
||||
|
||||
void do_promote_to_write_lock();
|
||||
|
||||
bool locked();
|
||||
read_write_lock_state::read_write_lock_state_enum state();
|
||||
|
||||
detail::thread::read_write_mutex_impl<mutex> m_impl;
|
||||
};
|
||||
|
||||
class BOOST_THREAD_DECL try_read_write_mutex : private noncopyable
|
||||
{
|
||||
public:
|
||||
|
||||
try_read_write_mutex(read_write_scheduling_policy::read_write_scheduling_policy_enum sp);
|
||||
~try_read_write_mutex();
|
||||
|
||||
read_write_scheduling_policy::read_write_scheduling_policy_enum policy() const { return m_impl.m_sp; }
|
||||
|
||||
friend class detail::thread::read_write_lock_ops<try_read_write_mutex>;
|
||||
|
||||
typedef detail::thread::scoped_read_write_lock<
|
||||
try_read_write_mutex> scoped_read_write_lock;
|
||||
typedef detail::thread::scoped_try_read_write_lock<
|
||||
try_read_write_mutex> scoped_try_read_write_lock;
|
||||
|
||||
typedef detail::thread::scoped_read_lock<
|
||||
try_read_write_mutex> scoped_read_lock;
|
||||
typedef detail::thread::scoped_try_read_lock<
|
||||
try_read_write_mutex> scoped_try_read_lock;
|
||||
|
||||
typedef detail::thread::scoped_write_lock<
|
||||
try_read_write_mutex> scoped_write_lock;
|
||||
typedef detail::thread::scoped_try_write_lock<
|
||||
try_read_write_mutex> scoped_try_write_lock;
|
||||
|
||||
private:
|
||||
|
||||
// Operations that will eventually be done only
|
||||
// via lock types
|
||||
void do_write_lock();
|
||||
void do_read_lock();
|
||||
void do_write_unlock();
|
||||
void do_read_unlock();
|
||||
bool do_try_write_lock();
|
||||
bool do_try_read_lock();
|
||||
|
||||
|
||||
void do_demote_to_read_lock();
|
||||
bool do_try_demote_to_read_lock();
|
||||
|
||||
void do_promote_to_write_lock();
|
||||
bool do_try_promote_to_write_lock();
|
||||
|
||||
bool locked();
|
||||
read_write_lock_state::read_write_lock_state_enum state();
|
||||
|
||||
detail::thread::read_write_mutex_impl<try_mutex> m_impl;
|
||||
};
|
||||
|
||||
class BOOST_THREAD_DECL timed_read_write_mutex : private noncopyable
|
||||
{
|
||||
public:
|
||||
|
||||
timed_read_write_mutex(read_write_scheduling_policy::read_write_scheduling_policy_enum sp);
|
||||
~timed_read_write_mutex();
|
||||
|
||||
read_write_scheduling_policy::read_write_scheduling_policy_enum policy() const { return m_impl.m_sp; }
|
||||
|
||||
friend class detail::thread::read_write_lock_ops<timed_read_write_mutex>;
|
||||
|
||||
typedef detail::thread::scoped_read_write_lock<
|
||||
timed_read_write_mutex> scoped_read_write_lock;
|
||||
typedef detail::thread::scoped_try_read_write_lock<
|
||||
timed_read_write_mutex> scoped_try_read_write_lock;
|
||||
typedef detail::thread::scoped_timed_read_write_lock<
|
||||
timed_read_write_mutex> scoped_timed_read_write_lock;
|
||||
|
||||
typedef detail::thread::scoped_read_lock<
|
||||
timed_read_write_mutex> scoped_read_lock;
|
||||
typedef detail::thread::scoped_try_read_lock<
|
||||
timed_read_write_mutex> scoped_try_read_lock;
|
||||
typedef detail::thread::scoped_timed_read_lock<
|
||||
timed_read_write_mutex> scoped_timed_read_lock;
|
||||
|
||||
typedef detail::thread::scoped_write_lock<
|
||||
timed_read_write_mutex> scoped_write_lock;
|
||||
typedef detail::thread::scoped_try_write_lock<
|
||||
timed_read_write_mutex> scoped_try_write_lock;
|
||||
typedef detail::thread::scoped_timed_write_lock<
|
||||
timed_read_write_mutex> scoped_timed_write_lock;
|
||||
|
||||
private:
|
||||
|
||||
// Operations that will eventually be done only
|
||||
// via lock types
|
||||
void do_write_lock();
|
||||
void do_read_lock();
|
||||
void do_write_unlock();
|
||||
void do_read_unlock();
|
||||
bool do_try_write_lock();
|
||||
bool do_try_read_lock();
|
||||
bool do_timed_write_lock(const xtime &xt);
|
||||
bool do_timed_read_lock(const xtime &xt);
|
||||
|
||||
void do_demote_to_read_lock();
|
||||
bool do_try_demote_to_read_lock();
|
||||
bool do_timed_demote_to_read_lock(const xtime &xt);
|
||||
|
||||
void do_promote_to_write_lock();
|
||||
bool do_try_promote_to_write_lock();
|
||||
bool do_timed_promote_to_write_lock(const xtime &xt);
|
||||
|
||||
bool locked();
|
||||
read_write_lock_state::read_write_lock_state_enum state();
|
||||
|
||||
detail::thread::read_write_mutex_impl<timed_mutex> m_impl;
|
||||
};
|
||||
#ifdef BOOST_MSVC
|
||||
# pragma warning(pop)
|
||||
#endif
|
||||
} // namespace boost
|
||||
|
||||
#endif
|
||||
|
||||
// Change Log:
|
||||
// 10 Mar 02
|
||||
// Original version.
|
||||
// 4 May 04 GlassfordM
|
||||
// Implement lock promotion and demotion.
|
||||
// Add locked() and state() member functions for debugging
|
||||
// (should these be made public?).
|
||||
// Rename to improve consistency and eliminate abbreviations:
|
||||
// Use "read" and "write" instead of "shared" and "exclusive".
|
||||
// Change "rd" to "read", "wr" to "write", "rw" to "read_write".
|
||||
// Add mutex_type typdef.
|
||||
@@ -24,7 +24,15 @@ void init_TryEnterCriticalSection()
|
||||
version_info.dwMajorVersion >= 4)
|
||||
{
|
||||
if (HMODULE kernel_module = GetModuleHandle(TEXT("KERNEL32.DLL")))
|
||||
g_TryEnterCriticalSection = reinterpret_cast<TryEnterCriticalSection_type>(GetProcAddress(kernel_module, TEXT("TryEnterCriticalSection")));
|
||||
{
|
||||
g_TryEnterCriticalSection = reinterpret_cast<TryEnterCriticalSection_type>(
|
||||
#if defined(BOOST_NO_ANSI_APIS)
|
||||
GetProcAddressW(kernel_module, L"TryEnterCriticalSection")
|
||||
#else
|
||||
GetProcAddress(kernel_module, "TryEnterCriticalSection")
|
||||
#endif
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -31,7 +31,7 @@ boost::once_flag tss_data_once = BOOST_ONCE_INIT;
|
||||
boost::mutex* tss_data_mutex = 0;
|
||||
tss_data_cleanup_handlers_type* tss_data_cleanup_handlers = 0;
|
||||
#if defined(BOOST_HAS_WINTHREADS)
|
||||
DWORD tss_data_native_key;
|
||||
DWORD tss_data_native_key=TLS_OUT_OF_INDEXES;
|
||||
#elif defined(BOOST_HAS_PTHREADS)
|
||||
pthread_key_t tss_data_native_key;
|
||||
#elif defined(BOOST_HAS_MPTASKS)
|
||||
@@ -60,6 +60,7 @@ void tss_data_dec_use(boost::mutex::scoped_lock& lk)
|
||||
tss_data_mutex = 0;
|
||||
#if defined(BOOST_HAS_WINTHREADS)
|
||||
TlsFree(tss_data_native_key);
|
||||
tss_data_native_key=TLS_OUT_OF_INDEXES;
|
||||
#elif defined(BOOST_HAS_PTHREADS)
|
||||
pthread_key_delete(tss_data_native_key);
|
||||
#elif defined(BOOST_HAS_MPTASKS)
|
||||
@@ -78,6 +79,9 @@ extern "C" void cleanup_slots(void* p)
|
||||
(*(*tss_data_cleanup_handlers)[i])((*slots)[i]);
|
||||
(*slots)[i] = 0;
|
||||
}
|
||||
#if defined(BOOST_HAS_WINTHREADS)
|
||||
TlsSetValue(tss_data_native_key,0);
|
||||
#endif
|
||||
tss_data_dec_use(lock);
|
||||
delete slots;
|
||||
}
|
||||
@@ -97,7 +101,7 @@ void init_tss_data()
|
||||
|
||||
//Allocate tls slot
|
||||
tss_data_native_key = TlsAlloc();
|
||||
if (tss_data_native_key == 0xFFFFFFFF)
|
||||
if (tss_data_native_key == TLS_OUT_OF_INDEXES)
|
||||
return;
|
||||
#elif defined(BOOST_HAS_PTHREADS)
|
||||
int res = pthread_key_create(&tss_data_native_key, &cleanup_slots);
|
||||
|
||||
@@ -1,786 +0,0 @@
|
||||
// Copyright (C) 2001-2003
|
||||
// William E. Kempf
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/thread/detail/config.hpp>
|
||||
|
||||
#include <boost/thread/thread.hpp>
|
||||
#include <boost/thread/xtime.hpp>
|
||||
#include <boost/thread/read_write_mutex.hpp>
|
||||
|
||||
#include <boost/test/unit_test.hpp>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#define TS_CHECK(pred) \
|
||||
do { if (!(pred)) BOOST_ERROR (#pred); } while (0)
|
||||
#define TS_CHECK_MSG(pred, msg) \
|
||||
do { if (!(pred)) BOOST_ERROR (msg); } while (0)
|
||||
|
||||
namespace {
|
||||
|
||||
int shared_val = 0;
|
||||
|
||||
boost::xtime xsecs(int secs)
|
||||
{
|
||||
//Create an xtime that is secs seconds from now
|
||||
boost::xtime ret;
|
||||
TS_CHECK (boost::TIME_UTC == boost::xtime_get(&ret, boost::TIME_UTC));
|
||||
ret.sec += secs;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define MESSAGE "w1=" << w1.value_ << ", w2=" << w2.value_ << ", r1=" << r1.value_ << ", r2=" << r2.value_
|
||||
|
||||
template <typename RW>
|
||||
class thread_adapter
|
||||
{
|
||||
public:
|
||||
|
||||
thread_adapter(
|
||||
void (*func)(void*, RW&),
|
||||
void* param1,
|
||||
RW ¶m2
|
||||
)
|
||||
: func_(func)
|
||||
, param1_(param1)
|
||||
, param2_(param2)
|
||||
{}
|
||||
|
||||
void operator()() const
|
||||
{
|
||||
func_(param1_, param2_);
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
void (*func_)(void*, RW&);
|
||||
void* param1_;
|
||||
RW& param2_;
|
||||
};
|
||||
|
||||
const int k_data_init = -1;
|
||||
|
||||
template <typename RW>
|
||||
struct data
|
||||
{
|
||||
data(
|
||||
int id,
|
||||
RW& m,
|
||||
int wait_for_lock_secs,
|
||||
int sleep_with_lock_secs,
|
||||
bool demote_after_write = false
|
||||
)
|
||||
: id_(id)
|
||||
, wait_for_lock_secs_(wait_for_lock_secs)
|
||||
, sleep_with_lock_secs_(sleep_with_lock_secs)
|
||||
, test_promotion_and_demotion_(demote_after_write)
|
||||
, value_(k_data_init)
|
||||
, rw_(m)
|
||||
{}
|
||||
|
||||
int id_;
|
||||
int wait_for_lock_secs_;
|
||||
int sleep_with_lock_secs_;
|
||||
bool test_promotion_and_demotion_;
|
||||
int value_;
|
||||
|
||||
RW& rw_;
|
||||
};
|
||||
|
||||
template<typename RW>
|
||||
void plain_writer(void* arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
data<RW>* pdata = (data<RW>*) arg;
|
||||
TS_CHECK_MSG(pdata->wait_for_lock_secs_ == 0, "pdata->wait_for_lock_secs_: " << pdata->wait_for_lock_secs_);
|
||||
|
||||
typename RW::scoped_read_write_lock l(
|
||||
rw,
|
||||
pdata->test_promotion_and_demotion_
|
||||
? boost::read_write_lock_state::read_locked
|
||||
: boost::read_write_lock_state::write_locked
|
||||
);
|
||||
|
||||
bool succeeded = true;
|
||||
|
||||
if (pdata->test_promotion_and_demotion_)
|
||||
{
|
||||
try
|
||||
{
|
||||
l.promote();
|
||||
}
|
||||
catch(const boost::lock_error&)
|
||||
{
|
||||
succeeded = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (succeeded)
|
||||
{
|
||||
if (pdata->sleep_with_lock_secs_ > 0)
|
||||
boost::thread::sleep(xsecs(pdata->sleep_with_lock_secs_));
|
||||
|
||||
shared_val += 10;
|
||||
|
||||
if (pdata->test_promotion_and_demotion_)
|
||||
l.demote();
|
||||
|
||||
pdata->value_ = shared_val;
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "plain_writer() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void plain_reader(void* arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
data<RW>* pdata = (data<RW>*)arg;
|
||||
TS_CHECK(!pdata->test_promotion_and_demotion_);
|
||||
TS_CHECK_MSG(pdata->wait_for_lock_secs_ == 0, "pdata->wait_for_lock_secs_: " << pdata->wait_for_lock_secs_);
|
||||
|
||||
typename RW::scoped_read_write_lock l(rw, boost::read_write_lock_state::read_locked);
|
||||
|
||||
if (pdata->sleep_with_lock_secs_ > 0)
|
||||
boost::thread::sleep(xsecs(pdata->sleep_with_lock_secs_));
|
||||
|
||||
pdata->value_ = shared_val;
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "plain_reader() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void try_writer(void* arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
data<RW>* pdata = (data<RW>*) arg;
|
||||
TS_CHECK_MSG(pdata->wait_for_lock_secs_ == 0, "pdata->wait_for_lock_secs_: " << pdata->wait_for_lock_secs_);
|
||||
|
||||
typename RW::scoped_try_read_write_lock l(rw, boost::read_write_lock_state::unlocked);
|
||||
|
||||
bool succeeded = false;
|
||||
|
||||
if (pdata->test_promotion_and_demotion_)
|
||||
succeeded = l.try_read_lock() && l.try_promote();
|
||||
else
|
||||
succeeded = l.try_write_lock();
|
||||
|
||||
if (succeeded)
|
||||
{
|
||||
if (pdata->sleep_with_lock_secs_ > 0)
|
||||
boost::thread::sleep(xsecs(pdata->sleep_with_lock_secs_));
|
||||
|
||||
shared_val += 10;
|
||||
|
||||
if (pdata->test_promotion_and_demotion_)
|
||||
l.demote();
|
||||
|
||||
pdata->value_ = shared_val;
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "try_writer() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void try_reader(void*arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
data<RW>* pdata = (data<RW>*)arg;
|
||||
TS_CHECK(!pdata->test_promotion_and_demotion_);
|
||||
TS_CHECK_MSG(pdata->wait_for_lock_secs_ == 0, "pdata->wait_for_lock_secs_: " << pdata->wait_for_lock_secs_);
|
||||
|
||||
typename RW::scoped_try_read_write_lock l(rw, boost::read_write_lock_state::unlocked);
|
||||
|
||||
if (l.try_read_lock())
|
||||
{
|
||||
if (pdata->sleep_with_lock_secs_ > 0)
|
||||
boost::thread::sleep(xsecs(pdata->sleep_with_lock_secs_));
|
||||
|
||||
pdata->value_ = shared_val;
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "try_reader() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void timed_writer(void* arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
data<RW>* pdata = (data<RW>*)arg;
|
||||
|
||||
typename RW::scoped_timed_read_write_lock l(rw, boost::read_write_lock_state::unlocked);
|
||||
|
||||
bool succeeded = false;
|
||||
|
||||
boost::xtime xt = xsecs(pdata->wait_for_lock_secs_);
|
||||
if (pdata->test_promotion_and_demotion_)
|
||||
succeeded = l.timed_read_lock(xt) && l.timed_promote(xt);
|
||||
else
|
||||
succeeded = l.timed_write_lock(xt);
|
||||
|
||||
if (succeeded)
|
||||
{
|
||||
if (pdata->sleep_with_lock_secs_ > 0)
|
||||
boost::thread::sleep(xsecs(pdata->sleep_with_lock_secs_));
|
||||
|
||||
shared_val += 10;
|
||||
|
||||
if (pdata->test_promotion_and_demotion_)
|
||||
l.demote();
|
||||
|
||||
pdata->value_ = shared_val;
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "timed_writer() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void timed_reader(void* arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
data<RW>* pdata = (data<RW>*)arg;
|
||||
TS_CHECK(!pdata->test_promotion_and_demotion_);
|
||||
|
||||
typename RW::scoped_timed_read_write_lock l(rw,boost::read_write_lock_state::unlocked);
|
||||
|
||||
boost::xtime xt = xsecs(pdata->wait_for_lock_secs_);
|
||||
if (l.timed_read_lock(xt))
|
||||
{
|
||||
if (pdata->sleep_with_lock_secs_ > 0)
|
||||
boost::thread::sleep(xsecs(pdata->sleep_with_lock_secs_));
|
||||
|
||||
pdata->value_ = shared_val;
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "timed_reader() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void clear_data(data<RW>& data1, data<RW>& data2, data<RW>& data3, data<RW>& data4)
|
||||
{
|
||||
shared_val = 0;
|
||||
data1.value_ = k_data_init;
|
||||
data2.value_ = k_data_init;
|
||||
data3.value_ = k_data_init;
|
||||
data4.value_ = k_data_init;
|
||||
}
|
||||
|
||||
bool shared_test_writelocked = false;
|
||||
bool shared_test_readlocked = false;
|
||||
bool shared_test_unlocked = false;
|
||||
|
||||
template<typename RW>
|
||||
void run_try_tests(void* arg, RW& rw)
|
||||
{
|
||||
try
|
||||
{
|
||||
TS_CHECK(shared_test_writelocked || shared_test_readlocked || shared_test_unlocked);
|
||||
|
||||
typename RW::scoped_try_read_write_lock l(rw, boost::read_write_lock_state::unlocked);
|
||||
|
||||
if (shared_test_writelocked)
|
||||
{
|
||||
//Verify that write lock blocks other write locks
|
||||
TS_CHECK(!l.try_write_lock());
|
||||
|
||||
//Verify that write lock blocks read locks
|
||||
TS_CHECK(!l.try_read_lock());
|
||||
}
|
||||
else if (shared_test_readlocked)
|
||||
{
|
||||
//Verify that read lock blocks write locks
|
||||
TS_CHECK(!l.try_write_lock());
|
||||
|
||||
//Verify that read lock does not block other read locks
|
||||
TS_CHECK(l.try_read_lock());
|
||||
|
||||
//Verify that read lock blocks promotion
|
||||
TS_CHECK(!l.try_promote());
|
||||
}
|
||||
else if (shared_test_unlocked)
|
||||
{
|
||||
//Verify that unlocked does not blocks write locks
|
||||
TS_CHECK(l.try_write_lock());
|
||||
|
||||
//Verify that unlocked does not block demotion
|
||||
TS_CHECK(l.try_demote());
|
||||
|
||||
l.unlock();
|
||||
|
||||
//Verify that unlocked does not block read locks
|
||||
TS_CHECK(l.try_read_lock());
|
||||
|
||||
//Verify that unlocked does not block promotion
|
||||
TS_CHECK(l.try_promote());
|
||||
|
||||
l.unlock();
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
TS_CHECK_MSG(false, "run_try_tests() exception!");
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void test_plain_read_write_mutex(RW& rw, bool test_promotion_and_demotion)
|
||||
{
|
||||
//Verify that a write lock prevents both readers and writers from obtaining a lock
|
||||
{
|
||||
shared_val = 0;
|
||||
data<RW> r1(1, rw, 0, 0);
|
||||
data<RW> r2(2, rw, 0, 0);
|
||||
data<RW> w1(3, rw, 0, 0);
|
||||
data<RW> w2(4, rw, 0, 0);
|
||||
|
||||
//Write-lock the mutex and queue up other readers and writers
|
||||
|
||||
typename RW::scoped_read_write_lock l(rw, boost::read_write_lock_state::write_locked);
|
||||
|
||||
boost::thread tr1(thread_adapter<RW>(plain_reader, &r1, rw));
|
||||
boost::thread tr2(thread_adapter<RW>(plain_reader, &r2, rw));
|
||||
boost::thread tw1(thread_adapter<RW>(plain_writer, &w1, rw));
|
||||
boost::thread tw2(thread_adapter<RW>(plain_writer, &w2, rw));
|
||||
|
||||
boost::thread::sleep(xsecs(1));
|
||||
|
||||
//At this point, neither queued readers nor queued writers should have obtained access
|
||||
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init, MESSAGE);
|
||||
|
||||
if (test_promotion_and_demotion)
|
||||
{
|
||||
l.demote();
|
||||
boost::thread::sleep(xsecs(1));
|
||||
//:boost::thread tr3(thread_adapter<RW>(plain_reader, &r3, rw));
|
||||
|
||||
if (rw.policy() == boost::read_write_scheduling_policy::writer_priority)
|
||||
{
|
||||
//Expected result:
|
||||
//Since writers have priority, demotion doesn't release any readers.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init, MESSAGE);
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::reader_priority)
|
||||
{
|
||||
//Expected result:
|
||||
//Since readers have priority, demotion releases all readers.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::alternating_many_reads)
|
||||
{
|
||||
//Expected result:
|
||||
//Since readers can be released many at a time, demotion releases all queued readers.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
//:TS_CHECK_MSG(r3.value_ == k_data_init, MESSAGE);
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::alternating_single_read)
|
||||
{
|
||||
//Expected result:
|
||||
//Since readers can be released only one at a time, demotion releases one queued reader.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init || r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init || r2.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ != r2.value_, MESSAGE);
|
||||
}
|
||||
}
|
||||
|
||||
l.unlock();
|
||||
|
||||
tr2.join();
|
||||
tr1.join();
|
||||
tw2.join();
|
||||
tw1.join();
|
||||
|
||||
if (rw.policy() == boost::read_write_scheduling_policy::writer_priority)
|
||||
{
|
||||
if (!test_promotion_and_demotion)
|
||||
{
|
||||
//Expected result:
|
||||
//1) either w1 or w2 obtains and releases the lock
|
||||
//2) the other of w1 and w2 obtains and releases the lock
|
||||
//3) r1 and r2 obtain and release the lock "simultaneously"
|
||||
TS_CHECK_MSG(w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 20, MESSAGE);
|
||||
}
|
||||
else
|
||||
{
|
||||
//Expected result:
|
||||
//The same, except that either w1 or w2 (but not both) may
|
||||
//fail to promote to a write lock,
|
||||
//and r1, r2, or both may "sneak in" ahead of w1 and/or w2
|
||||
//by obtaining a read lock before w1 or w2 can promote
|
||||
//their initial read lock to a write lock.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init || w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init || w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init || r1.value_ == 10 || r1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init || r2.value_ == 10 || r2.value_ == 20, MESSAGE);
|
||||
}
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::reader_priority)
|
||||
{
|
||||
if (!test_promotion_and_demotion)
|
||||
{
|
||||
//Expected result:
|
||||
//1) r1 and r2 obtain and release the lock "simultaneously"
|
||||
//2) either w1 or w2 obtains and releases the lock
|
||||
//3) the other of w1 and w2 obtains and releases the lock
|
||||
TS_CHECK_MSG(w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
}
|
||||
else
|
||||
{
|
||||
//Expected result:
|
||||
//The same, except that either w1 or w2 (but not both) may
|
||||
//fail to promote to a write lock.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init || w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init || w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
}
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::alternating_many_reads)
|
||||
{
|
||||
if (!test_promotion_and_demotion)
|
||||
{
|
||||
//Expected result:
|
||||
//1) r1 and r2 obtain and release the lock "simultaneously"
|
||||
//2) either w1 or w2 obtains and releases the lock
|
||||
//3) the other of w1 and w2 obtains and releases the lock
|
||||
TS_CHECK_MSG(w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
}
|
||||
else
|
||||
{
|
||||
//Expected result:
|
||||
//The same, except that either w1 or w2 (but not both) may
|
||||
//fail to promote to a write lock.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init || w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init || w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
}
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::alternating_single_read)
|
||||
{
|
||||
if (!test_promotion_and_demotion)
|
||||
{
|
||||
//Expected result:
|
||||
//1) either r1 or r2 obtains and releases the lock
|
||||
//2) either w1 or w2 obtains and releases the lock
|
||||
//3) the other of r1 and r2 obtains and releases the lock
|
||||
//4) the other of w1 and w2 obtains and release the lock
|
||||
TS_CHECK_MSG(w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0 || r1.value_ == 10, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0 || r2.value_ == 10, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ != r2.value_, MESSAGE);
|
||||
}
|
||||
else
|
||||
{
|
||||
//Expected result:
|
||||
//Since w1 and w2 start as read locks, r1, r2, w1, and w2
|
||||
//obtain read locks "simultaneously". Each of w1 and w2,
|
||||
//after it obtain a read lock, attempts to promote to a
|
||||
//write lock; this attempt fails if the other has
|
||||
//already done so and currently holds the write lock;
|
||||
//otherwise it will succeed as soon as any other
|
||||
//read locks have been released.
|
||||
//In other words, any ordering is possible, and either
|
||||
//w1 or w2 (but not both) may fail to obtain the lock
|
||||
//altogether.
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init || w1.value_ == 10 || w1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init || w2.value_ == 10 || w2.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(w1.value_ != w2.value_, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0 || r1.value_ == 10 || r1.value_ == 20, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0 || r2.value_ == 10 || r2.value_ == 20, MESSAGE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//Verify that a read lock prevents readers but not writers from obtaining a lock
|
||||
{
|
||||
shared_val = 0;
|
||||
data<RW> r1(1, rw, 0, 0);
|
||||
data<RW> r2(2, rw, 0, 0);
|
||||
data<RW> w1(3, rw, 0, 0);
|
||||
data<RW> w2(4, rw, 0, 0);
|
||||
|
||||
//Read-lock the mutex and queue up other readers and writers
|
||||
|
||||
typename RW::scoped_read_write_lock l(rw, boost::read_write_lock_state::read_locked);
|
||||
|
||||
boost::thread tr1(thread_adapter<RW>(plain_reader, &r1, rw));
|
||||
boost::thread tr2(thread_adapter<RW>(plain_reader, &r2, rw));
|
||||
|
||||
boost::thread::sleep(xsecs(1));
|
||||
|
||||
boost::thread tw1(thread_adapter<RW>(plain_writer, &w1, rw));
|
||||
boost::thread tw2(thread_adapter<RW>(plain_writer, &w2, rw));
|
||||
|
||||
boost::thread::sleep(xsecs(1));
|
||||
|
||||
//Expected result: all readers passed through before the writers entered
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
|
||||
if (test_promotion_and_demotion)
|
||||
{
|
||||
l.promote();
|
||||
}
|
||||
|
||||
l.unlock();
|
||||
|
||||
tr2.join();
|
||||
tr1.join();
|
||||
tw2.join();
|
||||
tw1.join();
|
||||
}
|
||||
|
||||
//Verify that a read lock prevents readers but not writers from obtaining a lock
|
||||
{
|
||||
shared_val = 0;
|
||||
data<RW> r1(1, rw, 0, 0);
|
||||
data<RW> r2(2, rw, 0, 0);
|
||||
data<RW> w1(3, rw, 0, 0);
|
||||
data<RW> w2(4, rw, 0, 0);
|
||||
|
||||
//Read-lock the mutex and queue up other readers and writers
|
||||
|
||||
typename RW::scoped_read_write_lock l(rw, boost::read_write_lock_state::read_locked);
|
||||
|
||||
boost::thread tw1(thread_adapter<RW>(plain_writer, &w1, rw));
|
||||
boost::thread tw2(thread_adapter<RW>(plain_writer, &w2, rw));
|
||||
|
||||
boost::thread::sleep(xsecs(1));
|
||||
|
||||
boost::thread tr1(thread_adapter<RW>(plain_reader, &r1, rw));
|
||||
boost::thread tr2(thread_adapter<RW>(plain_reader, &r2, rw));
|
||||
|
||||
boost::thread::sleep(xsecs(1));
|
||||
|
||||
if (rw.policy() == boost::read_write_scheduling_policy::writer_priority)
|
||||
{
|
||||
//Expected result:
|
||||
//Writers have priority, so no readers have been released
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init, MESSAGE);
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::reader_priority)
|
||||
{
|
||||
//Expected result:
|
||||
//Readers have priority, so all readers have been released
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == 0, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == 0, MESSAGE);
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::alternating_many_reads)
|
||||
{
|
||||
//Expected result:
|
||||
//It's the writers' turn, so no readers have been released
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init, MESSAGE);
|
||||
}
|
||||
else if (rw.policy() == boost::read_write_scheduling_policy::alternating_single_read)
|
||||
{
|
||||
//Expected result:
|
||||
//It's the writers' turn, so no readers have been released
|
||||
TS_CHECK_MSG(w1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(w2.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r1.value_ == k_data_init, MESSAGE);
|
||||
TS_CHECK_MSG(r2.value_ == k_data_init, MESSAGE);
|
||||
}
|
||||
|
||||
if (test_promotion_and_demotion)
|
||||
{
|
||||
l.promote();
|
||||
}
|
||||
|
||||
l.unlock();
|
||||
|
||||
tr2.join();
|
||||
tr1.join();
|
||||
tw2.join();
|
||||
tw1.join();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
|
||||
void test_try_read_write_mutex(RW& rw, bool test_promotion_and_demotion)
|
||||
{
|
||||
//Repeat the plain tests with the try lock.
|
||||
//This is important to verify that try locks are proper
|
||||
//read_write_mutexes as well.
|
||||
|
||||
test_plain_read_write_mutex(rw, test_promotion_and_demotion);
|
||||
|
||||
//Verify try_* operations with write-locked mutex
|
||||
{
|
||||
typename RW::scoped_try_read_write_lock l(rw, boost::read_write_lock_state::write_locked);
|
||||
|
||||
shared_test_writelocked = true;
|
||||
shared_test_readlocked = false;
|
||||
shared_test_unlocked = false;
|
||||
|
||||
boost::thread test_thread(thread_adapter<RW>(run_try_tests, NULL, rw));
|
||||
test_thread.join();
|
||||
}
|
||||
|
||||
//Verify try_* operations with read-locked mutex
|
||||
{
|
||||
typename RW::scoped_try_read_write_lock l(rw, boost::read_write_lock_state::read_locked);
|
||||
|
||||
shared_test_writelocked = false;
|
||||
shared_test_readlocked = true;
|
||||
shared_test_unlocked = false;
|
||||
|
||||
boost::thread test_thread(thread_adapter<RW>(run_try_tests, NULL, rw));
|
||||
test_thread.join();
|
||||
}
|
||||
|
||||
//Verify try_* operations with unlocked mutex
|
||||
{
|
||||
shared_test_writelocked = false;
|
||||
shared_test_readlocked = false;
|
||||
shared_test_unlocked = true;
|
||||
|
||||
boost::thread test_thread(thread_adapter<RW>(run_try_tests, NULL, rw));
|
||||
test_thread.join();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RW>
void test_timed_read_write_mutex(RW& rw, bool test_promotion_and_demotion)
{
    //A timed mutex must also satisfy the try mutex contract, so run the
    //full set of try tests (which in turn run the plain tests) against
    //it before anything timed-specific.
    test_try_read_write_mutex(rw, test_promotion_and_demotion);

    //:More tests here
}
} // namespace
|
||||
|
||||
void do_test_read_write_mutex(bool test_promotion_and_demotion)
|
||||
{
|
||||
//Run every test for each scheduling policy
|
||||
|
||||
for(int i = (int) boost::read_write_scheduling_policy::writer_priority;
|
||||
i <= (int) boost::read_write_scheduling_policy::alternating_single_read;
|
||||
i++)
|
||||
{
|
||||
std::cout << "plain test, sp=" << i
|
||||
<< (test_promotion_and_demotion ? " with promotion & demotion" : " without promotion & demotion")
|
||||
<< "\n";
|
||||
std::cout.flush();
|
||||
|
||||
{
|
||||
boost::read_write_mutex plain_rw(static_cast<boost::read_write_scheduling_policy::read_write_scheduling_policy_enum>(i));
|
||||
test_plain_read_write_mutex(plain_rw, test_promotion_and_demotion);
|
||||
}
|
||||
|
||||
std::cout << "try test, sp=" << i
|
||||
<< (test_promotion_and_demotion ? " with promotion & demotion" : " without promotion & demotion")
|
||||
<< "\n";
|
||||
std::cout.flush();
|
||||
|
||||
{
|
||||
boost::try_read_write_mutex try_rw(static_cast<boost::read_write_scheduling_policy::read_write_scheduling_policy_enum>(i));
|
||||
test_try_read_write_mutex(try_rw, test_promotion_and_demotion);
|
||||
}
|
||||
|
||||
std::cout << "timed test, sp=" << i
|
||||
<< (test_promotion_and_demotion ? " with promotion & demotion" : " without promotion & demotion")
|
||||
<< "\n";
|
||||
std::cout.flush();
|
||||
|
||||
{
|
||||
boost::timed_read_write_mutex timed_rw(static_cast<boost::read_write_scheduling_policy::read_write_scheduling_policy_enum>(i));
|
||||
test_timed_read_write_mutex(timed_rw, test_promotion_and_demotion);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void test_read_write_mutex()
|
||||
{
|
||||
do_test_read_write_mutex(false);
|
||||
do_test_read_write_mutex(true);
|
||||
}
|
||||
|
||||
//Boost.Test entry point: builds the suite containing the single
//read_write_mutex test case.
boost::unit_test_framework::test_suite* init_unit_test_suite(int, char*[])
{
    boost::unit_test_framework::test_suite* suite =
        BOOST_TEST_SUITE("Boost.Threads: read_write_mutex test suite");
    suite->add(BOOST_TEST_CASE(&test_read_write_mutex));
    return suite;
}
Reference in New Issue
Block a user