diff --git a/include/boost/fiber/condition.hpp b/include/boost/fiber/condition.hpp
index 6fb23e6e..7530c09a 100644
--- a/include/boost/fiber/condition.hpp
+++ b/include/boost/fiber/condition.hpp
@@ -39,10 +39,10 @@ enum class cv_status {
 class BOOST_FIBERS_DECL condition {
 private:
-    typedef detail::wait_queue< context > wqueue_t;
+    typedef detail::wait_queue< context > wait_queue_t;
 
     detail::spinlock splk_;
-    wqueue_t waiting_;
+    wait_queue_t wait_queue_;
 
 public:
     condition();
@@ -73,7 +73,7 @@ public:
         // store this fiber in waiting-queue
         // in order notify (resume) this fiber later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
         // unlock external
         lt.unlock();
@@ -87,7 +87,7 @@ public:
             lt.lock();
         } catch (...) {
             detail::spinlock_lock lk( splk_);
-            detail::erase_and_dispose( waiting_, f);
+            f->wait_unlink();
             throw;
         }
     }
@@ -104,7 +104,7 @@ public:
         // store this fiber in waiting-queue
         // in order notify (resume) this fiber later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
         // unlock external
         lt.unlock();
@@ -116,7 +116,7 @@ public:
             // this fiber was not notified before timeout
             // lock spinlock again
             detail::spinlock_lock lk( splk_);
-            detail::erase_and_dispose( waiting_, f);
+            f->wait_unlink();
             status = cv_status::timeout;
         }
@@ -125,7 +125,7 @@ public:
             lt.lock();
         } catch (...) {
             detail::spinlock_lock lk( splk_);
-            detail::erase_and_dispose( waiting_, f);
+            f->wait_unlink();
             throw;
         }
diff --git a/include/boost/fiber/context.hpp b/include/boost/fiber/context.hpp
index b5c125c4..9820d945 100644
--- a/include/boost/fiber/context.hpp
+++ b/include/boost/fiber/context.hpp
@@ -43,9 +43,7 @@ class fiber_properties;
 class scheduler;
 struct sched_algorithm;
 
-class BOOST_FIBERS_DECL context : public detail::state_hook,
-                                  public detail::yield_hook,
-                                  public detail::wait_hook {
+class BOOST_FIBERS_DECL context {
 private:
     enum class fiber_status {
         ready = 0,
@@ -99,7 +97,7 @@ private:
     scheduler * scheduler_;
     boost::context::execution_context ctx_;
     fss_data_t fss_data_;
-    std::vector< context * > waiting_;
+    std::vector< context * > wait_queue_;
     std::exception_ptr except_;
     std::chrono::steady_clock::time_point tp_;
     fiber_properties * properties_;
@@ -168,6 +166,11 @@ public:
         }
     };
 
+    detail::runnable_hook runnable_hook_;
+    detail::ready_hook ready_hook_;
+    detail::sleep_hook sleep_hook_;
+    detail::wait_hook wait_hook_;
+
     static context * active() noexcept;
 
     static void active( context * active) noexcept;
@@ -181,7 +184,7 @@ public:
        scheduler_( nullptr),
        ctx_( boost::context::execution_context::current() ),
        fss_data_(),
-       waiting_(),
+       wait_queue_(),
        except_(),
        tp_( (std::chrono::steady_clock::time_point::max)() ),
        properties_( nullptr) {
@@ -225,7 +228,7 @@ public:
                BOOST_ASSERT_MSG( false, "fiber already terminated");
            }),
        fss_data_(),
-       waiting_(),
+       wait_queue_(),
        except_(),
        tp_( (std::chrono::steady_clock::time_point::max)() ),
        properties_( nullptr) {
@@ -401,16 +404,24 @@ public:
     std::chrono::steady_clock::duration do_wait_interval() noexcept;
 
-    bool state_is_linked() {
-        return detail::state_hook::is_linked();
+    bool runnable_is_linked() {
+        return runnable_hook_.is_linked();
     }
 
-    bool yield_is_linked() {
-        return detail::yield_hook::is_linked();
+    bool ready_is_linked() {
+        return ready_hook_.is_linked();
+    }
+
+    bool sleep_is_linked() {
+        return sleep_hook_.is_linked();
     }
 
     bool wait_is_linked() {
-        return detail::wait_hook::is_linked();
+        return wait_hook_.is_linked();
+    }
+
+    void wait_unlink() {
+        wait_hook_.unlink();
     }
 
     friend void intrusive_ptr_add_ref( context * f) {
diff --git a/include/boost/fiber/detail/queues.hpp b/include/boost/fiber/detail/queues.hpp
index 9cf679c6..e470087f 100644
--- a/include/boost/fiber/detail/queues.hpp
+++ b/include/boost/fiber/detail/queues.hpp
@@ -20,50 +20,57 @@ namespace boost {
 namespace fibers {
 namespace detail {
 
-struct state_tag;
-typedef intrusive::list_base_hook<
-    intrusive::tag< state_tag >,
+struct runnable_tag;
+typedef intrusive::list_member_hook<
+    intrusive::tag< runnable_tag >,
     intrusive::link_mode<
         intrusive::safe_link
     >
-> state_hook;
+> runnable_hook;
 template< typename T >
-using state_queue = intrusive::list<
+using runnable_queue = intrusive::list<
     T,
-    intrusive::base_hook< state_hook > >;
+    intrusive::member_hook< T, runnable_hook, & T::runnable_hook_ > >;
 
-struct yield_tag;
-typedef intrusive::list_base_hook<
-    intrusive::tag< yield_tag >,
+struct ready_tag;
+typedef intrusive::list_member_hook<
+    intrusive::tag< ready_tag >,
     intrusive::link_mode<
         intrusive::safe_link
     >
-> yield_hook;
+> ready_hook;
 template< typename T >
-using yield_queue = intrusive::list<
+using ready_queue = intrusive::list<
     T,
-    intrusive::base_hook< yield_hook >,
+    intrusive::member_hook< T, ready_hook, & T::ready_hook_ >,
+    intrusive::constant_time_size< false > >;
+
+struct sleep_tag;
+typedef intrusive::list_member_hook<
+    intrusive::tag< sleep_tag >,
+    intrusive::link_mode<
+        intrusive::safe_link
+    >
+> sleep_hook;
+template< typename T >
+using sleep_queue = intrusive::list<
+    T,
+    intrusive::member_hook< T, sleep_hook, & T::sleep_hook_ >,
     intrusive::constant_time_size< false > >;
 
 struct wait_tag;
-typedef intrusive::list_base_hook<
+typedef intrusive::list_member_hook<
     intrusive::tag< wait_tag >,
     intrusive::link_mode<
-        intrusive::safe_link
+        intrusive::auto_unlink
     >
 > wait_hook;
 template< typename T >
 using wait_queue = intrusive::list<
     T,
-    intrusive::base_hook< wait_hook >,
+    intrusive::member_hook< T, wait_hook, & T::wait_hook_ >,
     intrusive::constant_time_size< false > >;
 
-template< typename Lst, typename Ctx >
-void erase_and_dispose( Lst & lst, Ctx * ctx){
-    typename Lst::iterator i( Lst::s_iterator_to( * ctx) );
-    lst.erase_and_dispose( i, []( Ctx * ctx){ intrusive_ptr_release( ctx); });
-}
-
 }}}
 
 #ifdef BOOST_HAS_ABI_HEADERS
diff --git a/include/boost/fiber/mutex.hpp b/include/boost/fiber/mutex.hpp
index dba3353f..95d8e17a 100644
--- a/include/boost/fiber/mutex.hpp
+++ b/include/boost/fiber/mutex.hpp
@@ -28,12 +28,12 @@ private:
         unlocked
     };
 
-    typedef detail::wait_queue< context > wqueue_t;
+    typedef detail::wait_queue< context > wait_queue_t;
 
     detail::spinlock splk_;
     mutex_status state_;
     context::id owner_;
-    wqueue_t waiting_;
+    wait_queue_t wait_queue_;
 
     bool lock_if_unlocked_();
diff --git a/include/boost/fiber/recursive_mutex.hpp b/include/boost/fiber/recursive_mutex.hpp
index c0ea0625..39228b96 100644
--- a/include/boost/fiber/recursive_mutex.hpp
+++ b/include/boost/fiber/recursive_mutex.hpp
@@ -32,13 +32,13 @@ private:
         unlocked
     };
 
-    typedef detail::wait_queue< context > wqueue_t;
+    typedef detail::wait_queue< context > wait_queue_t;
 
     detail::spinlock splk_;
     mutex_status state_;
     context::id owner_;
     std::size_t count_;
-    wqueue_t waiting_;
+    wait_queue_t wait_queue_;
 
     bool lock_if_unlocked_();
diff --git a/include/boost/fiber/recursive_timed_mutex.hpp b/include/boost/fiber/recursive_timed_mutex.hpp
index b978559b..96f01891 100644
--- a/include/boost/fiber/recursive_timed_mutex.hpp
+++ b/include/boost/fiber/recursive_timed_mutex.hpp
@@ -34,13 +34,13 @@ private:
         unlocked
     };
 
-    typedef detail::wait_queue< context > wqueue_t;
+    typedef detail::wait_queue< context > wait_queue_t;
 
     detail::spinlock splk_;
     mutex_status state_;
     context::id owner_;
     std::size_t count_;
-    wqueue_t waiting_;
+    wait_queue_t wait_queue_;
 
     bool lock_if_unlocked_();
diff --git a/include/boost/fiber/round_robin.hpp b/include/boost/fiber/round_robin.hpp
index a2bf56ad..672f09b1 100644
--- a/include/boost/fiber/round_robin.hpp
+++ b/include/boost/fiber/round_robin.hpp
@@ -26,9 +26,9 @@ class context;
 class BOOST_FIBERS_DECL round_robin : public sched_algorithm {
 private:
-    typedef detail::state_queue< context > rqueue_t;
+    typedef detail::runnable_queue< context > runnable_queue_t;
 
-    rqueue_t rqueue_;
+    runnable_queue_t runnable_queue_;
 
 public:
     virtual void awakened( context *);
diff --git a/include/boost/fiber/scheduler.hpp b/include/boost/fiber/scheduler.hpp
index 1a9d767d..e2c125ed 100644
--- a/include/boost/fiber/scheduler.hpp
+++ b/include/boost/fiber/scheduler.hpp
@@ -9,7 +9,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -32,15 +32,15 @@ struct sched_algorithm;
 class BOOST_FIBERS_DECL scheduler {
 private:
-    typedef detail::state_queue< context > tqueue_t;
-    typedef detail::state_queue< context > wqueue_t;
-    typedef detail::yield_queue< context > yqueue_t;
+    typedef detail::ready_queue< context > ready_queue_t;
+    typedef detail::sleep_queue< context > sleep_queue_t;
+    typedef std::vector< context * > terminated_queue_t;
 
     std::unique_ptr< sched_algorithm > sched_algo_;
     context * main_context_;
-    wqueue_t wqueue_;
-    tqueue_t tqueue_;
-    yqueue_t yqueue_;
+    ready_queue_t ready_queue_;
+    sleep_queue_t sleep_queue_;
+    terminated_queue_t terminated_queue_;
     std::chrono::steady_clock::duration wait_interval_;
 
     void resume_( context *, context *);
@@ -67,7 +67,7 @@ public:
     void join( context *,context *);
 
-    std::size_t ready_fibers() const noexcept;
+    size_t ready_fibers() const noexcept;
 
     void set_sched_algo( std::unique_ptr< sched_algorithm >);
diff --git a/include/boost/fiber/timed_mutex.hpp b/include/boost/fiber/timed_mutex.hpp
index 318ba260..026fcacd 100644
--- a/include/boost/fiber/timed_mutex.hpp
+++ b/include/boost/fiber/timed_mutex.hpp
@@ -31,12 +31,12 @@ private:
         unlocked
     };
 
-    typedef detail::wait_queue< context > wqueue_t;
+    typedef detail::wait_queue< context > wait_queue_t;
 
     detail::spinlock splk_;
     mutex_status state_;
     context::id owner_;
-    wqueue_t waiting_;
+    wait_queue_t wait_queue_;
 
     bool lock_if_unlocked_();
diff --git a/src/condition.cpp b/src/condition.cpp
index 94208af8..3e9cd8ff 100644
--- a/src/condition.cpp
+++ b/src/condition.cpp
@@ -17,11 +17,11 @@ namespace fibers {
 condition::condition() :
     splk_(),
-    waiting_() {
+    wait_queue_() {
 }
 
 condition::~condition() {
-    BOOST_ASSERT( waiting_.empty() );
+    BOOST_ASSERT( wait_queue_.empty() );
 }
 
 void
@@ -30,9 +30,9 @@ condition::notify_one() {
     detail::spinlock_lock lk( splk_);
 
     // get one waiting fiber
-    if ( ! waiting_.empty() ) {
-        f = & waiting_.front();
-        waiting_.pop_front();
+    if ( ! wait_queue_.empty() ) {
+        f = & wait_queue_.front();
+        wait_queue_.pop_front();
     }
     lk.unlock();
@@ -44,11 +44,11 @@ condition::notify_one() {
 
 void
 condition::notify_all() {
-    wqueue_t waiting;
+    wait_queue_t waiting;
 
     detail::spinlock_lock lk( splk_);
     // get all waiting fibers
-    waiting.swap( waiting_);
+    waiting.swap( wait_queue_);
     lk.unlock();
 
     // notify all waiting fibers
diff --git a/src/context.cpp b/src/context.cpp
index a3be082f..7add37c3 100644
--- a/src/context.cpp
+++ b/src/context.cpp
@@ -41,7 +41,7 @@ context::active( context * active) noexcept {
 }
 
 context::~context() {
-    BOOST_ASSERT( waiting_.empty() );
+    BOOST_ASSERT( wait_queue_.empty() );
     delete properties_;
 }
 
@@ -53,7 +53,7 @@ context::release() {
     // get all waiting fibers
     splk_.lock();
-    waiting.swap( waiting_);
+    waiting.swap( wait_queue_);
     splk_.unlock();
 
     // notify all waiting fibers
@@ -78,7 +78,7 @@ context::join( context * f) {
     if ( is_terminated() ) {
         return false;
     }
-    waiting_.push_back( f);
+    wait_queue_.push_back( f);
     return true;
 }
diff --git a/src/mutex.cpp b/src/mutex.cpp
index 5d2f1438..08f14eb0 100644
--- a/src/mutex.cpp
+++ b/src/mutex.cpp
@@ -37,12 +37,12 @@ mutex::mutex() :
     splk_(),
     state_( mutex_status::unlocked),
     owner_(),
-    waiting_() {
+    wait_queue_() {
 }
 
 mutex::~mutex() {
     BOOST_ASSERT( ! owner_);
-    BOOST_ASSERT( waiting_.empty() );
+    BOOST_ASSERT( wait_queue_.empty() );
 }
 
 void
@@ -58,7 +58,7 @@ mutex::lock() {
         // store this fiber in order to be notified later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
 
         // suspend this fiber
         context::active()->do_wait( lk);
@@ -87,9 +87,9 @@ mutex::unlock() {
     detail::spinlock_lock lk( splk_);
     context * f( nullptr);
-    if ( ! waiting_.empty() ) {
-        f = & waiting_.front();
-        waiting_.pop_front();
+    if ( ! wait_queue_.empty() ) {
+        f = & wait_queue_.front();
+        wait_queue_.pop_front();
         BOOST_ASSERT( nullptr != f);
     }
     owner_ = context::id();
diff --git a/src/recursive_mutex.cpp b/src/recursive_mutex.cpp
index f739a077..bb43f385 100644
--- a/src/recursive_mutex.cpp
+++ b/src/recursive_mutex.cpp
@@ -42,13 +42,13 @@ recursive_mutex::recursive_mutex() :
     state_( mutex_status::unlocked),
     owner_(),
     count_( 0),
-    waiting_() {
+    wait_queue_() {
 }
 
 recursive_mutex::~recursive_mutex() {
     BOOST_ASSERT( ! owner_);
     BOOST_ASSERT( 0 == count_);
-    BOOST_ASSERT( waiting_.empty() );
+    BOOST_ASSERT( wait_queue_.empty() );
 }
 
 void
@@ -64,7 +64,7 @@ recursive_mutex::lock() {
         // store this fiber in order to be notified later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
 
         // suspend this fiber
         context::active()->do_wait( lk);
@@ -94,9 +94,9 @@ recursive_mutex::unlock() {
     detail::spinlock_lock lk( splk_);
     context * f( nullptr);
     if ( 0 == --count_) {
-        if ( ! waiting_.empty() ) {
-            f = & waiting_.front();
-            waiting_.pop_front();
+        if ( ! wait_queue_.empty() ) {
+            f = & wait_queue_.front();
+            wait_queue_.pop_front();
             BOOST_ASSERT( nullptr != f);
         }
         owner_ = context::id();
diff --git a/src/recursive_timed_mutex.cpp b/src/recursive_timed_mutex.cpp
index e45d9740..f805f627 100644
--- a/src/recursive_timed_mutex.cpp
+++ b/src/recursive_timed_mutex.cpp
@@ -42,13 +42,13 @@ recursive_timed_mutex::recursive_timed_mutex() :
     state_( mutex_status::unlocked),
     owner_(),
     count_( 0),
-    waiting_() {
+    wait_queue_() {
 }
 
 recursive_timed_mutex::~recursive_timed_mutex() {
     BOOST_ASSERT( ! owner_);
     BOOST_ASSERT( 0 == count_);
-    BOOST_ASSERT( waiting_.empty() );
+    BOOST_ASSERT( wait_queue_.empty() );
 }
 
 void
@@ -64,7 +64,7 @@ recursive_timed_mutex::lock() {
         // store this fiber in order to be notified later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
 
         // suspend this fiber
         context::active()->do_wait( lk);
@@ -103,12 +103,12 @@ recursive_timed_mutex::try_lock_until_( std::chrono::steady_clock::time_point co
         // store this fiber in order to be notified later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
 
         // suspend this fiber until notified or timed-out
         if ( ! context::active()->do_wait_until( timeout_time, lk) ) {
             lk.lock();
-            detail::erase_and_dispose( waiting_, f);
+            f->wait_unlink();
             lk.unlock();
             return false;
         }
@@ -123,9 +123,9 @@ recursive_timed_mutex::unlock() {
     detail::spinlock_lock lk( splk_);
     context * f( nullptr);
     if ( 0 == --count_) {
-        if ( ! waiting_.empty() ) {
-            f = & waiting_.front();
-            waiting_.pop_front();
+        if ( ! wait_queue_.empty() ) {
+            f = & wait_queue_.front();
+            wait_queue_.pop_front();
             BOOST_ASSERT( nullptr != f);
         }
         owner_ = context::id();
diff --git a/src/round_robin.cpp b/src/round_robin.cpp
index 124e55de..f12ab429 100644
--- a/src/round_robin.cpp
+++ b/src/round_robin.cpp
@@ -19,16 +19,16 @@ void
 round_robin::awakened( context * f) {
     BOOST_ASSERT( nullptr != f);
 
-    BOOST_ASSERT( ! f->state_is_linked() );
-    rqueue_.push_back( * f);
+    BOOST_ASSERT( ! f->runnable_is_linked() );
+    runnable_queue_.push_back( * f);
 }
 
 context *
 round_robin::pick_next() {
     context * victim( nullptr);
-    if ( ! rqueue_.empty() ) {
-        victim = & rqueue_.front();
-        rqueue_.pop_front();
+    if ( ! runnable_queue_.empty() ) {
+        victim = & runnable_queue_.front();
+        runnable_queue_.pop_front();
         BOOST_ASSERT( nullptr != victim);
     }
     return victim;
@@ -36,7 +36,7 @@ round_robin::pick_next() {
 std::size_t
 round_robin::ready_fibers() const noexcept {
-    return rqueue_.size();
+    return runnable_queue_.size();
 }
 
 }}
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 7c9cda26..da2579db 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -27,9 +27,9 @@ namespace fibers {
 scheduler::scheduler( context * main_context) noexcept :
     sched_algo_( new round_robin() ),
     main_context_( main_context),
-    wqueue_(),
-    tqueue_(),
-    yqueue_(),
+    ready_queue_(),
+    sleep_queue_(),
+    terminated_queue_(),
     wait_interval_( std::chrono::milliseconds( 10) ) {
 }
 
@@ -39,15 +39,15 @@ scheduler::~scheduler() noexcept {
     // NOTE: at this stage the fibers in the waiting-queue
    // can only be detached fibers
     // interrupt all waiting fibers (except main-fiber)
-    for ( context & f : wqueue_) {
+    for ( context & f : sleep_queue_) {
         f.request_interruption( true);
     }
 
     // move all fibers which are ready (state_ready)
     // from waiting-queue to the runnable-queue
     std::chrono::steady_clock::time_point now( std::chrono::steady_clock::now() );
-    wqueue_t::iterator e = wqueue_.end();
-    for ( wqueue_t::iterator i = wqueue_.begin(); i != e;) {
+    sleep_queue_t::iterator e = sleep_queue_.end();
+    for ( sleep_queue_t::iterator i = sleep_queue_.begin(); i != e;) {
         context * f = & ( * i);
         BOOST_ASSERT( ! f->is_running() );
         BOOST_ASSERT( ! f->is_terminated() );
@@ -59,12 +59,14 @@
         }
 
         if ( f->is_ready() ) {
-            i = wqueue_.erase( i);
-            // Pass the newly-unlinked context* to sched_algo.
+            i = sleep_queue_.erase( i);
+            // unlink after accessing iterator
             f->time_point_reset();
-            BOOST_ASSERT( ! f->state_is_linked() );
-            BOOST_ASSERT( ! f->wait_is_linked() );
-            BOOST_ASSERT( ! f->yield_is_linked() );
+            // Pass the newly-unlinked context* to sched_algo.
+            BOOST_ASSERT( ! f->runnable_is_linked() );
+            BOOST_ASSERT( ! f->ready_is_linked() );
+            BOOST_ASSERT( ! f->sleep_is_linked() );
+            //BOOST_ASSERT( ! f->wait_is_linked() );
             sched_algo_->awakened( f);
         } else {
             ++i;
@@ -72,10 +74,11 @@
     }
 
     // pop new fiber from ready-queue
     context * f( sched_algo_->pick_next() );
-    BOOST_ASSERT( ! f->state_is_linked() );
-    BOOST_ASSERT( ! f->wait_is_linked() );
-    BOOST_ASSERT( ! f->yield_is_linked() );
     if ( nullptr != f) {
+        BOOST_ASSERT( ! f->runnable_is_linked() );
+        BOOST_ASSERT( ! f->ready_is_linked() );
+        BOOST_ASSERT( ! f->sleep_is_linked() );
+        //BOOST_ASSERT( ! f->wait_is_linked() );
         BOOST_ASSERT_MSG( f->is_ready(), "fiber with invalid state in ready-queue");
         // set scheduler
         f->set_scheduler( this);
@@ -86,29 +89,29 @@
         // set main-fiber to state_waiting
         main_context_->set_waiting();
         // push main-fiber to waiting-queue
-        BOOST_ASSERT( ! main_context_->state_is_linked() );
-        yqueue_.push_back( * main_context_);
+        BOOST_ASSERT( ! main_context_->runnable_is_linked() );
+        ready_queue_.push_back( * main_context_);
         // resume fiber f
         resume_( main_context_, f);
-    } else if ( wqueue_.empty() ) {
+    } else if ( sleep_queue_.empty() ) {
         // ready- and waiting-queue are empty so we can quit
         break;
     }
     }
 
     // destroy terminated fibers from terminated-queue
-    tqueue_t::iterator e = tqueue_.end();
-    for ( tqueue_t::iterator i = tqueue_.begin(); i != e;) {
-        context * f( & ( * i) );
+    for ( context * f : terminated_queue_) {
         BOOST_ASSERT( f->is_terminated() );
-        i = tqueue_.erase( i);
-        BOOST_ASSERT( ! f->state_is_linked() );
+        BOOST_ASSERT( ! f->runnable_is_linked() );
+        BOOST_ASSERT( ! f->ready_is_linked() );
+        BOOST_ASSERT( ! f->sleep_is_linked() );
         BOOST_ASSERT( ! f->wait_is_linked() );
-        BOOST_ASSERT( ! f->yield_is_linked() );
         intrusive_ptr_release( f); // might call ~context()
     }
+    terminated_queue_.clear();
 
     BOOST_ASSERT( context::active() == main_context_);
-    BOOST_ASSERT( wqueue_.empty() );
-    BOOST_ASSERT( yqueue_.empty() );
+    BOOST_ASSERT( ready_queue_.empty() );
+    BOOST_ASSERT( sleep_queue_.empty() );
+    BOOST_ASSERT( terminated_queue_.empty() );
     BOOST_ASSERT( 0 == sched_algo_->ready_fibers() );
 }
@@ -132,8 +135,8 @@ scheduler::resume_( context * af, context * f) {
     context::active( f);
     // push terminated fibers to terminated-queue
     if ( af->is_terminated() ) {
-        BOOST_ASSERT( ! af->state_is_linked() );
-        tqueue_.push_back( * af);
+        BOOST_ASSERT( ! af->runnable_is_linked() );
+        terminated_queue_.push_back( af);
     }
     // resume active-fiber == f
     f->resume();
@@ -145,9 +148,9 @@ scheduler::spawn( context * f) {
     BOOST_ASSERT( f->is_ready() );
     BOOST_ASSERT( f != context::active() );
     // add new fiber to the scheduler
-    BOOST_ASSERT( ! f->state_is_linked() );
-    BOOST_ASSERT( ! f->wait_is_linked() );
-    BOOST_ASSERT( ! f->yield_is_linked() );
+    BOOST_ASSERT( ! f->runnable_is_linked() );
+    BOOST_ASSERT( ! f->ready_is_linked() );
+    BOOST_ASSERT( ! f->sleep_is_linked() );
     sched_algo_->awakened( f);
 }
@@ -158,15 +161,16 @@
 scheduler::run( context * af) {
     for (;;) {
         {
             // move all fibers in yield-queue to ready-queue
-            yqueue_t::iterator e = yqueue_.end();
-            for ( yqueue_t::iterator i = yqueue_.begin(); i != e;) {
+            ready_queue_t::iterator e = ready_queue_.end();
+            for ( ready_queue_t::iterator i = ready_queue_.begin(); i != e;) {
                 context * f = & ( * i);
                 BOOST_ASSERT( f->is_ready() );
-                i = yqueue_.erase( i);
-                BOOST_ASSERT( ! f->state_is_linked() );
-                BOOST_ASSERT( ! f->wait_is_linked() );
-                BOOST_ASSERT( ! f->yield_is_linked() );
+                i = ready_queue_.erase( i);
+                BOOST_ASSERT( ! f->runnable_is_linked() );
+                BOOST_ASSERT( ! f->ready_is_linked() );
+                BOOST_ASSERT( ! f->sleep_is_linked() );
+                //BOOST_ASSERT( ! f->wait_is_linked() );
                 sched_algo_->awakened( f);
             }
         }
@@ -175,8 +179,8 @@
         // from waiting-queue to the ready-queue
         std::chrono::steady_clock::time_point now( std::chrono::steady_clock::now() );
-        wqueue_t::iterator e = wqueue_.end();
-        for ( wqueue_t::iterator i = wqueue_.begin(); i != e;) {
+        sleep_queue_t::iterator e = sleep_queue_.end();
+        for ( sleep_queue_t::iterator i = sleep_queue_.begin(); i != e;) {
             context * f = & ( * i);
             BOOST_ASSERT( ! f->is_running() );
             BOOST_ASSERT( ! f->is_terminated() );
@@ -186,12 +190,14 @@
                 f->set_ready();
             }
             if ( f->is_ready() ) {
-                i = wqueue_.erase( i);
-                // Pass the newly-unlinked context* to sched_algo.
+                i = sleep_queue_.erase( i);
+                // unlink only after accessing iterator
                 f->time_point_reset();
-                BOOST_ASSERT( ! f->state_is_linked() );
-                BOOST_ASSERT( ! f->wait_is_linked() );
-                BOOST_ASSERT( ! f->yield_is_linked() );
+                // Pass the newly-unlinked context* to sched_algo.
+                BOOST_ASSERT( ! f->runnable_is_linked() );
+                BOOST_ASSERT( ! f->ready_is_linked() );
+                BOOST_ASSERT( ! f->sleep_is_linked() );
+                //BOOST_ASSERT( ! f->wait_is_linked() );
                 sched_algo_->awakened( f);
             } else {
                 ++i;
@@ -201,20 +207,23 @@
         // pop new fiber from ready-queue
         context * f( sched_algo_->pick_next() );
         if ( nullptr != f) {
+            BOOST_ASSERT( ! f->runnable_is_linked() );
+            BOOST_ASSERT( ! f->ready_is_linked() );
+            BOOST_ASSERT( ! f->sleep_is_linked() );
+            //BOOST_ASSERT( ! f->wait_is_linked() );
             BOOST_ASSERT_MSG( f->is_ready(), "fiber with invalid state in ready-queue");
             // resume fiber f
             resume_( af, f);
             // destroy terminated fibers from terminated-queue
-            tqueue_t::iterator e = tqueue_.end();
-            for ( tqueue_t::iterator i = tqueue_.begin(); i != e;) {
-                context * f( & ( * i) );
+            for ( context * f : terminated_queue_) {
                 BOOST_ASSERT( f->is_terminated() );
-                i = tqueue_.erase( i);
-                BOOST_ASSERT( ! f->state_is_linked() );
+                BOOST_ASSERT( ! f->runnable_is_linked() );
+                BOOST_ASSERT( ! f->ready_is_linked() );
+                BOOST_ASSERT( ! f->sleep_is_linked() );
                 BOOST_ASSERT( ! f->wait_is_linked() );
-                BOOST_ASSERT( ! f->yield_is_linked() );
                 intrusive_ptr_release( f); // might call ~context()
             }
+            terminated_queue_.clear();
             return;
         } else {
             // no fibers ready to run; the thread should sleep
@@ -246,8 +255,8 @@
     lk.unlock();
     // push active-fiber to waiting-queue
     af->time_point( timeout_time);
-    BOOST_ASSERT( ! af->state_is_linked() );
-    wqueue_.push_back( * af);
+    BOOST_ASSERT( ! af->sleep_is_linked() );
+    sleep_queue_.push_back( * af);
     // switch to another fiber
     run( af);
     // fiber has been resumed
@@ -264,9 +273,9 @@ scheduler::yield( context * af) {
     BOOST_ASSERT( af->is_running() );
     // set active-fiber to state_waiting
     af->set_ready();
-    // push active-fiber to yqueue_
-    BOOST_ASSERT( ! af->yield_is_linked() );
-    yqueue_.push_back( * af);
+    // push active-fiber to ready_queue_
+    BOOST_ASSERT( ! af->ready_is_linked() );
+    ready_queue_.push_back( * af);
     // switch to another fiber
     run( af);
     // fiber has been resumed
@@ -284,8 +293,8 @@ scheduler::join( context * af, context * f) {
     // set active-fiber to state_waiting
     af->set_waiting();
     // push active-fiber to waiting-queue
-    BOOST_ASSERT( ! af->state_is_linked() );
-    wqueue_.push_back( * af);
+    BOOST_ASSERT( ! af->sleep_is_linked() );
+    sleep_queue_.push_back( * af);
     // add active-fiber to joinig-list of f
     if ( ! f->join( af) ) {
         // f must be already terminated therefore we set
diff --git a/src/timed_mutex.cpp b/src/timed_mutex.cpp
index 48ce5b9a..475ed2b2 100644
--- a/src/timed_mutex.cpp
+++ b/src/timed_mutex.cpp
@@ -37,12 +37,12 @@ timed_mutex::timed_mutex() :
     splk_(),
     state_( mutex_status::unlocked),
     owner_(),
-    waiting_() {
+    wait_queue_() {
 }
 
 timed_mutex::~timed_mutex() {
     BOOST_ASSERT( ! owner_);
-    BOOST_ASSERT( waiting_.empty() );
+    BOOST_ASSERT( wait_queue_.empty() );
 }
 
 void
@@ -58,7 +58,7 @@ timed_mutex::lock() {
         // store this fiber in order to be notified later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
 
         // suspend this fiber
         context::active()->do_wait( lk);
@@ -97,12 +97,12 @@ timed_mutex::try_lock_until_( std::chrono::steady_clock::time_point const& timeo
         // store this fiber in order to be notified later
         BOOST_ASSERT( ! f->wait_is_linked() );
-        waiting_.push_back( * f);
+        wait_queue_.push_back( * f);
 
         // suspend this fiber until notified or timed-out
         if ( ! context::active()->do_wait_until( timeout_time, lk) ) {
             lk.lock();
-            detail::erase_and_dispose( waiting_, f);
+            f->wait_unlink();
             lk.unlock();
             return false;
         }
@@ -116,9 +116,9 @@ timed_mutex::unlock() {
     detail::spinlock_lock lk( splk_);
     context * f( nullptr);
-    if ( ! waiting_.empty() ) {
-        f = & waiting_.front();
-        waiting_.pop_front();
+    if ( ! wait_queue_.empty() ) {
+        f = & wait_queue_.front();
+        wait_queue_.pop_front();
         BOOST_ASSERT( nullptr != f);
     }
     owner_ = context::id();
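
The central technique in this patch is replacing Boost.Intrusive base hooks with named member hooks (runnable_hook_, ready_hook_, sleep_hook_, wait_hook_), so a single context can be linked into several queues independently, and switching the wait hook to the auto_unlink link mode so that a timed-out or interrupted waiter can leave a wait_queue by unlinking its own hook; that is what the new context::wait_unlink() does where detail::erase_and_dispose() was used before. The following sketch is not part of the patch: it uses hypothetical node / ready_list / wait_list names to show the same Boost.Intrusive pattern in isolation.

// Standalone illustration (hypothetical names, not taken from the patch) of
// Boost.Intrusive member hooks combined with the auto_unlink link mode.
#include <boost/intrusive/list.hpp>
#include <cassert>

namespace bi = boost::intrusive;

typedef bi::list_member_hook< bi::link_mode< bi::safe_link > >   ready_hook;
typedef bi::list_member_hook< bi::link_mode< bi::auto_unlink > > wait_hook;

struct node {
    ready_hook ready_hook_;   // unlinked only by the owning list
    wait_hook  wait_hook_;    // can unlink itself without the list
};

typedef bi::list<
    node,
    bi::member_hook< node, ready_hook, & node::ready_hook_ > > ready_list;

typedef bi::list<
    node,
    bi::member_hook< node, wait_hook, & node::wait_hook_ >,
    bi::constant_time_size< false > > wait_list;   // required with auto_unlink

int main() {
    node n;
    ready_list rl;
    wait_list wl;

    // The same object is linked into two lists at once, one per member hook.
    rl.push_back( n);
    wl.push_back( n);
    assert( n.ready_hook_.is_linked() );
    assert( n.wait_hook_.is_linked() );

    // A timed-out waiter can remove itself, mirroring context::wait_unlink().
    n.wait_hook_.unlink();
    assert( ! n.wait_hook_.is_linked() );

    // safe_link hooks still need an explicit erase by the owning list.
    rl.erase( rl.iterator_to( n) );
    assert( rl.empty() && wl.empty() );
    return 0;
}

Boost.Intrusive mandates constant_time_size< false > for any list whose hook uses auto_unlink, because elements can leave the list without informing it; the patch sets that option on the ready-, sleep- and wait-queues, while runnable_queue keeps the default, presumably so that round_robin::ready_fibers() can keep returning runnable_queue_.size() in constant time.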
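
A smaller change with the same motivation: the scheduler's terminated-queue no longer needs to be an intrusive list at all, so the patch turns it into a plain std::vector< context * > that is drained with a range-for and cleared in one step. Below is a minimal, self-contained sketch of that cleanup shape; the context type and intrusive_ptr_release here are stubs for illustration only, not the real Boost.Fiber types.

#include <vector>

// Stub stand-ins for the illustration; the real types live in Boost.Fiber.
struct context {
    bool is_terminated() const { return true; }
};
inline void intrusive_ptr_release( context * f) { delete f; }   // might call ~context()

int main() {
    std::vector< context * > terminated_queue;
    terminated_queue.push_back( new context() );
    terminated_queue.push_back( new context() );

    // Same shape as the patch's loops in scheduler::run() and ~scheduler():
    // release every terminated fiber, then clear the vector in one step.
    for ( context * f : terminated_queue) {
        intrusive_ptr_release( f);
    }
    terminated_queue.clear();
    return 0;
}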