diff --git a/doc/fibers.xml b/doc/fibers.xml index 925cd799..1249a021 100644 --- a/doc/fibers.xml +++ b/doc/fibers.xml @@ -1,6 +1,6 @@ - @@ -98,27 +98,31 @@ can count on thread-local storage; however that storage will be shared among all fibers running on the same thread. + + + BOOST_FIBERS_NO_ATOMICS + - The fiber synchronization objects provided - by this library will, by default, safely synchronize fibers running on different - threads. However, this level of synchronization can be removed (for performance) - by building the library with BOOST_FIBERS_NO_ATOMICS + The fiber synchronization objects provided by this library will, by default, + safely synchronize fibers running on different threads. However, this level + of synchronization can be removed (for performance) by building the library + with BOOST_FIBERS_NO_ATOMICS defined. When the library is built with that macro, you must ensure that all the fibers referencing a particular synchronization object are running in the - same thread. Please see synchronization. + same thread. Please see Synchronization. For fiber-local storage, please see fiber_specific_ptr. - + Blocking - Normally, when this documentation states that a particular fiber blocks, - it means that it yields control, allowing other fibers on the same thread to - run. The synchronization mechanisms provided by Boost.Fiber - have this behavior. + Normally, when this documentation states that a particular fiber blocks + (or equivalently, suspends), it means that it yields control, + allowing other fibers on the same thread to run. The synchronization mechanisms + provided by Boost.Fiber have this behavior. A fiber may, of course, use normal thread synchronization mechanisms; however @@ -365,8 +369,9 @@ // this leads to undefined behaviour - The spawned fiber is enqueued in the list of ready-to-run fibers - at construction. + The spawned fiber does not immediately start running. It is enqueued + in the list of ready-to-run fibers, and will run when the scheduler gets around + to it. @@ -437,11 +442,10 @@ constructor Destruction - When a fiber object representing a valid execution context is destroyed, - the program terminates if the fiber is fiber::joinable(). If + When a fiber object representing a valid execution context (the fiber + is fiber::joinable()) is destroyed, the program terminates. If you intend the fiber to outlive the fiber object that launched it, - use the fiber::detach() -method. + use the fiber::detach() method. { boost::fibers::fiber f( some_fn); @@ -773,6 +777,7 @@ by calling the fiber::get_id() + Constructor @@ -835,7 +840,10 @@ by calling the fiber::get_id() See also: - stack + std::allocator_arg_t, Stack + allocation @@ -1022,8 +1030,7 @@ by calling the fiber::get_id() Effects: - If *this - refers to a fiber of execution, waits for that fiber to complete. + Waits for the referenced fiber of execution to complete. @@ -1031,9 +1038,8 @@ by calling the fiber::get_id() Postconditions: - If *this - refers to a fiber of execution on entry, that fiber has completed. - *this + The fiber of execution referenced on entry has completed. *this no longer refers to any fiber of execution. @@ -1218,8 +1224,6 @@ by calling the fiber::get_id() refers to a fiber of execution. use_scheduling_algorithm() has been called from this thread with a subclass of sched_algorithm_with_properties<> with the same template argument PROPS. - *this - has been scheduled for execution at least once. 
@@ -1250,8 +1254,7 @@ by calling the fiber::get_id() sched_algorithm_with_properties<> provides a way for a user-coded scheduler to associate extended properties, such as priority, with a fiber instance. This method allows access - to those user-provided properties — but only after this fiber has been - scheduled for the first time. + to those user-provided properties. @@ -1484,7 +1487,7 @@ by calling the fiber::get_id() Returns: - Returns the number of fibers ready to run. + the number of fibers ready to run. @@ -2079,6 +2082,15 @@ by calling the fiber::get_id() + + Note: + + + The first time this function is called from the main fiber of a thread, + it may internally yield, permitting other fibers to run. + + + See also: @@ -2565,10 +2577,10 @@ by calling the fiber::get_id() - Effects: + Returns: - Returns the number of fibers ready to run. + the number of fibers ready to run. @@ -2658,7 +2670,7 @@ by calling the fiber::get_id() Returns: - Returns the number of fibers ready to run. + the number of fibers ready to run. @@ -2889,7 +2901,7 @@ by calling the fiber::get_id() Returns: - 0 if scheduling algorithm has no fibers ready to run, otherwise nonzero. + the number of fibers ready to run. @@ -2927,10 +2939,10 @@ by calling the fiber::get_id() The fiber's associated PROPS - instance is already passed to awakened() and property_change(). However, every sched_algorithm subclass - is expected to track a collection of ready fiber_context instances. + instance is already passed to sched_algorithm_with_properties::awakened() and + sched_algorithm_with_properties::property_change(). + However, every sched_algorithm subclass is expected + to track a collection of ready fiber_context instances. This method allows your custom scheduler to retrieve the fiber_properties subclass instance for any fiber_context in its collection. @@ -3094,6 +3106,11 @@ by calling the fiber::get_id() required to model a stack-allocator concept. + + A stack_allocator can be passed to fiber::fiber() or to fibers::async(). + fiber::get_id() The fiber synchronization objects provided by this library will, by default, safely synchronize fibers running on different threads. However, this level of synchronization can be removed (for performance) by building the library - with BOOST_FIBERS_NO_ATOMICS + with BOOST_FIBERS_NO_ATOMICS defined. When the library is built with that macro, you must ensure that all the fibers referencing a particular synchronization object are running in the same thread. @@ -3343,8 +3360,8 @@ by calling the fiber::get_id() Any fiber blocked in lock() is suspended in the scheduler until the - owning fiber releases the lock by calling unlock() is suspended until the owning fiber releases + the lock by calling unlock(). @@ -4088,12 +4105,12 @@ by calling the fiber::get_id() The class condition_variable - provides a mechanism for a fiber to wait for notification on condition_variable. When the fiber awakens - from the wait, then it checks to see if the appropriate condition is now - true, and continues if so. If the condition is not true, then the fiber calls - wait again to resume waiting. - In the simplest case, this condition is just a boolean variable: + provides a mechanism for a fiber to wait for notification from another fiber. + When the fiber awakens from the wait, then it checks to see if the appropriate + condition is now true, and continues if so. If the condition is not true, + then the fiber calls wait + again to resume waiting. 
In the simplest case, this condition is just a boolean + variable: boost::fibers::condition_variable cond; boost::fibers::mutex mtx; @@ -4184,7 +4201,7 @@ by calling the fiber::get_id() #include <boost/fiber/condition.hpp> enum cv_status { - no_timeout = 1, + no_timeout, timeout }; @@ -4686,11 +4703,12 @@ by calling the fiber::get_id() } return true; - That is, even if wait_for() times out, it can still return true if pred() returns true - at that time. + (except of course that timeout_duration + is adjusted for each iteration). The point is that, even if wait_for() + times out, it can still return true + if pred() + returns true at that time. @@ -4805,7 +4823,7 @@ by calling the fiber::get_id() Fiber B calls barrier::wait(). Fiber B is blocked! + role="special">(). Fiber B is blocked! @@ -4831,8 +4849,16 @@ by calling the fiber::get_id() It is unwise to tie the lifespan of a barrier to any one of its participating fibers. Although conceptually all waiting fibers awaken "simultaneously," because of the nature of fibers, in practice they will awaken one by one - in indeterminate order. The rest of the waiting fibers will still be blocked - in wait(), + in indeterminate order. + + The current implementation wakes fibers in FIFO order: the first to call + wait() + wakes first, and so forth. But it is perilous to rely on the order in + which the various fibers will reach the wait() call. + + The rest of the waiting fibers will still be blocked in wait(), which must, before returning, access data members in the barrier object. @@ -5089,11 +5115,13 @@ by calling the fiber::get_id() value_type value_pop(); channel_op_status try_pop( value_type & va); template< typename Rep, typename Period > - channel_op_status pop_wait_for( value_type & va, - std::chrono::duration< Rep, Period > const& timeout_duration); + channel_op_status pop_wait_for( + value_type & va, + std::chrono::duration< Rep, Period > const& timeout_duration); template< typename Clock, typename Duration > - channel_op_status pop_wait_until( value_type & va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status pop_wait_until( + value_type & va, + std::chrono::time_point< Clock, Duration > const& timeout_time); }; @@ -5292,8 +5320,9 @@ by calling the fiber::get_id() template< typename Rep, typename Period > -channel_op_status pop_wait_for( value_type & va, - std::chrono::duration< Rep, Period > const& timeout_duration) +channel_op_status pop_wait_for( + value_type & va, + std::chrono::duration< Rep, Period > const& timeout_duration) @@ -5334,8 +5363,9 @@ by calling the fiber::get_id() template< typename Clock, typename Duration > -channel_op_status pop_wait_until( value_type & va, - std::chrono::time_point< Clock, Duration > const& timeout_time) +channel_op_status pop_wait_until( + value_type & va, + std::chrono::time_point< Clock, Duration > const& timeout_time) @@ -5397,27 +5427,32 @@ by calling the fiber::get_id() channel_op_status push( value_type const& va); channel_op_status push( value_type && va); template< typename Rep, typename Period > - channel_op_status push_wait_for( value_type const& va, - std::chrono::duration< Rep, Period > const& timeout_duration); + channel_op_status push_wait_for( + value_type const& va, + std::chrono::duration< Rep, Period > const& timeout_duration); channel_op_status push_wait_for( value_type && va, - std::chrono::duration< Rep, Period > const& timeout_duration); + std::chrono::duration< Rep, Period > const& timeout_duration); template< typename Clock, typename 
Duration > - channel_op_status push_wait_until( value_type const& va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status push_wait_until( + value_type const& va, + std::chrono::time_point< Clock, Duration > const& timeout_time); template< typename Clock, typename Duration > - channel_op_status push_wait_until( value_type && va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status push_wait_until( + value_type && va, + std::chrono::time_point< Clock, Duration > const& timeout_time); channel_op_status try_push( value_type const& va); channel_op_status try_push( value_type && va); channel_op_status pop( value_type & va); value_type value_pop(); template< typename Rep, typename Period > - channel_op_status pop_wait_for( value_type & va, - std::chrono::duration< Rep, Period > const& timeout_duration); + channel_op_status pop_wait_for( + value_type & va, + std::chrono::duration< Rep, Period > const& timeout_duration); template< typename Clock, typename Duration > - channel_op_status pop_wait_until( value_type & va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status pop_wait_until( + value_type & va, + std::chrono::time_point< Clock, Duration > const& timeout_time); channel_op_status try_pop( value_type & va); }; @@ -5648,11 +5683,13 @@ by calling the fiber::get_id() template< typename Rep, typename Period > -channel_op_status push_wait_for( value_type const& va, - std::chrono::duration< Rep, Period > const& timeout_duration); +channel_op_status push_wait_for( + value_type const& va, + std::chrono::duration< Rep, Period > const& timeout_duration); template< typename Rep, typename Period > -channel_op_status push_wait_for( value_type && va, - std::chrono::duration< Rep, Period > const& timeout_duration); +channel_op_status push_wait_for( + value_type && va, + std::chrono::duration< Rep, Period > const& timeout_duration); @@ -5702,11 +5739,13 @@ by calling the fiber::get_id() template< typename Clock, typename Duration > -channel_op_status push_wait_until( value_type const& va, - std::chrono::time_point< Clock, Duration > const& timeout_time); +channel_op_status push_wait_until( + value_type const& va, + std::chrono::time_point< Clock, Duration > const& timeout_time); template< typename Clock, typename Duration > -channel_op_status push_wait_until( value_type && va, - std::chrono::time_point< Clock, Duration > const& timeout_time); +channel_op_status push_wait_until( + value_type && va, + std::chrono::time_point< Clock, Duration > const& timeout_time); @@ -5909,8 +5948,9 @@ by calling the fiber::get_id() template< typename Rep, typename Period > -channel_op_status pop_wait_for( value_type & va, - std::chrono::duration< Rep, Period > const& timeout_duration) +channel_op_status pop_wait_for( + value_type & va, + std::chrono::duration< Rep, Period > const& timeout_duration) @@ -5956,8 +5996,9 @@ by calling the fiber::get_id() template< typename Clock, typename Duration > -channel_op_status pop_wait_until( value_type & va, - std::chrono::time_point< Clock, Duration > const& timeout_time) +channel_op_status pop_wait_until( + value_type & va, + std::chrono::time_point< Clock, Duration > const& timeout_time) @@ -6034,10 +6075,10 @@ are of the associated asynchronous result, but not vice-versa. - async() - is a simple way of running asynchronous tasks. A call to async() spawns a fiber and returns a future<> that - will deliver the result of the fiber function. 
+ fibers::async() is a simple way of running asynchronous tasks. + A call to async() + spawns a fiber and returns a future<> that will deliver + the result of the fiber function. - - deferred - - - - - Effects: - - - The function is deferred, e.g. result will be computed only when - explictly requested. - - - - - Note: - - - Not implemented yet. - - - - - - - Launch policy deferred, - which indicates you simply want to defer the function call until a later - time (lazy evaluation), is not supported yet. - - @@ -6244,7 +6253,7 @@ are std::chrono::time_point< Clock, Duration > const& timeout_time) const; }; - + Default constructor @@ -6273,7 +6282,7 @@ are - + Move constructor @@ -6286,7 +6295,10 @@ are Constructs a future with the shared - state of other. After construction false == other.valid() + state of other. After construction false + == other.valid(). @@ -6299,7 +6311,7 @@ are - + Destructor ~future(); @@ -6448,6 +6460,25 @@ are + + Returns: + + + a shared_future<> containing the shared + state formerly belonging to *this. + + + + + Postcondition: + + + false == + valid() + + + Throws: @@ -6560,6 +6591,17 @@ are + + Note: + + + get_exception_ptr() does not invalidate + the future. After calling get_exception_ptr(), you may still call future::get(). + + + @@ -6687,7 +6729,7 @@ are A shared_future<> contains a shared - state which might be shared with other futures. + state which might be shared with other shared_future<> instances. template< typename R > class shared_future { @@ -6719,13 +6761,15 @@ are void wait() const; template< class Rep, class Period > - future_status wait_for( std::chrono::duration< Rep, Period > const& timeout_duration) const; + future_status wait_for( + std::chrono::duration< Rep, Period > const& timeout_duration) const; template< typename Clock, typename Duration > - future_status wait_until( std::chrono::time_point< Clock, Duration > const& timeout_time) const; + future_status wait_until( + std::chrono::time_point< Clock, Duration > const& timeout_time) const; }; - + Default constructor @@ -6754,7 +6798,7 @@ are - + Move constructor @@ -6768,7 +6812,38 @@ are Constructs a shared_future with the shared - state of other. After construction false == other.valid() + state of other. After construction false + == other.valid(). + + + + + Throws: + + + Nothing. + + + + + + Copy constructor + +shared_future( shared_future const& other) noexcept; + + + + + Effects: + + + Constructs a shared_future with the shared + state of other. After construction other.valid() is unchanged. @@ -6782,32 +6857,6 @@ are - Copy constructor - -shared_future( shared_future const& other) noexcept; - - - - - Effects: - - - Constructs a shared_future with the shared - state of other. After construction true == other.valid() - - - - - Throws: - - - Nothing. - - - - - Destructor ~shared_future(); @@ -6861,8 +6910,7 @@ are the assignment, the state of other.valid() depends on which overload was invoked: - true for the overload - accepting shared_future + unchanged for the overload accepting shared_future const&, otherwise false. @@ -7010,6 +7058,17 @@ are + + Note: + + + get_exception_ptr() does not invalidate + the shared_future. After calling get_exception_ptr(), you may still call shared_future::get(). + + + @@ -7184,9 +7243,10 @@ are Notes: - The overload accepting std::allocator_arg_t - uses the passed StackAllocator + The overload accepting std::allocator_arg_t uses the + passed StackAllocator when constructing the launched fiber. 
@@ -7284,6 +7344,16 @@ are + + See also: + + + std::allocator_arg_t + + + + + See also: + + + std::allocator_arg_t + + + delete this->get() is well-formed; fn(this->get()) - does not throw + role="special">() is well-formed; fn(this->get()) does not throw @@ -8049,7 +8131,7 @@ encapsulates. All the fiber specific instances associated to this fiber_specific_ptr (except - maybe the one associated to this fiber) must be null. + maybe the one associated to this fiber) must be nullptr. @@ -8137,6 +8219,16 @@ encapsulates. + + Requires: + + + this->get() + is not nullptr. + + + Returns: @@ -8224,7 +8316,7 @@ encapsulates. this->get()==0 + role="keyword">nullptr @@ -8244,7 +8336,7 @@ encapsulates. function reset() -void reset(T* new_value=0); +void reset(T* new_value); @@ -8416,9 +8508,10 @@ encapsulates. This tactic for resuming a pending fiber works even if the callback is called on a different thread than the one on which the initiating fiber is running. - In fact, the example program's dummy AsyncAPI - implementation illustrates that: it simulates async I/O by launching a new - thread that sleeps briefly and then calls the relevant callback. + In fact, the example program's + dummy AsyncAPI implementation + illustrates that: it simulates async I/O by launching a new thread that sleeps + briefly and then calls the relevant callback. @@ -8478,6 +8571,7 @@ encapsulates. std::tie( ec, data) = read_ec( api); + Data or Exception @@ -8702,7 +8796,8 @@ encapsulates. template< typename Allocator, typename ReturnType, typename Arg2 > -struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( boost::system::error_code, Arg2) > { +struct handler_type< boost::fibers::asio::yield_t< Allocator >, + ReturnType( boost::system::error_code, Arg2) > { typedef fibers::asio::detail::yield_handler< Arg2 > type; }; @@ -8736,14 +8831,14 @@ encapsulates. template< typename T > class promise_handler_base { -private: - typedef boost::shared_ptr< boost::fibers::promise< T > > promise_ptr; - public: + typedef std::shared_ptr< boost::fibers::promise< T > > promise_ptr; + // Construct from any promise_completion_token subclass special value. template< typename Allocator > promise_handler_base( boost::fibers::asio::promise_completion_token< Allocator > const& pct) : - promise_( new boost::fibers::promise< T >( std::allocator_arg, pct.get_allocator() ) ) + promise_( std::make_shared< boost::fibers::promise< T > >( + std::allocator_arg, pct.get_allocator() ) ) {} bool should_set_value( boost::system::error_code const& ec) { @@ -8779,10 +8874,11 @@ encapsulates. instance is copied on its way into underlying Asio machinery.) - Asio, having consulted the yield_handler - traits specialization, instantiates a yield_handler - (aka promise_handler) as the - async operation's callback: + Asio, having consulted the handler_type<> traits specialization, instantiates + a yield_handler (aka promise_handler) as the async operation's + callback: template< typename T > @@ -8804,12 +8900,12 @@ encapsulates. - Like the lambda callback in our read(AsyncAPI&) presented earlier, promise_handler::operator()() - either calls promise::set_value() -or promise::set_exception() (via + Like the lambda callback in our read(AsyncAPI&) presented earlier, promise_handler::operator()() + either calls promise::set_value() or promise::set_exception() (via promise_handler_base::should_set_value()). 
@@ -8888,7 +8984,7 @@ or promise::set_exception()< // guaranteed not to return EWOULDBLOCK -int read_chunk( NonblockingAPI & api, std::string& data, std::size_t desired) { +int read_chunk( NonblockingAPI & api, std::string & data, std::size_t desired) { int error; while ( EWOULDBLOCK == ( error = api.read( data, desired) ) ) { // not ready yet -- try again on the next iteration of the @@ -9348,8 +9444,8 @@ calls either a return value or an exception. Therefore, we will change wait_first_value()'s bounded_channel<> to - hold future<T> + hold future< + T > items instead of simply T. @@ -9502,12 +9598,12 @@ calls Given the bounded_queue<future<T>> we already constructed for wait_first_outcome(), - though, we can readily recast the interface function to deliver the first - successful result. + role="special">< future< T > > + we already constructed for wait_first_outcome(), though, we can readily recast the interface + function to deliver the first successful result. That does beg the question: what if all the task functions @@ -9887,7 +9983,7 @@ to return that result We can address that problem with a counting - façade for the bounded_channelunbounded_channel<>. In fact, our façade need only support the producer end of the channel. @@ -10012,14 +10108,14 @@ to return that result we can elaborate wait_all_values() and wait_all_values_source() - by passing future<T> + by passing future< + T > instead of plain T. wait_all_until_error() pops that future<T< T > and calls its future::get(): @@ -10069,8 +10165,8 @@ to return that result Naturally this complicates the API for wait_all_until_error_source(). The caller must both retrieve a future<T> + role="identifier">future< + T > and call its get() method. It would, of course, be possible to return a façade over the consumer end of the channel that would implicitly perform the getfiber (raw) were compiled with BOOST_FIBERS_NO_ATOMICS. - - Overhead of creating and joining + + + The measurement of fibers include the memory allocation of fiber stacks! + + +
+ Overhead of join @@ -10517,12 +10618,12 @@ to return that result - 1.1 µs + 950 ns - 955 ns + 850 ns @@ -10573,12 +10674,12 @@ to return that result - 3.2 µs + 21 ns - 3.2 µs + 15 ns @@ -10619,12 +10720,12 @@ to return that result - 1.3 µs + 189 ns - 1.1 µs + 168 ns @@ -10665,12 +10766,12 @@ to return that result - 3.0 µs + 1.4 µs - 2.4 µs + 1.4 µs @@ -10721,12 +10822,12 @@ to return that result - 4.83 µs + 1.96 µs - 3.76 µs + 1.85 µs @@ -10743,12 +10844,12 @@ to return that result - 4.84 µs + 1.40 µs - 2.78 µs + 1.27 µs @@ -10765,12 +10866,12 @@ to return that result - 5.24 µs + 1.84 µs - 2.45 µs + 1.81 µs @@ -10787,12 +10888,12 @@ to return that result - 4.86 µs + 1.13 µs - 2.91 µs + 1.10 µs @@ -10809,12 +10910,12 @@ to return that result - 5.04 µs + 1.46 µs - 3.60 µs + 1.26 µs @@ -10831,12 +10932,12 @@ to return that result - 5.07 µs + 2.11 µs - 4.57 µs + 1.90 µs @@ -10853,12 +10954,12 @@ to return that result - 5.12 µs + 2.36 µs - 4.21 µs + 1.89 µs @@ -11042,9 +11143,9 @@ to return that result // With this scheduler, fibers with higher priority values are// preferred over fibers with lower priority values. But fibers with// equal priority values are processed in round-robin fashion. So when - // we're handed a new fiber_base, put it at the end of the fibers with - // that same priority. In other words: search for the first fiber in - // the queue with LOWER priority, and insert before that one. + // we're handed a new fiber_context*, put it at the end of the fibers + // with that same priority. In other words: search for the first fiber + // in the queue with LOWER priority, and insert before that one.boost::fibers::fiber_context**fp=&head_;for(;*fp;fp=&(*fp)->nxt){if(properties(*fp).get_priority()<f_priority){ @@ -11085,57 +11186,31 @@ to return that result // point of a property_change() override is to reshuffle the ready// queue according to the updated priority value. - // Despite the added complexity of the loop body, make a single pass - // over the queue to find both the existing item and the new desired - // insertion point. + // Find 'f' in the queue. Note that it might not be in our queue at + // all, if caller is changing the priority of (say) the running fiber.boolfound=false; - boost::fibers::fiber_context**insert=nullptr,**fp=&head_; - for(;*fp;fp=&(*fp)->nxt){ + for(boost::fibers::fiber_context**fp=&head_;*fp;fp=&(*fp)->nxt){if(*fp==f){// found the passed fiber in our list -- unlink itfound=true;*fp=(*fp)->nxt;f->nxt=nullptr; - // If that was the last item in the list, stop. - if(!*fp){ - break; - } - // If we've already found the new insertion point, no need to - // continue looping. - if(insert){ - break; - } - } - // As in awakened(), we're looking for the first fiber in the - // queue with priority lower than the passed fiber. - if(properties(*fp).get_priority()<props.get_priority()){ - insert=fp; - // If we've already found and unlinked the passed fiber, no - // need to continue looping. - if(found){ - break; - } + break;}} - // property_change() should only be called if f->is_ready(). However, - // a waiting fiber can change state to is_ready() while still on the - // fiber_manager's waiting queue. Every such fiber will be swept onto - // our ready queue before the next pick_next() call, but still it's - // possible to get a property_change() call for a fiber that - // is_ready() but is not yet on our ready queue. If it's not there, no - // action required: we'll handle it next time it hits awakened(). 
+ + // It's possible to get a property_change() call for a fiber that is + // not on our ready queue. If it's not there, no need to move it: + // we'll handle it next time it hits awakened().if(!found){ return; } - // There might not be any ready fibers with lower priority. In that - // case, append to the end of the queue. - if (! insert) - insert = fp; - } - // Insert f at the new insertion point in the queue. - f->nxt = * insert; - * insert = f; + + // Here we know that f was in our ready queue, but we've unlinked it. + // We happen to have a method that will (re-)add a fiber_context* to + // the ready queue. + awakened(f, props); } }; @@ -11238,12 +11313,7 @@ to return that result role="identifier">fiber_properties subclass. -void init( std::string const& name, int priority) { - priority_props & props( - boost::this_fiber::properties< priority_props >() ); - props.name = name; - props.set_priority( priority); -} +boost::this_fiber::properties< priority_props >().name = "main"; @@ -11256,40 +11326,24 @@ to return that result parameter. -void change_fn( std::string const& name, - int priority, - boost::fibers::fiber & other, - int other_priority, - boost::fibers::barrier& barrier) { - init( name, priority); - - barrier.wait(); - // We assume a couple things about 'other': - // - that it was also waiting on the same barrier - // - that it has lower priority than this fiber. - // If both are true, 'other' is now ready to run but is sitting in - // priority_scheduler's ready queue. Change its priority. - priority_props & other_props( - other.properties< priority_props >() ); - other_props.set_priority( other_priority); +template< typename Fn > +boost::fibers::fiber launch( Fn && func, std::string const& name, int priority) { + boost::fibers::fiber fiber( func); + priority_props & props( fiber.properties< priority_props >() ); + props.name = name; + props.set_priority( priority); + return fiber; } - Since launching a new fiber schedules that fiber right away, code such as the - following: - -boost::fibers::fiber newfiber( fiber_function); -newfiber.properties< priority_props >().name = "newfiber"; - - - will not necessarily set the property as soon as you expect. It is generally - preferable to pass the initial property values to your fiber_function() and have it set them itself. In the example - above, change_fn() - accepts its own name and priority and calls init() to set the corresponding properties. + Launching a new fiber schedules that fiber as ready, but does not + immediately enter its fiber-function. The current fiber + retains control until it blocks (or yields, or terminates) for some other reason. + As shown in the launch() + function above, it is reasonable to launch a fiber and immediately set relevant + properties -- such as, for instance, its priority. Your custom scheduler can + then make use of this information next time the fiber manager calls sched_algorithm_with_properties::pick_next().
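For orientation, here is a brief usage sketch of the pieces introduced in this section. It assumes the priority_props, priority_scheduler and launch() definitions shown above are visible in the same translation unit; the fiber bodies and priority values are invented for the illustration.

#include <boost/fiber/all.hpp>

int main() {
    // Install the custom scheduler on this thread before launching any fibers.
    boost::fibers::use_scheduling_algorithm< priority_scheduler >();
    // The main fiber has properties too; give it a name.
    boost::this_fiber::properties< priority_props >().name = "main";
    // Launch two fibers with different priorities. Both become ready immediately;
    // when the main fiber blocks in join(), the scheduler prefers the higher value.
    boost::fibers::fiber low  = launch( [](){ boost::this_fiber::yield(); }, "low",  1);
    boost::fibers::fiber high = launch( [](){ boost::this_fiber::yield(); }, "high", 3);
    low.join();
    high.join();
    return 0;
}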
diff --git a/doc/html/fiber/callbacks.html b/doc/html/fiber/callbacks.html
index 53b61fd3..e3db86b1 100644
--- a/doc/html/fiber/callbacks.html
+++ b/doc/html/fiber/callbacks.html
@@ -150,9 +150,10 @@

This tactic for resuming a pending fiber works even if the callback is called on a different thread than the one on which the initiating fiber is running. - In fact, the example program's dummy AsyncAPI - implementation illustrates that: it simulates async I/O by launching a new - thread that sleeps briefly and then calls the relevant callback. + In fact, the example program's + dummy AsyncAPI implementation + illustrates that: it simulates async I/O by launching a new thread that sleeps + briefly and then calls the relevant callback.

@@ -215,7 +216,7 @@

-

+

Data or Exception @@ -444,7 +445,8 @@

template< typename Allocator, typename ReturnType, typename Arg2 >
-struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( boost::system::error_code, Arg2) > {
+struct handler_type< boost::fibers::asio::yield_t< Allocator >,
+                     ReturnType( boost::system::error_code, Arg2) > {
     typedef fibers::asio::detail::yield_handler< Arg2 >    type;
 };
 
@@ -481,14 +483,14 @@

template< typename T >
 class promise_handler_base {
-private:
-    typedef boost::shared_ptr< boost::fibers::promise< T > > promise_ptr;
-
 public:
+    typedef std::shared_ptr< boost::fibers::promise< T > > promise_ptr;
+
     // Construct from any promise_completion_token subclass special value.
     template< typename Allocator >
     promise_handler_base( boost::fibers::asio::promise_completion_token< Allocator > const& pct) :
-        promise_( new boost::fibers::promise< T >( std::allocator_arg, pct.get_allocator() ) )
+        promise_( std::make_shared< boost::fibers::promise< T > >(
+                      std::allocator_arg, pct.get_allocator() ) )
     {}
 
     bool should_set_value( boost::system::error_code const& ec) {
@@ -524,10 +526,9 @@
       instance is copied on its way into underlying Asio machinery.)
     

- Asio, having consulted the yield_handler - traits specialization, instantiates a yield_handler - (aka promise_handler) as the - async operation's callback: + Asio, having consulted the handler_type<> traits specialization, instantiates + a yield_handler (aka promise_handler) as the async operation's + callback:

@@ -551,9 +552,8 @@

- Like the lambda callback in our read(AsyncAPI&) presented earlier, promise_handler::operator()() - either calls promise::set_value() -or promise::set_exception() (via + Like the lambda callback in our read(AsyncAPI&) presented earlier, promise_handler::operator()() + either calls promise::set_value() or promise::set_exception() (via promise_handler_base::should_set_value()).
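To recap the pattern that promise_handler reproduces, here is a hedged sketch of bridging a callback-style API to a fiber through a promise/future pair. AsyncAPI, its errorcode type and init_read() stand in for the example program's hypothetical API; only the promise/future calls are the library's own.

#include <boost/fiber/all.hpp>
#include <exception>
#include <stdexcept>
#include <string>

std::string read( AsyncAPI & api) {
    boost::fibers::promise< std::string > promise;
    boost::fibers::future< std::string > future( promise.get_future() );
    // The callback -- possibly invoked on another thread -- fulfills the promise;
    // future.get() then resumes this fiber.
    api.init_read( [&promise]( AsyncAPI::errorcode ec, std::string const& data) {
                       if ( ec) {
                           promise.set_exception(
                               std::make_exception_ptr( std::runtime_error("read failed") ) );
                       } else {
                           promise.set_value( data);
                       }
                   });
    return future.get();    // suspends only this fiber, not the whole thread
}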

diff --git a/doc/html/fiber/custom.html b/doc/html/fiber/custom.html
index 82b8d6cf..86264a33 100644
--- a/doc/html/fiber/custom.html
+++ b/doc/html/fiber/custom.html
@@ -61,7 +61,7 @@

The first essential point is that we must associate an integer priority with - each fiber.[6] + each fiber.[7]

One might suggest deriving a custom fiber subclass to store such @@ -196,9 +196,9 @@ // With this scheduler, fibers with higher priority values are // preferred over fibers with lower priority values. But fibers with // equal priority values are processed in round-robin fashion. So when - // we're handed a new fiber_base, put it at the end of the fibers with - // that same priority. In other words: search for the first fiber in - // the queue with LOWER priority, and insert before that one. + // we're handed a new fiber_context*, put it at the end of the fibers + // with that same priority. In other words: search for the first fiber + // in the queue with LOWER priority, and insert before that one. boost::fibers::fiber_context ** fp = & head_; for ( ; * fp; fp = & ( * fp)->nxt) { if ( properties( * fp).get_priority() < f_priority) { @@ -239,57 +239,31 @@ // point of a property_change() override is to reshuffle the ready // queue according to the updated priority value. - // Despite the added complexity of the loop body, make a single pass - // over the queue to find both the existing item and the new desired - // insertion point. + // Find 'f' in the queue. Note that it might not be in our queue at + // all, if caller is changing the priority of (say) the running fiber. bool found = false; - boost::fibers::fiber_context ** insert = nullptr, ** fp = & head_; - for ( ; * fp; fp = & ( * fp)->nxt) { + for ( boost::fibers::fiber_context ** fp = & head_; * fp; fp = & ( * fp)->nxt) { if ( * fp == f) { // found the passed fiber in our list -- unlink it found = true; * fp = ( * fp)->nxt; f->nxt = nullptr; - // If that was the last item in the list, stop. - if ( ! * fp) { - break; - } - // If we've already found the new insertion point, no need to - // continue looping. - if ( insert) { - break; - } - } - // As in awakened(), we're looking for the first fiber in the - // queue with priority lower than the passed fiber. - if ( properties( * fp).get_priority() < props.get_priority() ) { - insert = fp; - // If we've already found and unlinked the passed fiber, no - // need to continue looping. - if ( found) { - break; - } + break; } } - // property_change() should only be called if f->is_ready(). However, - // a waiting fiber can change state to is_ready() while still on the - // fiber_manager's waiting queue. Every such fiber will be swept onto - // our ready queue before the next pick_next() call, but still it's - // possible to get a property_change() call for a fiber that - // is_ready() but is not yet on our ready queue. If it's not there, no - // action required: we'll handle it next time it hits awakened(). + + // It's possible to get a property_change() call for a fiber that is + // not on our ready queue. If it's not there, no need to move it: + // we'll handle it next time it hits awakened(). if ( ! found) { 8 return; } - // There might not be any ready fibers with lower priority. In that - // case, append to the end of the queue. - if (! insert) - insert = fp; - } - // Insert f at the new insertion point in the queue. - f->nxt = * insert; - * insert = f; + + // Here we know that f was in our ready queue, but we've unlinked it. + // We happen to have a method that will (re-)add a fiber_context* to + // the ready queue. + awakened(f, props); } }; @@ -403,12 +377,7 @@

-
void init( std::string const& name, int priority) {
-    priority_props & props(
-            boost::this_fiber::properties< priority_props >() );
-    props.name = name;
-    props.set_priority( priority);
-}
+
boost::this_fiber::properties< priority_props >().name = "main";
 

@@ -420,42 +389,29 @@

-
void change_fn( std::string const& name,
-                int priority,
-                boost::fibers::fiber & other,
-                int other_priority,
-                boost::fibers::barrier& barrier) {
-    init( name, priority);
-
-    barrier.wait();
-    // We assume a couple things about 'other':
-    // - that it was also waiting on the same barrier
-    // - that it has lower priority than this fiber.
-    // If both are true, 'other' is now ready to run but is sitting in
-    // priority_scheduler's ready queue. Change its priority.
-    priority_props & other_props(
-            other.properties< priority_props >() );
-    other_props.set_priority( other_priority);
+
template< typename Fn >
+boost::fibers::fiber launch( Fn && func, std::string const& name, int priority) {
+    boost::fibers::fiber fiber( func);
+    priority_props & props( fiber.properties< priority_props >() );
+    props.name = name;
+    props.set_priority( priority);
+    return fiber;
 }
 

- Since launching a new fiber schedules that fiber right away, code such as the - following: -

-
boost::fibers::fiber newfiber( fiber_function);
-newfiber.properties< priority_props >().name = "newfiber";
-
-

- will not necessarily set the property as soon as you expect. It is generally - preferable to pass the initial property values to your fiber_function() and have it set them itself. In the example - above, change_fn() - accepts its own name and priority and calls init() to set the corresponding properties. + Launching a new fiber schedules that fiber as ready, but does not + immediately enter its fiber-function. The current fiber + retains control until it blocks (or yields, or terminates) for some other reason. + As shown in the launch() + function above, it is reasonable to launch a fiber and immediately set relevant + properties -- such as, for instance, its priority. Your custom scheduler can + then make use of this information next time the fiber manager calls sched_algorithm_with_properties::pick_next().



-

[6] +

[7] A previous version of the Fiber library implicitly tracked an int priority for each fiber, even though the default scheduler ignored it. This has been dropped, since the library now supports arbitrary scheduler-specific fiber diff --git a/doc/html/fiber/fiber_mgmt.html b/doc/html/fiber/fiber_mgmt.html index bb355d5b..e4f67964 100644 --- a/doc/html/fiber/fiber_mgmt.html +++ b/doc/html/fiber/fiber_mgmt.html @@ -123,8 +123,9 @@ // this leads to undefined behaviour

- The spawned fiber is enqueued in the list of ready-to-run fibers - at construction. + The spawned fiber does not immediately start running. It is enqueued + in the list of ready-to-run fibers, and will run when the scheduler gets around + to it.

@@ -188,11 +189,10 @@ constructor Destruction

- When a fiber object representing a valid execution context is destroyed, - the program terminates if the fiber is fiber::joinable(). If + When a fiber object representing a valid execution context (the fiber + is fiber::joinable()) is destroyed, the program terminates. If you intend the fiber to outlive the fiber object that launched it, - use the fiber::detach() -method. + use the fiber::detach() method.

{
     boost::fibers::fiber f( some_fn);
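// Illustrative sketch, separate from the snippet above: detaching lets the fiber
// outlive the fiber object, so destroying the object cannot terminate the program.
{
    boost::fibers::fiber f( some_fn);   // some_fn: any callable, defined elsewhere
    f.detach();                         // f is no longer joinable...
}                                       // ...so destroying f here is safe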
diff --git a/doc/html/fiber/fiber_mgmt/fiber.html b/doc/html/fiber/fiber_mgmt/fiber.html
index 3bc49452..474b6795 100644
--- a/doc/html/fiber/fiber_mgmt/fiber.html
+++ b/doc/html/fiber/fiber_mgmt/fiber.html
@@ -110,7 +110,7 @@
             

-
+
Constructor
@@ -151,7 +151,8 @@

See also:

- stack + std::allocator_arg_t, Stack + allocation

@@ -288,14 +289,11 @@

Effects:

- If *this - refers to a fiber of execution, waits for that fiber to complete. + Waits for the referenced fiber of execution to complete.

Postconditions:

- If *this - refers to a fiber of execution on entry, that fiber has completed. - *this + The fiber of execution referenced on entry has completed. *this no longer refers to any fiber of execution.

Throws:
@@ -437,8 +435,6 @@ refers to a fiber of execution. use_scheduling_algorithm() has been called from this thread with a subclass of sched_algorithm_with_properties<> with the same template argument PROPS. - *this - has been scheduled for execution at least once.

Returns:

@@ -454,8 +450,7 @@ sched_algorithm_with_properties<> provides a way for a user-coded scheduler to associate extended properties, such as priority, with a fiber instance. This method allows access - to those user-provided properties — but only after this fiber has been - scheduled for the first time. + to those user-provided properties.

See also:

@@ -638,7 +633,7 @@

Returns:

- Returns the number of fibers ready to run. + the number of fibers ready to run.

Throws:

diff --git a/doc/html/fiber/fiber_mgmt/this_fiber.html b/doc/html/fiber/fiber_mgmt/this_fiber.html
index c9f9e81f..81ad8a78 100644
--- a/doc/html/fiber/fiber_mgmt/this_fiber.html
+++ b/doc/html/fiber/fiber_mgmt/this_fiber.html
@@ -231,6 +231,11 @@
          such as priority, with a fiber instance. This function allows access
          to those user-provided properties.

+
Note:
+

+ The first time this function is called from the main fiber of a thread, + it may internally yield, permitting other fibers to run. +

See also:

          Customization
diff --git a/doc/html/fiber/fls.html b/doc/html/fiber/fls.html
index 1d58b1c3..187b744a 100644
--- a/doc/html/fiber/fls.html
+++ b/doc/html/fiber/fls.html
@@ -97,8 +97,7 @@

Requires:

- delete this->get() is well-formed; fn(this->get()) - does not throw + delete this->get() is well-formed; fn(this->get()) does not throw

Effects:

@@ -129,7 +128,7 @@

All the fiber specific instances associated to this fiber_specific_ptr (except - maybe the one associated to this fiber) must be null. + maybe the one associated to this fiber) must be nullptr.

Effects:

@@ -212,6 +211,11 @@

+
Requires:
+

+ this->get() + is not nullptr. +

Returns:

this->get() @@ -276,7 +280,7 @@

Postcondition:

- this->get()==0 + this->get()==nullptr

Throws:

@@ -294,7 +298,7 @@

-
void reset(T* new_value=0);
+
void reset(T* new_value);
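By way of illustration, a minimal sketch of fiber-local storage built on this class; the counter name and int payload are invented for the example.

#include <boost/fiber/all.hpp>
#include <iostream>

static boost::fibers::fiber_specific_ptr< int > counter;

void fiber_fn() {
    if ( nullptr == counter.get() ) {   // first use in this fiber
        counter.reset( new int( 0) );   // default cleanup deletes it at fiber exit
    }
    ++ *counter;                        // each fiber increments its own instance
    std::cout << *counter << std::endl;
}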
 

diff --git a/doc/html/fiber/nonblocking.html b/doc/html/fiber/nonblocking.html
index 2628dbea..ea50d932 100644
--- a/doc/html/fiber/nonblocking.html
+++ b/doc/html/fiber/nonblocking.html
@@ -96,7 +96,7 @@

// guaranteed not to return EWOULDBLOCK
-int read_chunk( NonblockingAPI & api, std::string& data, std::size_t desired) {
+int read_chunk( NonblockingAPI & api, std::string & data, std::size_t desired) {
     int error;
     while ( EWOULDBLOCK == ( error = api.read( data, desired) ) ) {
         // not ready yet -- try again on the next iteration of the
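// Illustrative sketch, separate from the hunk above: the usual way such a polling
// loop avoids stalling the whole thread is to yield between attempts. NonblockingAPI
// is the example program's hypothetical class; read() is assumed to return
// EWOULDBLOCK until data is available.
int read_chunk_sketch( NonblockingAPI & api, std::string & data, std::size_t desired) {
    int error;
    while ( EWOULDBLOCK == ( error = api.read( data, desired) ) ) {
        boost::this_fiber::yield();     // let other fibers on this thread run, then retry
    }
    return error;
}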
diff --git a/doc/html/fiber/overview.html b/doc/html/fiber/overview.html
index 7eaf3b6d..7b9fa914 100644
--- a/doc/html/fiber/overview.html
+++ b/doc/html/fiber/overview.html
@@ -100,27 +100,31 @@
       can count on thread-local storage; however that storage will be shared among
       all fibers running on the same thread.
     

+

+ + BOOST_FIBERS_NO_ATOMICS +

- The fiber synchronization objects provided - by this library will, by default, safely synchronize fibers running on different - threads. However, this level of synchronization can be removed (for performance) - by building the library with BOOST_FIBERS_NO_ATOMICS + The fiber synchronization objects provided by this library will, by default, + safely synchronize fibers running on different threads. However, this level + of synchronization can be removed (for performance) by building the library + with BOOST_FIBERS_NO_ATOMICS defined. When the library is built with that macro, you must ensure that all the fibers referencing a particular synchronization object are running in the - same thread. Please see synchronization. + same thread. Please see Synchronization.

For fiber-local storage, please see fiber_specific_ptr.

- + Blocking

- Normally, when this documentation states that a particular fiber blocks, - it means that it yields control, allowing other fibers on the same thread to - run. The synchronization mechanisms provided by Boost.Fiber - have this behavior. + Normally, when this documentation states that a particular fiber blocks + (or equivalently, suspends), it means that it yields control, + allowing other fibers on the same thread to run. The synchronization mechanisms + provided by Boost.Fiber have this behavior.

       A fiber may, of course, use normal thread synchronization mechanisms; however
diff --git a/doc/html/fiber/performance.html b/doc/html/fiber/performance.html
index 96be62b2..31648fcc 100644
--- a/doc/html/fiber/performance.html
+++ b/doc/html/fiber/performance.html
@@ -29,7 +29,7 @@

Performance measurements were taken using std::chrono::high_resolution_clock, with overhead corrections. The code was compiled using the build options: variant - = release, optimization = speed [5]. + = release, optimization = speed [6].

The columns labeled fiber (atomics) were compiled @@ -37,9 +37,18 @@ on different threads. The columns labeled fiber (raw) were compiled with BOOST_FIBERS_NO_ATOMICS.

+
+ + + + + +
[Note]Note

+ The measurements for fibers include the memory allocation of fiber stacks! +

-

Table 1.1. Overhead of creating and joining

-
+

Table 1.1. Overhead of join

+
@@ -82,12 +91,12 @@ @@ -186,12 +195,12 @@ @@ -233,12 +242,12 @@ @@ -292,12 +301,12 @@ @@ -314,12 +323,12 @@ @@ -336,12 +345,12 @@ @@ -358,12 +367,12 @@ @@ -380,12 +389,12 @@ @@ -402,12 +411,12 @@ @@ -424,12 +433,12 @@ @@ -441,7 +450,7 @@



-

[5] +

[6] Intel Core2 Q6700, x86_64, 3GHz

diff --git a/doc/html/fiber/scheduling.html b/doc/html/fiber/scheduling.html
index 79a967dd..9aa642a9 100644
--- a/doc/html/fiber/scheduling.html
+++ b/doc/html/fiber/scheduling.html
@@ -162,9 +162,9 @@

-
Effects:
+
Returns:

- Returns the number of fibers ready to run. + the number of fibers ready to run.

@@ -256,7 +256,7 @@
Returns:

- Returns the number of fibers ready to run. + the number of fibers ready to run.

@@ -469,7 +469,7 @@
Returns:

- 0 if scheduling algorithm has no fibers ready to run, otherwise nonzero. + the number of fibers ready to run.

Note:

@@ -500,8 +500,10 @@

Note:

          The fiber's associated PROPS
-          instance is already passed to awakened() and property_change(). However, every sched_algorithm subclass
-          is expected to track a collection of ready fiber_context instances.
+          instance is already passed to sched_algorithm_with_properties::awakened() and
+          sched_algorithm_with_properties::property_change().
+          However, every sched_algorithm subclass is expected
+          to track a collection of ready fiber_context instances.
          This method allows your custom scheduler to retrieve the fiber_properties
          subclass instance for any fiber_context in its collection.
diff --git a/doc/html/fiber/stack.html b/doc/html/fiber/stack.html
index 7e668ce6..f2b57e84 100644
--- a/doc/html/fiber/stack.html
+++ b/doc/html/fiber/stack.html
@@ -33,6 +33,9 @@
          required to model a stack-allocator concept.

+

+ A stack_allocator can be passed to fiber::fiber() or to fibers::async(). +
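For instance (an illustrative sketch only; the 1 MB size is arbitrary, and fixedsize_stack is one of the stack allocators shipped with the library):

#include <boost/fiber/all.hpp>
#include <memory>

void spawn_with_custom_stack() {
    boost::fibers::fixedsize_stack stack_alloc( 1024 * 1024);   // 1 MB stack
    boost::fibers::fiber f( std::allocator_arg, stack_alloc,
                            [](){ /* fiber-function */ });
    f.join();
}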

          stack-allocator concept
diff --git a/doc/html/fiber/synchronization.html b/doc/html/fiber/synchronization.html
index 9262e9cf..5589c715 100644
--- a/doc/html/fiber/synchronization.html
+++ b/doc/html/fiber/synchronization.html
@@ -52,7 +52,7 @@
        The fiber synchronization objects provided by this library will, by default,
        safely synchronize fibers running on different threads. However, this level
        of synchronization can be removed (for performance) by building the library
-       with BOOST_FIBERS_NO_ATOMICS
+       with BOOST_FIBERS_NO_ATOMICS
        defined. When the library is built with that macro, you must ensure that all
        the fibers referencing a particular synchronization object are running in the
        same thread.
diff --git a/doc/html/fiber/synchronization/barriers.html b/doc/html/fiber/synchronization/barriers.html
index 42a9138d..58cc5fce 100644
--- a/doc/html/fiber/synchronization/barriers.html
+++ b/doc/html/fiber/synchronization/barriers.html
@@ -61,7 +61,7 @@
          Fiber "main" is unblocked, as desired.
  • - Fiber B calls barrier::wait(). Fiber B is blocked! + Fiber B calls barrier::wait(). Fiber B is blocked!
  • Fiber A calls barrier::wait(). Fibers A and B are unblocked. @@ -82,8 +82,7 @@ It is unwise to tie the lifespan of a barrier to any one of its participating fibers. Although conceptually all waiting fibers awaken "simultaneously," because of the nature of fibers, in practice they will awaken one by one - in indeterminate order. The rest of the waiting fibers will still be blocked - in wait(), + in indeterminate order.[4] The rest of the waiting fibers will still be blocked in wait(), which must, before returning, access data members in the barrier object.

  • - 1.1 µs + 950 ns

    - 955 ns + 850 ns

    @@ -139,12 +148,12 @@

    - 3.2 µs + 21 ns

    - 3.2 µs + 15 ns

    - 1.3 µs + 189 ns

    - 1.1 µs + 168 ns

    - 3.0 µs + 1.4 µs

    - 2.4 µs + 1.4 µs

    - 4.83 µs + 1.96 µs

    - 3.76 µs + 1.85 µs

    - 4.84 µs + 1.40 µs

    - 2.78 µs + 1.27 µs

    - 5.24 µs + 1.84 µs

    - 2.45 µs + 1.81 µs

    - 4.86 µs + 1.13 µs

    - 2.91 µs + 1.10 µs

    - 5.04 µs + 1.46 µs

    - 3.60 µs + 1.26 µs

    - 5.07 µs + 2.11 µs

    - 4.57 µs + 1.90 µs

    - 5.12 µs + 2.36 µs

    - 4.21 µs + 1.89 µs

    @@ -172,6 +171,15 @@

    +
    +

    +

    [4] + The current implementation wakes fibers in FIFO order: the first to call + wait() + wakes first, and so forth. But it is perilous to rely on the order in + which the various fibers will reach the wait() call. +

    +
diff --git a/doc/html/fiber/synchronization/channels.html b/doc/html/fiber/synchronization/channels.html
index d9e7fdf6..bfe03fda 100644
--- a/doc/html/fiber/synchronization/channels.html
+++ b/doc/html/fiber/synchronization/channels.html
@@ -163,11 +163,13 @@
     value_type value_pop();
     channel_op_status try_pop( value_type & va);
     template< typename Rep, typename Period >
-    channel_op_status pop_wait_for( value_type & va,
-                                    std::chrono::duration< Rep, Period > const& timeout_duration);
+    channel_op_status pop_wait_for(
+        value_type & va,
+        std::chrono::duration< Rep, Period > const& timeout_duration);
     template< typename Clock, typename Duration >
-    channel_op_status pop_wait_until( value_type & va,
-                                      std::chrono::time_point< Clock, Duration > const& timeout_time);
+    channel_op_status pop_wait_until(
+        value_type & va,
+        std::chrono::time_point< Clock, Duration > const& timeout_time);
 };

    @@ -331,8 +333,9 @@

    template< typename Rep, typename Period >
    -channel_op_status pop_wait_for( value_type & va,
    -                                std::chrono::duration< Rep, Period > const& timeout_duration)
    +channel_op_status pop_wait_for(
    +    value_type & va,
    +    std::chrono::duration< Rep, Period > const& timeout_duration)
     

    @@ -367,8 +370,9 @@

    template< typename Clock, typename Duration >
    -channel_op_status pop_wait_until( value_type & va,
    -                                  std::chrono::time_point< Clock, Duration > const& timeout_time)
    +channel_op_status pop_wait_until(
    +    value_type & va,
    +    std::chrono::time_point< Clock, Duration > const& timeout_time)
     

    @@ -424,27 +428,32 @@ channel_op_status push( value_type const& va); channel_op_status push( value_type && va); template< typename Rep, typename Period > - channel_op_status push_wait_for( value_type const& va, - std::chrono::duration< Rep, Period > const& timeout_duration); + channel_op_status push_wait_for( + value_type const& va, + std::chrono::duration< Rep, Period > const& timeout_duration); channel_op_status push_wait_for( value_type && va, - std::chrono::duration< Rep, Period > const& timeout_duration); + std::chrono::duration< Rep, Period > const& timeout_duration); template< typename Clock, typename Duration > - channel_op_status push_wait_until( value_type const& va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status push_wait_until( + value_type const& va, + std::chrono::time_point< Clock, Duration > const& timeout_time); template< typename Clock, typename Duration > - channel_op_status push_wait_until( value_type && va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status push_wait_until( + value_type && va, + std::chrono::time_point< Clock, Duration > const& timeout_time); channel_op_status try_push( value_type const& va); channel_op_status try_push( value_type && va); channel_op_status pop( value_type & va); value_type value_pop(); template< typename Rep, typename Period > - channel_op_status pop_wait_for( value_type & va, - std::chrono::duration< Rep, Period > const& timeout_duration); + channel_op_status pop_wait_for( + value_type & va, + std::chrono::duration< Rep, Period > const& timeout_duration); template< typename Clock, typename Duration > - channel_op_status pop_wait_until( value_type & va, - std::chrono::time_point< Clock, Duration > const& timeout_time); + channel_op_status pop_wait_until( + value_type & va, + std::chrono::time_point< Clock, Duration > const& timeout_time); channel_op_status try_pop( value_type & va); }; @@ -617,11 +626,13 @@

    template< typename Rep, typename Period >
    -channel_op_status push_wait_for( value_type const& va,
    -                                 std::chrono::duration< Rep, Period > const& timeout_duration);
    +channel_op_status push_wait_for(
    +    value_type const& va,
    +    std::chrono::duration< Rep, Period > const& timeout_duration);
     template< typename Rep, typename Period >
    -channel_op_status push_wait_for( value_type && va,
    -                                 std::chrono::duration< Rep, Period > const& timeout_duration);
    +channel_op_status push_wait_for(
    +    value_type && va,
    +    std::chrono::duration< Rep, Period > const& timeout_duration);
     

    @@ -656,11 +667,13 @@

    template< typename Clock, typename Duration >
    -channel_op_status push_wait_until( value_type const& va,
    -                                   std::chrono::time_point< Clock, Duration > const& timeout_time);
    +channel_op_status push_wait_until(
    +    value_type const& va,
    +    std::chrono::time_point< Clock, Duration > const& timeout_time);
     template< typename Clock, typename Duration >
    -channel_op_status push_wait_until( value_type && va,
    -                                   std::chrono::time_point< Clock, Duration > const& timeout_time);
    +channel_op_status push_wait_until(
    +    value_type && va,
    +    std::chrono::time_point< Clock, Duration > const& timeout_time);
     

    @@ -824,8 +837,9 @@

    template< typename Rep, typename Period >
    -channel_op_status pop_wait_for( value_type & va,
    -                                std::chrono::duration< Rep, Period > const& timeout_duration)
    +channel_op_status pop_wait_for(
    +    value_type & va,
    +    std::chrono::duration< Rep, Period > const& timeout_duration)
     

    @@ -862,8 +876,9 @@

    template< typename Clock, typename Duration >
    -channel_op_status pop_wait_until( value_type & va,
    -                                  std::chrono::time_point< Clock, Duration > const& timeout_time)
    +channel_op_status pop_wait_until(
    +    value_type & va,
    +    std::chrono::time_point< Clock, Duration > const& timeout_time)
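To make the interface concrete, a hedged sketch of a producer/consumer pair sharing an unbounded_channel; the channel contents and loop bounds are invented for the example.

#include <boost/fiber/all.hpp>
#include <iostream>

void channel_demo() {
    boost::fibers::unbounded_channel< int > chan;
    boost::fibers::fiber producer( [&chan](){
        for ( int i = 0; i < 3; ++i) {
            chan.push( i);                    // never blocks on an unbounded channel
        }
        chan.close();                         // subsequent pops report 'closed'
    });
    boost::fibers::fiber consumer( [&chan](){
        int value;
        while ( boost::fibers::channel_op_status::success == chan.pop( value) ) {
            std::cout << value << std::endl;  // pop() suspends this fiber until data arrives
        }
    });
    producer.join();
    consumer.join();
}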
     

diff --git a/doc/html/fiber/synchronization/conditions.html b/doc/html/fiber/synchronization/conditions.html
index ffa934de..229aa483 100644
--- a/doc/html/fiber/synchronization/conditions.html
+++ b/doc/html/fiber/synchronization/conditions.html
@@ -40,11 +40,12 @@

    The class condition_variable - provides a mechanism for a fiber to wait for notification on condition_variable. When the fiber awakens - from the wait, then it checks to see if the appropriate condition is now - true, and continues if so. If the condition is not true, then the fiber calls - wait again to resume waiting. - In the simplest case, this condition is just a boolean variable: + provides a mechanism for a fiber to wait for notification from another fiber. + When the fiber awakens from the wait, then it checks to see if the appropriate + condition is now true, and continues if so. If the condition is not true, + then the fiber calls wait + again to resume waiting. In the simplest case, this condition is just a boolean + variable:

    boost::fibers::condition_variable cond;
     boost::fibers::mutex mtx;
    @@ -123,7 +124,7 @@
     
    #include <boost/fiber/condition.hpp>
     
     enum cv_status {
    -    no_timeout = 1,
    +    no_timeout,
         timeout
     };
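For reference, a hedged sketch of the boolean-flag pattern described above; the data_ready flag and the two fiber-functions are invented for the example.

#include <boost/fiber/all.hpp>
#include <mutex>

boost::fibers::condition_variable cond;
boost::fibers::mutex mtx;
bool data_ready = false;

void consumer_fiber() {
    std::unique_lock< boost::fibers::mutex > lk( mtx);
    while ( ! data_ready) {     // re-check the condition to guard against spurious wake-ups
        cond.wait( lk);
    }
    // ... use the data ...
}

void producer_fiber() {
    {
        std::unique_lock< boost::fibers::mutex > lk( mtx);
        data_ready = true;
    }
    cond.notify_one();          // wake the waiting fiber
}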
     
    @@ -500,8 +501,11 @@
     return true;
     

    - That is, even if wait_for() times out, it can still return true if pred() returns true - at that time. + (except of course that timeout_duration + is adjusted for each iteration). The point is that, even if wait_for() + times out, it can still return true + if pred() + returns true at that time.

    Postcondition:
    diff --git a/doc/html/fiber/synchronization/futures.html b/doc/html/fiber/synchronization/futures.html index 643d27dc..4c17601f 100644 --- a/doc/html/fiber/synchronization/futures.html +++ b/doc/html/fiber/synchronization/futures.html @@ -68,9 +68,10 @@ are of the associated asynchronous result, but not vice-versa.

    - async() - is a simple way of running asynchronous tasks. A call to async() spawns a fiber and returns a future<> that - will deliver the result of the fiber function. + fibers::async() is a simple way of running asynchronous tasks. + A call to async() + spawns a fiber and returns a future<> that will deliver + the result of the fiber function.
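A minimal sketch of the call described above (assuming the umbrella header <boost/fiber/all.hpp>): async() launches a fiber and the returned future<> delivers the fiber-function's result.

#include <boost/fiber/all.hpp>
#include <iostream>

int main() {
    boost::fibers::future< int > f =
        boost::fibers::async( [](){ return 6 * 7; });
    // get() blocks this fiber (not the thread) until the result is ready
    std::cout << f.get() << std::endl;    // prints 42
    return 0;
}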

    diff --git a/doc/html/fiber/synchronization/futures/future.html b/doc/html/fiber/synchronization/futures/future.html index b8a99bae..2e4ee95b 100644 --- a/doc/html/fiber/synchronization/futures/future.html +++ b/doc/html/fiber/synchronization/futures/future.html @@ -94,35 +94,6 @@

    -
    - - deferred -
    -
    -

    -
    -
    Effects:
    -

    - The function is deferred, e.g. result will be computed only when - explictly requested. -

    -
    Note:
    -

    - Not implemented yet. -

    -
    -
    -
    - - - - - -
    [Warning]Warning

    - Launch policy deferred, - which indicates you simply want to defer the function call until a later - time (lazy evaluation), is not supported yet. -

    @@ -171,7 +142,7 @@ };
    - + Default constructor
    @@ -192,7 +163,7 @@
    - + Move constructor
    future( future && other) noexcept;
    @@ -203,7 +174,8 @@
     
    Effects:

    Constructs a future with the shared - state of other. After construction false == other.valid() + state of other. After construction false + == other.valid().

    Throws:

    @@ -212,7 +184,7 @@

    - + Destructor
    ~future();
    @@ -331,6 +303,16 @@
     

    Move the state to a shared_future<>.

    +
    Returns:
    +

    + a shared_future<> containing the shared + state formerly belonging to *this. +

    +
    Postcondition:
    +

    + false == + valid() +

    Throws:

    future_error with @@ -411,6 +393,11 @@ error condition future_errc::no_state or fiber_interrupted.

    +
    Note:
    +

    + get_exception_ptr() does not invalidate + the future. After calling get_exception_ptr(), you may still call future::get(). +
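A brief sketch of that guarantee (assuming <boost/fiber/all.hpp>): inspecting the stored exception via get_exception_ptr() leaves the future valid, so get() may still be called and will rethrow.

#include <boost/fiber/all.hpp>
#include <exception>
#include <iostream>
#include <stdexcept>

int main() {
    boost::fibers::promise< int > p;
    boost::fibers::future< int > f = p.get_future();
    p.set_exception( std::make_exception_ptr( std::runtime_error("boom")));

    if ( f.get_exception_ptr()) {          // does not invalidate the future
        std::cout << "future holds an exception" << std::endl;
    }
    try {
        f.get();                           // still valid: rethrows the stored exception
    } catch ( std::runtime_error const& e) {
        std::cout << "caught: " << e.what() << std::endl;
    }
    return 0;
}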

    @@ -522,7 +509,7 @@

    A shared_future<> contains a shared - state which might be shared with other futures. + state which might be shared with other shared_future<> instances.

    template< typename R >
     class shared_future {
    @@ -554,14 +541,16 @@
         void wait() const;
     
         template< class Rep, class Period >
    -    future_status wait_for( std::chrono::duration< Rep, Period > const& timeout_duration) const;
    +    future_status wait_for(
    +        std::chrono::duration< Rep, Period > const& timeout_duration) const;
     
         template< typename Clock, typename Duration >
    -    future_status wait_until( std::chrono::time_point< Clock, Duration > const& timeout_time) const;
    +    future_status wait_until(
    +        std::chrono::time_point< Clock, Duration > const& timeout_time) const;
     };
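A short sketch of the sharing semantics documented below (assuming <boost/fiber/all.hpp>): share() transfers the shared state out of a future<>, and copies of the resulting shared_future<> all remain valid.

#include <boost/fiber/all.hpp>
#include <cassert>

int main() {
    boost::fibers::future< int > f =
        boost::fibers::async( [](){ return 123; });

    boost::fibers::shared_future< int > sf = f.share();
    assert( ! f.valid());                           // share() invalidates the future

    boost::fibers::shared_future< int > sf2 = sf;   // copy: both stay valid
    assert( sf.valid() && sf2.valid());

    assert( 123 == sf.get());                       // get() may be called repeatedly
    assert( 123 == sf2.get());
    return 0;
}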
     
    - + Default constructor
    @@ -583,7 +572,7 @@
    - + Move constructor
    shared_future( future && other) noexcept;
    @@ -595,7 +584,8 @@
     
    Effects:

    Constructs a shared_future with the shared - state of other. After construction false == other.valid() + state of other. After construction false + == other.valid().

    Throws:

    @@ -604,7 +594,7 @@

    - + Copy constructor
    shared_future( shared_future const& other) noexcept;
    @@ -615,7 +605,7 @@
     
    Effects:

    Constructs a shared_future with the shared - state of other. After construction true == other.valid() + state of other. After construction other.valid() is unchanged.

    Throws:

    @@ -624,7 +614,7 @@

    - + Destructor
    ~shared_future();
    @@ -668,8 +658,7 @@
                     Moves or copies the shared state
                     of other to this. After
                     the assignment, the state of other.valid() depends on which overload was invoked:
    -                true for the overload
    -                accepting shared_future
    +                unchanged for the overload accepting shared_future
                     const&,
                     otherwise false.
                   

    @@ -781,6 +770,11 @@ error condition future_errc::no_state or fiber_interrupted.

    +
    Note:
    +

    + get_exception_ptr() does not invalidate + the shared_future. After calling get_exception_ptr(), you may still call shared_future::get(). +

    @@ -924,8 +918,8 @@

    Notes:

    - The overload accepting std::allocator_arg_t - uses the passed StackAllocator + The overload accepting std::allocator_arg_t uses the + passed StackAllocator when constructing the launched fiber.

    diff --git a/doc/html/fiber/synchronization/futures/packaged_task.html b/doc/html/fiber/synchronization/futures/packaged_task.html index 3ffbc9b9..291a2b23 100644 --- a/doc/html/fiber/synchronization/futures/packaged_task.html +++ b/doc/html/fiber/synchronization/futures/packaged_task.html @@ -142,6 +142,10 @@ encapsulates. The signature of Fn should have a return type convertible to R.

    +
    See also:
    +

    + std::allocator_arg_t +

    diff --git a/doc/html/fiber/synchronization/futures/promise.html b/doc/html/fiber/synchronization/futures/promise.html index c2dec327..c3958bb2 100644 --- a/doc/html/fiber/synchronization/futures/promise.html +++ b/doc/html/fiber/synchronization/futures/promise.html @@ -103,6 +103,10 @@

    Nothing.

    +
    See also:
    +

    + std::allocator_arg_t +

    diff --git a/doc/html/fiber/synchronization/mutex_types.html b/doc/html/fiber/synchronization/mutex_types.html index 213b682f..618a5f26 100644 --- a/doc/html/fiber/synchronization/mutex_types.html +++ b/doc/html/fiber/synchronization/mutex_types.html @@ -56,8 +56,8 @@ concurrent calls to lock(), try_lock() and unlock() shall be permitted.

    - Any fiber blocked in lock() is suspended in the scheduler until the - owning fiber releases the lock by calling unlock(). + Any fiber blocked in lock() is suspended until the owning fiber releases + the lock by calling unlock().
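A small sketch of that behaviour (assuming <boost/fiber/all.hpp>; the fiber names are illustrative): the fiber that loses the race for the mutex is suspended inside lock() — the thread keeps running other fibers — until the owner unlocks.

#include <boost/fiber/all.hpp>
#include <iostream>
#include <mutex>

int main() {
    boost::fibers::mutex mtx;

    boost::fibers::fiber f1( [&mtx](){
        std::unique_lock< boost::fibers::mutex > lk( mtx);
        std::cout << "f1 owns the mutex" << std::endl;
        boost::this_fiber::yield();   // f2 runs next and suspends in lock()
    });                               // lk released here -> f2 becomes ready
    boost::fibers::fiber f2( [&mtx](){
        std::unique_lock< boost::fibers::mutex > lk( mtx);
        std::cout << "f2 owns the mutex" << std::endl;
    });

    f1.join();
    f2.join();
    return 0;
}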

    diff --git a/doc/html/fiber/when_any/when_all_functionality/when_all__return_values.html b/doc/html/fiber/when_any/when_all_functionality/when_all__return_values.html index 6fbe91e2..a2b38776 100644 --- a/doc/html/fiber/when_any/when_all_functionality/when_all__return_values.html +++ b/doc/html/fiber/when_any/when_all_functionality/when_all__return_values.html @@ -40,7 +40,7 @@ available?

    - Fortunately we can present both APIs. Let's define wait_all_values_source() to return shared_ptr<unbounded_channel<T>>.[4] + Fortunately we can present both APIs. Let's define wait_all_values_source() to return shared_ptr<unbounded_channel<T>>.[5]

    Given wait_all_values_source(), it's straightforward to implement wait_all_values(): @@ -93,7 +93,7 @@

    We can address that problem with a counting - façade for the bounded_channel<>. In fact, our façade need only + façade for the unbounded_channel<>. In fact, our façade need only support the producer end of the channel.

    @@ -203,7 +203,7 @@



    -

    [4] +

    [5] We could have used either bounded_channel<> or unbounded_channel<>. We chose unbounded_channel<> on the assumption that its simpler semantics imply a cheaper implementation. diff --git a/doc/html/fiber/when_any/when_all_functionality/when_all_until_first_exception.html b/doc/html/fiber/when_any/when_all_functionality/when_all_until_first_exception.html index a595d2a3..e189b1ae 100644 --- a/doc/html/fiber/when_any/when_all_functionality/when_all_until_first_exception.html +++ b/doc/html/fiber/when_any/when_all_functionality/when_all_until_first_exception.html @@ -30,11 +30,12 @@

    Naturally, just as with wait_first_outcome(), we can elaborate wait_all_values() and wait_all_values_source() - by passing future<T> + by passing future< + T > instead of plain T.

    - wait_all_until_error() pops that future<T> and calls its future::get(): + wait_all_until_error() pops that future< T > and calls its future::get():

    @@ -85,7 +86,8 @@

    Naturally this complicates the - API for wait_all_until_error_source(). The caller must both retrieve a future<T> + API for wait_all_until_error_source(). The caller must both retrieve a future< + T > and call its get() method. It would, of course, be possible to return a façade over the consumer end of the channel that would implicitly perform the get() and return a simple T diff --git a/doc/html/fiber/when_any/when_any/when_any__produce_first_outcome__whether_result_or_exception.html b/doc/html/fiber/when_any/when_any/when_any__produce_first_outcome__whether_result_or_exception.html index fa3aa034..eefbdc65 100644 --- a/doc/html/fiber/when_any/when_any/when_any__produce_first_outcome__whether_result_or_exception.html +++ b/doc/html/fiber/when_any/when_any/when_any__produce_first_outcome__whether_result_or_exception.html @@ -37,7 +37,8 @@ Let's at least ensure that such an exception would propagate to the fiber awaiting the first result. We can use future<> to transport either a return value or an exception. Therefore, we will change wait_first_value()'s bounded_channel<> to - hold future<T> + hold future< + T > items instead of simply T.

    diff --git a/doc/html/fiber/when_any/when_any/when_any__produce_first_success.html b/doc/html/fiber/when_any/when_any/when_any__produce_first_success.html index 9d5193bb..49a44218 100644 --- a/doc/html/fiber/when_any/when_any/when_any__produce_first_success.html +++ b/doc/html/fiber/when_any/when_any/when_any__produce_first_success.html @@ -40,9 +40,9 @@ first!

    - Given the bounded_queue<future<T>> we already constructed for wait_first_outcome(), - though, we can readily recast the interface function to deliver the first - successful result. + Given the bounded_channel< future< T > > + we already constructed for wait_first_outcome(), though, we can readily recast the interface + function to deliver the first successful result.

    That does beg the question: what if all the task functions diff --git a/doc/html/index.html b/doc/html/index.html index d4c9d03f..a6782021 100644 --- a/doc/html/index.html +++ b/doc/html/index.html @@ -107,7 +107,7 @@

    - +

    Last revised: September 01, 2015 at 15:31:11 GMT

    Last revised: September 05, 2015 at 11:41:45 GMT


    diff --git a/performance/fiber/overhead_create.cpp b/performance/fiber/overhead_create.cpp index 3ade3c46..d4d4eea3 100644 --- a/performance/fiber/overhead_create.cpp +++ b/performance/fiber/overhead_create.cpp @@ -34,7 +34,7 @@ duration_type measure( duration_type overhead) { boost::fibers::fiber( worker).join(); - duration_type result; + duration_type result = duration_type::zero(); BOOST_PP_REPEAT_FROM_TO(1, JOBS, JOIN, _)

    diff --git a/performance/fiber/overhead_detach.cpp b/performance/fiber/overhead_detach.cpp index 2153a77a..18db3880 100644 --- a/performance/fiber/overhead_detach.cpp +++ b/performance/fiber/overhead_detach.cpp @@ -34,7 +34,7 @@ duration_type measure( duration_type overhead) { boost::fibers::fiber( worker).join(); - duration_type result; + duration_type result = duration_type::zero(); BOOST_PP_REPEAT_FROM_TO(1, JOBS, DETACH, _)

    diff --git a/performance/fiber/overhead_future.cpp b/performance/fiber/overhead_future.cpp index 8c73271c..6654dd49 100644 --- a/performance/fiber/overhead_future.cpp +++ b/performance/fiber/overhead_future.cpp @@ -36,7 +36,7 @@ duration_type measure( duration_type overhead) { boost::fibers::fiber( worker).join(); - duration_type result; + duration_type result = duration_type::zero(); BOOST_PP_REPEAT_FROM_TO(1, JOBS, WAIT, _)

    diff --git a/performance/fiber/overhead_join.cpp b/performance/fiber/overhead_join.cpp index c3adabda..d2a48d0e 100644 --- a/performance/fiber/overhead_join.cpp +++ b/performance/fiber/overhead_join.cpp @@ -34,7 +34,7 @@ duration_type measure( duration_type overhead) { boost::fibers::fiber( worker).join(); - duration_type result; + duration_type result = duration_type::zero(); BOOST_PP_REPEAT_FROM_TO(1, JOBS, JOIN, _)

    diff --git a/performance/fiber/overhead_yield.cpp b/performance/fiber/overhead_yield.cpp index a38304c3..7df914bc 100644 --- a/performance/fiber/overhead_yield.cpp +++ b/performance/fiber/overhead_yield.cpp @@ -32,10 +32,12 @@ void worker( duration_type * result) duration_type measure( duration_type overhead) { - duration_type result; + duration_type result = duration_type::zero(); boost::fibers::fiber( worker, & result).join(); + result = duration_type::zero(); + BOOST_PP_REPEAT_FROM_TO(1, JOBS, JOIN, _) result /= JOBS; // loops