Mirror of https://github.com/boostorg/fiber.git, synced 2026-02-11 11:42:23 +00:00
Merge pull request #64 from nat-goodspeed/develop
Finish proofreading pass.
@@ -30,7 +30,7 @@ Consider the following scenario:
# Fiber "main" launches fibers A, B, C and D, then calls `barrier::wait()`.
# Fiber C finishes first and likewise calls `barrier::wait()`.
# Fiber "main" is unblocked, as desired.
# Fiber B calls `barrier::wait()`. Fiber B is ['blocked]!
# Fiber B calls `barrier::wait()`. Fiber B is ['blocked!]
# Fiber A calls `barrier::wait()`. Fibers A and B are unblocked.
# Fiber D calls `barrier::wait()`. Fiber D is blocked indefinitely.
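For reference, a minimal sketch (not part of this commit) of the hazard described above, assuming a `boost::fibers::barrier` constructed for two participants. Because the barrier resets after every second `wait()` call, fiber D can end up waiting for a partner that never arrives:

    #include <boost/fiber/all.hpp>

    int main() {
        boost::fibers::barrier b( 2);      // "main" plus one worker per cycle

        auto worker = [&b]() {
            // ... do some work ...
            b.wait();                      // may pair with "main", or with a sibling fiber
        };

        boost::fibers::fiber a( worker), bf( worker), c( worker), d( worker);
        b.wait();                          // "main" unblocks as soon as one worker arrives

        a.join(); bf.join(); c.join();
        d.join();                          // may never return: D waits for a partner that never comes
        return 0;
    }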
@@ -55,9 +55,10 @@ All we have to do is:

[note This tactic for resuming a pending fiber works even if the callback is
called on a different thread than the one on which the initiating fiber is
running. In fact, the example program's dummy `AsyncAPI` implementation
illustrates that: it simulates async I/O by launching a new thread that sleeps
briefly and then calls the relevant callback.]
running. In fact, [@../../examples/adapt_callbacks.cpp the example program's]
dummy `AsyncAPI` implementation illustrates that: it simulates async I/O by
launching a new thread that sleeps briefly and then calls the relevant
callback.]
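The tactic boils down to a callback fulfilling a `promise` while the initiating fiber waits on the corresponding `future`. A rough sketch, assuming a hypothetical callback-based `AsyncAPI::init_read()` similar in spirit to the one in the example program:

    // Sketch only: init_read() and its callback signature are assumptions,
    // not the library's API. The callback may fire on another thread; the
    // promise/future pair synchronizes that hand-off.
    std::string read_value( AsyncAPI & api) {
        boost::fibers::promise< std::string > promise;
        boost::fibers::future< std::string > f( promise.get_future() );
        api.init_read( [&promise]( std::string const& data) {
                           promise.set_value( data);    // wakes the waiting fiber
                       });
        return f.get();                                  // suspends this fiber until the callback runs
    }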
[heading Success or Exception]
@@ -83,6 +84,7 @@ identical to `write_ec()`. You can call it like this:

[callbacks_read_ec_call]

[#Data_or_Exception]
[heading Data or Exception]

But a more natural API for a function that obtains data is to return only the

@@ -195,14 +197,14 @@ appropriate type. (We store a `shared_ptr< promise< T > >` because the
`promise_handler` instance is copied on its way into underlying Asio
machinery.)

Asio, having consulted the `yield_handler` traits specialization, instantiates
Asio, having consulted the `handler_type<>` traits specialization, instantiates
a `yield_handler` (aka `promise_handler`) as the async operation's callback:

[fibers_asio_promise_handler]

Like the lambda callback in our `read(AsyncAPI&)` presented earlier,
`promise_handler::operator()()` either calls [member_link promise..set_value]
or [member_link promise..set_exception] (via
Like the lambda callback in our [link Data_or_Exception `read(AsyncAPI&)`]
presented earlier, `promise_handler::operator()()` either calls [member_link
promise..set_value] or [member_link promise..set_exception] (via
`promise_handler_base::should_set_value()`).

[/ @path link is relative to (eventual) doc/html/index.html, hence ../..]
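The gist of that pattern, sketched here as a standalone illustration (this is not the library's actual `promise_handler`; the handler name and signature are assumptions):

    #include <exception>
    #include <memory>
    #include <utility>
    #include <boost/fiber/all.hpp>
    #include <boost/system/system_error.hpp>

    // A callback that fulfills a promise either with a value or with an
    // exception, so that the waiting fiber's future.get() either returns
    // data or rethrows.
    template< typename T >
    struct value_or_error_handler {
        std::shared_ptr< boost::fibers::promise< T > > promise_;

        void operator()( boost::system::error_code const& ec, T value) {
            if ( ec) {
                promise_->set_exception(
                    std::make_exception_ptr( boost::system::system_error( ec) ) );
            } else {
                promise_->set_value( std::move( value) );
            }
        }
    };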
@@ -92,11 +92,13 @@ channel operations return the state of the channel.
value_type value_pop();
channel_op_status try_pop( value_type & va);
template< typename Rep, typename Period >
channel_op_status pop_wait_for( value_type & va,
std::chrono::duration< Rep, Period > const& timeout_duration);
channel_op_status pop_wait_for(
value_type & va,
std::chrono::duration< Rep, Period > const& timeout_duration);
template< typename Clock, typename Duration >
channel_op_status pop_wait_until( value_type & va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status pop_wait_until(
value_type & va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
};
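A consumer loop built on the `pop_wait_for()` member shown above might look roughly like this (a sketch; `chan`, `process()` and the 500 ms timeout are illustrative, not part of the documentation):

    boost::fibers::unbounded_channel< int > chan;
    int value = 0;
    while ( true) {
        boost::fibers::channel_op_status status(
            chan.pop_wait_for( value, std::chrono::milliseconds( 500) ) );
        if ( boost::fibers::channel_op_status::success == status) {
            process( value);        // hypothetical consumer function
        } else if ( boost::fibers::channel_op_status::timeout == status) {
            continue;               // nothing arrived within 500 ms; keep waiting
        } else {
            break;                  // e.g. the channel was closed
        }
    }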
[template xchannel_close[cls]

@@ -185,8 +187,9 @@ value `success` and `va` contains dequeued value), or the channel gets
[member_heading [cls]..pop_wait_for]

template< typename Rep, typename Period >
channel_op_status pop_wait_for( value_type & va,
std::chrono::duration< Rep, Period > const& timeout_duration)
channel_op_status pop_wait_for(
value_type & va,
std::chrono::duration< Rep, Period > const& timeout_duration)

[variablelist
[[Effects:] [Accepts `std::chrono::duration` and internally computes a timeout
@@ -201,8 +204,9 @@ time as (system time + `timeout_duration`).
[member_heading [cls]..pop_wait_until]

template< typename Clock, typename Duration >
channel_op_status pop_wait_until( value_type & va,
std::chrono::time_point< Clock, Duration > const& timeout_time)
channel_op_status pop_wait_until(
value_type & va,
std::chrono::time_point< Clock, Duration > const& timeout_time)

[variablelist
[[Effects:] [Accepts a `std::chrono::time_point< Clock, Duration >`.

@@ -236,27 +240,32 @@ time as (system time + `timeout_duration`).
channel_op_status push( value_type const& va);
channel_op_status push( value_type && va);
template< typename Rep, typename Period >
channel_op_status push_wait_for( value_type const& va,
std::chrono::duration< Rep, Period > const& timeout_duration);
channel_op_status push_wait_for(
value_type const& va,
std::chrono::duration< Rep, Period > const& timeout_duration);
channel_op_status push_wait_for( value_type && va,
std::chrono::duration< Rep, Period > const& timeout_duration);
std::chrono::duration< Rep, Period > const& timeout_duration);
template< typename Clock, typename Duration >
channel_op_status push_wait_until( value_type const& va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status push_wait_until(
value_type const& va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
template< typename Clock, typename Duration >
channel_op_status push_wait_until( value_type && va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status push_wait_until(
value_type && va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status try_push( value_type const& va);
channel_op_status try_push( value_type && va);

channel_op_status pop( value_type & va);
value_type value_pop();
template< typename Rep, typename Period >
channel_op_status pop_wait_for( value_type & va,
std::chrono::duration< Rep, Period > const& timeout_duration);
channel_op_status pop_wait_for(
value_type & va,
std::chrono::duration< Rep, Period > const& timeout_duration);
template< typename Clock, typename Duration >
channel_op_status pop_wait_until( value_type & va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status pop_wait_until(
value_type & va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status try_pop( value_type & va);
};
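On the producer side, `push_wait_for()` can bound how long a full channel stalls the pusher. A sketch (the channel capacity, loop and one-second timeout are illustrative):

    boost::fibers::bounded_channel< int > chan( 8);    // high-watermark of 8 items
    for ( int i = 0; i < 100; ++i) {
        if ( boost::fibers::channel_op_status::timeout ==
             chan.push_wait_for( i, std::chrono::seconds( 1) ) ) {
            break;      // consumer made no room within a second; give up
        }
    }
    chan.close();       // unblocks any consumer still waiting to pop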
@@ -318,11 +327,13 @@ the number of values in the channel drops to `lwm` (return value
[member_heading bounded_channel..push_wait_for]

template< typename Rep, typename Period >
channel_op_status push_wait_for( value_type const& va,
std::chrono::duration< Rep, Period > const& timeout_duration);
channel_op_status push_wait_for(
value_type const& va,
std::chrono::duration< Rep, Period > const& timeout_duration);
template< typename Rep, typename Period >
channel_op_status push_wait_for( value_type && va,
std::chrono::duration< Rep, Period > const& timeout_duration);
channel_op_status push_wait_for(
value_type && va,
std::chrono::duration< Rep, Period > const& timeout_duration);

[variablelist
[[Effects:] [Accepts `std::chrono::duration` and internally computes a

@@ -335,11 +346,13 @@ time_point (return value `timeout`).]]
[member_heading bounded_channel..push_wait_until]

template< typename Clock, typename Duration >
channel_op_status push_wait_until( value_type const& va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status push_wait_until(
value_type const& va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
template< typename Clock, typename Duration >
channel_op_status push_wait_until( value_type && va,
std::chrono::time_point< Clock, Duration > const& timeout_time);
channel_op_status push_wait_until(
value_type && va,
std::chrono::time_point< Clock, Duration > const& timeout_time);

[variablelist
[[Effects:] [Accepts an absolute `timeout_time` in any supported time_point

@@ -19,7 +19,7 @@
class condition_variable_any;

The class `condition_variable` provides a mechanism for a fiber to wait for
notification on `condition_variable`. When the fiber awakens from the wait, then
notification from another fiber. When the fiber awakens from the wait, then
it checks to see if the appropriate condition is now true, and continues if so.
If the condition is not true, then the fiber calls `wait` again to resume
waiting. In the simplest case, this condition is just a boolean variable:
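The canonical shape of that pattern, sketched with illustrative names (`data_ready`, `producer`, `consumer`):

    boost::fibers::condition_variable cond;
    boost::fibers::mutex mtx;
    bool data_ready = false;

    void consumer() {
        std::unique_lock< boost::fibers::mutex > lk( mtx);
        while ( ! data_ready) {
            cond.wait( lk);             // suspends this fiber until notified
        }
        // here data_ready is true and mtx is locked again
    }

    void producer() {
        {
            std::unique_lock< boost::fibers::mutex > lk( mtx);
            data_ready = true;
        }
        cond.notify_one();              // wake one fiber waiting on cond
    }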
@@ -85,7 +85,7 @@ optimize as described for `boost::thread::condition_variable`.
#include <boost/fiber/condition.hpp>

enum cv_status {
no_timeout = 1,
no_timeout,
timeout
};

@@ -301,8 +301,9 @@ while ( ! pred() ) {
}
return true;

`` That is, even if `wait_for()` times out, it can still return `true` if
`pred()` returns `true` at that time.]]
`` (except of course that `timeout_duration` is adjusted for each iteration).
The point is that, even if `wait_for()` times out, it can still return `true`
if `pred()` returns `true` at that time.]]
[[Postcondition:] [`lk` is locked by the current fiber.]]
[[Throws:] [__fiber_exception__ if an error
occurs. __fiber_interrupted__ if the wait was interrupted by a call to
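For example, the timed, predicate-taking overload can be used like this (a sketch; `data_ready` and the 50 ms duration are illustrative, continuing the names above):

    std::unique_lock< boost::fibers::mutex > lk( mtx);
    if ( cond.wait_for( lk, std::chrono::milliseconds( 50),
                        [](){ return data_ready; }) ) {
        // predicate is true, possibly observed only as the timeout expired
    } else {
        // timed out and the predicate was still false
    }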
@@ -329,7 +329,7 @@ operators on __fiber_id__ yield a total order for every non-equal __fiber_id__.
[[Note:] [StackAllocator is required to allocate a stack for the internal
__econtext__. If StackAllocator is not explicitly passed, a
__fixedsize_stack__ is used by default.]]
[[See also:] [[link stack Stack allocation]]]
[[See also:] [__allocator_arg_t__, [link stack Stack allocation]]]
]
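For example, a fiber can be given an explicit stack allocator roughly like this (a sketch; the stack size and `fiber_fn` are illustrative):

    boost::fibers::fiber f( std::allocator_arg,
                            boost::fibers::fixedsize_stack( 1024 * 1024),
                            fiber_fn);
    f.join();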
[heading Move constructor]
@@ -90,6 +90,8 @@
[template dblink[id text] '''<link linkend="'''[id]'''">'''[text]'''</link>''']
[template `[text] '''<code>'''[text]'''</code>''']

[def __allocator_arg_t__
[@http://en.cppreference.com/w/cpp/memory/allocator_arg_t `std::allocator_arg_t`]]
[def __barrier__ [class_link barrier]]
[def __condition__ [class_link condition_variable]]
[def __disable_interruption__ [class_link disable_interruption]]

@@ -181,7 +183,7 @@ consumers would behave strangely.
The fiber synchronization objects provided by this library will, by default,
safely synchronize fibers running on different threads. However, this level of
synchronization can be removed (for performance) by building the library with
`BOOST_FIBERS_NO_ATOMICS` defined. When the library is built with that macro,
[*`BOOST_FIBERS_NO_ATOMICS`] defined. When the library is built with that macro,
you must ensure that all the fibers referencing a particular synchronization
object are running in the same thread.

doc/fls.qbk

@@ -58,7 +58,8 @@ order.
explicit fiber_specific_ptr( void(*fn)(T*) );

[variablelist
[[Requires:] [`delete this->get()` is well-formed; fn(this->get()) does not throw]]
[[Requires:] [`delete this->get()` is well-formed; `fn(this->get())` does not
throw]]
[[Effects:] [Construct a __fsp__ object for storing a pointer to an object of
type `T` specific to each fiber. When `reset()` is called, or the
fiber exits, __fsp__ calls `fn(this->get())`. If the no-arguments constructor

@@ -73,7 +74,7 @@ will be used to destroy the fiber-local objects.]]

[variablelist
[[Requires:] [All the fiber specific instances associated to this __fsp__
(except maybe the one associated to this fiber) must be null.]]
(except maybe the one associated to this fiber) must be nullptr.]]
[[Effects:] [Calls `this->reset()` to clean up the associated value for the
current fiber, and destroys `*this`.]]
[[Throws:] [Nothing.]]

@@ -103,6 +104,7 @@ each fiber.]
T* operator->() const;

[variablelist
[[Requires:] [`this->get()` is not `nullptr`.]]
[[Returns:] [`this->get()`]]
[[Throws:] [Nothing.]]
]

@@ -124,13 +126,13 @@ each fiber.]
[variablelist
[[Effects:] [Return `this->get()` and store `nullptr` as the pointer associated
with the current fiber without invoking the cleanup function.]]
[[Postcondition:] [`this->get()==0`]]
[[Postcondition:] [`this->get()==nullptr`]]
[[Throws:] [Nothing.]]
]

[member_heading fiber_specific_ptr..reset]

void reset(T* new_value=0);
void reset(T* new_value);

[variablelist
[[Effects:] [If `this->get()!=new_value` and `this->get()` is not `nullptr`,
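A typical use of `fiber_specific_ptr` (illustrative sketch): each fiber lazily creates its own object, and the cleanup function runs when the fiber exits or `reset()` is called.

    static boost::fibers::fiber_specific_ptr< int > counter;

    void fiber_fn() {
        if ( ! counter.get() ) {
            counter.reset( new int( 0) );   // first use in this fiber
        }
        ++ *counter;                        // each fiber increments its own copy
    }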
@@ -48,14 +48,6 @@ Timed wait-operations (__wait_for__ and __wait_until__) return the state of the
[[Effects:] [The [link shared_state shared state] did not become ready before timeout has passed.]]
]

[heading `deferred`]
[variablelist
[[Effects:] [The function is deferred, e.g. result will be computed only when explictly requested.]]
[[Note:] [Not implemented yet.]]
]

[warning Launch policy `deferred`, which indicates you simply want to defer the function call until a later time (lazy evaluation), is not supported yet.]

[template_heading future]

@@ -115,11 +107,11 @@ After construction `false == valid()`.]]
[template future_move_copy_ctor_variablelist[xfuture post_valid]
[variablelist
[[Effects:] [Constructs a [xfuture] with the [link shared_state shared state] of other.
After construction [^[post_valid] == other.valid()]]]
After construction [post_valid].]]
[[Throws:] [Nothing.]]
]
]
[future_move_copy_ctor_variablelist future..false]
[future_move_copy_ctor_variablelist future..`false == other.valid()`]

[heading Destructor]

@@ -173,6 +165,9 @@ After the assignment, `false == other.valid()`.]]

[variablelist
[[Effects:] [Move the state to a __shared_future__.]]
[[Returns:] [a __shared_future__ containing the [link shared_state shared
state] formerly belonging to `*this`.]]
[[Postcondition:] [`false == valid()`]]
[[Throws:] [__future_error__ with error condition __no_state__.]]
]
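For example, an existing `future` can be converted this way (sketch; `some_fn` is illustrative):

    boost::fibers::future< int > f( boost::fibers::async( some_fn) );
    boost::fibers::shared_future< int > sf( f.share() );   // f.valid() becomes false
    // sf may now be copied; get() can be called on every copy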
@@ -206,6 +201,9 @@ __fiber_interrupted__. Any exception passed to `promise::set_exception()`.]]
default-constructed `std::exception_ptr`. If `set_exception()` is called,
returns the passed `std::exception_ptr`.]]
[[Throws:] [__future_error__ with error condition __no_state__ or __fiber_interrupted__.]]
[[Note:] [ `get_exception_ptr()` does ['not] invalidate the [`[xfuture]].
After calling `get_exception_ptr()`, you may still call [member_link
[xfuture]..get].]]
]
]
[future_get_exception_ptr future]

@@ -247,7 +245,8 @@ returns the passed `std::exception_ptr`.]]

[template_heading shared_future]

A __shared_future__ contains a [link shared_state shared state] which might be shared with other futures.
A __shared_future__ contains a [link shared_state shared state] which might be
shared with other __shared_future__ instances.

template< typename R >
class shared_future {

@@ -279,10 +278,12 @@ A __shared_future__ contains a [link shared_state shared state] which might be s
void wait() const;

template< class Rep, class Period >
future_status wait_for( std::chrono::duration< Rep, Period > const& timeout_duration) const;
future_status wait_for(
std::chrono::duration< Rep, Period > const& timeout_duration) const;

template< typename Clock, typename Duration >
future_status wait_until( std::chrono::time_point< Clock, Duration > const& timeout_time) const;
future_status wait_until(
std::chrono::time_point< Clock, Duration > const& timeout_time) const;
};

[heading Default constructor]

@@ -296,13 +297,13 @@ A __shared_future__ contains a [link shared_state shared state] which might be s
shared_future( future && other) noexcept;
shared_future( shared_future && other) noexcept;

[future_move_copy_ctor_variablelist shared_future..false]
[future_move_copy_ctor_variablelist shared_future..`false == other.valid()`]

[heading Copy constructor]

shared_future( shared_future const& other) noexcept;

[future_move_copy_ctor_variablelist shared_future..true]
[future_move_copy_ctor_variablelist shared_future..`other.valid()` is unchanged]

[heading Destructor]

@@ -319,7 +320,7 @@ A __shared_future__ contains a [link shared_state shared state] which might be s
[variablelist
[[Effects:] [Moves or copies the [link shared_state shared state] of other to
`this`. After the assignment, the state of `other.valid()` depends on which
overload was invoked: `true` for the overload accepting `shared_future
overload was invoked: unchanged for the overload accepting `shared_future
const&`, otherwise `false`.]]
[[Throws:] [Nothing.]]
]

@@ -357,7 +358,7 @@ const&`, otherwise `false`.]]
shared_state shared state] associated with the asynchronous execution of
`fn`.]]
[[Throws:] [__fiber_exception__ or __future_error__ if an error occurs.]]
[[Notes:] [The overload accepting `std::allocator_arg_t` uses the passed
[[Notes:] [The overload accepting __allocator_arg_t__ uses the passed
`StackAllocator` when constructing the launched `fiber`.]]
]

@@ -32,9 +32,9 @@ are safe. You can move an instance of __future__ into an instance of
__shared_future__, thus transferring ownership of the associated asynchronous
result, but not vice-versa.

__async__ is a simple way of running asynchronous tasks. A call to __async__
spawns a fiber and returns a __future__ that will deliver the result of the
fiber function.
[ns_function_link fibers..async] is a simple way of running asynchronous
tasks. A call to __async__ spawns a fiber and returns a __future__ that will
deliver the result of the fiber function.
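For instance (a sketch; the lambda and its return value are illustrative):

    boost::fibers::future< int > f(
        boost::fibers::async( [](){ return 42; }) );
    // ... other fibers run while the result is computed ...
    int answer = f.get();   // suspends the calling fiber until the value is ready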
[heading Creating asynchronous values]
@@ -29,8 +29,8 @@ __mutex__ provides an exclusive-ownership mutex. At most one fiber can own the
lock on a given instance of __mutex__ at any time. Multiple concurrent calls to
__lock__, __try_lock__ and __unlock__ shall be permitted.

Any fiber blocked in __lock__ is suspended in the scheduler until the owning
fiber releases the lock by calling __unlock__.
Any fiber blocked in __lock__ is suspended until the owning fiber releases the
lock by calling __unlock__.
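A sketch of typical usage (the shared counter is illustrative):

    boost::fibers::mutex mtx;
    int shared_counter = 0;

    void worker() {
        std::unique_lock< boost::fibers::mutex > lk( mtx);  // suspends this fiber if mtx is already owned
        ++shared_counter;
    }   // mtx is released when lk goes out of scope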
[member_heading mutex..lock]
@@ -69,21 +69,24 @@ fiber can count on thread-local storage; however that storage will be shared
among all fibers running on the same thread.

[#cross_thread_sync]
[heading BOOST_FIBERS_NO_ATOMICS]
The fiber synchronization objects provided by this library will, by default,
safely synchronize fibers running on different threads. However, this level of
synchronization can be removed (for performance) by building the library with
`BOOST_FIBERS_NO_ATOMICS` defined. When the library is built with that macro,
[*`BOOST_FIBERS_NO_ATOMICS`] defined. When the library is built with that macro,
you must ensure that all the fibers referencing a particular synchronization
object are running in the same thread. Please see [link synchronization].
object are running in the same thread. Please see [link synchronization
Synchronization].

For fiber-local storage, please see __fsp__.

[#blocking]
[heading Blocking]

Normally, when this documentation states that a particular fiber ['blocks], it
means that it yields control, allowing other fibers on the same thread to run.
The synchronization mechanisms provided by __boost_fiber__ have this behavior.
Normally, when this documentation states that a particular fiber ['blocks] (or
equivalently, ['suspends),] it means that it yields control, allowing other
fibers on the same thread to run. The synchronization mechanisms provided by
__boost_fiber__ have this behavior.

A fiber may, of course, use normal thread synchronization mechanisms; however
a fiber that invokes any of these mechanisms will block its entire thread,

@@ -82,6 +82,7 @@ shared_state shared state].]]
shared_state shared state] and stores the callable target `fn` internally.]]
[[Throws:] [Nothing.]]
[[Note:] [The signature of `Fn` should have a return type convertible to `R`.]]
[[See also:] [__allocator_arg_t__]]
]

[heading Move constructor]

@@ -58,6 +58,7 @@ later be retrieved from the corresponding __future__ object. `promise<>` and
[variablelist
[[Effects:] [Creates a promise with an empty [link shared_state shared state] by using `alloc`.]]
[[Throws:] [Nothing.]]
[[See also:] [__allocator_arg_t__]]
]

[heading Move constructor]

@@ -370,7 +370,7 @@ producer fiber is independent. It has no idea whether it is the last one to

[#wait_nchannel]
We can address that problem with a counting façade for the
`bounded_channel<>`. In fact, our façade need only support the producer end of
`unbounded_channel<>`. In fact, our façade need only support the producer end of
the channel.

[wait_nchannel]

@@ -33,14 +33,14 @@ namespace detail {
//[fibers_asio_promise_handler_base
template< typename T >
class promise_handler_base {
private:
typedef boost::shared_ptr< boost::fibers::promise< T > > promise_ptr;

public:
typedef std::shared_ptr< boost::fibers::promise< T > > promise_ptr;

// Construct from any promise_completion_token subclass special value.
template< typename Allocator >
promise_handler_base( boost::fibers::asio::promise_completion_token< Allocator > const& pct) :
promise_( new boost::fibers::promise< T >( std::allocator_arg, pct.get_allocator() ) )
promise_( std::make_shared< boost::fibers::promise< T > >(
std::allocator_arg, pct.get_allocator() ) )
//<-
, ecp_( pct.ec_)
//->

@@ -112,6 +112,7 @@ public:
}
}
//<-
using promise_handler_base< T >::promise_ptr;
using promise_handler_base< T >::get_promise;
//->
};

@@ -140,6 +141,7 @@ public:
}
}

using promise_handler_base< void >::promise_ptr;
using promise_handler_base< void >::get_promise;
};

@@ -152,7 +154,8 @@ namespace detail {
// from the handler are propagated back to the caller via the future.
template< typename Function, typename T >
void asio_handler_invoke( Function f, fibers::asio::detail::promise_handler< T > * h) {
boost::shared_ptr< boost::fibers::promise< T > > p( h->get_promise() );
typename fibers::asio::detail::promise_handler< T >::promise_ptr
p( h->get_promise() );
try {
f();
} catch (...) {

@@ -65,13 +65,15 @@ private:

// Handler type specialisation for yield for a nullary callback.
template< typename Allocator, typename ReturnType >
struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType() > {
struct handler_type< boost::fibers::asio::yield_t< Allocator >,
ReturnType() > {
typedef boost::fibers::asio::detail::yield_handler< void > type;
};

// Handler type specialisation for yield for a single-argument callback.
template< typename Allocator, typename ReturnType, typename Arg1 >
struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( Arg1) > {
struct handler_type< boost::fibers::asio::yield_t< Allocator >,
ReturnType( Arg1) > {
typedef fibers::asio::detail::yield_handler< Arg1 > type;
};

@@ -80,7 +82,8 @@ struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( Arg1
// error_code indicating error will be conveyed to consumer code via an
// exception. Normal return implies (! error_code).
template< typename Allocator, typename ReturnType >
struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( boost::system::error_code) > {
struct handler_type< boost::fibers::asio::yield_t< Allocator >,
ReturnType( boost::system::error_code) > {
typedef fibers::asio::detail::yield_handler< void > type;
};

@@ -91,7 +94,8 @@ struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( boos
// error_code).
//[asio_handler_type
template< typename Allocator, typename ReturnType, typename Arg2 >
struct handler_type< boost::fibers::asio::yield_t< Allocator >, ReturnType( boost::system::error_code, Arg2) > {
struct handler_type< boost::fibers::asio::yield_t< Allocator >,
ReturnType( boost::system::error_code, Arg2) > {
typedef fibers::asio::detail::yield_handler< Arg2 > type;
};
//]

@@ -90,9 +90,9 @@ public:
// With this scheduler, fibers with higher priority values are
// preferred over fibers with lower priority values. But fibers with
// equal priority values are processed in round-robin fashion. So when
// we're handed a new fiber_base, put it at the end of the fibers with
// that same priority. In other words: search for the first fiber in
// the queue with LOWER priority, and insert before that one.
// we're handed a new fiber_context*, put it at the end of the fibers
// with that same priority. In other words: search for the first fiber
// in the queue with LOWER priority, and insert before that one.
boost::fibers::fiber_context ** fp = & head_;
for ( ; * fp; fp = & ( * fp)->nxt) {
if ( properties( * fp).get_priority() < f_priority) {