mirror of https://github.com/boostorg/fiber.git synced 2026-02-12 12:02:54 +00:00

Nat Goodspeed:

- Change some doc references from 'algorithm' to 'sched_algorithm'.
- Initial cut at supporting arbitrary user-coded scheduler properties.
- Set fiber_properties::sched_algo_ every time through awakened().
- Define sched_algorithm methods on fiber_base*, not worker_fiber*.
- Simplify detail::fifo by making tail_ point to last link pointer (see the sketch below).
- Reimplement waiting_queue::push() using pointer-to-pointer trick.
- Reimplement waiting_queue::move_to() using fiber_base** scan.
- Make bounded_queue::tail_ a ptr* to simplify appending new nodes.
- Make unbounded_queue::tail_ a ptr* to simplify linking new nodes.
- Remove thread_affinity flag and access methods.
- Re-add thread_affinity specific to workstealing_round_robin.
- Remove 'priority' for every fiber, and its support methods.
This commit is contained in:
Oliver Kowalke
2014-11-12 20:03:12 +01:00
parent 6a1257442b
commit 1e751d7dbf
22 changed files with 391 additions and 279 deletions
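
The "tail_ points to the last link pointer" idea behind the fifo, waiting_queue, bounded_queue and unbounded_queue items above is easier to see in isolation. Below is a minimal, self-contained sketch of the technique; the node/queue names are illustrative placeholders, not the library's actual fiber_base or detail::fifo types:

#include <cassert>
#include <cstdio>

// Illustrative node type; only the intrusive link pointer matters here.
struct node {
    int   value;
    node* nxt;
    explicit node(int v) : value(v), nxt(0) {}
};

// FIFO queue whose tail_ member points at the *last link pointer* in the
// chain (initially &head_), so push() is the same two assignments whether
// the queue is empty or not.
class queue {
public:
    queue() : head_(0), tail_(&head_) {}

    void push(node* n) {
        assert(! n->nxt);
        *tail_ = n;          // link n in via the current last link pointer
        tail_  = &n->nxt;    // n's nxt field is now the last link pointer
    }

    node* pop() {
        node* n = head_;
        if (n) {
            head_  = n->nxt;
            n->nxt = 0;
            if (! head_)     // queue became empty: tail_ must point at head_ again
                tail_ = &head_;
        }
        return n;
    }

private:
    node*  head_;
    node** tail_;
};

int main() {
    queue q;
    node a(1), b(2);
    q.push(&a);
    q.push(&b);
    while (node* n = q.pop())
        std::printf("%d\n", n->value);   // prints 1 then 2 (FIFO order)
}

Because tail_ always addresses the link that will hold the next element, push() needs no empty-queue special case; only pop() has to repair tail_ when the final element leaves.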


@@ -12,47 +12,71 @@
 # include BOOST_ABI_PREFIX
 #endif
+workstealing_round_robin::workstealing_round_robin():
+    rhead_(0),
+    rtail_(&rhead_)
+{}
 void
-workstealing_round_robin::awakened( boost::fibers::detail::worker_fiber * f)
+workstealing_round_robin::awakened( boost::fibers::fiber_base * f)
 {
+    // forward this call to base-class method
+    boost::fibers::sched_algorithm_with_properties<affinity>::awakened(f);
     boost::mutex::scoped_lock lk( mtx_);
-    rqueue_.push_back( f);
+    // append this fiber_base* to ready queue
+    BOOST_ASSERT(! f->nxt_);
+    *rtail_ = f;
+    rtail_ = &f->nxt_;
 }
-boost::fibers::detail::worker_fiber *
+boost::fibers::fiber_base *
 workstealing_round_robin::pick_next()
 {
     boost::mutex::scoped_lock lk( mtx_);
-    boost::fibers::detail::worker_fiber * f = 0;
-    if ( ! rqueue_.empty() )
+    boost::fibers::fiber_base * f = 0;
+    if ( rhead_ )
     {
-        f = rqueue_.front();
-        rqueue_.pop_front();
+        f = rhead_;
+        // pop head item from ready queue
+        rhead_ = rhead_->nxt_;
+        f->nxt_ = 0;
+        // if that was the last item, reset tail_
+        if (! rhead_)
+            rtail_ = &rhead_;
     }
     return f;
 }
-void
-workstealing_round_robin::priority( boost::fibers::detail::worker_fiber * f, int prio) BOOST_NOEXCEPT
-{
-    BOOST_ASSERT( f);
-    // set only priority to fiber
-    // round-robin does not respect priorities
-    f->priority( prio);
-}
 boost::fibers::fiber
 workstealing_round_robin::steal() BOOST_NOEXCEPT
 {
     boost::mutex::scoped_lock lk( mtx_);
-    boost::fibers::detail::worker_fiber * f = 0;
-    if ( ! rqueue_.empty() )
+    // Search the queue for the LAST fiber_base that's willing to migrate,
+    // in other words (! thread_affinity).
+    boost::fibers::fiber_base ** fp = &rhead_, ** found = 0;
+    for ( ; *fp; fp = &(*fp)->nxt_)
     {
-        f = rqueue_.back();
-        rqueue_.pop_back();
+        // do not consider any fiber whose thread_affinity is set
+        if (! properties(*fp).thread_affinity)
+            found = fp;
     }
-    return boost::fibers::fiber( f);
+    if (! found)
+    {
+        // either the queue is completely empty or all current entries have
+        // thread_affinity set
+        return boost::fibers::fiber(static_cast<boost::fibers::fiber_base*>(0));
+    }
+    // We found at least one fiber_base whose thread_affinity is NOT set;
+    // *found points to the last of these. Unlink and return it.
+    boost::fibers::fiber_base* ret = *found;
+    *found = ret->nxt_;
+    ret->nxt_ = 0;
+    // if that was the last item, reset tail_
+    if (! *found)
+        rtail_ = &rhead_;
+    return boost::fibers::fiber(ret);
 }
 #ifdef BOOST_HAS_ABI_HEADERS
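
To make the steal() logic above easier to follow, here is a small, self-contained sketch of the same scan-and-unlink pattern: walk the links with a pointer-to-pointer, remember the last link whose node is willing to migrate, then splice that node out. The node type and its pinned flag are illustrative stand-ins, not the library's fiber_base or its thread_affinity property:

#include <cassert>
#include <cstdio>

// Illustrative stand-in for fiber_base: an intrusive singly-linked node
// carrying a flag that plays the role of the thread_affinity property.
struct node {
    int   id;
    bool  pinned;    // true = must stay on its current thread (may not migrate)
    node* nxt;
    node(int i, bool p) : id(i), pinned(p), nxt(0) {}
};

// Scan a head/tail queue (tail points at the last link pointer) for the
// LAST node that is not pinned, unlink it, and return it; return 0 if the
// queue is empty or every entry is pinned.
node* steal_last_unpinned(node*& head, node**& tail) {
    node** found = 0;
    for (node** fp = &head; *fp; fp = &(*fp)->nxt) {
        if (! (*fp)->pinned)
            found = fp;              // remember the link that points at this candidate
    }
    if (! found)
        return 0;
    node* ret = *found;
    *found = ret->nxt;               // splice the candidate out of the list
    ret->nxt = 0;
    if (! *found)                    // unlinked the final element: repair tail
        tail = found;
    return ret;
}

int main() {
    node a(1, false), b(2, true), c(3, false);
    node*  head = 0;
    node** tail = &head;
    node*  items[] = { &a, &b, &c };
    for (int i = 0; i < 3; ++i) {    // build the queue a -> b -> c with the push trick
        *tail = items[i];
        tail = &items[i]->nxt;
    }
    node* s = steal_last_unpinned(head, tail);
    std::printf("stole %d\n", s ? s->id : -1);            // prints "stole 3"
    assert(head == &a && a.nxt == &b && b.nxt == 0);      // b is now the last node
    assert(tail == &b.nxt);
}

One detail worth noting: the sketch repairs the tail with tail = found, the fully general fix after unlinking the final node; found equals &head only when the stolen node was the sole element of the queue.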