2
0
mirror of https://github.com/boostorg/asio.git synced 2026-01-28 06:42:08 +00:00

Merge from trunk...

Fix compile error in regex overload of async_read_until.hpp. Fixes #5688

Explicitly specify the signal() function from the global namespace. Fixes #5722

Don't read the clock unless the heap is non-empty.

Change the SSL buffer sizes so that they're large enough to hold a complete TLS record. Fixes #5854

Make sure the synchronous null_buffers operations obey the user's non_blocking setting. Fixes #5756

Set size of select fd_set at runtime when using Windows.

Disable warning due to const qualifier being applied to function type.

Fix crash due to gcc_x86_fenced_block that shows up when using the Intel C++ compiler. Fixes #5763

Specialise operations for buffer sequences that are arrays of exactly two buffers.

Initialise all OpenSSL algorithms.

Fix error mapping when session is gracefully shut down.

Various performance improvements:

* Split the task_io_service's run and poll code.

* Use thread-local operation queues in single-threaded use cases (i.e. concurrency_hint is 1) to eliminate a lock/unlock pair.

* Only fence block exit when a handler is being run directly out of the io_service.

* Prefer x86 mfence-based fenced block when available.

* Use a plain ol' long for the atomic_count when all thread support is disabled.

* Allow some epoll_reactor speculative operations to be performed without holding the lock.

* Improve locality of reference by performing an epoll_reactor's I/O operation immediately before the corresponding handler is called. This also improves scalability across CPUs when multiple threads are running the io_service.

* Pass the same error_code variable through to each operation's complete() function.

* Optimise creation of and access to the io_service implementation.

Remove unused state in HTTP server examples.

Add latency test programs.


[SVN r74863]
This commit is contained in:
Christopher Kohlhoff
2011-10-09 21:59:57 +00:00
parent 18a48222ed
commit 77dec8e703
105 changed files with 7800 additions and 504 deletions

View File

@@ -63,6 +63,9 @@
<member><link linkend="boost_asio.reference.async_write">async_write</link></member>
<member><link linkend="boost_asio.reference.async_write_at">async_write_at</link></member>
<member><link linkend="boost_asio.reference.buffer">buffer</link></member>
<member><link linkend="boost_asio.reference.buffer_cast">buffer_cast</link></member>
<member><link linkend="boost_asio.reference.buffer_copy">buffer_copy</link></member>
<member><link linkend="boost_asio.reference.buffer_size">buffer_size</link></member>
<member><link linkend="boost_asio.reference.buffers_begin">buffers_begin</link></member>
<member><link linkend="boost_asio.reference.buffers_end">buffers_end</link></member>
<member><link linkend="boost_asio.reference.has_service">has_service</link></member>
@@ -71,6 +74,7 @@
<member><link linkend="boost_asio.reference.read_until">read_until</link></member>
<member><link linkend="boost_asio.reference.transfer_all">transfer_all</link></member>
<member><link linkend="boost_asio.reference.transfer_at_least">transfer_at_least</link></member>
<member><link linkend="boost_asio.reference.transfer_exactly">transfer_exactly</link></member>
<member><link linkend="boost_asio.reference.use_service">use_service</link></member>
<member><link linkend="boost_asio.reference.write">write</link></member>
<member><link linkend="boost_asio.reference.write_at">write_at</link></member>
@@ -82,6 +86,7 @@
<member><link linkend="boost_asio.reference.placeholders__bytes_transferred">placeholders::bytes_transferred</link></member>
<member><link linkend="boost_asio.reference.placeholders__error">placeholders::error</link></member>
<member><link linkend="boost_asio.reference.placeholders__iterator">placeholders::iterator</link></member>
<member><link linkend="boost_asio.reference.placeholders__signal_number">placeholders::signal_number</link></member>
</simplelist>
<bridgehead renderas="sect3">Error Codes</bridgehead>
<simplelist type="vert" columns="1">
@@ -163,6 +168,8 @@
</simplelist>
<bridgehead renderas="sect3">Free Functions</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.async_connect">async_connect</link></member>
<member><link linkend="boost_asio.reference.connect">connect</link></member>
<member><link linkend="boost_asio.reference.ip__host_name">ip::host_name</link></member>
</simplelist>
</entry>
@@ -171,8 +178,9 @@
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.basic_datagram_socket">basic_datagram_socket</link></member>
<member><link linkend="boost_asio.reference.basic_deadline_timer">basic_deadline_timer</link></member>
<member><link linkend="boost_asio.reference.basic_socket">basic_socket</link></member>
<member><link linkend="boost_asio.reference.basic_raw_socket">basic_raw_socket</link></member>
<member><link linkend="boost_asio.reference.basic_seq_packet_socket">basic_seq_packet_socket</link></member>
<member><link linkend="boost_asio.reference.basic_socket">basic_socket</link></member>
<member><link linkend="boost_asio.reference.basic_socket_acceptor">basic_socket_acceptor</link></member>
<member><link linkend="boost_asio.reference.basic_socket_iostream">basic_socket_iostream</link></member>
<member><link linkend="boost_asio.reference.basic_socket_streambuf">basic_socket_streambuf</link></member>
@@ -188,6 +196,7 @@
<member><link linkend="boost_asio.reference.datagram_socket_service">datagram_socket_service</link></member>
<member><link linkend="boost_asio.reference.ip__resolver_service">ip::resolver_service</link></member>
<member><link linkend="boost_asio.reference.raw_socket_service">raw_socket_service</link></member>
<member><link linkend="boost_asio.reference.seq_packet_socket_service">seq_packet_socket_service</link></member>
<member><link linkend="boost_asio.reference.socket_acceptor_service">socket_acceptor_service</link></member>
<member><link linkend="boost_asio.reference.stream_socket_service">stream_socket_service</link></member>
</simplelist>
@@ -225,6 +234,7 @@
<bridgehead renderas="sect3">Type Requirements</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.AcceptHandler">AcceptHandler</link></member>
<member><link linkend="boost_asio.reference.ComposedConnectHandler">ComposedConnectHandler</link></member>
<member><link linkend="boost_asio.reference.ConnectHandler">ConnectHandler</link></member>
<member><link linkend="boost_asio.reference.DatagramSocketService">DatagramSocketService</link></member>
<member><link linkend="boost_asio.reference.Endpoint">Endpoint</link></member>
@@ -235,6 +245,7 @@
<member><link linkend="boost_asio.reference.RawSocketService">RawSocketService</link></member>
<member><link linkend="boost_asio.reference.ResolveHandler">ResolveHandler</link></member>
<member><link linkend="boost_asio.reference.ResolverService">ResolverService</link></member>
<member><link linkend="boost_asio.reference.SeqPacketSocketService">SeqPacketSocketService</link></member>
<member><link linkend="boost_asio.reference.SettableSocketOption">SettableSocketOption</link></member>
<member><link linkend="boost_asio.reference.SocketAcceptorService">SocketAcceptorService</link></member>
<member><link linkend="boost_asio.reference.SocketService">SocketService</link></member>
@@ -257,9 +268,12 @@
<entry valign="center" namest="b" nameend="b">
<bridgehead renderas="sect2">SSL</bridgehead>
</entry>
<entry valign="center" namest="c" nameend="d">
<entry valign="center" namest="c" nameend="c">
<bridgehead renderas="sect2">Serial Ports</bridgehead>
</entry>
<entry valign="center" namest="d" nameend="d">
<bridgehead renderas="sect2">Signal Handling</bridgehead>
</entry>
</row>
</thead>
<tbody>
@@ -290,17 +304,18 @@
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.ssl__context">ssl::context</link></member>
<member><link linkend="boost_asio.reference.ssl__context_base">ssl::context_base</link></member>
<member><link linkend="boost_asio.reference.ssl__rfc2818_verification">ssl::rfc2818_verification</link></member>
<member><link linkend="boost_asio.reference.ssl__stream_base">ssl::stream_base</link></member>
<member><link linkend="boost_asio.reference.ssl__verify_context">ssl::verify_context</link></member>
</simplelist>
<bridgehead renderas="sect3">Class Templates</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.ssl__basic_context">ssl::basic_context</link></member>
<member><link linkend="boost_asio.reference.ssl__stream">ssl::stream</link></member>
</simplelist>
<bridgehead renderas="sect3">Services</bridgehead>
<bridgehead renderas="sect3">Type Requirements</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.ssl__context_service">ssl::context_service</link></member>
<member><link linkend="boost_asio.reference.ssl__stream_service">ssl::stream_service</link></member>
<member><link linkend="boost_asio.reference.HandshakeHandler">HandshakeHandler</link></member>
<member><link linkend="boost_asio.reference.ShutdownHandler">ShutdownHandler</link></member>
</simplelist>
</entry>
<entry valign="top">
@@ -317,8 +332,6 @@
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.serial_port_service">serial_port_service</link></member>
</simplelist>
</entry>
<entry valign="top">
<bridgehead renderas="sect3">Serial Port Options</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.serial_port_base__baud_rate">serial_port_base::baud_rate</link></member>
@@ -334,6 +347,25 @@
<member><link linkend="boost_asio.reference.SettableSerialPortOption">SettableSerialPortOption</link></member>
</simplelist>
</entry>
<entry valign="top">
<bridgehead renderas="sect3">Classes</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.signal_set">signal_set</link></member>
</simplelist>
<bridgehead renderas="sect3">Class Templates</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.basic_signal_set">basic_signal_set</link></member>
</simplelist>
<bridgehead renderas="sect3">Services</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.signal_set_service">signal_set_service</link></member>
</simplelist>
<bridgehead renderas="sect3">Type Requirements</bridgehead>
<simplelist type="vert" columns="1">
<member><link linkend="boost_asio.reference.SignalSetService">SignalSetService</link></member>
<member><link linkend="boost_asio.reference.SignalHandler">SignalHandler</link></member>
</simplelist>
</entry>
</row>
</tbody>
</tgroup>

View File

@@ -54,17 +54,6 @@ boost::tribool request_parser::consume(request& req, char input)
req.method.push_back(input);
return boost::indeterminate;
}
case uri_start:
if (is_ctl(input))
{
return false;
}
else
{
state_ = uri;
req.uri.push_back(input);
return boost::indeterminate;
}
case uri:
if (input == ' ')
{

View File

@@ -68,7 +68,6 @@ private:
{
method_start,
method,
uri_start,
uri,
http_version_h,
http_version_t_1,

View File

@@ -54,17 +54,6 @@ boost::tribool request_parser::consume(request& req, char input)
req.method.push_back(input);
return boost::indeterminate;
}
case uri_start:
if (is_ctl(input))
{
return false;
}
else
{
state_ = uri;
req.uri.push_back(input);
return boost::indeterminate;
}
case uri:
if (input == ' ')
{

View File

@@ -68,7 +68,6 @@ private:
{
method_start,
method,
uri_start,
uri,
http_version_h,
http_version_t_1,

View File

@@ -54,17 +54,6 @@ boost::tribool request_parser::consume(request& req, char input)
req.method.push_back(input);
return boost::indeterminate;
}
case uri_start:
if (is_ctl(input))
{
return false;
}
else
{
state_ = uri;
req.uri.push_back(input);
return boost::indeterminate;
}
case uri:
if (input == ' ')
{

View File

@@ -68,7 +68,6 @@ private:
{
method_start,
method,
uri_start,
uri,
http_version_h,
http_version_t_1,

View File

@@ -17,7 +17,9 @@
#include <boost/asio/detail/config.hpp>
#if defined(BOOST_ASIO_HAS_STD_ATOMIC)
#if !defined(BOOST_HAS_THREADS) || defined(BOOST_ASIO_DISABLE_THREADS)
// Nothing to include.
#elif defined(BOOST_ASIO_HAS_STD_ATOMIC)
# include <atomic>
#else // defined(BOOST_ASIO_HAS_STD_ATOMIC)
# include <boost/detail/atomic_count.hpp>
@@ -27,7 +29,9 @@ namespace boost {
namespace asio {
namespace detail {
#if defined(BOOST_ASIO_HAS_STD_ATOMIC)
#if !defined(BOOST_HAS_THREADS) || defined(BOOST_ASIO_DISABLE_THREADS)
typedef long atomic_count;
#elif defined(BOOST_ASIO_HAS_STD_ATOMIC)
typedef std::atomic<long> atomic_count;
#else // defined(BOOST_ASIO_HAS_STD_ATOMIC)
typedef boost::detail::atomic_count atomic_count;

View File

@@ -17,6 +17,7 @@
#include <boost/asio/detail/config.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/detail/array_fwd.hpp>
#include <boost/asio/detail/socket_types.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -247,6 +248,112 @@ private:
std::size_t total_buffer_size_;
};
// Specialisation of buffer_sequence_adapter for a boost::array holding
// exactly two buffers. Both elements are converted up front, avoiding the
// per-element iteration performed by the general template.
template <typename Buffer, typename Elem>
class buffer_sequence_adapter<Buffer, boost::array<Elem, 2> >
  : buffer_sequence_adapter_base
{
public:
  // Convert the two elements to native buffers and record their total size.
  explicit buffer_sequence_adapter(
      const boost::array<Elem, 2>& buffer_sequence)
  {
    const Buffer first_buffer(buffer_sequence[0]);
    const Buffer second_buffer(buffer_sequence[1]);
    init_native_buffer(buffers_[0], first_buffer);
    init_native_buffer(buffers_[1], second_buffer);
    total_buffer_size_ = boost::asio::buffer_size(buffer_sequence[0])
      + boost::asio::buffer_size(buffer_sequence[1]);
  }

  // Access the fixed-size native buffer array.
  native_buffer_type* buffers()
  {
    return buffers_;
  }

  // This specialisation always holds exactly two buffers.
  std::size_t count() const
  {
    return 2;
  }

  // Whether the combined size of both buffers is zero.
  bool all_empty() const
  {
    return 0 == total_buffer_size_;
  }

  // Whether both elements of the given sequence are zero-sized.
  static bool all_empty(const boost::array<Elem, 2>& buffer_sequence)
  {
    if (boost::asio::buffer_size(buffer_sequence[0]) != 0)
      return false;
    return boost::asio::buffer_size(buffer_sequence[1]) == 0;
  }

  // Evaluate buffer_cast on each element — presumably to trigger any
  // debug-mode validation of the underlying buffers; mirrors the general
  // template's validate().
  static void validate(const boost::array<Elem, 2>& buffer_sequence)
  {
    boost::asio::buffer_cast<const void*>(buffer_sequence[0]);
    boost::asio::buffer_cast<const void*>(buffer_sequence[1]);
  }

  // The first element, converted to the target Buffer type.
  static Buffer first(const boost::array<Elem, 2>& buffer_sequence)
  {
    return Buffer(buffer_sequence[0]);
  }

private:
  // Native representations of the two buffers.
  native_buffer_type buffers_[2];

  // Combined size of both buffers, fixed at construction.
  std::size_t total_buffer_size_;
};
#if defined(BOOST_ASIO_HAS_STD_ARRAY)

// Specialisation of buffer_sequence_adapter for a std::array holding
// exactly two buffers. Both elements are converted up front, avoiding the
// per-element iteration performed by the general template.
template <typename Buffer, typename Elem>
class buffer_sequence_adapter<Buffer, std::array<Elem, 2> >
  : buffer_sequence_adapter_base
{
public:
  // Convert the two elements to native buffers and record their total size.
  explicit buffer_sequence_adapter(
      const std::array<Elem, 2>& buffer_sequence)
  {
    const Buffer first_buffer(buffer_sequence[0]);
    const Buffer second_buffer(buffer_sequence[1]);
    init_native_buffer(buffers_[0], first_buffer);
    init_native_buffer(buffers_[1], second_buffer);
    total_buffer_size_ = boost::asio::buffer_size(buffer_sequence[0])
      + boost::asio::buffer_size(buffer_sequence[1]);
  }

  // Access the fixed-size native buffer array.
  native_buffer_type* buffers()
  {
    return buffers_;
  }

  // This specialisation always holds exactly two buffers.
  std::size_t count() const
  {
    return 2;
  }

  // Whether the combined size of both buffers is zero.
  bool all_empty() const
  {
    return 0 == total_buffer_size_;
  }

  // Whether both elements of the given sequence are zero-sized.
  static bool all_empty(const std::array<Elem, 2>& buffer_sequence)
  {
    if (boost::asio::buffer_size(buffer_sequence[0]) != 0)
      return false;
    return boost::asio::buffer_size(buffer_sequence[1]) == 0;
  }

  // Evaluate buffer_cast on each element — presumably to trigger any
  // debug-mode validation of the underlying buffers; mirrors the general
  // template's validate().
  static void validate(const std::array<Elem, 2>& buffer_sequence)
  {
    boost::asio::buffer_cast<const void*>(buffer_sequence[0]);
    boost::asio::buffer_cast<const void*>(buffer_sequence[1]);
  }

  // The first element, converted to the target Buffer type.
  static Buffer first(const std::array<Elem, 2>& buffer_sequence)
  {
    return Buffer(buffer_sequence[0]);
  }

private:
  // Native representations of the two buffers.
  native_buffer_type buffers_[2];

  // Combined size of both buffers, fixed at construction.
  std::size_t total_buffer_size_;
};

#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
} // namespace detail
} // namespace asio
} // namespace boost

View File

@@ -27,34 +27,60 @@ namespace detail {
// Helper class to determine whether or not the current thread is inside an
// invocation of io_service::run() for a specified io_service object.
template <typename Owner>
template <typename Key, typename Value = unsigned char>
class call_stack
{
public:
// Context class automatically pushes an owner on to the stack.
// Context class automatically pushes the key/value pair on to the stack.
class context
: private noncopyable
{
public:
// Push the owner on to the stack.
explicit context(Owner* d)
: owner_(d),
next_(call_stack<Owner>::top_)
// Push the key on to the stack.
explicit context(Key* k)
: key_(k),
next_(call_stack<Key, Value>::top_)
{
call_stack<Owner>::top_ = this;
value_ = reinterpret_cast<unsigned char*>(this);
call_stack<Key, Value>::top_ = this;
}
// Pop the owner from the stack.
// Push the key/value pair on to the stack.
context(Key* k, Value& v)
: key_(k),
value_(&v),
next_(call_stack<Key, Value>::top_)
{
call_stack<Key, Value>::top_ = this;
}
// Pop the key/value pair from the stack.
~context()
{
call_stack<Owner>::top_ = next_;
call_stack<Key, Value>::top_ = next_;
}
// Find the next context with the same key.
Value* next_by_key() const
{
context* elem = next_;
while (elem)
{
if (elem->key_ == key_)
return elem->value_;
elem = elem->next_;
}
return 0;
}
private:
friend class call_stack<Owner>;
friend class call_stack<Key, Value>;
// The owner associated with the context.
Owner* owner_;
// The key associated with the context.
Key* key_;
// The value associated with the context.
Value* value_;
// The next element in the stack.
context* next_;
@@ -62,17 +88,18 @@ public:
friend class context;
// Determine whether the specified owner is on the stack.
static bool contains(Owner* d)
// Determine whether the specified owner is on the stack. Returns address of
// key if present, 0 otherwise.
static Value* contains(Key* k)
{
context* elem = top_;
while (elem)
{
if (elem->owner_ == d)
return true;
if (elem->key_ == k)
return elem->value_;
elem = elem->next_;
}
return false;
return 0;
}
private:
@@ -80,9 +107,9 @@ private:
static tss_ptr<context> top_;
};
template <typename Owner>
tss_ptr<typename call_stack<Owner>::context>
call_stack<Owner>::top_;
template <typename Key, typename Value>
tss_ptr<typename call_stack<Key, Value>::context>
call_stack<Key, Value>::top_;
} // namespace detail
} // namespace asio

View File

@@ -40,7 +40,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
completion_handler* h(static_cast<completion_handler*>(base));
@@ -61,7 +62,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN(());
boost_asio_handler_invoke_helpers::invoke(handler, handler);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -0,0 +1,38 @@
//
// detail/dependent_type.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_DETAIL_DEPENDENT_TYPE_HPP
#define BOOST_ASIO_DETAIL_DEPENDENT_TYPE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace detail {
// Yields the second parameter unchanged as the nested typedef `type`, in a
// form that is formally dependent on the first template parameter.
// Presumably used to force dependent-name lookup (deferring resolution to
// instantiation time) — confirm at call sites.
template <typename Dependent, typename Wrapped>
struct dependent_type
{
  typedef Wrapped type;
};
} // namespace detail
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // BOOST_ASIO_DETAIL_DEPENDENT_TYPE_HPP

View File

@@ -93,9 +93,11 @@ BOOST_ASIO_DECL int fcntl(int d, long cmd, boost::system::error_code& ec);
BOOST_ASIO_DECL int fcntl(int d, long cmd,
long arg, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_read(int d, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_read(int d,
state_type state, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_write(int d, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_write(int d,
state_type state, boost::system::error_code& ec);
} // namespace descriptor_ops
} // namespace detail

View File

@@ -76,7 +76,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
descriptor_read_op* o(static_cast<descriptor_read_op*>(base));
@@ -98,7 +99,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -76,7 +76,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
descriptor_write_op* o(static_cast<descriptor_write_op*>(base));
@@ -98,7 +99,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -21,6 +21,7 @@
#include <boost/limits.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/atomic_count.hpp>
#include <boost/asio/detail/epoll_reactor_fwd.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/object_pool.hpp>
@@ -47,16 +48,28 @@ public:
connect_op = 1, except_op = 2, max_ops = 3 };
// Per-descriptor queues.
class descriptor_state
class descriptor_state : operation
{
friend class epoll_reactor;
friend class object_pool_access;
descriptor_state* next_;
descriptor_state* prev_;
bool op_queue_is_empty_[max_ops];
mutex mutex_;
epoll_reactor* reactor_;
int descriptor_;
op_queue<reactor_op> op_queue_[max_ops];
bool shutdown_;
descriptor_state* next_;
descriptor_state* prev_;
BOOST_ASIO_DECL descriptor_state();
void set_ready_events(uint32_t events) { task_result_ = events; }
BOOST_ASIO_DECL operation* perform_io(uint32_t events);
BOOST_ASIO_DECL static void do_complete(
io_service_impl* owner, operation* base,
const boost::system::error_code& ec, std::size_t bytes_transferred);
};
// Per-descriptor data.
@@ -160,6 +173,12 @@ private:
// Create the timerfd file descriptor. Does not throw.
BOOST_ASIO_DECL static int do_timerfd_create();
// Allocate a new descriptor state object.
BOOST_ASIO_DECL descriptor_state* allocate_descriptor_state();
// Free an existing descriptor state object.
BOOST_ASIO_DECL void free_descriptor_state(descriptor_state* s);
// Helper function to add a new timer queue.
BOOST_ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);
@@ -206,6 +225,10 @@ private:
// Keep track of all registered descriptors.
object_pool<descriptor_state> registered_descriptors_;
// Helper class to do post-perform_io cleanup.
struct perform_io_cleanup_on_block_exit;
friend struct perform_io_cleanup_on_block_exit;
};
} // namespace detail

View File

@@ -29,13 +29,13 @@
# include <boost/asio/detail/gcc_arm_fenced_block.hpp>
#elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))
# include <boost/asio/detail/gcc_hppa_fenced_block.hpp>
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
# include <boost/asio/detail/gcc_x86_fenced_block.hpp>
#elif defined(__GNUC__) \
&& ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \
&& !defined(__INTEL_COMPILER) && !defined(__ICL) \
&& !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)
# include <boost/asio/detail/gcc_sync_fenced_block.hpp>
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
# include <boost/asio/detail/gcc_x86_fenced_block.hpp>
#elif defined(BOOST_WINDOWS) && !defined(UNDER_CE)
# include <boost/asio/detail/win_fenced_block.hpp>
#else
@@ -58,13 +58,13 @@ typedef solaris_fenced_block fenced_block;
typedef gcc_arm_fenced_block fenced_block;
#elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))
typedef gcc_hppa_fenced_block fenced_block;
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
typedef gcc_x86_fenced_block fenced_block;
#elif defined(__GNUC__) \
&& ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \
&& !defined(__INTEL_COMPILER) && !defined(__ICL) \
&& !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)
typedef gcc_sync_fenced_block fenced_block;
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
typedef gcc_x86_fenced_block fenced_block;
#elif defined(BOOST_WINDOWS) && !defined(UNDER_CE)
typedef win_fenced_block fenced_block;
#else

View File

@@ -29,8 +29,16 @@ class gcc_arm_fenced_block
: private noncopyable
{
public:
// Constructor.
gcc_arm_fenced_block()
enum half_t { half };
enum full_t { full };
// Constructor for a half fenced block.
explicit gcc_arm_fenced_block(half_t)
{
}
// Constructor for a full fenced block.
explicit gcc_arm_fenced_block(full_t)
{
barrier();
}

View File

@@ -29,8 +29,16 @@ class gcc_hppa_fenced_block
: private noncopyable
{
public:
// Constructor.
gcc_hppa_fenced_block()
enum half_t { half };
enum full_t { full };
// Constructor for a half fenced block.
explicit gcc_hppa_fenced_block(half_t)
{
}
// Constructor for a full fenced block.
explicit gcc_hppa_fenced_block(full_t)
{
barrier();
}

View File

@@ -32,8 +32,10 @@ class gcc_sync_fenced_block
: private noncopyable
{
public:
enum half_or_full_t { half, full };
// Constructor.
gcc_sync_fenced_block()
explicit gcc_sync_fenced_block(half_or_full_t)
: value_(0)
{
__sync_lock_test_and_set(&value_, 1);

View File

@@ -29,25 +29,46 @@ class gcc_x86_fenced_block
: private noncopyable
{
public:
// Constructor.
gcc_x86_fenced_block()
enum half_t { half };
enum full_t { full };
// Constructor for a half fenced block.
explicit gcc_x86_fenced_block(half_t)
{
barrier();
}
// Constructor for a full fenced block.
explicit gcc_x86_fenced_block(full_t)
{
barrier1();
}
// Destructor.
~gcc_x86_fenced_block()
{
barrier();
barrier2();
}
private:
static int barrier()
static int barrier1()
{
int r = 0;
__asm__ __volatile__ ("xchgl %%eax, %0" : "=m" (r) : : "memory", "cc");
int r = 0, m = 1;
__asm__ __volatile__ (
"xchgl %0, %1" :
"=r"(r), "=m"(m) :
"0"(1), "m"(m) :
"memory", "cc");
return r;
}
static void barrier2()
{
#if defined(__SSE2__)
__asm__ __volatile__ ("mfence" ::: "memory");
#else // defined(__SSE2__)
barrier1();
#endif // defined(__SSE2__)
}
};
} // namespace detail

View File

@@ -203,7 +203,7 @@ std::size_t sync_read(int d, state_type state, buf* bufs,
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, ec) < 0)
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
@@ -280,7 +280,7 @@ std::size_t sync_write(int d, state_type state, const buf* bufs,
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, ec) < 0)
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
@@ -387,7 +387,7 @@ int fcntl(int d, long cmd, long arg, boost::system::error_code& ec)
return result;
}
int poll_read(int d, boost::system::error_code& ec)
int poll_read(int d, state_type state, boost::system::error_code& ec)
{
if (d == -1)
{
@@ -399,14 +399,18 @@ int poll_read(int d, boost::system::error_code& ec)
fds.fd = d;
fds.events = POLLIN;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
errno = 0;
int result = error_wrapper(::poll(&fds, 1, -1), ec);
if (result >= 0)
int result = error_wrapper(::poll(&fds, 1, timeout), ec);
if (result == 0)
ec = (state & user_set_non_blocking)
? boost::asio::error::would_block : boost::system::error_code();
else if (result > 0)
ec = boost::system::error_code();
return result;
}
int poll_write(int d, boost::system::error_code& ec)
int poll_write(int d, state_type state, boost::system::error_code& ec)
{
if (d == -1)
{
@@ -418,9 +422,13 @@ int poll_write(int d, boost::system::error_code& ec)
fds.fd = d;
fds.events = POLLOUT;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
errno = 0;
int result = error_wrapper(::poll(&fds, 1, -1), ec);
if (result >= 0)
int result = error_wrapper(::poll(&fds, 1, timeout), ec);
if (result == 0)
ec = (state & user_set_non_blocking)
? boost::asio::error::would_block : boost::system::error_code();
else if (result > 0)
ec = boost::system::error_code();
return result;
}

View File

@@ -148,13 +148,19 @@ void epoll_reactor::init_task()
int epoll_reactor::register_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
mutex::scoped_lock lock(registered_descriptors_mutex_);
descriptor_data = allocate_descriptor_state();
descriptor_data = registered_descriptors_.alloc();
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
lock.unlock();
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
descriptor_data->op_queue_is_empty_[i] =
descriptor_data->op_queue_[i].empty();
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
@@ -170,14 +176,20 @@ int epoll_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
mutex::scoped_lock lock(registered_descriptors_mutex_);
descriptor_data = allocate_descriptor_state();
descriptor_data = registered_descriptors_.alloc();
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
lock.unlock();
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
for (int i = 0; i < max_ops; ++i)
descriptor_data->op_queue_is_empty_[i] =
descriptor_data->op_queue_[i].empty();
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
@@ -208,6 +220,22 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
return;
}
bool perform_speculative = allow_speculative;
if (perform_speculative)
{
if (descriptor_data->op_queue_is_empty_[op_type]
&& (op_type != read_op
|| descriptor_data->op_queue_is_empty_[except_op]))
{
if (op->perform())
{
io_service_.post_immediate_completion(op);
return;
}
perform_speculative = false;
}
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
@@ -216,17 +244,24 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
return;
}
if (descriptor_data->op_queue_[op_type].empty())
for (int i = 0; i < max_ops; ++i)
descriptor_data->op_queue_is_empty_[i] =
descriptor_data->op_queue_[i].empty();
if (descriptor_data->op_queue_is_empty_[op_type])
{
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
if (allow_speculative)
{
if (op->perform())
if (perform_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_is_empty_[except_op]))
{
descriptor_lock.unlock();
io_service_.post_immediate_completion(op);
return;
if (op->perform())
{
descriptor_lock.unlock();
io_service_.post_immediate_completion(op);
return;
}
}
}
else
@@ -240,6 +275,7 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
}
descriptor_data->op_queue_[op_type].push(op);
descriptor_data->op_queue_is_empty_[op_type] = false;
io_service_.work_started();
}
@@ -274,7 +310,6 @@ void epoll_reactor::deregister_descriptor(socket_type descriptor,
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
if (!descriptor_data->shutdown_)
{
@@ -305,11 +340,9 @@ void epoll_reactor::deregister_descriptor(socket_type descriptor,
descriptor_lock.unlock();
registered_descriptors_.free(descriptor_data);
free_descriptor_state(descriptor_data);
descriptor_data = 0;
descriptors_lock.unlock();
io_service_.post_deferred_completions(ops);
}
}
@@ -321,7 +354,6 @@ void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
if (!descriptor_data->shutdown_)
{
@@ -337,15 +369,19 @@ void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
descriptor_lock.unlock();
registered_descriptors_.free(descriptor_data);
free_descriptor_state(descriptor_data);
descriptor_data = 0;
descriptors_lock.unlock();
}
}
void epoll_reactor::run(bool block, op_queue<operation>& ops)
{
// This code relies on the fact that the task_io_service queues the reactor
// task behind all descriptor operations generated by this function. This
// means, that by the time we reach this point, any previously returned
// descriptor operations have already been dequeued. Therefore it is now safe
// for us to reuse and return them for the task_io_service to queue again.
// Calculate a timeout only if timerfd is not used.
int timeout;
if (timer_fd_ != -1)
@@ -392,28 +428,12 @@ void epoll_reactor::run(bool block, op_queue<operation>& ops)
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
else
{
// The descriptor operation doesn't count as work in and of itself, so we
// don't call work_started() here. This still allows the io_service to
// stop if the only remaining operations are descriptor operations.
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events[i].events & (flag[j] | EPOLLERR | EPOLLHUP))
{
while (reactor_op* op = descriptor_data->op_queue_[j].front())
{
if (op->perform())
{
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
else
break;
}
}
}
descriptor_data->set_ready_events(events[i].events);
ops.push(descriptor_data);
}
}
@@ -491,6 +511,18 @@ int epoll_reactor::do_timerfd_create()
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
}
// Obtain a descriptor_state object from the reactor's object pool.
epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
{
  // Serialise access to the shared descriptor-state pool.
  mutex::scoped_lock pool_lock(registered_descriptors_mutex_);
  return registered_descriptors_.alloc();
}
// Return a descriptor_state object to the reactor's object pool.
void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
{
  // Serialise access to the shared descriptor-state pool.
  mutex::scoped_lock pool_lock(registered_descriptors_mutex_);
  registered_descriptors_.free(s);
}
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
@@ -539,6 +571,92 @@ int epoll_reactor::get_timeout(itimerspec& ts)
}
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
// Scope guard used by descriptor_state::perform_io(). On block exit it posts
// any completed operations beyond the first (which is returned to the caller
// for immediate invocation), and keeps the io_service's outstanding-work
// count balanced whether or not any user operation completed.
struct epoll_reactor::perform_io_cleanup_on_block_exit
{
  explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
    : reactor_(r), first_op_(0)
  {
  }

  ~perform_io_cleanup_on_block_exit()
  {
    if (first_op_)
    {
      // Post the remaining completed operations for invocation.
      if (!ops_.empty())
        reactor_->io_service_.post_deferred_completions(ops_);

      // A user-initiated operation has completed, but there's no need to
      // explicitly call work_finished() here. Instead, we'll take advantage of
      // the fact that the task_io_service will call work_finished() once we
      // return.
    }
    else
    {
      // No user-initiated operations have completed, so we need to compensate
      // for the work_finished() call that the task_io_service will make once
      // this operation returns.
      reactor_->io_service_.work_started();
    }
  }

  epoll_reactor* reactor_;   // Reactor whose io_service receives the ops.
  op_queue<operation> ops_;  // Operations completed during perform_io().
  operation* first_op_;      // Front op, handed back for direct invocation.
};
// A descriptor_state is itself a queued operation: when epoll reports events
// for the descriptor, the state object is pushed on to the completion queue
// and later dispatched via do_complete().
epoll_reactor::descriptor_state::descriptor_state()
  : operation(&epoll_reactor::descriptor_state::do_complete)
{
}
// Perform the I/O operations made ready by the given epoll event mask.
// Returns the first completed operation (or 0 if none) for the caller to
// invoke directly; any further completed operations are posted by the
// io_cleanup guard's destructor, which also balances the work count.
operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
{
  // Construct the guard before taking the lock so its destructor runs after
  // the lock has been released.
  perform_io_cleanup_on_block_exit io_cleanup(reactor_);
  mutex::scoped_lock descriptor_lock(mutex_);

  // Exception operations must be processed first to ensure that any
  // out-of-band data is read before normal data.
  static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
  for (int j = max_ops - 1; j >= 0; --j)
  {
    // EPOLLERR/EPOLLHUP are folded into every op type's readiness so that
    // queued operations get to observe the error condition when performed.
    if (events & (flag[j] | EPOLLERR | EPOLLHUP))
    {
      // Drain the queue while operations keep completing; stop at the first
      // operation that would block.
      while (reactor_op* op = op_queue_[j].front())
      {
        if (op->perform())
        {
          op_queue_[j].pop();
          io_cleanup.ops_.push(op);
        }
        else
          break;
      }
    }
  }

  // The first operation will be returned for completion now. The others will
  // be posted for later by the io_cleanup object's destructor.
  io_cleanup.first_op_ = io_cleanup.ops_.front();
  io_cleanup.ops_.pop();
  return io_cleanup.first_op_;
}
// Completion hook invoked when a descriptor_state is dequeued from the
// io_service. The ready epoll event mask travels in bytes_transferred.
void epoll_reactor::descriptor_state::do_complete(
    io_service_impl* owner, operation* base,
    const boost::system::error_code& ec, std::size_t bytes_transferred)
{
  // A null owner means the operation is being abandoned; do nothing.
  if (!owner)
    return;

  descriptor_state* state = static_cast<descriptor_state*>(base);
  const uint32_t ready_events = static_cast<uint32_t>(bytes_transferred);

  // perform_io() hands back at most one completed operation for immediate
  // invocation; the remainder are posted for later.
  if (operation* completed_op = state->perform_io(ready_events))
    completed_op->complete(*owner, ec, 0);
}
} // namespace detail
} // namespace asio
} // namespace boost

View File

@@ -132,6 +132,10 @@ int kqueue_reactor::register_descriptor(socket_type descriptor,
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
descriptor_data->op_queue_is_empty_[i] =
descriptor_data->op_queue_[i].empty();
return 0;
}
@@ -146,6 +150,10 @@ int kqueue_reactor::register_internal_descriptor(
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
for (int i = 0; i < max_ops; ++i)
descriptor_data->op_queue_is_empty_[i] =
descriptor_data->op_queue_[i].empty();
struct kevent event;
switch (op_type)
{
@@ -186,6 +194,21 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
return;
}
if (allow_speculative)
{
if (descriptor_data->op_queue_is_empty_[op_type]
&& (op_type != read_op
|| descriptor_data->op_queue_is_empty_[except_op]))
{
if (op->perform())
{
io_service_.post_immediate_completion(op);
return;
}
allow_speculative = false;
}
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
@@ -194,12 +217,16 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
return;
}
bool first = descriptor_data->op_queue_[op_type].empty();
for (int i = 0; i < max_ops; ++i)
descriptor_data->op_queue_is_empty_[i] =
descriptor_data->op_queue_[i].empty();
bool first = descriptor_data->op_queue_is_empty_[op_type];
if (first)
{
if (allow_speculative)
{
if (op_type != read_op || descriptor_data->op_queue_[except_op].empty())
if (op_type != read_op || descriptor_data->op_queue_is_empty_[except_op])
{
if (op->perform())
{
@@ -212,6 +239,7 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
}
descriptor_data->op_queue_[op_type].push(op);
descriptor_data->op_queue_is_empty_[op_type] = false;
io_service_.work_started();
if (first)

View File

@@ -172,27 +172,28 @@ void select_reactor::run(bool block, op_queue<operation>& ops)
#endif // defined(BOOST_ASIO_HAS_IOCP)
// Set up the descriptor sets.
fd_set_adapter fds[max_select_ops];
fds[read_op].set(interrupter_.read_descriptor());
for (int i = 0; i < max_select_ops; ++i)
fd_sets_[i].reset();
fd_sets_[read_op].set(interrupter_.read_descriptor());
socket_type max_fd = 0;
bool have_work_to_do = !timer_queues_.all_empty();
for (int i = 0; i < max_select_ops; ++i)
{
have_work_to_do = have_work_to_do || !op_queue_[i].empty();
op_queue_[i].get_descriptors(fds[i], ops);
if (fds[i].max_descriptor() > max_fd)
max_fd = fds[i].max_descriptor();
op_queue_[i].get_descriptors(fd_sets_[i], ops);
if (fd_sets_[i].max_descriptor() > max_fd)
max_fd = fd_sets_[i].max_descriptor();
}
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();
op_queue_[connect_op].get_descriptors(fds[write_op], ops);
if (fds[write_op].max_descriptor() > max_fd)
max_fd = fds[write_op].max_descriptor();
op_queue_[connect_op].get_descriptors(fds[except_op], ops);
if (fds[except_op].max_descriptor() > max_fd)
max_fd = fds[except_op].max_descriptor();
op_queue_[connect_op].get_descriptors(fd_sets_[write_op], ops);
if (fd_sets_[write_op].max_descriptor() > max_fd)
max_fd = fd_sets_[write_op].max_descriptor();
op_queue_[connect_op].get_descriptors(fd_sets_[except_op], ops);
if (fd_sets_[except_op].max_descriptor() > max_fd)
max_fd = fd_sets_[except_op].max_descriptor();
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// We can return immediately if there's no work to do and the reactor is
@@ -209,11 +210,14 @@ void select_reactor::run(bool block, op_queue<operation>& ops)
// Block on the select call until descriptors become ready.
boost::system::error_code ec;
int retval = socket_ops::select(static_cast<int>(max_fd + 1),
fds[read_op], fds[write_op], fds[except_op], tv, ec);
fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);
// Reset the interrupter.
if (retval > 0 && fds[read_op].is_set(interrupter_.read_descriptor()))
if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))
{
interrupter_.reset();
--retval;
}
lock.lock();
@@ -223,15 +227,15 @@ void select_reactor::run(bool block, op_queue<operation>& ops)
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
op_queue_[connect_op].perform_operations_for_descriptors(
fds[except_op], ops);
fd_sets_[except_op], ops);
op_queue_[connect_op].perform_operations_for_descriptors(
fds[write_op], ops);
fd_sets_[write_op], ops);
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
for (int i = max_select_ops - 1; i >= 0; --i)
op_queue_[i].perform_operations_for_descriptors(fds[i], ops);
op_queue_[i].perform_operations_for_descriptors(fd_sets_[i], ops);
}
timer_queues_.get_ready_timers(ops);
}

View File

@@ -21,6 +21,24 @@ namespace boost {
namespace asio {
namespace detail {
// Construct the registry with an eagerly-created first service, built from
// the owning io_service and an extra constructor argument. Creating the
// first service here (rather than on demand) lets it be fetched without
// locking via first_service().
template <typename Service, typename Arg>
service_registry::service_registry(
    boost::asio::io_service& o, Service*, Arg arg)
  : owner_(o),
    first_service_(new Service(o, arg))
{
  // Initialise the lookup key and terminate the singly-linked service list.
  boost::asio::io_service::service::key key;
  init_key(key, Service::id);
  first_service_->key_ = key;
  first_service_->next_ = 0;
}
// Fast, lock-free access to the service created in the constructor. The
// caller must name the same Service type used to construct the registry;
// this is not checked here.
template <typename Service>
Service& service_registry::first_service()
{
  return *static_cast<Service*>(first_service_);
}
template <typename Service>
Service& service_registry::use_service()
{

View File

@@ -26,12 +26,6 @@ namespace boost {
namespace asio {
namespace detail {
service_registry::service_registry(boost::asio::io_service& o)
: owner_(o),
first_service_(0)
{
}
service_registry::~service_registry()
{
// Shutdown all services. This must be done in a separate loop before the

View File

@@ -71,7 +71,7 @@ void asio_signal_handler(int signal_number)
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
#if defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
signal(signal_number, asio_signal_handler);
::signal(signal_number, asio_signal_handler);
#endif // defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
}
@@ -98,7 +98,8 @@ public:
}
static void do_complete(io_service_impl* /*owner*/, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
pipe_read_op* o(static_cast<pipe_read_op*>(base));
delete o;

View File

@@ -148,7 +148,7 @@ socket_type sync_accept(socket_type s, state_type state,
return invalid_socket;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, ec) < 0)
if (socket_ops::poll_read(s, 0, ec) < 0)
return invalid_socket;
}
}
@@ -735,7 +735,7 @@ size_t sync_recv(socket_type s, state_type state, buf* bufs,
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, ec) < 0)
if (socket_ops::poll_read(s, 0, ec) < 0)
return 0;
}
}
@@ -873,7 +873,7 @@ size_t sync_recvfrom(socket_type s, state_type state, buf* bufs,
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, ec) < 0)
if (socket_ops::poll_read(s, 0, ec) < 0)
return 0;
}
}
@@ -984,7 +984,7 @@ size_t sync_recvmsg(socket_type s, state_type state,
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, ec) < 0)
if (socket_ops::poll_read(s, 0, ec) < 0)
return 0;
}
}
@@ -1110,7 +1110,7 @@ size_t sync_send(socket_type s, state_type state, const buf* bufs,
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_write(s, ec) < 0)
if (socket_ops::poll_write(s, 0, ec) < 0)
return 0;
}
}
@@ -1233,7 +1233,7 @@ size_t sync_sendto(socket_type s, state_type state, const buf* bufs,
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_write(s, ec) < 0)
if (socket_ops::poll_write(s, 0, ec) < 0)
return 0;
}
}
@@ -1683,7 +1683,7 @@ int select(int nfds, fd_set* readfds, fd_set* writefds,
#endif
}
int poll_read(socket_type s, boost::system::error_code& ec)
int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1697,11 +1697,12 @@ int poll_read(socket_type s, boost::system::error_code& ec)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
timeval zero_timeout;
zero_timeout.tv_sec = 0;
zero_timeout.tv_usec = 0;
timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
clear_last_error();
int result = error_wrapper(::select(s, &fds, 0, 0, 0), ec);
if (result >= 0)
ec = boost::system::error_code();
return result;
int result = error_wrapper(::select(s, &fds, 0, 0, timeout), ec);
#else // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
@@ -1709,17 +1710,21 @@ int poll_read(socket_type s, boost::system::error_code& ec)
fds.fd = s;
fds.events = POLLIN;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
clear_last_error();
int result = error_wrapper(::poll(&fds, 1, -1), ec);
if (result >= 0)
ec = boost::system::error_code();
return result;
int result = error_wrapper(::poll(&fds, 1, timeout), ec);
#endif // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
if (result == 0)
ec = (state & user_set_non_blocking)
? boost::asio::error::would_block : boost::system::error_code();
else if (result > 0)
ec = boost::system::error_code();
return result;
}
int poll_write(socket_type s, boost::system::error_code& ec)
int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1733,11 +1738,12 @@ int poll_write(socket_type s, boost::system::error_code& ec)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
timeval zero_timeout;
zero_timeout.tv_sec = 0;
zero_timeout.tv_usec = 0;
timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
clear_last_error();
int result = error_wrapper(::select(s, 0, &fds, 0, 0), ec);
if (result >= 0)
ec = boost::system::error_code();
return result;
int result = error_wrapper(::select(s, 0, &fds, 0, timeout), ec);
#else // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
@@ -1745,14 +1751,18 @@ int poll_write(socket_type s, boost::system::error_code& ec)
fds.fd = s;
fds.events = POLLOUT;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
clear_last_error();
int result = error_wrapper(::poll(&fds, 1, -1), ec);
if (result >= 0)
ec = boost::system::error_code();
return result;
int result = error_wrapper(::poll(&fds, 1, timeout), ec);
#endif // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
if (result == 0)
ec = (state & user_set_non_blocking)
? boost::asio::error::would_block : boost::system::error_code();
else if (result > 0)
ec = boost::system::error_code();
return result;
}
int poll_connect(socket_type s, boost::system::error_code& ec)

View File

@@ -61,7 +61,7 @@ void strand_service::dispatch(strand_service::implementation_type& impl,
// If we are already in the strand then the handler can run immediately.
if (call_stack<strand_impl>::contains(impl))
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::full);
boost_asio_handler_invoke_helpers::invoke(handler, handler);
return;
}

View File

@@ -79,7 +79,7 @@ bool strand_service::do_dispatch(implementation_type& impl, operation* op)
{
// If we are running inside the io_service, and no other handler is queued
// or running, then the handler can run immediately.
bool can_dispatch = call_stack<io_service_impl>::contains(&io_service_);
bool can_dispatch = io_service_.can_dispatch();
impl->mutex_.lock();
bool first = (++impl->count_ == 1);
if (can_dispatch && first)
@@ -115,7 +115,7 @@ void strand_service::do_post(implementation_type& impl, operation* op)
}
void strand_service::do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& ec, std::size_t /*bytes_transferred*/)
{
if (owner)
{
@@ -134,7 +134,7 @@ void strand_service::do_complete(io_service_impl* owner, operation* base,
on_do_complete_exit on_exit = { owner, impl };
(void)on_exit;
o->complete(*owner);
o->complete(*owner, ec, 0);
}
}

View File

@@ -15,7 +15,6 @@
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/completion_handler.hpp>
#include <boost/asio/detail/fenced_block.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
@@ -30,9 +29,9 @@ namespace detail {
template <typename Handler>
void task_io_service::dispatch(Handler handler)
{
if (call_stack<task_io_service>::contains(this))
if (thread_call_stack::contains(this))
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::full);
boost_asio_handler_invoke_helpers::invoke(handler, handler);
}
else

View File

@@ -20,7 +20,6 @@
#if !defined(BOOST_ASIO_HAS_IOCP)
#include <boost/limits.hpp>
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/task_io_service.hpp>
@@ -48,24 +47,37 @@ struct task_io_service::task_cleanup
op_queue<operation>* ops_;
};
struct task_io_service::work_finished_on_block_exit
struct task_io_service::work_cleanup
{
~work_finished_on_block_exit()
~work_cleanup()
{
task_io_service_->work_finished();
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
if (!ops_->empty())
{
lock_->lock();
task_io_service_->op_queue_.push(*ops_);
}
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
}
task_io_service* task_io_service_;
mutex::scoped_lock* lock_;
op_queue<operation>* ops_;
};
struct task_io_service::idle_thread_info
struct task_io_service::thread_info
{
event wakeup_event;
idle_thread_info* next;
event* wakeup_event;
op_queue<operation>* private_op_queue;
thread_info* next;
};
task_io_service::task_io_service(boost::asio::io_service& io_service)
task_io_service::task_io_service(
boost::asio::io_service& io_service, std::size_t concurrency_hint)
: boost::asio::detail::service_base<task_io_service>(io_service),
one_thread_(concurrency_hint == 1),
mutex_(),
task_(0),
task_interrupted_(true),
@@ -77,10 +89,6 @@ task_io_service::task_io_service(boost::asio::io_service& io_service)
BOOST_ASIO_HANDLER_TRACKING_INIT;
}
void task_io_service::init(std::size_t /*concurrency_hint*/)
{
}
void task_io_service::shutdown_service()
{
mutex::scoped_lock lock(mutex_);
@@ -120,15 +128,22 @@ std::size_t task_io_service::run(boost::system::error_code& ec)
return 0;
}
call_stack<task_io_service>::context ctx(this);
idle_thread_info this_idle_thread;
this_idle_thread.next = 0;
thread_info this_thread;
event wakeup_event;
this_thread.wakeup_event = &wakeup_event;
op_queue<operation> private_op_queue;
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
this_thread.private_op_queue = one_thread_ == 1 ? &private_op_queue : 0;
#else // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
this_thread.private_op_queue = 0;
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
this_thread.next = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
std::size_t n = 0;
for (; do_one(lock, &this_idle_thread); lock.lock())
for (; do_run_one(lock, this_thread, private_op_queue, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
@@ -143,31 +158,53 @@ std::size_t task_io_service::run_one(boost::system::error_code& ec)
return 0;
}
call_stack<task_io_service>::context ctx(this);
idle_thread_info this_idle_thread;
this_idle_thread.next = 0;
thread_info this_thread;
event wakeup_event;
this_thread.wakeup_event = &wakeup_event;
op_queue<operation> private_op_queue;
this_thread.private_op_queue = 0;
this_thread.next = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_one(lock, &this_idle_thread);
return do_run_one(lock, this_thread, private_op_queue, ec);
}
std::size_t task_io_service::poll(boost::system::error_code& ec)
{
ec = boost::system::error_code();
if (outstanding_work_ == 0)
{
stop();
ec = boost::system::error_code();
return 0;
}
call_stack<task_io_service>::context ctx(this);
thread_info this_thread;
this_thread.wakeup_event = 0;
op_queue<operation> private_op_queue;
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
this_thread.private_op_queue = one_thread_ == 1 ? &private_op_queue : 0;
#else // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
this_thread.private_op_queue = 0;
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
this_thread.next = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_thread_info = ctx.next_by_key())
if (outer_thread_info->private_op_queue)
op_queue_.push(*outer_thread_info->private_op_queue);
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
std::size_t n = 0;
for (; do_one(lock, 0); lock.lock())
for (; do_poll_one(lock, private_op_queue, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
@@ -182,11 +219,26 @@ std::size_t task_io_service::poll_one(boost::system::error_code& ec)
return 0;
}
call_stack<task_io_service>::context ctx(this);
thread_info this_thread;
this_thread.wakeup_event = 0;
op_queue<operation> private_op_queue;
this_thread.private_op_queue = 0;
this_thread.next = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_one(lock, 0);
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_thread_info = ctx.next_by_key())
if (outer_thread_info->private_op_queue)
op_queue_.push(*outer_thread_info->private_op_queue);
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
return do_poll_one(lock, private_op_queue, ec);
}
void task_io_service::stop()
@@ -215,6 +267,20 @@ void task_io_service::post_immediate_completion(task_io_service::operation* op)
void task_io_service::post_deferred_completion(task_io_service::operation* op)
{
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
if (one_thread_)
{
if (thread_info* this_thread = thread_call_stack::contains(this))
{
if (this_thread->private_op_queue)
{
this_thread->private_op_queue->push(op);
return;
}
}
}
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
@@ -225,6 +291,20 @@ void task_io_service::post_deferred_completions(
{
if (!ops.empty())
{
#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
if (one_thread_)
{
if (thread_info* this_thread = thread_call_stack::contains(this))
{
if (this_thread->private_op_queue)
{
this_thread->private_op_queue->push(ops);
return;
}
}
}
#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
@@ -238,11 +318,10 @@ void task_io_service::abandon_operations(
ops2.push(ops);
}
std::size_t task_io_service::do_one(mutex::scoped_lock& lock,
task_io_service::idle_thread_info* this_idle_thread)
std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
task_io_service::thread_info& this_thread,
op_queue<operation>& private_op_queue, const boost::system::error_code& ec)
{
bool polling = !this_idle_thread;
bool task_has_run = false;
while (!stopped_)
{
if (!op_queue_.empty())
@@ -254,63 +333,105 @@ std::size_t task_io_service::do_one(mutex::scoped_lock& lock,
if (o == &task_operation_)
{
task_interrupted_ = more_handlers || polling;
task_interrupted_ = more_handlers;
// If the task has already run and we're polling then we're done.
if (task_has_run && polling)
{
task_interrupted_ = true;
op_queue_.push(&task_operation_);
return 0;
}
task_has_run = true;
if (!more_handlers || !wake_one_idle_thread_and_unlock(lock))
if (more_handlers && !one_thread_)
wake_one_idle_thread_and_unlock(lock);
else
lock.unlock();
op_queue<operation> completed_ops;
task_cleanup c = { this, &lock, &completed_ops };
(void)c;
task_cleanup on_exit = { this, &lock, &completed_ops };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(!more_handlers && !polling, completed_ops);
task_->run(!more_handlers, completed_ops);
}
else
{
if (more_handlers)
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_finished_on_block_exit on_exit = { this };
work_cleanup on_exit = { this, &lock, &private_op_queue };
(void)on_exit;
// Complete the operation. May throw an exception.
o->complete(*this); // deletes the operation object
// Complete the operation. May throw an exception. Deletes the object.
o->complete(*this, ec, task_result);
return 1;
}
}
else if (this_idle_thread)
{
// Nothing to run right now, so just wait for work to do.
this_idle_thread->next = first_idle_thread_;
first_idle_thread_ = this_idle_thread;
this_idle_thread->wakeup_event.clear(lock);
this_idle_thread->wakeup_event.wait(lock);
}
else
{
return 0;
// Nothing to run right now, so just wait for work to do.
this_thread.next = first_idle_thread_;
first_idle_thread_ = &this_thread;
this_thread.wakeup_event->clear(lock);
this_thread.wakeup_event->wait(lock);
}
}
return 0;
}
// Execute at most one ready handler without blocking. Called with the
// io_service mutex held via 'lock'; returns the number of handlers run
// (0 or 1). 'private_op_queue' receives any completions deferred by the
// handler when thread-local queues are in use.
std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
    op_queue<operation>& private_op_queue, const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    // The reactor task is at the front: run it once, non-blocking, to pick
    // up any ready descriptor operations.
    op_queue_.pop();
    lock.unlock();

    {
      op_queue<operation> completed_ops;
      task_cleanup c = { this, &lock, &completed_ops };
      (void)c;

      // Run the task. May throw an exception. We never block here, since
      // this is the polling path.
      task_->run(false, completed_ops);
    }

    // NOTE(review): op_queue_ is read again here — presumably the
    // task_cleanup destructor re-acquires 'lock' before this point; confirm
    // against task_cleanup's definition.
    o = op_queue_.front();
    if (o == &task_operation_)
      return 0;
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());
  std::size_t task_result = o->task_result_;

  // If other handlers remain and multiple threads may be running, wake one
  // of them; either way the lock is released before invoking the handler.
  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &private_op_queue };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(*this, ec, task_result);

  return 1;
}
void task_io_service::stop_all_threads(
mutex::scoped_lock& lock)
{
@@ -318,10 +439,10 @@ void task_io_service::stop_all_threads(
while (first_idle_thread_)
{
idle_thread_info* idle_thread = first_idle_thread_;
thread_info* idle_thread = first_idle_thread_;
first_idle_thread_ = idle_thread->next;
idle_thread->next = 0;
idle_thread->wakeup_event.signal(lock);
idle_thread->wakeup_event->signal(lock);
}
if (!task_interrupted_ && task_)
@@ -336,10 +457,10 @@ bool task_io_service::wake_one_idle_thread_and_unlock(
{
if (first_idle_thread_)
{
idle_thread_info* idle_thread = first_idle_thread_;
thread_info* idle_thread = first_idle_thread_;
first_idle_thread_ = idle_thread->next;
idle_thread->next = 0;
idle_thread->wakeup_event.signal_and_unlock(lock);
idle_thread->wakeup_event->signal_and_unlock(lock);
return true;
}
return false;

View File

@@ -19,7 +19,6 @@
#if defined(BOOST_ASIO_HAS_IOCP)
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/completion_handler.hpp>
#include <boost/asio/detail/fenced_block.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
@@ -36,7 +35,7 @@ void win_iocp_io_service::dispatch(Handler handler)
{
if (call_stack<win_iocp_io_service>::contains(this))
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::full);
boost_asio_handler_invoke_helpers::invoke(handler, handler);
}
else

View File

@@ -62,7 +62,8 @@ struct win_iocp_io_service::timer_thread_function
win_iocp_io_service* io_service_;
};
win_iocp_io_service::win_iocp_io_service(boost::asio::io_service& io_service)
win_iocp_io_service::win_iocp_io_service(
boost::asio::io_service& io_service, size_t concurrency_hint)
: boost::asio::detail::service_base<win_iocp_io_service>(io_service),
iocp_(),
outstanding_work_(0),
@@ -71,10 +72,7 @@ win_iocp_io_service::win_iocp_io_service(boost::asio::io_service& io_service)
dispatch_required_(0)
{
BOOST_ASIO_HANDLER_TRACKING_INIT;
}
void win_iocp_io_service::init(size_t concurrency_hint)
{
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
static_cast<DWORD>((std::min<size_t>)(concurrency_hint, DWORD(~0))));
if (!iocp_.handle)

View File

@@ -62,12 +62,16 @@ public:
{
friend class kqueue_reactor;
friend class object_pool_access;
descriptor_state* next_;
descriptor_state* prev_;
bool op_queue_is_empty_[max_ops];
mutex mutex_;
int descriptor_;
op_queue<reactor_op> op_queue_[max_ops];
bool shutdown_;
descriptor_state* next_;
descriptor_state* prev_;
};
// Per-descriptor data.

View File

@@ -31,8 +31,16 @@ class macos_fenced_block
: private noncopyable
{
public:
// Constructor.
macos_fenced_block()
enum half_t { half };
enum full_t { full };
// Constructor for a half fenced block.
explicit macos_fenced_block(half_t)
{
}
// Constructor for a full fenced block.
explicit macos_fenced_block(full_t)
{
OSMemoryBarrier();
}

View File

@@ -25,8 +25,10 @@ class null_fenced_block
: private noncopyable
{
public:
enum half_or_full_t { half, full };
// Constructor.
null_fenced_block()
explicit null_fenced_block(half_or_full_t)
{
}

View File

@@ -20,6 +20,7 @@
#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
#include <cstring>
#include <boost/asio/detail/noncopyable.hpp>
#include <boost/asio/detail/socket_types.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -29,7 +30,7 @@ namespace asio {
namespace detail {
// Adapts the FD_SET type to meet the Descriptor_Set concept's requirements.
class posix_fd_set_adapter
class posix_fd_set_adapter : noncopyable
{
public:
posix_fd_set_adapter()
@@ -39,6 +40,12 @@ public:
FD_ZERO(&fd_set_);
}
void reset()
{
using namespace std; // Needed for memset on Solaris.
FD_ZERO(&fd_set_);
}
bool set(socket_type descriptor)
{
if (descriptor < (socket_type)FD_SETSIZE)

View File

@@ -100,6 +100,7 @@
# pragma warning (disable:4103)
# pragma warning (push)
# pragma warning (disable:4127)
# pragma warning (disable:4180)
# pragma warning (disable:4244)
# pragma warning (disable:4355)
# pragma warning (disable:4512)

View File

@@ -177,7 +177,7 @@ public:
const null_buffers&, boost::system::error_code& ec)
{
// Wait for descriptor to become ready.
descriptor_ops::poll_write(impl.descriptor_, ec);
descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec);
return 0;
}
@@ -239,7 +239,7 @@ public:
const null_buffers&, boost::system::error_code& ec)
{
// Wait for descriptor to become ready.
descriptor_ops::poll_read(impl.descriptor_, ec);
descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec);
return 0;
}

View File

@@ -47,7 +47,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_null_buffers_op* o(static_cast<reactive_null_buffers_op*>(base));
@@ -69,7 +70,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -95,7 +95,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_accept_op* o(static_cast<reactive_socket_accept_op*>(base));
@@ -117,7 +118,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -64,7 +64,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_connect_op* o
@@ -87,7 +88,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -82,7 +82,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_recv_op* o(static_cast<reactive_socket_recv_op*>(base));
@@ -104,7 +105,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -91,7 +91,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_recvfrom_op* o(
@@ -114,7 +115,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -83,7 +83,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_recvmsg_op* o(
@@ -106,7 +107,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -79,7 +79,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_send_op* o(static_cast<reactive_socket_send_op*>(base));
@@ -101,7 +102,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -82,7 +82,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_sendto_op* o(static_cast<reactive_socket_sendto_op*>(base));
@@ -104,7 +105,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -204,7 +204,7 @@ public:
boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_write(impl.socket_, ec);
socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -278,7 +278,7 @@ public:
boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, ec);
socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();

View File

@@ -188,7 +188,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_write(impl.socket_, ec);
socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -253,7 +253,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, ec);
socket_ops::poll_read(impl.socket_, impl.state_, ec);
return 0;
}
@@ -327,7 +327,7 @@ public:
socket_base::message_flags& out_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, ec);
socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Clear out_flags, since we cannot give it any other sensible value when
// performing a null_buffers operation.

View File

@@ -53,7 +53,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the operation object.
resolve_endpoint_op* o(static_cast<resolve_endpoint_op*>(base));
@@ -96,7 +97,7 @@ public:
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "..."));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -61,7 +61,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the operation object.
resolve_op* o(static_cast<resolve_op*>(base));
@@ -106,7 +107,7 @@ public:
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "..."));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -24,6 +24,7 @@
#include <boost/limits.hpp>
#include <cstddef>
#include <boost/asio/detail/fd_set_adapter.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/op_queue.hpp>
#include <boost/asio/detail/reactor_op.hpp>
@@ -183,6 +184,9 @@ private:
// The queues of read, write and except operations.
reactor_op_queue<socket_type> op_queue_[max_ops];
// The file descriptor sets to be passed to the select system call.
fd_set_adapter fd_sets_[max_select_ops];
// The timer queues.
timer_queue_set timer_queues_;

View File

@@ -52,8 +52,10 @@ class service_registry
: private noncopyable
{
public:
// Constructor.
BOOST_ASIO_DECL service_registry(boost::asio::io_service& o);
// Constructor. Adds the initial service.
template <typename Service, typename Arg>
service_registry(boost::asio::io_service& o,
Service* initial_service, Arg arg);
// Destructor.
BOOST_ASIO_DECL ~service_registry();
@@ -61,6 +63,11 @@ public:
// Notify all services of a fork event.
BOOST_ASIO_DECL void notify_fork(boost::asio::io_service::fork_event fork_ev);
// Get the first service object cast to the specified type. Called during
// io_service construction and so performs no locking or type checking.
template <typename Service>
Service& first_service();
// Get the service object corresponding to the specified service type. Will
// create a new service object automatically if no such object already
// exists. Ownership of the service object is not transferred to the caller.
@@ -123,8 +130,8 @@ private:
const boost::asio::io_service::service::key& key,
factory_type factory);
// Add a service object. Returns false on error, in which case ownership of
// the object is retained by the caller.
// Add a service object. Throws on error, in which case ownership of the
// object is retained by the caller.
BOOST_ASIO_DECL void do_add_service(
const boost::asio::io_service::service::key& key,
boost::asio::io_service::service* new_service);

View File

@@ -40,7 +40,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
signal_handler* h(static_cast<signal_handler*>(base));
@@ -62,7 +63,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -17,8 +17,8 @@
#include <boost/asio/detail/config.hpp>
#include <csignal>
#include <cstddef>
#include <signal.h>
#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>

View File

@@ -254,9 +254,11 @@ BOOST_ASIO_DECL int ioctl(socket_type s, state_type& state,
BOOST_ASIO_DECL int select(int nfds, fd_set* readfds, fd_set* writefds,
fd_set* exceptfds, timeval* timeout, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_read(socket_type s, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_read(socket_type s,
state_type state, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_write(socket_type s, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_write(socket_type s,
state_type state, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_connect(socket_type s, boost::system::error_code& ec);

View File

@@ -31,8 +31,16 @@ class solaris_fenced_block
: private noncopyable
{
public:
// Constructor.
solaris_fenced_block()
enum half_t { half };
enum full_t { full };
// Constructor for a half fenced block.
explicit solaris_fenced_block(half_t)
{
}
// Constructor for a full fenced block.
explicit solaris_fenced_block(full_t)
{
membar_consumer();
}

View File

@@ -95,7 +95,7 @@ private:
BOOST_ASIO_DECL void do_post(implementation_type& impl, operation* op);
BOOST_ASIO_DECL static void do_complete(io_service_impl* owner,
operation* base, boost::system::error_code ec,
operation* base, const boost::system::error_code& ec,
std::size_t bytes_transferred);
// The io_service implementation used to post completions.

View File

@@ -22,6 +22,7 @@
#include <boost/system/error_code.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/atomic_count.hpp>
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/op_queue.hpp>
#include <boost/asio/detail/reactor_fwd.hpp>
@@ -40,11 +41,10 @@ class task_io_service
public:
typedef task_io_service_operation operation;
// Constructor.
BOOST_ASIO_DECL task_io_service(boost::asio::io_service& io_service);
// How many concurrent threads are likely to run the io_service.
BOOST_ASIO_DECL void init(std::size_t concurrency_hint);
// Constructor. Specifies the number of concurrent threads that are likely to
// run the io_service. If set to 1 certain optimisation are performed.
BOOST_ASIO_DECL task_io_service(boost::asio::io_service& io_service,
std::size_t concurrency_hint = 0);
// Destroy all user-defined handler objects owned by the service.
BOOST_ASIO_DECL void shutdown_service();
@@ -86,6 +86,12 @@ public:
stop();
}
// Return whether a handler can be dispatched immediately.
bool can_dispatch()
{
return thread_call_stack::contains(this);
}
// Request invocation of the given handler.
template <typename Handler>
void dispatch(Handler handler);
@@ -112,11 +118,17 @@ public:
private:
// Structure containing information about an idle thread.
struct idle_thread_info;
struct thread_info;
// Run at most one operation. Blocks only if this_idle_thread is non-null.
BOOST_ASIO_DECL std::size_t do_one(mutex::scoped_lock& lock,
idle_thread_info* this_idle_thread);
BOOST_ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock,
thread_info& this_thread, op_queue<operation>& private_op_queue,
const boost::system::error_code& ec);
// Poll for at most one operation.
BOOST_ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock,
op_queue<operation>& private_op_queue,
const boost::system::error_code& ec);
// Stop the task and all idle threads.
BOOST_ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock);
@@ -135,8 +147,12 @@ private:
struct task_cleanup;
friend struct task_cleanup;
// Helper class to call work_finished() on block exit.
struct work_finished_on_block_exit;
// Helper class to call work-related operations on block exit.
struct work_cleanup;
friend struct work_cleanup;
// Whether to optimise for single-threaded use cases.
const bool one_thread_;
// Mutex to protect access to internal data.
mutable mutex mutex_;
@@ -165,8 +181,11 @@ private:
// Flag to indicate that the dispatcher has been shut down.
bool shutdown_;
// Per-thread call stack to track the state of each thread in the io_service.
typedef call_stack<task_io_service, thread_info> thread_call_stack;
// The threads that are currently idle.
idle_thread_info* first_idle_thread_;
thread_info* first_idle_thread_;
};
} // namespace detail

View File

@@ -31,9 +31,10 @@ namespace detail {
class task_io_service_operation BOOST_ASIO_INHERIT_TRACKED_HANDLER
{
public:
void complete(task_io_service& owner)
void complete(task_io_service& owner,
const boost::system::error_code& ec, std::size_t bytes_transferred)
{
func_(&owner, this, boost::system::error_code(), 0);
func_(&owner, this, ec, bytes_transferred);
}
void destroy()
@@ -44,11 +45,12 @@ public:
protected:
typedef void (*func_type)(task_io_service*,
task_io_service_operation*,
boost::system::error_code, std::size_t);
const boost::system::error_code&, std::size_t);
task_io_service_operation(func_type func)
: next_(0),
func_(func)
func_(func),
task_result_(0)
{
}
@@ -61,6 +63,9 @@ private:
friend class op_queue_access;
task_io_service_operation* next_;
func_type func_;
protected:
friend class task_io_service;
unsigned int task_result_; // Passed into bytes transferred.
};
} // namespace detail

View File

@@ -159,12 +159,15 @@ public:
// Dequeue all timers not later than the current time.
virtual void get_ready_timers(op_queue<operation>& ops)
{
const time_type now = Time_Traits::now();
while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_))
if (!heap_.empty())
{
per_timer_data* timer = heap_[0].timer_;
ops.push(timer->op_queue_);
remove_timer(*timer);
const time_type now = Time_Traits::now();
while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_))
{
per_timer_data* timer = heap_[0].timer_;
ops.push(timer->op_queue_);
remove_timer(*timer);
}
}
}

View File

@@ -40,7 +40,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
wait_handler* h(static_cast<wait_handler*>(base));
@@ -62,7 +63,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -19,6 +19,7 @@
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
#include <boost/asio/detail/noncopyable.hpp>
#include <boost/asio/detail/socket_types.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -28,39 +29,67 @@ namespace asio {
namespace detail {
// Adapts the FD_SET type to meet the Descriptor_Set concept's requirements.
class win_fd_set_adapter
class win_fd_set_adapter : noncopyable
{
public:
enum { win_fd_set_size = 1024 };
enum { default_fd_set_size = 1024 };
win_fd_set_adapter()
: max_descriptor_(invalid_socket)
: capacity_(default_fd_set_size),
max_descriptor_(invalid_socket)
{
fd_set_.fd_count = 0;
fd_set_ = static_cast<win_fd_set*>(::operator new(
sizeof(win_fd_set) - sizeof(SOCKET)
+ sizeof(SOCKET) * (capacity_)));
fd_set_->fd_count = 0;
}
~win_fd_set_adapter()
{
::operator delete(fd_set_);
}
void reset()
{
fd_set_->fd_count = 0;
max_descriptor_ = invalid_socket;
}
bool set(socket_type descriptor)
{
for (u_int i = 0; i < fd_set_.fd_count; ++i)
if (fd_set_.fd_array[i] == descriptor)
for (u_int i = 0; i < fd_set_->fd_count; ++i)
if (fd_set_->fd_array[i] == descriptor)
return true;
if (fd_set_.fd_count < win_fd_set_size)
if (fd_set_->fd_count == capacity_)
{
fd_set_.fd_array[fd_set_.fd_count++] = descriptor;
return true;
u_int new_capacity = capacity_ + capacity_ / 2;
win_fd_set* new_fd_set = static_cast<win_fd_set*>(::operator new(
sizeof(win_fd_set) - sizeof(SOCKET)
+ sizeof(SOCKET) * (new_capacity)));
new_fd_set->fd_count = fd_set_->fd_count;
for (u_int i = 0; i < fd_set_->fd_count; ++i)
new_fd_set->fd_array[i] = fd_set_->fd_array[i];
::operator delete(fd_set_);
fd_set_ = new_fd_set;
capacity_ = new_capacity;
}
return false;
fd_set_->fd_array[fd_set_->fd_count++] = descriptor;
return true;
}
bool is_set(socket_type descriptor) const
{
return !!__WSAFDIsSet(descriptor,
const_cast<fd_set*>(reinterpret_cast<const fd_set*>(&fd_set_)));
const_cast<fd_set*>(reinterpret_cast<const fd_set*>(fd_set_)));
}
operator fd_set*()
{
return reinterpret_cast<fd_set*>(&fd_set_);
return reinterpret_cast<fd_set*>(fd_set_);
}
socket_type max_descriptor() const
@@ -69,15 +98,19 @@ public:
}
private:
// This structure is defined to be compatible with the Windows API fd_set
// structure, but without being dependent on the value of FD_SETSIZE.
// structure, but without being dependent on the value of FD_SETSIZE. We use
// the "struct hack" to allow the number of descriptors to be varied at
// runtime.
struct win_fd_set
{
u_int fd_count;
SOCKET fd_array[win_fd_set_size];
SOCKET fd_array[1];
};
win_fd_set fd_set_;
win_fd_set* fd_set_;
u_int capacity_;
socket_type max_descriptor_;
};

View File

@@ -31,8 +31,16 @@ class win_fenced_block
: private noncopyable
{
public:
// Constructor.
win_fenced_block()
enum half_t { half };
enum full_t { full };
// Constructor for a half fenced block.
explicit win_fenced_block(half_t)
{
}
// Constructor for a full fenced block.
explicit win_fenced_block(full_t)
{
#if defined(__BORLANDC__)
LONG barrier = 0;

View File

@@ -50,8 +50,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& result_ec,
std::size_t bytes_transferred)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_handle_read_op* o(static_cast<win_iocp_handle_read_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -85,7 +88,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -49,7 +49,7 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& ec, std::size_t bytes_transferred)
{
// Take ownership of the operation object.
win_iocp_handle_write_op* o(static_cast<win_iocp_handle_write_op*>(base));
@@ -80,7 +80,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -21,6 +21,7 @@
#include <boost/limits.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/op_queue.hpp>
#include <boost/asio/detail/scoped_ptr.hpp>
@@ -45,10 +46,11 @@ class win_iocp_io_service
: public boost::asio::detail::service_base<win_iocp_io_service>
{
public:
// Constructor.
BOOST_ASIO_DECL win_iocp_io_service(boost::asio::io_service& io_service);
BOOST_ASIO_DECL void init(size_t concurrency_hint);
// Constructor. Specifies a concurrency hint that is passed through to the
// underlying I/O completion port.
BOOST_ASIO_DECL win_iocp_io_service(boost::asio::io_service& io_service,
size_t concurrency_hint = 0);
// Destroy all user-defined handler objects owned by the service.
BOOST_ASIO_DECL void shutdown_service();
@@ -102,6 +104,12 @@ public:
stop();
}
// Return whether a handler can be dispatched immediately.
bool can_dispatch()
{
return call_stack<win_iocp_io_service>::contains(this) != 0;
}
// Request invocation of the given handler.
template <typename Handler>
void dispatch(Handler handler);

View File

@@ -56,8 +56,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& result_ec,
std::size_t bytes_transferred)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_null_buffers_op* o(static_cast<win_iocp_null_buffers_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -95,7 +98,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -38,8 +38,8 @@ class win_iocp_operation
{
public:
void complete(win_iocp_io_service& owner,
const boost::system::error_code& ec = boost::system::error_code(),
std::size_t bytes_transferred = 0)
const boost::system::error_code& ec,
std::size_t bytes_transferred)
{
func_(&owner, this, ec, bytes_transferred);
}
@@ -50,8 +50,9 @@ public:
}
protected:
typedef void (*func_type)(win_iocp_io_service*,
win_iocp_operation*, boost::system::error_code, std::size_t);
typedef void (*func_type)(
win_iocp_io_service*, win_iocp_operation*,
const boost::system::error_code&, std::size_t);
win_iocp_operation(func_type func)
: next_(0),

View File

@@ -46,7 +46,7 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& ec, std::size_t bytes_transferred)
{
// Take ownership of the operation object.
win_iocp_overlapped_op* o(static_cast<win_iocp_overlapped_op*>(base));
@@ -68,7 +68,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -73,8 +73,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t /*bytes_transferred*/)
const boost::system::error_code& result_ec,
std::size_t /*bytes_transferred*/)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_socket_accept_op* o(static_cast<win_iocp_socket_accept_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -134,7 +137,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -53,8 +53,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& result_ec,
std::size_t bytes_transferred)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_socket_recv_op* o(static_cast<win_iocp_socket_recv_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -89,7 +92,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -59,8 +59,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& result_ec,
std::size_t bytes_transferred)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_socket_recvfrom_op* o(
static_cast<win_iocp_socket_recvfrom_op*>(base));
@@ -96,7 +99,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -55,8 +55,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& result_ec,
std::size_t bytes_transferred)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_socket_recvmsg_op* o(
static_cast<win_iocp_socket_recvmsg_op*>(base));
@@ -90,7 +93,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -51,8 +51,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
boost::system::error_code ec, std::size_t bytes_transferred)
const boost::system::error_code& result_ec,
std::size_t bytes_transferred)
{
boost::system::error_code ec(result_ec);
// Take ownership of the operation object.
win_iocp_socket_send_op* o(static_cast<win_iocp_socket_send_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -84,7 +87,7 @@ public:
// Make the upcall if required.
if (owner)
{
boost::asio::detail::fenced_block b;
fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;

View File

@@ -281,7 +281,7 @@ public:
boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_write(impl.socket_, ec);
socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -358,7 +358,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, ec);
socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();

View File

@@ -206,7 +206,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_write(impl.socket_, ec);
socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -273,7 +273,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, ec);
socket_ops::poll_read(impl.socket_, impl.state_, ec);
return 0;
}
@@ -343,7 +343,7 @@ public:
socket_base::message_flags& out_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, ec);
socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Clear out_flags, since we cannot give it any other sensible value when
// performing a null_buffers operation.

View File

@@ -33,6 +33,13 @@ inline Service& use_service(io_service& ios)
return ios.service_registry_->template use_service<Service>();
}
template <>
inline detail::io_service_impl& use_service<detail::io_service_impl>(
io_service& ios)
{
return ios.impl_;
}
template <typename Service>
inline void add_service(io_service& ios, Service* svc)
{
@@ -102,25 +109,25 @@ io_service::wrap(Handler handler)
}
inline io_service::work::work(boost::asio::io_service& io_service)
: io_service_(io_service)
: io_service_impl_(io_service.impl_)
{
io_service_.impl_.work_started();
io_service_impl_.work_started();
}
inline io_service::work::work(const work& other)
: io_service_(other.io_service_)
: io_service_impl_(other.io_service_impl_)
{
io_service_.impl_.work_started();
io_service_impl_.work_started();
}
inline io_service::work::~work()
{
io_service_.impl_.work_finished();
io_service_impl_.work_finished();
}
inline boost::asio::io_service& io_service::work::get_io_service()
{
return io_service_;
return io_service_impl_.get_io_service();
}
inline boost::asio::io_service& io_service::service::get_io_service()

View File

@@ -18,6 +18,7 @@
#include <boost/asio/detail/config.hpp>
#include <boost/limits.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/scoped_ptr.hpp>
#include <boost/asio/detail/service_registry.hpp>
#include <boost/asio/detail/throw_error.hpp>
@@ -33,17 +34,18 @@ namespace boost {
namespace asio {
io_service::io_service()
: service_registry_(new boost::asio::detail::service_registry(*this)),
impl_(service_registry_->use_service<impl_type>())
: service_registry_(new boost::asio::detail::service_registry(
*this, static_cast<impl_type*>(0),
(std::numeric_limits<std::size_t>::max)())),
impl_(service_registry_->first_service<impl_type>())
{
impl_.init((std::numeric_limits<std::size_t>::max)());
}
io_service::io_service(std::size_t concurrency_hint)
: service_registry_(new boost::asio::detail::service_registry(*this)),
impl_(service_registry_->use_service<impl_type>())
: service_registry_(new boost::asio::detail::service_registry(
*this, static_cast<impl_type*>(0), concurrency_hint)),
impl_(service_registry_->first_service<impl_type>())
{
impl_.init(concurrency_hint);
}
io_service::~io_service()

View File

@@ -18,9 +18,11 @@
#include <algorithm>
#include <boost/asio/buffer.hpp>
#include <boost/asio/completion_condition.hpp>
#include <boost/asio/detail/array_fwd.hpp>
#include <boost/asio/detail/base_from_completion_cond.hpp>
#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/consuming_buffers.hpp>
#include <boost/asio/detail/dependent_type.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/handler_type_requirements.hpp>
@@ -280,6 +282,168 @@ namespace detail
ReadHandler handler_;
};
// Optimised specialisation of read_op for buffer sequences that are
// boost::arrays of exactly two buffers. The generic implementation tracks
// progress through consuming_buffers; here the two buffers can be adjusted
// in place, avoiding that overhead.
template <typename AsyncReadStream, typename Elem,
    typename CompletionCondition, typename ReadHandler>
class read_op<AsyncReadStream, boost::array<Elem, 2>,
    CompletionCondition, ReadHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  read_op(AsyncReadStream& stream, const boost::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, ReadHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      stream_(stream),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  read_op(const read_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  read_op(read_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). The switch/for interleaving gives
  // stackless-coroutine-like resumption: initiation enters at "case 1:",
  // while completion callbacks jump straight to the "default:" label inside
  // the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        boost::array<boost::asio::mutable_buffer, 2> >::type bufs = {{
      boost::asio::mutable_buffer(buffers_[0]),
      boost::asio::mutable_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Ask the completion condition for the maximum number of bytes to
      // transfer in the first read.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // Advance the first buffer past the bytes already transferred and
        // cap it at n; the second buffer covers whatever portion of n did
        // not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        stream_.async_read_some(bufs, BOOST_ASIO_MOVE_CAST(read_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Stop when no progress was made without an error, when the
        // completion condition is satisfied (returns 0), or when both
        // buffers are completely full.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncReadStream& stream_;
  boost::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  ReadHandler handler_;
};
#if defined(BOOST_ASIO_HAS_STD_ARRAY)

// std::array counterpart of the two-buffer read_op specialisation: the two
// buffers are adjusted in place on each iteration rather than going through
// the generic consuming_buffers machinery.
template <typename AsyncReadStream, typename Elem,
    typename CompletionCondition, typename ReadHandler>
class read_op<AsyncReadStream, std::array<Elem, 2>,
    CompletionCondition, ReadHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  read_op(AsyncReadStream& stream, const std::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, ReadHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      stream_(stream),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  read_op(const read_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  read_op(read_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). Initiation enters at "case 1:"; completion
  // callbacks resume at the "default:" label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        std::array<boost::asio::mutable_buffer, 2> >::type bufs = {{
      boost::asio::mutable_buffer(buffers_[0]),
      boost::asio::mutable_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already read, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        stream_.async_read_some(bufs, BOOST_ASIO_MOVE_CAST(read_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers full.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncReadStream& stream_;
  std::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  ReadHandler handler_;
};

#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
template <typename AsyncReadStream, typename MutableBufferSequence,
typename CompletionCondition, typename ReadHandler>
inline void* asio_handler_allocate(std::size_t size,

View File

@@ -18,9 +18,11 @@
#include <algorithm>
#include <boost/asio/buffer.hpp>
#include <boost/asio/completion_condition.hpp>
#include <boost/asio/detail/array_fwd.hpp>
#include <boost/asio/detail/base_from_completion_cond.hpp>
#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/consuming_buffers.hpp>
#include <boost/asio/detail/dependent_type.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/handler_type_requirements.hpp>
@@ -300,6 +302,180 @@ namespace detail
ReadHandler handler_;
};
// Optimised specialisation of read_at_op for buffer sequences that are
// boost::arrays of exactly two buffers, for use with random-access devices.
// Each intermediate read is issued at offset_ plus the bytes transferred so
// far, and the two buffers are adjusted in place.
template <typename AsyncRandomAccessReadDevice, typename Elem,
    typename CompletionCondition, typename ReadHandler>
class read_at_op<AsyncRandomAccessReadDevice, boost::array<Elem, 2>,
    CompletionCondition, ReadHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  read_at_op(AsyncRandomAccessReadDevice& device,
      boost::uint64_t offset, const boost::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, ReadHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      device_(device),
      offset_(offset),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  read_at_op(const read_at_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  read_at_op(read_at_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). The switch/for interleaving resumes completion
  // callbacks at the "default:" label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        boost::array<boost::asio::mutable_buffer, 2> >::type bufs = {{
      boost::asio::mutable_buffer(buffers_[0]),
      boost::asio::mutable_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already read, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        // Each read is issued at the position following the data read so far.
        device_.async_read_some_at(offset_ + total_transferred_,
            bufs, BOOST_ASIO_MOVE_CAST(read_at_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers full.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncRandomAccessReadDevice& device_;
  boost::uint64_t offset_;
  boost::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  ReadHandler handler_;
};
#if defined(BOOST_ASIO_HAS_STD_ARRAY)

// std::array counterpart of the two-buffer read_at_op specialisation for
// random-access devices. Each intermediate read is issued at offset_ plus
// the bytes transferred so far, and the two buffers are adjusted in place.
template <typename AsyncRandomAccessReadDevice, typename Elem,
    typename CompletionCondition, typename ReadHandler>
class read_at_op<AsyncRandomAccessReadDevice, std::array<Elem, 2>,
    CompletionCondition, ReadHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  read_at_op(AsyncRandomAccessReadDevice& device,
      boost::uint64_t offset, const std::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, ReadHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      device_(device),
      offset_(offset),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  read_at_op(const read_at_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  read_at_op(read_at_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). Completion callbacks resume at the "default:"
  // label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        std::array<boost::asio::mutable_buffer, 2> >::type bufs = {{
      boost::asio::mutable_buffer(buffers_[0]),
      boost::asio::mutable_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already read, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        // Each read is issued at the position following the data read so far.
        device_.async_read_some_at(offset_ + total_transferred_,
            bufs, BOOST_ASIO_MOVE_CAST(read_at_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers full.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncRandomAccessReadDevice& device_;
  boost::uint64_t offset_;
  std::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  ReadHandler handler_;
};

#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
template <typename AsyncRandomAccessReadDevice,
typename MutableBufferSequence, typename CompletionCondition,
typename ReadHandler>

View File

@@ -861,7 +861,7 @@ namespace detail
inline read_until_expr_op<AsyncReadStream, Allocator, RegEx, ReadHandler>
make_read_until_expr_op(AsyncReadStream& s,
boost::asio::basic_streambuf<Allocator>& b,
const boost::regex& expr, ReadHandler handler)
const RegEx& expr, ReadHandler handler)
{
return read_until_expr_op<AsyncReadStream, Allocator, RegEx, ReadHandler>(
s, b, expr, handler);

View File

@@ -17,9 +17,11 @@
#include <boost/asio/buffer.hpp>
#include <boost/asio/completion_condition.hpp>
#include <boost/asio/detail/array_fwd.hpp>
#include <boost/asio/detail/base_from_completion_cond.hpp>
#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/consuming_buffers.hpp>
#include <boost/asio/detail/dependent_type.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/handler_type_requirements.hpp>
@@ -339,6 +341,168 @@ namespace detail
WriteHandler handler_;
};
// Optimised specialisation of write_op for buffer sequences that are
// boost::arrays of exactly two buffers. The two const buffers are adjusted
// in place on each iteration, avoiding the generic consuming_buffers
// machinery.
template <typename AsyncWriteStream, typename Elem,
    typename CompletionCondition, typename WriteHandler>
class write_op<AsyncWriteStream, boost::array<Elem, 2>,
    CompletionCondition, WriteHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  write_op(AsyncWriteStream& stream, const boost::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, WriteHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      stream_(stream),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  write_op(const write_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  write_op(write_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). Initiation enters at "case 1:"; completion
  // callbacks resume at the "default:" label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        boost::array<boost::asio::const_buffer, 2> >::type bufs = {{
      boost::asio::const_buffer(buffers_[0]),
      boost::asio::const_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already written, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        stream_.async_write_some(bufs, BOOST_ASIO_MOVE_CAST(write_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers completely written.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncWriteStream& stream_;
  boost::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  WriteHandler handler_;
};
#if defined(BOOST_ASIO_HAS_STD_ARRAY)

// std::array counterpart of the two-buffer write_op specialisation: the two
// const buffers are adjusted in place on each iteration rather than going
// through the generic consuming_buffers machinery.
template <typename AsyncWriteStream, typename Elem,
    typename CompletionCondition, typename WriteHandler>
class write_op<AsyncWriteStream, std::array<Elem, 2>,
    CompletionCondition, WriteHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  write_op(AsyncWriteStream& stream, const std::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, WriteHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      stream_(stream),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  write_op(const write_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  write_op(write_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      stream_(other.stream_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). Completion callbacks resume at the "default:"
  // label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        std::array<boost::asio::const_buffer, 2> >::type bufs = {{
      boost::asio::const_buffer(buffers_[0]),
      boost::asio::const_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already written, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        stream_.async_write_some(bufs, BOOST_ASIO_MOVE_CAST(write_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers completely written.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncWriteStream& stream_;
  std::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  WriteHandler handler_;
};

#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
template <typename AsyncWriteStream, typename ConstBufferSequence,
typename CompletionCondition, typename WriteHandler>
inline void* asio_handler_allocate(std::size_t size,

View File

@@ -17,9 +17,11 @@
#include <boost/asio/buffer.hpp>
#include <boost/asio/completion_condition.hpp>
#include <boost/asio/detail/array_fwd.hpp>
#include <boost/asio/detail/base_from_completion_cond.hpp>
#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/consuming_buffers.hpp>
#include <boost/asio/detail/dependent_type.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/handler_type_requirements.hpp>
@@ -362,6 +364,180 @@ namespace detail
WriteHandler handler_;
};
// Optimised specialisation of write_at_op for buffer sequences that are
// boost::arrays of exactly two buffers, for use with random-access devices.
// Each intermediate write is issued at offset_ plus the bytes transferred so
// far, and the two const buffers are adjusted in place.
template <typename AsyncRandomAccessWriteDevice, typename Elem,
    typename CompletionCondition, typename WriteHandler>
class write_at_op<AsyncRandomAccessWriteDevice, boost::array<Elem, 2>,
    CompletionCondition, WriteHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  write_at_op(AsyncRandomAccessWriteDevice& device,
      boost::uint64_t offset, const boost::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, WriteHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      device_(device),
      offset_(offset),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  write_at_op(const write_at_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  write_at_op(write_at_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). Completion callbacks resume at the "default:"
  // label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        boost::array<boost::asio::const_buffer, 2> >::type bufs = {{
      boost::asio::const_buffer(buffers_[0]),
      boost::asio::const_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already written, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        // Each write is issued at the position following the data written
        // so far.
        device_.async_write_some_at(offset_ + total_transferred_,
            bufs, BOOST_ASIO_MOVE_CAST(write_at_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers completely written.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncRandomAccessWriteDevice& device_;
  boost::uint64_t offset_;
  boost::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  WriteHandler handler_;
};
#if defined(BOOST_ASIO_HAS_STD_ARRAY)

// std::array counterpart of the two-buffer write_at_op specialisation for
// random-access devices. Each intermediate write is issued at offset_ plus
// the bytes transferred so far, and the two const buffers are adjusted in
// place.
template <typename AsyncRandomAccessWriteDevice, typename Elem,
    typename CompletionCondition, typename WriteHandler>
class write_at_op<AsyncRandomAccessWriteDevice, std::array<Elem, 2>,
    CompletionCondition, WriteHandler>
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Construct the composed operation. The handler is taken by reference and
  // moved from where move support is available.
  write_at_op(AsyncRandomAccessWriteDevice& device,
      boost::uint64_t offset, const std::array<Elem, 2>& buffers,
      CompletionCondition completion_condition, WriteHandler& handler)
    : detail::base_from_completion_cond<
        CompletionCondition>(completion_condition),
      device_(device),
      offset_(offset),
      buffers_(buffers),
      total_transferred_(0),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(handler))
  {
  }

#if defined(BOOST_ASIO_HAS_MOVE)
  // Explicit copy and move constructors so the operation can be passed
  // through the intermediate-handler machinery efficiently.
  write_at_op(const write_at_op& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(other.handler_)
  {
  }

  write_at_op(write_at_op&& other)
    : detail::base_from_completion_cond<CompletionCondition>(other),
      device_(other.device_),
      offset_(other.offset_),
      buffers_(other.buffers_),
      total_transferred_(other.total_transferred_),
      handler_(BOOST_ASIO_MOVE_CAST(WriteHandler)(other.handler_))
  {
  }
#endif // defined(BOOST_ASIO_HAS_MOVE)

  // Invoked once on initiation (start == 1) and again on each intermediate
  // completion (start == 0). Completion callbacks resume at the "default:"
  // label inside the loop.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    // dependent_type keeps the buffer array type dependent on the template
    // parameters, deferring its instantiation.
    typename boost::asio::detail::dependent_type<Elem,
        std::array<boost::asio::const_buffer, 2> >::type bufs = {{
      boost::asio::const_buffer(buffers_[0]),
      boost::asio::const_buffer(buffers_[1]) }};
    std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
    std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
    std::size_t n = 0;
    switch (start)
    {
      case 1:
      // Maximum number of bytes the completion condition allows next.
      n = this->check_for_completion(ec, total_transferred_);
      for (;;)
      {
        // First buffer: skip what was already written, capped at n. Second
        // buffer: the remainder of n that did not fit in the first.
        bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
        bufs[1] = boost::asio::buffer(
            bufs[1] + (total_transferred_ < buffer_size0
              ? 0 : total_transferred_ - buffer_size0),
            n - boost::asio::buffer_size(bufs[0]));
        // Each write is issued at the position following the data written
        // so far.
        device_.async_write_some_at(offset_ + total_transferred_,
            bufs, BOOST_ASIO_MOVE_CAST(write_at_op)(*this));
        return; default:
        total_transferred_ += bytes_transferred;
        // Finish on error-free zero-byte transfer, a satisfied completion
        // condition, or both buffers completely written.
        if ((!ec && bytes_transferred == 0)
            || (n = this->check_for_completion(ec, total_transferred_)) == 0
            || total_transferred_ == buffer_size0 + buffer_size1)
          break;
      }

      // Deliver the final result; the cast ensures the handler sees the
      // byte count as a const value.
      handler_(ec, static_cast<const std::size_t&>(total_transferred_));
    }
  }

//private:
  AsyncRandomAccessWriteDevice& device_;
  boost::uint64_t offset_;
  std::array<Elem, 2> buffers_;
  std::size_t total_transferred_;
  WriteHandler handler_;
};

#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
template <typename AsyncRandomAccessWriteDevice, typename ConstBufferSequence,
typename CompletionCondition, typename WriteHandler>
inline void* asio_handler_allocate(std::size_t size,

View File

@@ -658,8 +658,8 @@ private:
// Prevent assignment.
void operator=(const work& other);
// The io_service.
boost::asio::io_service& io_service_;
// The io_service implementation.
detail::io_service_impl& io_service_impl_;
};
/// Class used to uniquely identify a service.

View File

@@ -195,9 +195,13 @@ const boost::system::error_code& engine::map_error_code(
return ec;
// Otherwise, the peer should have negotiated a proper shutdown.
ec = boost::system::error_code(
ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ),
boost::asio::error::get_ssl_category());
if ((::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) == 0)
{
ec = boost::system::error_code(
ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ),
boost::asio::error::get_ssl_category());
}
return ec;
}

View File

@@ -38,7 +38,7 @@ public:
{
::SSL_library_init();
::SSL_load_error_strings();
::OpenSSL_add_ssl_algorithms();
::OpenSSL_add_all_algorithms();
mutexes_.resize(::CRYPTO_num_locks());
for (size_t i = 0; i < mutexes_.size(); ++i)

View File

@@ -34,13 +34,17 @@ namespace detail {
struct stream_core
{
// According to the OpenSSL documentation, this is the buffer size that is
// sufficient to hold the largest possible TLS record.
enum { max_tls_record_size = 17 * 1024 };
stream_core(SSL_CTX* context, boost::asio::io_service& io_service)
: engine_(context),
pending_read_(io_service),
pending_write_(io_service),
output_buffer_space_(16384),
output_buffer_space_(max_tls_record_size),
output_buffer_(boost::asio::buffer(output_buffer_space_)),
input_buffer_space_(16384),
input_buffer_space_(max_tls_record_size),
input_buffer_(boost::asio::buffer(input_buffer_space_))
{
pending_read_.expires_at(boost::posix_time::neg_infin);

View File

@@ -19,6 +19,7 @@
// Test that header file is self-contained.
#include <boost/asio/ip/tcp.hpp>
#include <boost/array.hpp>
#include <boost/bind.hpp>
#include <cstring>
#include <boost/asio/io_service.hpp>
@@ -156,6 +157,12 @@ void test()
io_service ios;
char mutable_char_buffer[128] = "";
const char const_char_buffer[128] = "";
boost::array<boost::asio::mutable_buffer, 2> mutable_buffers = {{
boost::asio::buffer(mutable_char_buffer, 10),
boost::asio::buffer(mutable_char_buffer + 10, 10) }};
boost::array<boost::asio::const_buffer, 2> const_buffers = {{
boost::asio::buffer(const_char_buffer, 10),
boost::asio::buffer(const_char_buffer + 10, 10) }};
socket_base::message_flags in_flags = 0;
archetypes::settable_socket_option<void> settable_socket_option1;
archetypes::settable_socket_option<int> settable_socket_option2;
@@ -293,50 +300,75 @@ void test()
socket1.send(buffer(mutable_char_buffer));
socket1.send(buffer(const_char_buffer));
socket1.send(mutable_buffers);
socket1.send(const_buffers);
socket1.send(null_buffers());
socket1.send(buffer(mutable_char_buffer), in_flags);
socket1.send(buffer(const_char_buffer), in_flags);
socket1.send(mutable_buffers, in_flags);
socket1.send(const_buffers, in_flags);
socket1.send(null_buffers(), in_flags);
socket1.send(buffer(mutable_char_buffer), in_flags, ec);
socket1.send(buffer(const_char_buffer), in_flags, ec);
socket1.send(mutable_buffers, in_flags, ec);
socket1.send(const_buffers, in_flags, ec);
socket1.send(null_buffers(), in_flags, ec);
socket1.async_send(buffer(mutable_char_buffer), &send_handler);
socket1.async_send(buffer(const_char_buffer), &send_handler);
socket1.async_send(mutable_buffers, &send_handler);
socket1.async_send(const_buffers, &send_handler);
socket1.async_send(null_buffers(), &send_handler);
socket1.async_send(buffer(mutable_char_buffer), in_flags, &send_handler);
socket1.async_send(buffer(const_char_buffer), in_flags, &send_handler);
socket1.async_send(mutable_buffers, in_flags, &send_handler);
socket1.async_send(const_buffers, in_flags, &send_handler);
socket1.async_send(null_buffers(), in_flags, &send_handler);
socket1.receive(buffer(mutable_char_buffer));
socket1.receive(mutable_buffers);
socket1.receive(null_buffers());
socket1.receive(buffer(mutable_char_buffer), in_flags);
socket1.receive(mutable_buffers, in_flags);
socket1.receive(null_buffers(), in_flags);
socket1.receive(buffer(mutable_char_buffer), in_flags, ec);
socket1.receive(mutable_buffers, in_flags, ec);
socket1.receive(null_buffers(), in_flags, ec);
socket1.async_receive(buffer(mutable_char_buffer), &receive_handler);
socket1.async_receive(mutable_buffers, &receive_handler);
socket1.async_receive(null_buffers(), &receive_handler);
socket1.async_receive(buffer(mutable_char_buffer), in_flags,
&receive_handler);
socket1.async_receive(mutable_buffers, in_flags, &receive_handler);
socket1.async_receive(null_buffers(), in_flags, &receive_handler);
socket1.write_some(buffer(mutable_char_buffer));
socket1.write_some(buffer(const_char_buffer));
socket1.write_some(mutable_buffers);
socket1.write_some(const_buffers);
socket1.write_some(null_buffers());
socket1.write_some(buffer(mutable_char_buffer), ec);
socket1.write_some(buffer(const_char_buffer), ec);
socket1.write_some(mutable_buffers, ec);
socket1.write_some(const_buffers, ec);
socket1.write_some(null_buffers(), ec);
socket1.async_write_some(buffer(mutable_char_buffer), &write_some_handler);
socket1.async_write_some(buffer(const_char_buffer), &write_some_handler);
socket1.async_write_some(mutable_buffers, &write_some_handler);
socket1.async_write_some(const_buffers, &write_some_handler);
socket1.async_write_some(null_buffers(), &write_some_handler);
socket1.read_some(buffer(mutable_char_buffer));
socket1.read_some(mutable_buffers);
socket1.read_some(null_buffers());
socket1.read_some(buffer(mutable_char_buffer), ec);
socket1.read_some(mutable_buffers, ec);
socket1.read_some(null_buffers(), ec);
socket1.async_read_some(buffer(mutable_char_buffer), &read_some_handler);
socket1.async_read_some(mutable_buffers, &read_some_handler);
socket1.async_read_some(null_buffers(), &read_some_handler);
}
catch (std::exception&)

44
test/latency/Jamfile.v2 Normal file
View File

@@ -0,0 +1,44 @@
#
# Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
import os ;
if [ os.name ] = SOLARIS
{
lib socket ;
lib nsl ;
}
else if [ os.name ] = NT
{
lib ws2_32 ;
lib mswsock ;
}
else if [ os.name ] = HPUX
{
lib ipv6 ;
}
project
: requirements
<library>/boost/system//boost_system
<library>/boost/thread//boost_thread
<define>BOOST_ALL_NO_LIB=1
<threading>multi
<os>SOLARIS:<library>socket
<os>SOLARIS:<library>nsl
<os>NT:<define>_WIN32_WINNT=0x0501
<os>NT,<toolset>gcc:<library>ws2_32
<os>NT,<toolset>gcc:<library>mswsock
<os>NT,<toolset>gcc-cygwin:<define>__USE_W32_SOCKETS
<os>HPUX,<toolset>gcc:<define>_XOPEN_SOURCE_EXTENDED
<os>HPUX:<library>ipv6
;
exe tcp_server : tcp_server.cpp ;
exe tcp_client : tcp_client.cpp ;
exe udp_server : udp_server.cpp ;
exe udp_client : udp_client.cpp ;

View File

@@ -0,0 +1,52 @@
//
// allocator.hpp
// ~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ALLOCATOR_HPP
#define ALLOCATOR_HPP
#include <boost/aligned_storage.hpp>
// Space-efficient allocator for short-lived asynchronous handler objects.
class allocator
{
public:
  allocator()
    : in_use_(false)
  {
  }

  // Return storage for at least n bytes. The internal buffer is handed
  // out to at most one allocation at a time; a second concurrent
  // allocation, or one too large for the buffer, falls back to the heap.
  void* allocate(std::size_t n)
  {
    // Compare against the real buffer size instead of a duplicated magic
    // number. This also admits allocations of exactly sizeof(space_)
    // bytes, which the previous `n >= 1024` test needlessly rejected.
    if (in_use_ || n > sizeof(space_))
      return ::operator new(n);
    in_use_ = true;
    return static_cast<void*>(&space_);
  }

  // Release memory previously obtained from allocate().
  void deallocate(void* p)
  {
    if (p != static_cast<void*>(&space_))
      ::operator delete(p);
    else
      in_use_ = false;
  }

private:
  // Non-copyable: the buffer address is part of the object's identity.
  allocator(const allocator&);
  allocator& operator=(const allocator&);

  // Whether the reusable memory space is currently in use.
  bool in_use_;

  // The reusable memory space made available by the allocator.
  boost::aligned_storage<1024>::type space_;
};
#endif // ALLOCATOR_HPP

View File

@@ -0,0 +1,87 @@
//
// coroutine.hpp
// ~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef COROUTINE_HPP
#define COROUTINE_HPP
// Stackless coroutine state holder. The integer encodes progress:
// 0 = not yet started, > 0 = a recorded resume point, < 0 = the forked
// child side, with -1 reserved for "complete".
class coroutine
{
public:
  coroutine() : value_(0) {}

  // A negative state marks the forked child side of the coroutine.
  bool is_child() const { return value_ < 0; }

  // The parent side is everything that is not the child side.
  bool is_parent() const { return value_ >= 0; }

  // -1 is the terminal state, set when the body falls off the end.
  bool is_complete() const { return value_ == -1; }

private:
  friend class coroutine_ref;
  int value_;
};
// Scoped accessor used by the CORO_* macros to read and update a
// coroutine's state. If no assignment happens before the scope ends,
// the destructor marks the coroutine complete (-1).
class coroutine_ref
{
public:
  coroutine_ref(coroutine& c) : value_(c.value_), modified_(false) {}
  coroutine_ref(coroutine* c) : value_(c->value_), modified_(false) {}

  // Leaving the scope without recording a resume point means the body
  // ran to completion.
  ~coroutine_ref()
  {
    if (!modified_)
      value_ = -1;
  }

  operator int() const { return value_; }

  // Record a resume point (or termination state) in the coroutine.
  int& operator=(int v)
  {
    modified_ = true;
    value_ = v;
    return value_;
  }

private:
  void operator=(const coroutine_ref&); // non-assignable
  int& value_;
  bool modified_;
};
// CORO_REENTER(c): resume coroutine c at its recorded yield point.
// Implemented as a switch on the state: -1 (complete) bails straight
// out, 0 falls through into the start of the coroutine body.
#define CORO_REENTER(c) \
switch (coroutine_ref _coro_value = c) \
case -1: if (_coro_value) \
{ \
goto terminate_coroutine; \
terminate_coroutine: \
_coro_value = -1; \
goto bail_out_of_coroutine; \
bail_out_of_coroutine: \
break; \
} \
else case 0:
// CORO_YIELD_IMPL(n): record resume point n and break out of the
// enclosing CORO_REENTER switch; on re-entry, `case (n)` lands just
// past the yielded statement. Special states -1/1 at this point route
// to termination / plain bail-out respectively.
#define CORO_YIELD_IMPL(n) \
for (_coro_value = (n);;) \
if (_coro_value == 0) \
{ \
case (n): ; \
break; \
} \
else \
switch (_coro_value ? 0 : 1) \
for (;;) \
case -1: if (_coro_value) \
goto terminate_coroutine; \
else for (;;) \
case 1: if (_coro_value) \
goto bail_out_of_coroutine; \
else case 0:
// CORO_FORK_IMPL(n): the child resumes at `case -(n)` (negative state
// marks it as the child); the parent continues with state (n).
#define CORO_FORK_IMPL(n) \
for (_coro_value = -(n);; _coro_value = (n)) \
if (_coro_value == (n)) \
{ \
case -(n): ; \
break; \
} \
else
// MSVC's __COUNTER__ yields unique resume points even when several
// yields share one source line; other compilers fall back on __LINE__.
#if defined(_MSC_VER)
# define CORO_YIELD CORO_YIELD_IMPL(__COUNTER__ + 1)
# define CORO_FORK CORO_FORK_IMPL(__COUNTER__ + 1)
#else // defined(_MSC_VER)
# define CORO_YIELD CORO_YIELD_IMPL(__LINE__)
# define CORO_FORK CORO_FORK_IMPL(__LINE__)
#endif // defined(_MSC_VER)
#endif // COROUTINE_HPP

View File

@@ -0,0 +1,53 @@
//
// high_res_clock.hpp
// ~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef HIGH_RES_CLOCK_HPP
#define HIGH_RES_CLOCK_HPP
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#if defined(BOOST_WINDOWS)
// The performance-counter API is declared in <windows.h>; include it here
// so this header is self-contained (boost/config.hpp does not pull it in).
#include <windows.h>
// High resolution clock based on the Windows performance counter.
// Values are in counter ticks; callers rescale against wall-clock time.
inline boost::uint64_t high_res_clock()
{
  LARGE_INTEGER i;
  QueryPerformanceCounter(&i);
  return i.QuadPart;
}
#elif defined(__GNUC__) && defined(__x86_64__)
// High resolution clock based on the x86-64 time stamp counter.
// Values are in TSC ticks; callers rescale against wall-clock time.
inline boost::uint64_t high_res_clock()
{
  unsigned long low, high;
  __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high));
  return (((boost::uint64_t)high) << 32) | low;
}
#else
// Portable fallback: microseconds since the UNIX epoch.
#include <boost/date_time/posix_time/posix_time_types.hpp>
inline boost::uint64_t high_res_clock()
{
  boost::posix_time::ptime now =
    boost::posix_time::microsec_clock::universal_time();
  boost::posix_time::ptime epoch(
    boost::gregorian::date(1970, 1, 1),
    boost::posix_time::seconds(0));
  return (now - epoch).total_microseconds();
}
#endif
#endif // HIGH_RES_CLOCK_HPP

125
test/latency/tcp_client.cpp Normal file
View File

@@ -0,0 +1,125 @@
//
// tcp_client.cpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/shared_ptr.hpp>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include "high_res_clock.hpp"
using boost::asio::ip::tcp;
using boost::posix_time::ptime;
using boost::posix_time::microsec_clock;
const int num_samples = 100000;
// Completion condition for read/write: keep transferring until a real
// error occurs. would_block (seen while spinning on a non-blocking
// socket) is not treated as an error.
struct transfer_all
{
  typedef std::size_t result_type;

  std::size_t operator()(const boost::system::error_code& ec, std::size_t)
  {
    if (!ec || ec == boost::asio::error::would_block)
      return ~0;
    return 0;
  }
};
int main(int argc, char* argv[])
{
if (argc != 6)
{
std::fprintf(stderr,
"Usage: tcp_client <ip> <port> "
"<nconns> <bufsize> {spin|block}\n");
return 1;
}
const char* ip = argv[1];
unsigned short port = static_cast<unsigned short>(std::atoi(argv[2]));
int num_connections = std::atoi(argv[3]);
std::size_t buf_size = static_cast<std::size_t>(std::atoi(argv[4]));
bool spin = (std::strcmp(argv[5], "spin") == 0);
boost::asio::io_service io_service;
std::vector<boost::shared_ptr<tcp::socket> > sockets;
for (int i = 0; i < num_connections; ++i)
{
boost::shared_ptr<tcp::socket> s(new tcp::socket(io_service));
tcp::endpoint target(boost::asio::ip::address::from_string(ip), port);
s->connect(target);
s->set_option(tcp::no_delay(true));
if (spin)
{
tcp::socket::non_blocking_io nbio(true);
s->io_control(nbio);
}
sockets.push_back(s);
}
std::vector<unsigned char> write_buf(buf_size);
std::vector<unsigned char> read_buf(buf_size);
ptime start = microsec_clock::universal_time();
boost::uint64_t start_hr = high_res_clock();
boost::uint64_t samples[num_samples];
for (int i = 0; i < num_samples; ++i)
{
tcp::socket& socket = *sockets[i % num_connections];
boost::uint64_t t = high_res_clock();
boost::system::error_code ec;
boost::asio::write(socket,
boost::asio::buffer(write_buf),
transfer_all(), ec);
boost::asio::read(socket,
boost::asio::buffer(read_buf),
transfer_all(), ec);
samples[i] = high_res_clock() - t;
}
ptime stop = microsec_clock::universal_time();
boost::uint64_t stop_hr = high_res_clock();
boost::uint64_t elapsed_usec = (stop - start).total_microseconds();
boost::uint64_t elapsed_hr = stop_hr - start_hr;
double scale = 1.0 * elapsed_usec / elapsed_hr;
std::sort(samples, samples + num_samples);
std::printf(" 0.0%%\t%f\n", samples[0] * scale);
std::printf(" 0.1%%\t%f\n", samples[num_samples / 1000 - 1] * scale);
std::printf(" 1.0%%\t%f\n", samples[num_samples / 100 - 1] * scale);
std::printf(" 10.0%%\t%f\n", samples[num_samples / 10 - 1] * scale);
std::printf(" 20.0%%\t%f\n", samples[num_samples * 2 / 10 - 1] * scale);
std::printf(" 30.0%%\t%f\n", samples[num_samples * 3 / 10 - 1] * scale);
std::printf(" 40.0%%\t%f\n", samples[num_samples * 4 / 10 - 1] * scale);
std::printf(" 50.0%%\t%f\n", samples[num_samples * 5 / 10 - 1] * scale);
std::printf(" 60.0%%\t%f\n", samples[num_samples * 6 / 10 - 1] * scale);
std::printf(" 70.0%%\t%f\n", samples[num_samples * 7 / 10 - 1] * scale);
std::printf(" 80.0%%\t%f\n", samples[num_samples * 8 / 10 - 1] * scale);
std::printf(" 90.0%%\t%f\n", samples[num_samples * 9 / 10 - 1] * scale);
std::printf(" 99.0%%\t%f\n", samples[num_samples * 99 / 100 - 1] * scale);
std::printf(" 99.9%%\t%f\n", samples[num_samples * 999 / 1000 - 1] * scale);
std::printf("100.0%%\t%f\n", samples[num_samples - 1] * scale);
double total = 0.0;
for (int i = 0; i < num_samples; ++i) total += samples[i] * scale;
std::printf(" mean\t%f\n", total / num_samples);
}

114
test/latency/tcp_server.cpp Normal file
View File

@@ -0,0 +1,114 @@
//
// tcp_server.cpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio/io_service.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/shared_ptr.hpp>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
using boost::asio::ip::tcp;
#include "yield.hpp"
// Echo server for the TCP latency test. Each instance is a stackless
// coroutine that accepts one connection, then repeatedly reads a full
// buffer, bit-flips it, and writes it back until the connection drops.
class tcp_server : coroutine
{
public:
  tcp_server(tcp::acceptor& acceptor, std::size_t buf_size) :
    acceptor_(acceptor),
    socket_(acceptor_.get_io_service()),
    buffer_(buf_size)
  {
  }

  // Coroutine body; also acts as the completion handler (via ref).
  void operator()(boost::system::error_code ec, std::size_t n = 0)
  {
    reenter (this) for (;;)
    {
      yield acceptor_.async_accept(socket_, ref(this));

      while (!ec)
      {
        yield boost::asio::async_read(socket_,
            boost::asio::buffer(buffer_), ref(this));

        if (!ec)
        {
          // Invert every byte so the client gets a processed reply.
          for (std::size_t i = 0; i < n; ++i) buffer_[i] = ~buffer_[i];

          yield boost::asio::async_write(socket_,
              boost::asio::buffer(buffer_), ref(this));
        }
      }

      // On error, close and loop around to accept the next connection.
      socket_.close();
    }
  }

  // Lightweight handler wrapper: avoids copying the server object into
  // the asynchronous operations.
  struct ref
  {
    explicit ref(tcp_server* p)
      : p_(p)
    {
    }

    void operator()(boost::system::error_code ec, std::size_t n = 0)
    {
      (*p_)(ec, n);
    }

  private:
    tcp_server* p_;
  };

private:
  tcp::acceptor& acceptor_;
  tcp::socket socket_;
  std::vector<unsigned char> buffer_;
  // Note: the unused `tcp::endpoint sender_` member (a leftover from the
  // UDP variant of this server) has been removed.
};
#include "unyield.hpp"
int main(int argc, char* argv[])
{
if (argc != 5)
{
std::fprintf(stderr,
"Usage: tcp_server <port> <nconns> "
"<bufsize> {spin|block}\n");
return 1;
}
unsigned short port = static_cast<unsigned short>(std::atoi(argv[1]));
int max_connections = std::atoi(argv[2]);
std::size_t buf_size = std::atoi(argv[3]);
bool spin = (std::strcmp(argv[4], "spin") == 0);
boost::asio::io_service io_service(1);
tcp::acceptor acceptor(io_service, tcp::endpoint(tcp::v4(), port));
std::vector<boost::shared_ptr<tcp_server> > servers;
for (int i = 0; i < max_connections; ++i)
{
boost::shared_ptr<tcp_server> s(new tcp_server(acceptor, buf_size));
servers.push_back(s);
(*s)(boost::system::error_code());
}
if (spin)
for (;;) io_service.poll();
else
io_service.run();
}

105
test/latency/udp_client.cpp Normal file
View File

@@ -0,0 +1,105 @@
//
// udp_client.cpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio/ip/udp.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include "high_res_clock.hpp"
using boost::asio::ip::udp;
using boost::posix_time::ptime;
using boost::posix_time::microsec_clock;
const int num_samples = 100000;
int main(int argc, char* argv[])
{
if (argc != 6)
{
std::fprintf(stderr,
"Usage: udp_client <ip> <port1> "
"<nports> <bufsize> {spin|block}\n");
return 1;
}
const char* ip = argv[1];
unsigned short first_port = static_cast<unsigned short>(std::atoi(argv[2]));
unsigned short num_ports = static_cast<unsigned short>(std::atoi(argv[3]));
std::size_t buf_size = static_cast<std::size_t>(std::atoi(argv[4]));
bool spin = (std::strcmp(argv[5], "spin") == 0);
boost::asio::io_service io_service;
udp::socket socket(io_service, udp::endpoint(udp::v4(), 0));
if (spin)
{
udp::socket::non_blocking_io nbio(true);
socket.io_control(nbio);
}
udp::endpoint target(boost::asio::ip::address::from_string(ip), first_port);
unsigned short last_port = first_port + num_ports - 1;
std::vector<unsigned char> write_buf(buf_size);
std::vector<unsigned char> read_buf(buf_size);
ptime start = microsec_clock::universal_time();
boost::uint64_t start_hr = high_res_clock();
boost::uint64_t samples[num_samples];
for (int i = 0; i < num_samples; ++i)
{
boost::uint64_t t = high_res_clock();
boost::system::error_code ec;
socket.send_to(boost::asio::buffer(write_buf), target, 0, ec);
do socket.receive(boost::asio::buffer(read_buf), 0, ec);
while (ec == boost::asio::error::would_block);
samples[i] = high_res_clock() - t;
if (target.port() == last_port)
target.port(first_port);
else
target.port(target.port() + 1);
}
ptime stop = microsec_clock::universal_time();
boost::uint64_t stop_hr = high_res_clock();
boost::uint64_t elapsed_usec = (stop - start).total_microseconds();
boost::uint64_t elapsed_hr = stop_hr - start_hr;
double scale = 1.0 * elapsed_usec / elapsed_hr;
std::sort(samples, samples + num_samples);
std::printf(" 0.0%%\t%f\n", samples[0] * scale);
std::printf(" 0.1%%\t%f\n", samples[num_samples / 1000 - 1] * scale);
std::printf(" 1.0%%\t%f\n", samples[num_samples / 100 - 1] * scale);
std::printf(" 10.0%%\t%f\n", samples[num_samples / 10 - 1] * scale);
std::printf(" 20.0%%\t%f\n", samples[num_samples * 2 / 10 - 1] * scale);
std::printf(" 30.0%%\t%f\n", samples[num_samples * 3 / 10 - 1] * scale);
std::printf(" 40.0%%\t%f\n", samples[num_samples * 4 / 10 - 1] * scale);
std::printf(" 50.0%%\t%f\n", samples[num_samples * 5 / 10 - 1] * scale);
std::printf(" 60.0%%\t%f\n", samples[num_samples * 6 / 10 - 1] * scale);
std::printf(" 70.0%%\t%f\n", samples[num_samples * 7 / 10 - 1] * scale);
std::printf(" 80.0%%\t%f\n", samples[num_samples * 8 / 10 - 1] * scale);
std::printf(" 90.0%%\t%f\n", samples[num_samples * 9 / 10 - 1] * scale);
std::printf(" 99.0%%\t%f\n", samples[num_samples * 99 / 100 - 1] * scale);
std::printf(" 99.9%%\t%f\n", samples[num_samples * 999 / 1000 - 1] * scale);
std::printf("100.0%%\t%f\n", samples[num_samples - 1] * scale);
double total = 0.0;
for (int i = 0; i < num_samples; ++i) total += samples[i] * scale;
std::printf(" mean\t%f\n", total / num_samples);
}

125
test/latency/udp_server.cpp Normal file
View File

@@ -0,0 +1,125 @@
//
// udp_server.cpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio/io_service.hpp>
#include <boost/asio/ip/udp.hpp>
#include <boost/shared_ptr.hpp>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include "allocator.hpp"
using boost::asio::ip::udp;
#include "yield.hpp"
// Echo server for the UDP latency test. Each instance is a stackless
// coroutine bound to its own port: it waits for a datagram, bit-flips
// the payload, and sends it straight back to the sender.
class udp_server : coroutine
{
public:
udp_server(boost::asio::io_service& io_service,
unsigned short port, std::size_t buf_size) :
socket_(io_service, udp::endpoint(udp::v4(), port)),
buffer_(buf_size)
{
}
// Coroutine body; doubles as the async_receive_from completion handler.
void operator()(boost::system::error_code ec, std::size_t n = 0)
{
reenter (this) for (;;)
{
yield socket_.async_receive_from(
boost::asio::buffer(buffer_),
sender_, ref(this));
if (!ec)
{
// Invert each byte so the client sees a genuinely processed reply.
for (std::size_t i = 0; i < n; ++i) buffer_[i] = ~buffer_[i];
// Synchronous reply; a send error does not stop the loop — the
// coroutine simply waits for the next datagram.
socket_.send_to(boost::asio::buffer(buffer_, n), sender_, 0, ec);
}
}
}
// Handler-allocation hooks: route asio's temporary handler memory
// through the server's reusable allocator to avoid heap traffic.
friend void* asio_handler_allocate(std::size_t n, udp_server* s)
{
return s->allocator_.allocate(n);
}
friend void asio_handler_deallocate(void* p, std::size_t, udp_server* s)
{
s->allocator_.deallocate(p);
}
// Lightweight handler wrapper that forwards invocations to the server
// and forwards the allocation hooks as well.
struct ref
{
explicit ref(udp_server* p)
: p_(p)
{
}
void operator()(boost::system::error_code ec, std::size_t n = 0)
{
(*p_)(ec, n);
}
private:
udp_server* p_;
friend void* asio_handler_allocate(std::size_t n, ref* r)
{
return asio_handler_allocate(n, r->p_);
}
friend void asio_handler_deallocate(void* p, std::size_t n, ref* r)
{
asio_handler_deallocate(p, n, r->p_);
}
};
private:
udp::socket socket_;
std::vector<unsigned char> buffer_;
// Endpoint of the most recent sender; filled in by async_receive_from.
udp::endpoint sender_;
// Reusable single-slot memory for handler allocations.
allocator allocator_;
};
#include "unyield.hpp"
int main(int argc, char* argv[])
{
if (argc != 5)
{
std::fprintf(stderr,
"Usage: udp_server <port1> <nports> "
"<bufsize> {spin|block}\n");
return 1;
}
unsigned short first_port = static_cast<unsigned short>(std::atoi(argv[1]));
unsigned short num_ports = static_cast<unsigned short>(std::atoi(argv[2]));
std::size_t buf_size = std::atoi(argv[3]);
bool spin = (std::strcmp(argv[4], "spin") == 0);
boost::asio::io_service io_service(1);
std::vector<boost::shared_ptr<udp_server> > servers;
for (unsigned short i = 0; i < num_ports; ++i)
{
unsigned short port = first_port + i;
boost::shared_ptr<udp_server> s(new udp_server(io_service, port, buf_size));
servers.push_back(s);
(*s)(boost::system::error_code());
}
if (spin)
for (;;) io_service.poll();
else
io_service.run();
}

21
test/latency/unyield.hpp Normal file
View File

@@ -0,0 +1,21 @@
//
// unyield.hpp
// ~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Remove the pseudo-keyword macros introduced by yield.hpp so they do
// not leak into code included after this point (notably `fork`, which
// would otherwise clash with the POSIX function).
#ifdef reenter
# undef reenter
#endif
#ifdef yield
# undef yield
#endif
#ifdef fork
# undef fork
#endif

Some files were not shown because too many files have changed in this diff Show More