diff --git a/doc/quickref.xml b/doc/quickref.xml
index 28dcab18..ed6548aa 100644
--- a/doc/quickref.xml
+++ b/doc/quickref.xml
@@ -63,6 +63,9 @@
async_write
async_write_at
buffer
+ buffer_cast
+ buffer_copy
+ buffer_size
buffers_begin
buffers_end
has_service
@@ -71,6 +74,7 @@
read_until
transfer_all
transfer_at_least
+ transfer_exactly
use_service
write
write_at
@@ -82,6 +86,7 @@
placeholders::bytes_transferred
placeholders::error
placeholders::iterator
+ placeholders::signal_number
Error Codes
@@ -163,6 +168,8 @@
Free Functions
+ async_connect
+ connect
ip::host_name
@@ -171,8 +178,9 @@
basic_datagram_socket
basic_deadline_timer
- basic_socket
basic_raw_socket
+ basic_seq_packet_socket
+ basic_socket
basic_socket_acceptor
basic_socket_iostream
basic_socket_streambuf
@@ -188,6 +196,7 @@
datagram_socket_service
ip::resolver_service
raw_socket_service
+ seq_packet_socket_service
socket_acceptor_service
stream_socket_service
@@ -225,6 +234,7 @@
Type Requirements
AcceptHandler
+ ComposedConnectHandler
ConnectHandler
DatagramSocketService
Endpoint
@@ -235,6 +245,7 @@
RawSocketService
ResolveHandler
ResolverService
+ SeqPacketSocketService
SettableSocketOption
SocketAcceptorService
SocketService
@@ -257,9 +268,12 @@
SSL
-
+
Serial Ports
+
+ Signal Handling
+
@@ -290,17 +304,18 @@
ssl::context
ssl::context_base
+ ssl::rfc2818_verification
ssl::stream_base
+ ssl::verify_context
Class Templates
- ssl::basic_context
ssl::stream
- Services
+ Type Requirements
- ssl::context_service
- ssl::stream_service
+ HandshakeHandler
+ ShutdownHandler
@@ -317,8 +332,6 @@
serial_port_service
-
-
Serial Port Options
serial_port_base::baud_rate
@@ -334,6 +347,25 @@
SettableSerialPortOption
+
+ Classes
+
+ signal_set
+
+ Class Templates
+
+ basic_signal_set
+
+ Services
+
+ signal_set_service
+
+ Type Requirements
+
+ SignalSetService
+ SignalHandler
+
+
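The quick reference additions above cover the new signal handling support (signal_set, basic_signal_set, signal_set_service and the placeholders::signal_number placeholder). A minimal usage sketch of that public API follows; it is illustrative only, and the choice of signals and handler is arbitrary:

    #include <boost/asio.hpp>
    #include <csignal>
    #include <iostream>

    void handle_signal(const boost::system::error_code& ec, int signal_number)
    {
      // The second argument is the signal that fired; when binding a handler
      // with boost::bind it corresponds to placeholders::signal_number.
      if (!ec)
        std::cout << "received signal " << signal_number << std::endl;
    }

    int main()
    {
      boost::asio::io_service io_service;

      // Register interest in SIGINT and SIGTERM and wait asynchronously.
      boost::asio::signal_set signals(io_service, SIGINT, SIGTERM);
      signals.async_wait(&handle_signal);

      io_service.run();
      return 0;
    }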
diff --git a/example/http/server/request_parser.cpp b/example/http/server/request_parser.cpp
index 0a48350b..fbb5b137 100644
--- a/example/http/server/request_parser.cpp
+++ b/example/http/server/request_parser.cpp
@@ -54,17 +54,6 @@ boost::tribool request_parser::consume(request& req, char input)
req.method.push_back(input);
return boost::indeterminate;
}
- case uri_start:
- if (is_ctl(input))
- {
- return false;
- }
- else
- {
- state_ = uri;
- req.uri.push_back(input);
- return boost::indeterminate;
- }
case uri:
if (input == ' ')
{
diff --git a/example/http/server/request_parser.hpp b/example/http/server/request_parser.hpp
index 3731da7f..ff4d74f0 100644
--- a/example/http/server/request_parser.hpp
+++ b/example/http/server/request_parser.hpp
@@ -68,7 +68,6 @@ private:
{
method_start,
method,
- uri_start,
uri,
http_version_h,
http_version_t_1,
diff --git a/example/http/server2/request_parser.cpp b/example/http/server2/request_parser.cpp
index 499850fa..2417c5ef 100644
--- a/example/http/server2/request_parser.cpp
+++ b/example/http/server2/request_parser.cpp
@@ -54,17 +54,6 @@ boost::tribool request_parser::consume(request& req, char input)
req.method.push_back(input);
return boost::indeterminate;
}
- case uri_start:
- if (is_ctl(input))
- {
- return false;
- }
- else
- {
- state_ = uri;
- req.uri.push_back(input);
- return boost::indeterminate;
- }
case uri:
if (input == ' ')
{
diff --git a/example/http/server2/request_parser.hpp b/example/http/server2/request_parser.hpp
index 31a93731..ca1bfb56 100644
--- a/example/http/server2/request_parser.hpp
+++ b/example/http/server2/request_parser.hpp
@@ -68,7 +68,6 @@ private:
{
method_start,
method,
- uri_start,
uri,
http_version_h,
http_version_t_1,
diff --git a/example/http/server3/request_parser.cpp b/example/http/server3/request_parser.cpp
index 30eb63a7..40ffd2b4 100644
--- a/example/http/server3/request_parser.cpp
+++ b/example/http/server3/request_parser.cpp
@@ -54,17 +54,6 @@ boost::tribool request_parser::consume(request& req, char input)
req.method.push_back(input);
return boost::indeterminate;
}
- case uri_start:
- if (is_ctl(input))
- {
- return false;
- }
- else
- {
- state_ = uri;
- req.uri.push_back(input);
- return boost::indeterminate;
- }
case uri:
if (input == ' ')
{
diff --git a/example/http/server3/request_parser.hpp b/example/http/server3/request_parser.hpp
index d8016067..f921ccd4 100644
--- a/example/http/server3/request_parser.hpp
+++ b/example/http/server3/request_parser.hpp
@@ -68,7 +68,6 @@ private:
{
method_start,
method,
- uri_start,
uri,
http_version_h,
http_version_t_1,
diff --git a/include/boost/asio/detail/atomic_count.hpp b/include/boost/asio/detail/atomic_count.hpp
index 943e6904..f8d9b690 100644
--- a/include/boost/asio/detail/atomic_count.hpp
+++ b/include/boost/asio/detail/atomic_count.hpp
@@ -17,7 +17,9 @@
#include <boost/asio/detail/config.hpp>
-#if defined(BOOST_ASIO_HAS_STD_ATOMIC)
+#if !defined(BOOST_HAS_THREADS) || defined(BOOST_ASIO_DISABLE_THREADS)
+// Nothing to include.
+#elif defined(BOOST_ASIO_HAS_STD_ATOMIC)
# include <atomic>
#else // defined(BOOST_ASIO_HAS_STD_ATOMIC)
# include <boost/detail/atomic_count.hpp>
@@ -27,7 +29,9 @@ namespace boost {
namespace asio {
namespace detail {
-#if defined(BOOST_ASIO_HAS_STD_ATOMIC)
+#if !defined(BOOST_HAS_THREADS) || defined(BOOST_ASIO_DISABLE_THREADS)
+typedef long atomic_count;
+#elif defined(BOOST_ASIO_HAS_STD_ATOMIC)
typedef std::atomic<long> atomic_count;
#else // defined(BOOST_ASIO_HAS_STD_ATOMIC)
typedef boost::detail::atomic_count atomic_count;
diff --git a/include/boost/asio/detail/buffer_sequence_adapter.hpp b/include/boost/asio/detail/buffer_sequence_adapter.hpp
index 061daf10..9af763e8 100644
--- a/include/boost/asio/detail/buffer_sequence_adapter.hpp
+++ b/include/boost/asio/detail/buffer_sequence_adapter.hpp
@@ -17,6 +17,7 @@
#include
#include
+#include <boost/asio/detail/array_fwd.hpp>
#include
#include
@@ -247,6 +248,112 @@ private:
std::size_t total_buffer_size_;
};
+template <typename Buffer, typename Elem>
+class buffer_sequence_adapter<Buffer, boost::array<Elem, 2> >
+ : buffer_sequence_adapter_base
+{
+public:
+ explicit buffer_sequence_adapter(
+ const boost::array<Elem, 2>& buffer_sequence)
+ {
+ init_native_buffer(buffers_[0], Buffer(buffer_sequence[0]));
+ init_native_buffer(buffers_[1], Buffer(buffer_sequence[1]));
+ total_buffer_size_ = boost::asio::buffer_size(buffer_sequence[0])
+ + boost::asio::buffer_size(buffer_sequence[1]);
+ }
+
+ native_buffer_type* buffers()
+ {
+ return buffers_;
+ }
+
+ std::size_t count() const
+ {
+ return 2;
+ }
+
+ bool all_empty() const
+ {
+ return total_buffer_size_ == 0;
+ }
+
+ static bool all_empty(const boost::array<Elem, 2>& buffer_sequence)
+ {
+ return boost::asio::buffer_size(buffer_sequence[0]) == 0
+ && boost::asio::buffer_size(buffer_sequence[1]) == 0;
+ }
+
+ static void validate(const boost::array<Elem, 2>& buffer_sequence)
+ {
+ boost::asio::buffer_cast<const void*>(buffer_sequence[0]);
+ boost::asio::buffer_cast<const void*>(buffer_sequence[1]);
+ }
+
+ static Buffer first(const boost::array<Elem, 2>& buffer_sequence)
+ {
+ return Buffer(buffer_sequence[0]);
+ }
+
+private:
+ native_buffer_type buffers_[2];
+ std::size_t total_buffer_size_;
+};
+
+#if defined(BOOST_ASIO_HAS_STD_ARRAY)
+
+template <typename Buffer, typename Elem>
+class buffer_sequence_adapter<Buffer, std::array<Elem, 2> >
+ : buffer_sequence_adapter_base
+{
+public:
+ explicit buffer_sequence_adapter(
+ const std::array<Elem, 2>& buffer_sequence)
+ {
+ init_native_buffer(buffers_[0], Buffer(buffer_sequence[0]));
+ init_native_buffer(buffers_[1], Buffer(buffer_sequence[1]));
+ total_buffer_size_ = boost::asio::buffer_size(buffer_sequence[0])
+ + boost::asio::buffer_size(buffer_sequence[1]);
+ }
+
+ native_buffer_type* buffers()
+ {
+ return buffers_;
+ }
+
+ std::size_t count() const
+ {
+ return 2;
+ }
+
+ bool all_empty() const
+ {
+ return total_buffer_size_ == 0;
+ }
+
+ static bool all_empty(const std::array<Elem, 2>& buffer_sequence)
+ {
+ return boost::asio::buffer_size(buffer_sequence[0]) == 0
+ && boost::asio::buffer_size(buffer_sequence[1]) == 0;
+ }
+
+ static void validate(const std::array<Elem, 2>& buffer_sequence)
+ {
+ boost::asio::buffer_cast<const void*>(buffer_sequence[0]);
+ boost::asio::buffer_cast<const void*>(buffer_sequence[1]);
+ }
+
+ static Buffer first(const std::array<Elem, 2>& buffer_sequence)
+ {
+ return Buffer(buffer_sequence[0]);
+ }
+
+private:
+ native_buffer_type buffers_[2];
+ std::size_t total_buffer_size_;
+};
+
+#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
+
} // namespace detail
} // namespace asio
} // namespace boost
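The two new specializations above let a boost::array or std::array of exactly two buffers act as a buffer sequence without going through the generic iterator-based adapter, which keeps the common header-plus-payload case allocation-free. A hedged sketch of the kind of caller this serves; the send_frame function and its arguments are invented for illustration:

    #include <string>
    #include <boost/array.hpp>
    #include <boost/asio.hpp>

    // Gather-write a fixed header followed by a payload. The array<_, 2>
    // specialization maps these two buffers straight onto the native
    // scatter/gather structures with no allocation and no generic iteration.
    void send_frame(boost::asio::ip::tcp::socket& socket,
        const char (&header)[4], const std::string& payload)
    {
      boost::array<boost::asio::const_buffer, 2> buffers = {{
        boost::asio::buffer(header),
        boost::asio::buffer(payload)
      }};
      boost::asio::write(socket, buffers);
    }

A two-element array of mutable_buffer works the same way for scatter reads.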
diff --git a/include/boost/asio/detail/call_stack.hpp b/include/boost/asio/detail/call_stack.hpp
index d5f9099a..45935c52 100644
--- a/include/boost/asio/detail/call_stack.hpp
+++ b/include/boost/asio/detail/call_stack.hpp
@@ -27,34 +27,60 @@ namespace detail {
// Helper class to determine whether or not the current thread is inside an
// invocation of io_service::run() for a specified io_service object.
-template <typename Owner>
+template <typename Key, typename Value = unsigned char>
class call_stack
{
public:
- // Context class automatically pushes an owner on to the stack.
+ // Context class automatically pushes the key/value pair on to the stack.
class context
: private noncopyable
{
public:
- // Push the owner on to the stack.
- explicit context(Owner* d)
- : owner_(d),
- next_(call_stack<Owner>::top_)
+ // Push the key on to the stack.
+ explicit context(Key* k)
+ : key_(k),
+ next_(call_stack<Key, Value>::top_)
{
- call_stack<Owner>::top_ = this;
+ value_ = reinterpret_cast<unsigned char*>(this);
+ call_stack<Key, Value>::top_ = this;
}
- // Pop the owner from the stack.
+ // Push the key/value pair on to the stack.
+ context(Key* k, Value& v)
+ : key_(k),
+ value_(&v),
+ next_(call_stack<Key, Value>::top_)
+ {
+ call_stack<Key, Value>::top_ = this;
+ }
+
+ // Pop the key/value pair from the stack.
~context()
{
- call_stack<Owner>::top_ = next_;
+ call_stack<Key, Value>::top_ = next_;
+ }
+
+ // Find the next context with the same key.
+ Value* next_by_key() const
+ {
+ context* elem = next_;
+ while (elem)
+ {
+ if (elem->key_ == key_)
+ return elem->value_;
+ elem = elem->next_;
+ }
+ return 0;
}
private:
- friend class call_stack<Owner>;
+ friend class call_stack<Key, Value>;
- // The owner associated with the context.
- Owner* owner_;
+ // The key associated with the context.
+ Key* key_;
+
+ // The value associated with the context.
+ Value* value_;
// The next element in the stack.
context* next_;
@@ -62,17 +88,18 @@ public:
friend class context;
- // Determine whether the specified owner is on the stack.
- static bool contains(Owner* d)
+ // Determine whether the specified key is on the stack. Returns the
+ // associated value if present, 0 otherwise.
+ static Value* contains(Key* k)
{
context* elem = top_;
while (elem)
{
- if (elem->owner_ == d)
- return true;
+ if (elem->key_ == k)
+ return elem->value_;
elem = elem->next_;
}
- return false;
+ return 0;
}
private:
@@ -80,9 +107,9 @@ private:
static tss_ptr<context> top_;
};
-template <typename Owner>
-tss_ptr<typename call_stack<Owner>::context>
-call_stack<Owner>::top_;
+template <typename Key, typename Value>
+tss_ptr<typename call_stack<Key, Value>::context>
+call_stack<Key, Value>::top_;
} // namespace detail
} // namespace asio
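call_stack is now keyed on a (Key, Value) pair: pushing a context records a value for the current thread, contains() returns that value instead of a bool, and next_by_key() walks outward through nested frames. The sketch below restates the idea in a self-contained form; it uses C++11 thread_local where Asio uses its portable tss_ptr, and the scheduler/thread_state names are placeholders rather than Asio types:

    #include <cassert>

    // Simplified key/value call stack: each thread keeps a linked list of
    // (key, value) frames, so code deeper in the call chain can discover
    // whether a key is active on this thread and fetch its associated value.
    template <typename Key, typename Value>
    class call_stack_sketch
    {
    public:
      class context
      {
      public:
        context(Key* key, Value& value)
          : key_(key), value_(&value), next_(top_)
        {
          top_ = this;            // push this frame
        }

        ~context()
        {
          top_ = next_;           // pop this frame
        }

      private:
        friend class call_stack_sketch;
        Key* key_;
        Value* value_;
        context* next_;
      };

      // Return the value pushed with the key on this thread, or 0 if absent.
      static Value* contains(Key* key)
      {
        for (context* frame = top_; frame; frame = frame->next_)
          if (frame->key_ == key)
            return frame->value_;
        return 0;
      }

    private:
      static thread_local context* top_;
    };

    template <typename Key, typename Value>
    thread_local typename call_stack_sketch<Key, Value>::context*
    call_stack_sketch<Key, Value>::top_ = 0;

    struct scheduler {};
    struct thread_state { int private_queue_depth; };

    int main()
    {
      scheduler s;
      thread_state state = { 0 };
      assert(call_stack_sketch<scheduler, thread_state>::contains(&s) == 0);
      {
        call_stack_sketch<scheduler, thread_state>::context frame(&s, state);
        assert(call_stack_sketch<scheduler, thread_state>::contains(&s) == &state);
      }
      assert(call_stack_sketch<scheduler, thread_state>::contains(&s) == 0);
      return 0;
    }

The task_io_service changes later in this patch use exactly this shape: the key is the io_service implementation and the value is the per-thread state that carries the private operation queue.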
diff --git a/include/boost/asio/detail/completion_handler.hpp b/include/boost/asio/detail/completion_handler.hpp
index 144ad7f0..74200b6b 100644
--- a/include/boost/asio/detail/completion_handler.hpp
+++ b/include/boost/asio/detail/completion_handler.hpp
@@ -40,7 +40,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
completion_handler* h(static_cast<completion_handler*>(base));
@@ -61,7 +62,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN(());
boost_asio_handler_invoke_helpers::invoke(handler, handler);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/dependent_type.hpp b/include/boost/asio/detail/dependent_type.hpp
new file mode 100644
index 00000000..4606d3a6
--- /dev/null
+++ b/include/boost/asio/detail/dependent_type.hpp
@@ -0,0 +1,38 @@
+//
+// detail/dependent_type.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_DEPENDENT_TYPE_HPP
+#define BOOST_ASIO_DETAIL_DEPENDENT_TYPE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename DependsOn, typename T>
+struct dependent_type
+{
+ typedef T type;
+};
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_DEPENDENT_TYPE_HPP
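dependent_type simply republishes T under a nested typedef, but doing so through a class template makes the resulting name depend on the extra template parameter, so its resolution is deferred until the enclosing template is instantiated. A self-contained sketch of the pattern; it uses a local copy of the helper and invented fake_* names, not the real Asio services:

    // Local copy of the helper for a self-contained example; the real one
    // lives in boost/asio/detail/dependent_type.hpp.
    template <typename DependsOn, typename T>
    struct dependent_type
    {
      typedef T type;
    };

    // Invented stand-ins for an internal operation and a service template.
    template <typename Handler>
    struct fake_receive_op
    {
      explicit fake_receive_op(Handler h) : handler(h) {}
      Handler handler;
    };

    template <typename Protocol>
    struct fake_socket_service
    {
      template <typename Handler>
      void async_receive(Handler handler)
      {
        // Routing the operation type through dependent_type makes it depend
        // on Protocol as well as Handler, so the name is only resolved once
        // fake_socket_service<Protocol> is instantiated.
        typedef typename dependent_type<Protocol,
            fake_receive_op<Handler> >::type op;
        op pending(handler);
        (void)pending;
      }
    };

    struct dummy_protocol {};

    int main()
    {
      fake_socket_service<dummy_protocol> service;
      service.async_receive(42); // Handler deduced as int for the sketch.
      return 0;
    }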
diff --git a/include/boost/asio/detail/descriptor_ops.hpp b/include/boost/asio/detail/descriptor_ops.hpp
index 1eccf3a7..11d4fb1f 100644
--- a/include/boost/asio/detail/descriptor_ops.hpp
+++ b/include/boost/asio/detail/descriptor_ops.hpp
@@ -93,9 +93,11 @@ BOOST_ASIO_DECL int fcntl(int d, long cmd, boost::system::error_code& ec);
BOOST_ASIO_DECL int fcntl(int d, long cmd,
long arg, boost::system::error_code& ec);
-BOOST_ASIO_DECL int poll_read(int d, boost::system::error_code& ec);
+BOOST_ASIO_DECL int poll_read(int d,
+ state_type state, boost::system::error_code& ec);
-BOOST_ASIO_DECL int poll_write(int d, boost::system::error_code& ec);
+BOOST_ASIO_DECL int poll_write(int d,
+ state_type state, boost::system::error_code& ec);
} // namespace descriptor_ops
} // namespace detail
diff --git a/include/boost/asio/detail/descriptor_read_op.hpp b/include/boost/asio/detail/descriptor_read_op.hpp
index 94861e08..d97d3faf 100644
--- a/include/boost/asio/detail/descriptor_read_op.hpp
+++ b/include/boost/asio/detail/descriptor_read_op.hpp
@@ -76,7 +76,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
descriptor_read_op* o(static_cast<descriptor_read_op*>(base));
@@ -98,7 +99,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/descriptor_write_op.hpp b/include/boost/asio/detail/descriptor_write_op.hpp
index 60efb18c..fec9a0c2 100644
--- a/include/boost/asio/detail/descriptor_write_op.hpp
+++ b/include/boost/asio/detail/descriptor_write_op.hpp
@@ -76,7 +76,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
descriptor_write_op* o(static_cast<descriptor_write_op*>(base));
@@ -98,7 +99,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/epoll_reactor.hpp b/include/boost/asio/detail/epoll_reactor.hpp
index 0c3dcecc..cae14961 100644
--- a/include/boost/asio/detail/epoll_reactor.hpp
+++ b/include/boost/asio/detail/epoll_reactor.hpp
@@ -21,6 +21,7 @@
#include
#include
+#include
#include
#include
#include
@@ -47,16 +48,28 @@ public:
connect_op = 1, except_op = 2, max_ops = 3 };
// Per-descriptor queues.
- class descriptor_state
+ class descriptor_state : operation
{
friend class epoll_reactor;
friend class object_pool_access;
+
+ descriptor_state* next_;
+ descriptor_state* prev_;
+
+ bool op_queue_is_empty_[max_ops];
+
mutex mutex_;
+ epoll_reactor* reactor_;
int descriptor_;
op_queue<reactor_op> op_queue_[max_ops];
bool shutdown_;
- descriptor_state* next_;
- descriptor_state* prev_;
+
+ BOOST_ASIO_DECL descriptor_state();
+ void set_ready_events(uint32_t events) { task_result_ = events; }
+ BOOST_ASIO_DECL operation* perform_io(uint32_t events);
+ BOOST_ASIO_DECL static void do_complete(
+ io_service_impl* owner, operation* base,
+ const boost::system::error_code& ec, std::size_t bytes_transferred);
};
// Per-descriptor data.
@@ -160,6 +173,12 @@ private:
// Create the timerfd file descriptor. Does not throw.
BOOST_ASIO_DECL static int do_timerfd_create();
+ // Allocate a new descriptor state object.
+ BOOST_ASIO_DECL descriptor_state* allocate_descriptor_state();
+
+ // Free an existing descriptor state object.
+ BOOST_ASIO_DECL void free_descriptor_state(descriptor_state* s);
+
// Helper function to add a new timer queue.
BOOST_ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);
@@ -206,6 +225,10 @@ private:
// Keep track of all registered descriptors.
object_pool<descriptor_state> registered_descriptors_;
+
+ // Helper class to do post-perform_io cleanup.
+ struct perform_io_cleanup_on_block_exit;
+ friend struct perform_io_cleanup_on_block_exit;
};
} // namespace detail
diff --git a/include/boost/asio/detail/fenced_block.hpp b/include/boost/asio/detail/fenced_block.hpp
index 4cb27dc1..15187606 100644
--- a/include/boost/asio/detail/fenced_block.hpp
+++ b/include/boost/asio/detail/fenced_block.hpp
@@ -29,13 +29,13 @@
# include <boost/asio/detail/gcc_arm_fenced_block.hpp>
#elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))
# include <boost/asio/detail/gcc_hppa_fenced_block.hpp>
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+# include <boost/asio/detail/gcc_x86_fenced_block.hpp>
#elif defined(__GNUC__) \
&& ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \
&& !defined(__INTEL_COMPILER) && !defined(__ICL) \
&& !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)
# include <boost/asio/detail/gcc_sync_fenced_block.hpp>
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-# include <boost/asio/detail/gcc_x86_fenced_block.hpp>
#elif defined(BOOST_WINDOWS) && !defined(UNDER_CE)
# include <boost/asio/detail/win_fenced_block.hpp>
#else
@@ -58,13 +58,13 @@ typedef solaris_fenced_block fenced_block;
typedef gcc_arm_fenced_block fenced_block;
#elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__))
typedef gcc_hppa_fenced_block fenced_block;
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+typedef gcc_x86_fenced_block fenced_block;
#elif defined(__GNUC__) \
&& ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \
&& !defined(__INTEL_COMPILER) && !defined(__ICL) \
&& !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__)
typedef gcc_sync_fenced_block fenced_block;
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-typedef gcc_x86_fenced_block fenced_block;
#elif defined(BOOST_WINDOWS) && !defined(UNDER_CE)
typedef win_fenced_block fenced_block;
#else
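The fenced_block classes now distinguish a half fence from a full fence: handlers that arrive through the locked operation queue only need the barrier on exit (the queue's mutex already ordered prior writes), while dispatch() paths that run a handler immediately still pay for a barrier on entry as well. A rough standalone sketch of the idea using C++11 fences; it stands in for the per-platform barrier code and is not Asio's implementation:

    #include <atomic>

    class fenced_block_sketch
    {
    public:
      enum half_t { half };
      enum full_t { full };

      // Half fence: no barrier on entry. Used when the handler was taken
      // from a mutex-protected queue.
      explicit fenced_block_sketch(half_t) {}

      // Full fence: barrier on entry too. Used when a handler is invoked
      // directly, e.g. from dispatch() on the calling thread.
      explicit fenced_block_sketch(full_t)
      {
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }

      // Both flavours fence on exit so the handler's effects are published
      // before its completion is observed elsewhere.
      ~fenced_block_sketch()
      {
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }
    };

    void invoke_inline(void (*handler)())
    {
      fenced_block_sketch guard(fenced_block_sketch::full);
      handler();
    }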
diff --git a/include/boost/asio/detail/gcc_arm_fenced_block.hpp b/include/boost/asio/detail/gcc_arm_fenced_block.hpp
index 4081f7f3..2e3fff68 100644
--- a/include/boost/asio/detail/gcc_arm_fenced_block.hpp
+++ b/include/boost/asio/detail/gcc_arm_fenced_block.hpp
@@ -29,8 +29,16 @@ class gcc_arm_fenced_block
: private noncopyable
{
public:
- // Constructor.
- gcc_arm_fenced_block()
+ enum half_t { half };
+ enum full_t { full };
+
+ // Constructor for a half fenced block.
+ explicit gcc_arm_fenced_block(half_t)
+ {
+ }
+
+ // Constructor for a full fenced block.
+ explicit gcc_arm_fenced_block(full_t)
{
barrier();
}
diff --git a/include/boost/asio/detail/gcc_hppa_fenced_block.hpp b/include/boost/asio/detail/gcc_hppa_fenced_block.hpp
index 6c5226b3..3bbac9bc 100644
--- a/include/boost/asio/detail/gcc_hppa_fenced_block.hpp
+++ b/include/boost/asio/detail/gcc_hppa_fenced_block.hpp
@@ -29,8 +29,16 @@ class gcc_hppa_fenced_block
: private noncopyable
{
public:
- // Constructor.
- gcc_hppa_fenced_block()
+ enum half_t { half };
+ enum full_t { full };
+
+ // Constructor for a half fenced block.
+ explicit gcc_hppa_fenced_block(half_t)
+ {
+ }
+
+ // Constructor for a full fenced block.
+ explicit gcc_hppa_fenced_block(full_t)
{
barrier();
}
diff --git a/include/boost/asio/detail/gcc_sync_fenced_block.hpp b/include/boost/asio/detail/gcc_sync_fenced_block.hpp
index 65472ccf..3458d24a 100644
--- a/include/boost/asio/detail/gcc_sync_fenced_block.hpp
+++ b/include/boost/asio/detail/gcc_sync_fenced_block.hpp
@@ -32,8 +32,10 @@ class gcc_sync_fenced_block
: private noncopyable
{
public:
+ enum half_or_full_t { half, full };
+
// Constructor.
- gcc_sync_fenced_block()
+ explicit gcc_sync_fenced_block(half_or_full_t)
: value_(0)
{
__sync_lock_test_and_set(&value_, 1);
diff --git a/include/boost/asio/detail/gcc_x86_fenced_block.hpp b/include/boost/asio/detail/gcc_x86_fenced_block.hpp
index fb8473c7..01e2cd49 100644
--- a/include/boost/asio/detail/gcc_x86_fenced_block.hpp
+++ b/include/boost/asio/detail/gcc_x86_fenced_block.hpp
@@ -29,25 +29,46 @@ class gcc_x86_fenced_block
: private noncopyable
{
public:
- // Constructor.
- gcc_x86_fenced_block()
+ enum half_t { half };
+ enum full_t { full };
+
+ // Constructor for a half fenced block.
+ explicit gcc_x86_fenced_block(half_t)
{
- barrier();
+ }
+
+ // Constructor for a full fenced block.
+ explicit gcc_x86_fenced_block(full_t)
+ {
+ barrier1();
}
// Destructor.
~gcc_x86_fenced_block()
{
- barrier();
+ barrier2();
}
private:
- static int barrier()
+ static int barrier1()
{
- int r = 0;
- __asm__ __volatile__ ("xchgl %%eax, %0" : "=m" (r) : : "memory", "cc");
+ int r = 0, m = 1;
+ __asm__ __volatile__ (
+ "xchgl %0, %1" :
+ "=r"(r), "=m"(m) :
+ "0"(1), "m"(m) :
+ "memory", "cc");
return r;
}
+
+ static void barrier2()
+ {
+#if defined(__SSE2__)
+ __asm__ __volatile__ ("mfence" ::: "memory");
+#else // defined(__SSE2__)
+ barrier1();
+#endif // defined(__SSE2__)
+ }
};
} // namespace detail
diff --git a/include/boost/asio/detail/impl/descriptor_ops.ipp b/include/boost/asio/detail/impl/descriptor_ops.ipp
index ca2222c4..28179429 100644
--- a/include/boost/asio/detail/impl/descriptor_ops.ipp
+++ b/include/boost/asio/detail/impl/descriptor_ops.ipp
@@ -203,7 +203,7 @@ std::size_t sync_read(int d, state_type state, buf* bufs,
return 0;
// Wait for descriptor to become ready.
- if (descriptor_ops::poll_read(d, ec) < 0)
+ if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
@@ -280,7 +280,7 @@ std::size_t sync_write(int d, state_type state, const buf* bufs,
return 0;
// Wait for descriptor to become ready.
- if (descriptor_ops::poll_write(d, ec) < 0)
+ if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
@@ -387,7 +387,7 @@ int fcntl(int d, long cmd, long arg, boost::system::error_code& ec)
return result;
}
-int poll_read(int d, boost::system::error_code& ec)
+int poll_read(int d, state_type state, boost::system::error_code& ec)
{
if (d == -1)
{
@@ -399,14 +399,18 @@ int poll_read(int d, boost::system::error_code& ec)
fds.fd = d;
fds.events = POLLIN;
fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
errno = 0;
- int result = error_wrapper(::poll(&fds, 1, -1), ec);
- if (result >= 0)
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
ec = boost::system::error_code();
return result;
}
-int poll_write(int d, boost::system::error_code& ec)
+int poll_write(int d, state_type state, boost::system::error_code& ec)
{
if (d == -1)
{
@@ -418,9 +422,13 @@ int poll_write(int d, boost::system::error_code& ec)
fds.fd = d;
fds.events = POLLOUT;
fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
errno = 0;
- int result = error_wrapper(::poll(&fds, 1, -1), ec);
- if (result >= 0)
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
ec = boost::system::error_code();
return result;
}
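With the new state argument, poll_read() and poll_write() only block when the descriptor is in blocking mode; for a user-set non-blocking descriptor they poll with a zero timeout and report a timeout as would_block instead of stalling the calling thread. A standalone sketch of the same decision against the raw poll(2) API; the flag value and function name are invented for the sketch:

    #include <cerrno>
    #include <poll.h>

    enum { user_set_non_blocking_flag = 1 }; // assumed flag value

    // Block indefinitely for a blocking descriptor, but just sample readiness
    // (timeout 0) and map a timeout to EWOULDBLOCK when the user asked for
    // non-blocking behaviour.
    inline int wait_readable(int fd, unsigned char state, int& error)
    {
      pollfd fds;
      fds.fd = fd;
      fds.events = POLLIN;
      fds.revents = 0;

      int timeout = (state & user_set_non_blocking_flag) ? 0 : -1;

      int result = ::poll(&fds, 1, timeout);
      if (result == 0)
        error = (state & user_set_non_blocking_flag) ? EWOULDBLOCK : 0;
      else if (result > 0)
        error = 0;
      else
        error = errno;
      return result;
    }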
diff --git a/include/boost/asio/detail/impl/epoll_reactor.ipp b/include/boost/asio/detail/impl/epoll_reactor.ipp
index 3be24262..9e4ec39b 100644
--- a/include/boost/asio/detail/impl/epoll_reactor.ipp
+++ b/include/boost/asio/detail/impl/epoll_reactor.ipp
@@ -148,13 +148,19 @@ void epoll_reactor::init_task()
int epoll_reactor::register_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
- mutex::scoped_lock lock(registered_descriptors_mutex_);
+ descriptor_data = allocate_descriptor_state();
- descriptor_data = registered_descriptors_.alloc();
- descriptor_data->descriptor_ = descriptor;
- descriptor_data->shutdown_ = false;
+ {
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
- lock.unlock();
+ descriptor_data->reactor_ = this;
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->op_queue_is_empty_[i] =
+ descriptor_data->op_queue_[i].empty();
+ }
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
@@ -170,14 +176,20 @@ int epoll_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
- mutex::scoped_lock lock(registered_descriptors_mutex_);
+ descriptor_data = allocate_descriptor_state();
- descriptor_data = registered_descriptors_.alloc();
- descriptor_data->descriptor_ = descriptor;
- descriptor_data->shutdown_ = false;
- descriptor_data->op_queue_[op_type].push(op);
+ {
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
- lock.unlock();
+ descriptor_data->reactor_ = this;
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+ descriptor_data->op_queue_[op_type].push(op);
+
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->op_queue_is_empty_[i] =
+ descriptor_data->op_queue_[i].empty();
+ }
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
@@ -208,6 +220,22 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
return;
}
+ bool perform_speculative = allow_speculative;
+ if (perform_speculative)
+ {
+ if (descriptor_data->op_queue_is_empty_[op_type]
+ && (op_type != read_op
+ || descriptor_data->op_queue_is_empty_[except_op]))
+ {
+ if (op->perform())
+ {
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+ perform_speculative = false;
+ }
+ }
+
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
@@ -216,17 +244,24 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
return;
}
- if (descriptor_data->op_queue_[op_type].empty())
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->op_queue_is_empty_[i] =
+ descriptor_data->op_queue_[i].empty();
+
+ if (descriptor_data->op_queue_is_empty_[op_type])
{
- if (allow_speculative
- && (op_type != read_op
- || descriptor_data->op_queue_[except_op].empty()))
+ if (allow_speculative)
{
- if (op->perform())
+ if (perform_speculative
+ && (op_type != read_op
+ || descriptor_data->op_queue_is_empty_[except_op]))
{
- descriptor_lock.unlock();
- io_service_.post_immediate_completion(op);
- return;
+ if (op->perform())
+ {
+ descriptor_lock.unlock();
+ io_service_.post_immediate_completion(op);
+ return;
+ }
}
}
else
@@ -240,6 +275,7 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
}
descriptor_data->op_queue_[op_type].push(op);
+ descriptor_data->op_queue_is_empty_[op_type] = false;
io_service_.work_started();
}
@@ -274,7 +310,6 @@ void epoll_reactor::deregister_descriptor(socket_type descriptor,
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
- mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
if (!descriptor_data->shutdown_)
{
@@ -305,11 +340,9 @@ void epoll_reactor::deregister_descriptor(socket_type descriptor,
descriptor_lock.unlock();
- registered_descriptors_.free(descriptor_data);
+ free_descriptor_state(descriptor_data);
descriptor_data = 0;
- descriptors_lock.unlock();
-
io_service_.post_deferred_completions(ops);
}
}
@@ -321,7 +354,6 @@ void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
- mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
if (!descriptor_data->shutdown_)
{
@@ -337,15 +369,19 @@ void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
descriptor_lock.unlock();
- registered_descriptors_.free(descriptor_data);
+ free_descriptor_state(descriptor_data);
descriptor_data = 0;
-
- descriptors_lock.unlock();
}
}
void epoll_reactor::run(bool block, op_queue<operation>& ops)
{
+ // This code relies on the fact that the task_io_service queues the reactor
+ // task behind all descriptor operations generated by this function. This
+ // means that by the time we reach this point, any previously returned
+ // descriptor operations have already been dequeued. Therefore it is now safe
+ // for us to reuse and return them for the task_io_service to queue again.
+
// Calculate a timeout only if timerfd is not used.
int timeout;
if (timer_fd_ != -1)
@@ -392,28 +428,12 @@ void epoll_reactor::run(bool block, op_queue& ops)
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
else
{
+ // The descriptor operation doesn't count as work in and of itself, so we
+ // don't call work_started() here. This still allows the io_service to
+ // stop if the only remaining operations are descriptor operations.
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
- mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
-
- // Exception operations must be processed first to ensure that any
- // out-of-band data is read before normal data.
- static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
- for (int j = max_ops - 1; j >= 0; --j)
- {
- if (events[i].events & (flag[j] | EPOLLERR | EPOLLHUP))
- {
- while (reactor_op* op = descriptor_data->op_queue_[j].front())
- {
- if (op->perform())
- {
- descriptor_data->op_queue_[j].pop();
- ops.push(op);
- }
- else
- break;
- }
- }
- }
+ descriptor_data->set_ready_events(events[i].events);
+ ops.push(descriptor_data);
}
}
@@ -491,6 +511,18 @@ int epoll_reactor::do_timerfd_create()
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
}
+epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ return registered_descriptors_.alloc();
+}
+
+void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ registered_descriptors_.free(s);
+}
+
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
@@ -539,6 +571,92 @@ int epoll_reactor::get_timeout(itimerspec& ts)
}
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+struct epoll_reactor::perform_io_cleanup_on_block_exit
+{
+ explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
+ : reactor_(r), first_op_(0)
+ {
+ }
+
+ ~perform_io_cleanup_on_block_exit()
+ {
+ if (first_op_)
+ {
+ // Post the remaining completed operations for invocation.
+ if (!ops_.empty())
+ reactor_->io_service_.post_deferred_completions(ops_);
+
+ // A user-initiated operation has completed, but there's no need to
+ // explicitly call work_finished() here. Instead, we'll take advantage of
+ // the fact that the task_io_service will call work_finished() once we
+ // return.
+ }
+ else
+ {
+ // No user-initiated operations have completed, so we need to compensate
+ // for the work_finished() call that the task_io_service will make once
+ // this operation returns.
+ reactor_->io_service_.work_started();
+ }
+ }
+
+ epoll_reactor* reactor_;
+ op_queue<operation> ops_;
+ operation* first_op_;
+};
+
+epoll_reactor::descriptor_state::descriptor_state()
+ : operation(&epoll_reactor::descriptor_state::do_complete)
+{
+}
+
+operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
+{
+ perform_io_cleanup_on_block_exit io_cleanup(reactor_);
+ mutex::scoped_lock descriptor_lock(mutex_);
+
+ // Exception operations must be processed first to ensure that any
+ // out-of-band data is read before normal data.
+ static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
+ for (int j = max_ops - 1; j >= 0; --j)
+ {
+ if (events & (flag[j] | EPOLLERR | EPOLLHUP))
+ {
+ while (reactor_op* op = op_queue_[j].front())
+ {
+ if (op->perform())
+ {
+ op_queue_[j].pop();
+ io_cleanup.ops_.push(op);
+ }
+ else
+ break;
+ }
+ }
+ }
+
+ // The first operation will be returned for completion now. The others will
+ // be posted for later by the io_cleanup object's destructor.
+ io_cleanup.first_op_ = io_cleanup.ops_.front();
+ io_cleanup.ops_.pop();
+ return io_cleanup.first_op_;
+}
+
+void epoll_reactor::descriptor_state::do_complete(
+ io_service_impl* owner, operation* base,
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
+{
+ if (owner)
+ {
+ descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);
+ uint32_t events = static_cast<uint32_t>(bytes_transferred);
+ if (operation* op = descriptor_data->perform_io(events))
+ {
+ op->complete(*owner, ec, 0);
+ }
+ }
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
diff --git a/include/boost/asio/detail/impl/kqueue_reactor.ipp b/include/boost/asio/detail/impl/kqueue_reactor.ipp
index f56c4c7c..8402a9ec 100644
--- a/include/boost/asio/detail/impl/kqueue_reactor.ipp
+++ b/include/boost/asio/detail/impl/kqueue_reactor.ipp
@@ -132,6 +132,10 @@ int kqueue_reactor::register_descriptor(socket_type descriptor,
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->op_queue_is_empty_[i] =
+ descriptor_data->op_queue_[i].empty();
+
return 0;
}
@@ -146,6 +150,10 @@ int kqueue_reactor::register_internal_descriptor(
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->op_queue_is_empty_[i] =
+ descriptor_data->op_queue_[i].empty();
+
struct kevent event;
switch (op_type)
{
@@ -186,6 +194,21 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
return;
}
+ if (allow_speculative)
+ {
+ if (descriptor_data->op_queue_is_empty_[op_type]
+ && (op_type != read_op
+ || descriptor_data->op_queue_is_empty_[except_op]))
+ {
+ if (op->perform())
+ {
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+ allow_speculative = false;
+ }
+ }
+
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
@@ -194,12 +217,16 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
return;
}
- bool first = descriptor_data->op_queue_[op_type].empty();
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->op_queue_is_empty_[i] =
+ descriptor_data->op_queue_[i].empty();
+
+ bool first = descriptor_data->op_queue_is_empty_[op_type];
if (first)
{
if (allow_speculative)
{
- if (op_type != read_op || descriptor_data->op_queue_[except_op].empty())
+ if (op_type != read_op || descriptor_data->op_queue_is_empty_[except_op])
{
if (op->perform())
{
@@ -212,6 +239,7 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
}
descriptor_data->op_queue_[op_type].push(op);
+ descriptor_data->op_queue_is_empty_[op_type] = false;
io_service_.work_started();
if (first)
diff --git a/include/boost/asio/detail/impl/select_reactor.ipp b/include/boost/asio/detail/impl/select_reactor.ipp
index 71173539..4d42bed4 100644
--- a/include/boost/asio/detail/impl/select_reactor.ipp
+++ b/include/boost/asio/detail/impl/select_reactor.ipp
@@ -172,27 +172,28 @@ void select_reactor::run(bool block, op_queue& ops)
#endif // defined(BOOST_ASIO_HAS_IOCP)
// Set up the descriptor sets.
- fd_set_adapter fds[max_select_ops];
- fds[read_op].set(interrupter_.read_descriptor());
+ for (int i = 0; i < max_select_ops; ++i)
+ fd_sets_[i].reset();
+ fd_sets_[read_op].set(interrupter_.read_descriptor());
socket_type max_fd = 0;
bool have_work_to_do = !timer_queues_.all_empty();
for (int i = 0; i < max_select_ops; ++i)
{
have_work_to_do = have_work_to_do || !op_queue_[i].empty();
- op_queue_[i].get_descriptors(fds[i], ops);
- if (fds[i].max_descriptor() > max_fd)
- max_fd = fds[i].max_descriptor();
+ op_queue_[i].get_descriptors(fd_sets_[i], ops);
+ if (fd_sets_[i].max_descriptor() > max_fd)
+ max_fd = fd_sets_[i].max_descriptor();
}
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();
- op_queue_[connect_op].get_descriptors(fds[write_op], ops);
- if (fds[write_op].max_descriptor() > max_fd)
- max_fd = fds[write_op].max_descriptor();
- op_queue_[connect_op].get_descriptors(fds[except_op], ops);
- if (fds[except_op].max_descriptor() > max_fd)
- max_fd = fds[except_op].max_descriptor();
+ op_queue_[connect_op].get_descriptors(fd_sets_[write_op], ops);
+ if (fd_sets_[write_op].max_descriptor() > max_fd)
+ max_fd = fd_sets_[write_op].max_descriptor();
+ op_queue_[connect_op].get_descriptors(fd_sets_[except_op], ops);
+ if (fd_sets_[except_op].max_descriptor() > max_fd)
+ max_fd = fd_sets_[except_op].max_descriptor();
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// We can return immediately if there's no work to do and the reactor is
@@ -209,11 +210,14 @@ void select_reactor::run(bool block, op_queue& ops)
// Block on the select call until descriptors become ready.
boost::system::error_code ec;
int retval = socket_ops::select(static_cast<int>(max_fd + 1),
- fds[read_op], fds[write_op], fds[except_op], tv, ec);
+ fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);
// Reset the interrupter.
- if (retval > 0 && fds[read_op].is_set(interrupter_.read_descriptor()))
+ if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))
+ {
interrupter_.reset();
+ --retval;
+ }
lock.lock();
@@ -223,15 +227,15 @@ void select_reactor::run(bool block, op_queue& ops)
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
op_queue_[connect_op].perform_operations_for_descriptors(
- fds[except_op], ops);
+ fd_sets_[except_op], ops);
op_queue_[connect_op].perform_operations_for_descriptors(
- fds[write_op], ops);
+ fd_sets_[write_op], ops);
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
for (int i = max_select_ops - 1; i >= 0; --i)
- op_queue_[i].perform_operations_for_descriptors(fds[i], ops);
+ op_queue_[i].perform_operations_for_descriptors(fd_sets_[i], ops);
}
timer_queues_.get_ready_timers(ops);
}
diff --git a/include/boost/asio/detail/impl/service_registry.hpp b/include/boost/asio/detail/impl/service_registry.hpp
index 97f5771c..ffe09d48 100644
--- a/include/boost/asio/detail/impl/service_registry.hpp
+++ b/include/boost/asio/detail/impl/service_registry.hpp
@@ -21,6 +21,24 @@ namespace boost {
namespace asio {
namespace detail {
+template <typename Service, typename Arg>
+service_registry::service_registry(
+ boost::asio::io_service& o, Service*, Arg arg)
+ : owner_(o),
+ first_service_(new Service(o, arg))
+{
+ boost::asio::io_service::service::key key;
+ init_key(key, Service::id);
+ first_service_->key_ = key;
+ first_service_->next_ = 0;
+}
+
+template <typename Service>
+Service& service_registry::first_service()
+{
+ return *static_cast<Service*>(first_service_);
+}
+
template <typename Service>
Service& service_registry::use_service()
{
diff --git a/include/boost/asio/detail/impl/service_registry.ipp b/include/boost/asio/detail/impl/service_registry.ipp
index 8c80f6cc..822fedc5 100644
--- a/include/boost/asio/detail/impl/service_registry.ipp
+++ b/include/boost/asio/detail/impl/service_registry.ipp
@@ -26,12 +26,6 @@ namespace boost {
namespace asio {
namespace detail {
-service_registry::service_registry(boost::asio::io_service& o)
- : owner_(o),
- first_service_(0)
-{
-}
-
service_registry::~service_registry()
{
// Shutdown all services. This must be done in a separate loop before the
diff --git a/include/boost/asio/detail/impl/signal_set_service.ipp b/include/boost/asio/detail/impl/signal_set_service.ipp
index c1dedd4c..8af50860 100644
--- a/include/boost/asio/detail/impl/signal_set_service.ipp
+++ b/include/boost/asio/detail/impl/signal_set_service.ipp
@@ -71,7 +71,7 @@ void asio_signal_handler(int signal_number)
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
#if defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
- signal(signal_number, asio_signal_handler);
+ ::signal(signal_number, asio_signal_handler);
#endif // defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
}
@@ -98,7 +98,8 @@ public:
}
static void do_complete(io_service_impl* /*owner*/, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
pipe_read_op* o(static_cast<pipe_read_op*>(base));
delete o;
diff --git a/include/boost/asio/detail/impl/socket_ops.ipp b/include/boost/asio/detail/impl/socket_ops.ipp
index 55b63481..b2ccb82a 100644
--- a/include/boost/asio/detail/impl/socket_ops.ipp
+++ b/include/boost/asio/detail/impl/socket_ops.ipp
@@ -148,7 +148,7 @@ socket_type sync_accept(socket_type s, state_type state,
return invalid_socket;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, ec) < 0)
+ if (socket_ops::poll_read(s, 0, ec) < 0)
return invalid_socket;
}
}
@@ -735,7 +735,7 @@ size_t sync_recv(socket_type s, state_type state, buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, ec) < 0)
+ if (socket_ops::poll_read(s, 0, ec) < 0)
return 0;
}
}
@@ -873,7 +873,7 @@ size_t sync_recvfrom(socket_type s, state_type state, buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, ec) < 0)
+ if (socket_ops::poll_read(s, 0, ec) < 0)
return 0;
}
}
@@ -984,7 +984,7 @@ size_t sync_recvmsg(socket_type s, state_type state,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, ec) < 0)
+ if (socket_ops::poll_read(s, 0, ec) < 0)
return 0;
}
}
@@ -1110,7 +1110,7 @@ size_t sync_send(socket_type s, state_type state, const buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_write(s, ec) < 0)
+ if (socket_ops::poll_write(s, 0, ec) < 0)
return 0;
}
}
@@ -1233,7 +1233,7 @@ size_t sync_sendto(socket_type s, state_type state, const buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_write(s, ec) < 0)
+ if (socket_ops::poll_write(s, 0, ec) < 0)
return 0;
}
}
@@ -1683,7 +1683,7 @@ int select(int nfds, fd_set* readfds, fd_set* writefds,
#endif
}
-int poll_read(socket_type s, boost::system::error_code& ec)
+int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1697,11 +1697,12 @@ int poll_read(socket_type s, boost::system::error_code& ec)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
+ timeval zero_timeout;
+ zero_timeout.tv_sec = 0;
+ zero_timeout.tv_usec = 0;
+ timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
clear_last_error();
- int result = error_wrapper(::select(s, &fds, 0, 0, 0), ec);
- if (result >= 0)
- ec = boost::system::error_code();
- return result;
+ int result = error_wrapper(::select(s, &fds, 0, 0, timeout), ec);
#else // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
@@ -1709,17 +1710,21 @@ int poll_read(socket_type s, boost::system::error_code& ec)
fds.fd = s;
fds.events = POLLIN;
fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
clear_last_error();
- int result = error_wrapper(::poll(&fds, 1, -1), ec);
- if (result >= 0)
- ec = boost::system::error_code();
- return result;
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
#endif // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
}
-int poll_write(socket_type s, boost::system::error_code& ec)
+int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1733,11 +1738,12 @@ int poll_write(socket_type s, boost::system::error_code& ec)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
+ timeval zero_timeout;
+ zero_timeout.tv_sec = 0;
+ zero_timeout.tv_usec = 0;
+ timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
clear_last_error();
- int result = error_wrapper(::select(s, 0, &fds, 0, 0), ec);
- if (result >= 0)
- ec = boost::system::error_code();
- return result;
+ int result = error_wrapper(::select(s, 0, &fds, 0, timeout), ec);
#else // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
@@ -1745,14 +1751,18 @@ int poll_write(socket_type s, boost::system::error_code& ec)
fds.fd = s;
fds.events = POLLOUT;
fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
clear_last_error();
- int result = error_wrapper(::poll(&fds, 1, -1), ec);
- if (result >= 0)
- ec = boost::system::error_code();
- return result;
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
#endif // defined(BOOST_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
}
int poll_connect(socket_type s, boost::system::error_code& ec)
diff --git a/include/boost/asio/detail/impl/strand_service.hpp b/include/boost/asio/detail/impl/strand_service.hpp
index bb3698a2..4194a253 100644
--- a/include/boost/asio/detail/impl/strand_service.hpp
+++ b/include/boost/asio/detail/impl/strand_service.hpp
@@ -61,7 +61,7 @@ void strand_service::dispatch(strand_service::implementation_type& impl,
// If we are already in the strand then the handler can run immediately.
if (call_stack<strand_impl>::contains(impl))
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::full);
boost_asio_handler_invoke_helpers::invoke(handler, handler);
return;
}
diff --git a/include/boost/asio/detail/impl/strand_service.ipp b/include/boost/asio/detail/impl/strand_service.ipp
index 62a8d5c0..a375cf68 100644
--- a/include/boost/asio/detail/impl/strand_service.ipp
+++ b/include/boost/asio/detail/impl/strand_service.ipp
@@ -79,7 +79,7 @@ bool strand_service::do_dispatch(implementation_type& impl, operation* op)
{
// If we are running inside the io_service, and no other handler is queued
// or running, then the handler can run immediately.
- bool can_dispatch = call_stack<io_service_impl>::contains(&io_service_);
+ bool can_dispatch = io_service_.can_dispatch();
impl->mutex_.lock();
bool first = (++impl->count_ == 1);
if (can_dispatch && first)
@@ -115,7 +115,7 @@ void strand_service::do_post(implementation_type& impl, operation* op)
}
void strand_service::do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& ec, std::size_t /*bytes_transferred*/)
{
if (owner)
{
@@ -134,7 +134,7 @@ void strand_service::do_complete(io_service_impl* owner, operation* base,
on_do_complete_exit on_exit = { owner, impl };
(void)on_exit;
- o->complete(*owner);
+ o->complete(*owner, ec, 0);
}
}
diff --git a/include/boost/asio/detail/impl/task_io_service.hpp b/include/boost/asio/detail/impl/task_io_service.hpp
index ee23cb90..39888562 100644
--- a/include/boost/asio/detail/impl/task_io_service.hpp
+++ b/include/boost/asio/detail/impl/task_io_service.hpp
@@ -15,7 +15,6 @@
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-#include
#include
#include
#include
@@ -30,9 +29,9 @@ namespace detail {
template <typename Handler>
void task_io_service::dispatch(Handler handler)
{
- if (call_stack<task_io_service>::contains(this))
+ if (thread_call_stack::contains(this))
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::full);
boost_asio_handler_invoke_helpers::invoke(handler, handler);
}
else
diff --git a/include/boost/asio/detail/impl/task_io_service.ipp b/include/boost/asio/detail/impl/task_io_service.ipp
index 5b1d0697..11320e57 100644
--- a/include/boost/asio/detail/impl/task_io_service.ipp
+++ b/include/boost/asio/detail/impl/task_io_service.ipp
@@ -20,7 +20,6 @@
#if !defined(BOOST_ASIO_HAS_IOCP)
#include
-#include
#include
#include
#include
@@ -48,24 +47,37 @@ struct task_io_service::task_cleanup
op_queue<operation>* ops_;
};
-struct task_io_service::work_finished_on_block_exit
+struct task_io_service::work_cleanup
{
- ~work_finished_on_block_exit()
+ ~work_cleanup()
{
task_io_service_->work_finished();
+
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ if (!ops_->empty())
+ {
+ lock_->lock();
+ task_io_service_->op_queue_.push(*ops_);
+ }
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
}
task_io_service* task_io_service_;
+ mutex::scoped_lock* lock_;
+ op_queue<operation>* ops_;
};
-struct task_io_service::idle_thread_info
+struct task_io_service::thread_info
{
- event wakeup_event;
- idle_thread_info* next;
+ event* wakeup_event;
+ op_queue<operation>* private_op_queue;
+ thread_info* next;
};
-task_io_service::task_io_service(boost::asio::io_service& io_service)
+task_io_service::task_io_service(
+ boost::asio::io_service& io_service, std::size_t concurrency_hint)
: boost::asio::detail::service_base<task_io_service>(io_service),
+ one_thread_(concurrency_hint == 1),
mutex_(),
task_(0),
task_interrupted_(true),
@@ -77,10 +89,6 @@ task_io_service::task_io_service(boost::asio::io_service& io_service)
BOOST_ASIO_HANDLER_TRACKING_INIT;
}
-void task_io_service::init(std::size_t /*concurrency_hint*/)
-{
-}
-
void task_io_service::shutdown_service()
{
mutex::scoped_lock lock(mutex_);
@@ -120,15 +128,22 @@ std::size_t task_io_service::run(boost::system::error_code& ec)
return 0;
}
- call_stack<task_io_service>::context ctx(this);
-
- idle_thread_info this_idle_thread;
- this_idle_thread.next = 0;
+ thread_info this_thread;
+ event wakeup_event;
+ this_thread.wakeup_event = &wakeup_event;
+ op_queue<operation> private_op_queue;
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.private_op_queue = one_thread_ == 1 ? &private_op_queue : 0;
+#else // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.private_op_queue = 0;
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
std::size_t n = 0;
- for (; do_one(lock, &this_idle_thread); lock.lock())
+ for (; do_run_one(lock, this_thread, private_op_queue, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
@@ -143,31 +158,53 @@ std::size_t task_io_service::run_one(boost::system::error_code& ec)
return 0;
}
- call_stack<task_io_service>::context ctx(this);
-
- idle_thread_info this_idle_thread;
- this_idle_thread.next = 0;
+ thread_info this_thread;
+ event wakeup_event;
+ this_thread.wakeup_event = &wakeup_event;
+ op_queue<operation> private_op_queue;
+ this_thread.private_op_queue = 0;
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
- return do_one(lock, &this_idle_thread);
+ return do_run_one(lock, this_thread, private_op_queue, ec);
}
std::size_t task_io_service::poll(boost::system::error_code& ec)
{
+ ec = boost::system::error_code();
if (outstanding_work_ == 0)
{
stop();
- ec = boost::system::error_code();
return 0;
}
- call_stack<task_io_service>::context ctx(this);
+ thread_info this_thread;
+ this_thread.wakeup_event = 0;
+ op_queue<operation> private_op_queue;
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.private_op_queue = one_thread_ == 1 ? &private_op_queue : 0;
+#else // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.private_op_queue = 0;
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ // We want to support nested calls to poll() and poll_one(), so any handlers
+ // that are already on a thread-private queue need to be put on to the main
+ // queue now.
+ if (one_thread_)
+ if (thread_info* outer_thread_info = ctx.next_by_key())
+ if (outer_thread_info->private_op_queue)
+ op_queue_.push(*outer_thread_info->private_op_queue);
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
std::size_t n = 0;
- for (; do_one(lock, 0); lock.lock())
+ for (; do_poll_one(lock, private_op_queue, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
@@ -182,11 +219,26 @@ std::size_t task_io_service::poll_one(boost::system::error_code& ec)
return 0;
}
- call_stack<task_io_service>::context ctx(this);
+ thread_info this_thread;
+ this_thread.wakeup_event = 0;
+ op_queue<operation> private_op_queue;
+ this_thread.private_op_queue = 0;
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
- return do_one(lock, 0);
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ // We want to support nested calls to poll() and poll_one(), so any handlers
+ // that are already on a thread-private queue need to be put on to the main
+ // queue now.
+ if (one_thread_)
+ if (thread_info* outer_thread_info = ctx.next_by_key())
+ if (outer_thread_info->private_op_queue)
+ op_queue_.push(*outer_thread_info->private_op_queue);
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+ return do_poll_one(lock, private_op_queue, ec);
}
void task_io_service::stop()
@@ -215,6 +267,20 @@ void task_io_service::post_immediate_completion(task_io_service::operation* op)
void task_io_service::post_deferred_completion(task_io_service::operation* op)
{
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ if (one_thread_)
+ {
+ if (thread_info* this_thread = thread_call_stack::contains(this))
+ {
+ if (this_thread->private_op_queue)
+ {
+ this_thread->private_op_queue->push(op);
+ return;
+ }
+ }
+ }
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
@@ -225,6 +291,20 @@ void task_io_service::post_deferred_completions(
{
if (!ops.empty())
{
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ if (one_thread_)
+ {
+ if (thread_info* this_thread = thread_call_stack::contains(this))
+ {
+ if (this_thread->private_op_queue)
+ {
+ this_thread->private_op_queue->push(ops);
+ return;
+ }
+ }
+ }
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
@@ -238,11 +318,10 @@ void task_io_service::abandon_operations(
ops2.push(ops);
}
-std::size_t task_io_service::do_one(mutex::scoped_lock& lock,
- task_io_service::idle_thread_info* this_idle_thread)
+std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
+ task_io_service::thread_info& this_thread,
+ op_queue<operation>& private_op_queue, const boost::system::error_code& ec)
{
- bool polling = !this_idle_thread;
- bool task_has_run = false;
while (!stopped_)
{
if (!op_queue_.empty())
@@ -254,63 +333,105 @@ std::size_t task_io_service::do_one(mutex::scoped_lock& lock,
if (o == &task_operation_)
{
- task_interrupted_ = more_handlers || polling;
+ task_interrupted_ = more_handlers;
- // If the task has already run and we're polling then we're done.
- if (task_has_run && polling)
- {
- task_interrupted_ = true;
- op_queue_.push(&task_operation_);
- return 0;
- }
- task_has_run = true;
-
- if (!more_handlers || !wake_one_idle_thread_and_unlock(lock))
+ if (more_handlers && !one_thread_)
+ wake_one_idle_thread_and_unlock(lock);
+ else
lock.unlock();
op_queue<operation> completed_ops;
- task_cleanup c = { this, &lock, &completed_ops };
- (void)c;
+ task_cleanup on_exit = { this, &lock, &completed_ops };
+ (void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
- task_->run(!more_handlers && !polling, completed_ops);
+ task_->run(!more_handlers, completed_ops);
}
else
{
- if (more_handlers)
+ std::size_t task_result = o->task_result_;
+
+ if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
- work_finished_on_block_exit on_exit = { this };
+ work_cleanup on_exit = { this, &lock, &private_op_queue };
(void)on_exit;
- // Complete the operation. May throw an exception.
- o->complete(*this); // deletes the operation object
+ // Complete the operation. May throw an exception. Deletes the object.
+ o->complete(*this, ec, task_result);
return 1;
}
}
- else if (this_idle_thread)
- {
- // Nothing to run right now, so just wait for work to do.
- this_idle_thread->next = first_idle_thread_;
- first_idle_thread_ = this_idle_thread;
- this_idle_thread->wakeup_event.clear(lock);
- this_idle_thread->wakeup_event.wait(lock);
- }
else
{
- return 0;
+ // Nothing to run right now, so just wait for work to do.
+ this_thread.next = first_idle_thread_;
+ first_idle_thread_ = &this_thread;
+ this_thread.wakeup_event->clear(lock);
+ this_thread.wakeup_event->wait(lock);
}
}
return 0;
}
+std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
+ op_queue<operation>& private_op_queue, const boost::system::error_code& ec)
+{
+ if (stopped_)
+ return 0;
+
+ operation* o = op_queue_.front();
+ if (o == &task_operation_)
+ {
+ op_queue_.pop();
+ lock.unlock();
+
+ {
+ op_queue<operation> completed_ops;
+ task_cleanup c = { this, &lock, &completed_ops };
+ (void)c;
+
+ // Run the task. May throw an exception. Only block if the operation
+ // queue is empty and we're not polling, otherwise we want to return
+ // as soon as possible.
+ task_->run(false, completed_ops);
+ }
+
+ o = op_queue_.front();
+ if (o == &task_operation_)
+ return 0;
+ }
+
+ if (o == 0)
+ return 0;
+
+ op_queue_.pop();
+ bool more_handlers = (!op_queue_.empty());
+
+ std::size_t task_result = o->task_result_;
+
+ if (more_handlers && !one_thread_)
+ wake_one_thread_and_unlock(lock);
+ else
+ lock.unlock();
+
+ // Ensure the count of outstanding work is decremented on block exit.
+ work_cleanup on_exit = { this, &lock, &private_op_queue };
+ (void)on_exit;
+
+ // Complete the operation. May throw an exception. Deletes the object.
+ o->complete(*this, ec, task_result);
+
+ return 1;
+}
+
void task_io_service::stop_all_threads(
mutex::scoped_lock& lock)
{
@@ -318,10 +439,10 @@ void task_io_service::stop_all_threads(
while (first_idle_thread_)
{
- idle_thread_info* idle_thread = first_idle_thread_;
+ thread_info* idle_thread = first_idle_thread_;
first_idle_thread_ = idle_thread->next;
idle_thread->next = 0;
- idle_thread->wakeup_event.signal(lock);
+ idle_thread->wakeup_event->signal(lock);
}
if (!task_interrupted_ && task_)
@@ -336,10 +457,10 @@ bool task_io_service::wake_one_idle_thread_and_unlock(
{
if (first_idle_thread_)
{
- idle_thread_info* idle_thread = first_idle_thread_;
+ thread_info* idle_thread = first_idle_thread_;
first_idle_thread_ = idle_thread->next;
idle_thread->next = 0;
- idle_thread->wakeup_event.signal_and_unlock(lock);
+ idle_thread->wakeup_event->signal_and_unlock(lock);
return true;
}
return false;
diff --git a/include/boost/asio/detail/impl/win_iocp_io_service.hpp b/include/boost/asio/detail/impl/win_iocp_io_service.hpp
index f174dc70..a8155741 100644
--- a/include/boost/asio/detail/impl/win_iocp_io_service.hpp
+++ b/include/boost/asio/detail/impl/win_iocp_io_service.hpp
@@ -19,7 +19,6 @@
#if defined(BOOST_ASIO_HAS_IOCP)
-#include
#include
#include
#include
@@ -36,7 +35,7 @@ void win_iocp_io_service::dispatch(Handler handler)
{
if (call_stack::contains(this))
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::full);
boost_asio_handler_invoke_helpers::invoke(handler, handler);
}
else
diff --git a/include/boost/asio/detail/impl/win_iocp_io_service.ipp b/include/boost/asio/detail/impl/win_iocp_io_service.ipp
index 46076690..d142fcff 100644
--- a/include/boost/asio/detail/impl/win_iocp_io_service.ipp
+++ b/include/boost/asio/detail/impl/win_iocp_io_service.ipp
@@ -62,7 +62,8 @@ struct win_iocp_io_service::timer_thread_function
win_iocp_io_service* io_service_;
};
-win_iocp_io_service::win_iocp_io_service(boost::asio::io_service& io_service)
+win_iocp_io_service::win_iocp_io_service(
+ boost::asio::io_service& io_service, size_t concurrency_hint)
: boost::asio::detail::service_base(io_service),
iocp_(),
outstanding_work_(0),
@@ -71,10 +72,7 @@ win_iocp_io_service::win_iocp_io_service(boost::asio::io_service& io_service)
dispatch_required_(0)
{
BOOST_ASIO_HANDLER_TRACKING_INIT;
-}
-void win_iocp_io_service::init(size_t concurrency_hint)
-{
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
static_cast<DWORD>((std::min<size_t>)(concurrency_hint, DWORD(~0))));
if (!iocp_.handle)
diff --git a/include/boost/asio/detail/kqueue_reactor.hpp b/include/boost/asio/detail/kqueue_reactor.hpp
index 707c2e52..b10b4ee3 100644
--- a/include/boost/asio/detail/kqueue_reactor.hpp
+++ b/include/boost/asio/detail/kqueue_reactor.hpp
@@ -62,12 +62,16 @@ public:
{
friend class kqueue_reactor;
friend class object_pool_access;
+
+ descriptor_state* next_;
+ descriptor_state* prev_;
+
+ bool op_queue_is_empty_[max_ops];
+
mutex mutex_;
int descriptor_;
op_queue op_queue_[max_ops];
bool shutdown_;
- descriptor_state* next_;
- descriptor_state* prev_;
};
// Per-descriptor data.
diff --git a/include/boost/asio/detail/macos_fenced_block.hpp b/include/boost/asio/detail/macos_fenced_block.hpp
index d37eea61..c042763e 100644
--- a/include/boost/asio/detail/macos_fenced_block.hpp
+++ b/include/boost/asio/detail/macos_fenced_block.hpp
@@ -31,8 +31,16 @@ class macos_fenced_block
: private noncopyable
{
public:
- // Constructor.
- macos_fenced_block()
+ enum half_t { half };
+ enum full_t { full };
+
+ // Constructor for a half fenced block.
+ explicit macos_fenced_block(half_t)
+ {
+ }
+
+ // Constructor for a full fenced block.
+ explicit macos_fenced_block(full_t)
{
OSMemoryBarrier();
}
diff --git a/include/boost/asio/detail/null_fenced_block.hpp b/include/boost/asio/detail/null_fenced_block.hpp
index 70680c5c..07a07d0a 100644
--- a/include/boost/asio/detail/null_fenced_block.hpp
+++ b/include/boost/asio/detail/null_fenced_block.hpp
@@ -25,8 +25,10 @@ class null_fenced_block
: private noncopyable
{
public:
+ enum half_or_full_t { half, full };
+
// Constructor.
- null_fenced_block()
+ explicit null_fenced_block(half_or_full_t)
{
}
diff --git a/include/boost/asio/detail/posix_fd_set_adapter.hpp b/include/boost/asio/detail/posix_fd_set_adapter.hpp
index b8be5960..8d9abb0e 100644
--- a/include/boost/asio/detail/posix_fd_set_adapter.hpp
+++ b/include/boost/asio/detail/posix_fd_set_adapter.hpp
@@ -20,6 +20,7 @@
#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
#include
+#include
#include
#include
@@ -29,7 +30,7 @@ namespace asio {
namespace detail {
// Adapts the FD_SET type to meet the Descriptor_Set concept's requirements.
-class posix_fd_set_adapter
+class posix_fd_set_adapter : noncopyable
{
public:
posix_fd_set_adapter()
@@ -39,6 +40,12 @@ public:
FD_ZERO(&fd_set_);
}
+ void reset()
+ {
+ using namespace std; // Needed for memset on Solaris.
+ FD_ZERO(&fd_set_);
+ }
+
bool set(socket_type descriptor)
{
if (descriptor < (socket_type)FD_SETSIZE)
diff --git a/include/boost/asio/detail/push_options.hpp b/include/boost/asio/detail/push_options.hpp
index 050549d4..00981a19 100644
--- a/include/boost/asio/detail/push_options.hpp
+++ b/include/boost/asio/detail/push_options.hpp
@@ -100,6 +100,7 @@
# pragma warning (disable:4103)
# pragma warning (push)
# pragma warning (disable:4127)
+# pragma warning (disable:4180)
# pragma warning (disable:4244)
# pragma warning (disable:4355)
# pragma warning (disable:4512)
diff --git a/include/boost/asio/detail/reactive_descriptor_service.hpp b/include/boost/asio/detail/reactive_descriptor_service.hpp
index 551ecaa1..0ee9442b 100644
--- a/include/boost/asio/detail/reactive_descriptor_service.hpp
+++ b/include/boost/asio/detail/reactive_descriptor_service.hpp
@@ -177,7 +177,7 @@ public:
const null_buffers&, boost::system::error_code& ec)
{
// Wait for descriptor to become ready.
- descriptor_ops::poll_write(impl.descriptor_, ec);
+ descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec);
return 0;
}
@@ -239,7 +239,7 @@ public:
const null_buffers&, boost::system::error_code& ec)
{
// Wait for descriptor to become ready.
- descriptor_ops::poll_read(impl.descriptor_, ec);
+ descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec);
return 0;
}
diff --git a/include/boost/asio/detail/reactive_null_buffers_op.hpp b/include/boost/asio/detail/reactive_null_buffers_op.hpp
index cf939b23..8913c8dc 100644
--- a/include/boost/asio/detail/reactive_null_buffers_op.hpp
+++ b/include/boost/asio/detail/reactive_null_buffers_op.hpp
@@ -47,7 +47,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_null_buffers_op* o(static_cast<reactive_null_buffers_op*>(base));
@@ -69,7 +70,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_accept_op.hpp b/include/boost/asio/detail/reactive_socket_accept_op.hpp
index 3957f3ae..cbfd632d 100644
--- a/include/boost/asio/detail/reactive_socket_accept_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_accept_op.hpp
@@ -95,7 +95,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_accept_op* o(static_cast<reactive_socket_accept_op*>(base));
@@ -117,7 +118,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_connect_op.hpp b/include/boost/asio/detail/reactive_socket_connect_op.hpp
index 66dddad8..a1bf150d 100644
--- a/include/boost/asio/detail/reactive_socket_connect_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_connect_op.hpp
@@ -64,7 +64,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_connect_op* o
@@ -87,7 +88,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_recv_op.hpp b/include/boost/asio/detail/reactive_socket_recv_op.hpp
index 255b0ee0..ac9b85c4 100644
--- a/include/boost/asio/detail/reactive_socket_recv_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_recv_op.hpp
@@ -82,7 +82,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_recv_op* o(static_cast<reactive_socket_recv_op*>(base));
@@ -104,7 +105,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_recvfrom_op.hpp b/include/boost/asio/detail/reactive_socket_recvfrom_op.hpp
index 601bcc46..31564c11 100644
--- a/include/boost/asio/detail/reactive_socket_recvfrom_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_recvfrom_op.hpp
@@ -91,7 +91,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_recvfrom_op* o(
@@ -114,7 +115,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_recvmsg_op.hpp b/include/boost/asio/detail/reactive_socket_recvmsg_op.hpp
index 56de631d..5ccf3fe5 100644
--- a/include/boost/asio/detail/reactive_socket_recvmsg_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_recvmsg_op.hpp
@@ -83,7 +83,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_recvmsg_op* o(
@@ -106,7 +107,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_send_op.hpp b/include/boost/asio/detail/reactive_socket_send_op.hpp
index 691a2205..578aaf7d 100644
--- a/include/boost/asio/detail/reactive_socket_send_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_send_op.hpp
@@ -79,7 +79,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_send_op* o(static_cast<reactive_socket_send_op*>(base));
@@ -101,7 +102,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_sendto_op.hpp b/include/boost/asio/detail/reactive_socket_sendto_op.hpp
index 712d4b71..2982b382 100644
--- a/include/boost/asio/detail/reactive_socket_sendto_op.hpp
+++ b/include/boost/asio/detail/reactive_socket_sendto_op.hpp
@@ -82,7 +82,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_sendto_op* o(static_cast<reactive_socket_sendto_op*>(base));
@@ -104,7 +105,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/reactive_socket_service.hpp b/include/boost/asio/detail/reactive_socket_service.hpp
index 76e9cbf7..f07c9de2 100644
--- a/include/boost/asio/detail/reactive_socket_service.hpp
+++ b/include/boost/asio/detail/reactive_socket_service.hpp
@@ -204,7 +204,7 @@ public:
boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_write(impl.socket_, ec);
+ socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -278,7 +278,7 @@ public:
boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_read(impl.socket_, ec);
+ socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();
diff --git a/include/boost/asio/detail/reactive_socket_service_base.hpp b/include/boost/asio/detail/reactive_socket_service_base.hpp
index 52aa4e38..fb1ad5d0 100644
--- a/include/boost/asio/detail/reactive_socket_service_base.hpp
+++ b/include/boost/asio/detail/reactive_socket_service_base.hpp
@@ -188,7 +188,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_write(impl.socket_, ec);
+ socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -253,7 +253,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_read(impl.socket_, ec);
+ socket_ops::poll_read(impl.socket_, impl.state_, ec);
return 0;
}
@@ -327,7 +327,7 @@ public:
socket_base::message_flags& out_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_read(impl.socket_, ec);
+ socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Clear out_flags, since we cannot give it any other sensible value when
// performing a null_buffers operation.
diff --git a/include/boost/asio/detail/resolve_endpoint_op.hpp b/include/boost/asio/detail/resolve_endpoint_op.hpp
index 102c62c7..773fc623 100644
--- a/include/boost/asio/detail/resolve_endpoint_op.hpp
+++ b/include/boost/asio/detail/resolve_endpoint_op.hpp
@@ -53,7 +53,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the operation object.
resolve_endpoint_op* o(static_cast<resolve_endpoint_op*>(base));
@@ -96,7 +97,7 @@ public:
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "..."));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/resolve_op.hpp b/include/boost/asio/detail/resolve_op.hpp
index 7eb70b84..81be6661 100644
--- a/include/boost/asio/detail/resolve_op.hpp
+++ b/include/boost/asio/detail/resolve_op.hpp
@@ -61,7 +61,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the operation object.
resolve_op* o(static_cast<resolve_op*>(base));
@@ -106,7 +107,7 @@ public:
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "..."));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/select_reactor.hpp b/include/boost/asio/detail/select_reactor.hpp
index 6cefe502..55476a76 100644
--- a/include/boost/asio/detail/select_reactor.hpp
+++ b/include/boost/asio/detail/select_reactor.hpp
@@ -24,6 +24,7 @@
#include
#include
+#include
#include
#include
#include
@@ -183,6 +184,9 @@ private:
// The queues of read, write and except operations.
reactor_op_queue op_queue_[max_ops];
+ // The file descriptor sets to be passed to the select system call.
+ fd_set_adapter fd_sets_[max_select_ops];
+
// The timer queues.
timer_queue_set timer_queues_;
diff --git a/include/boost/asio/detail/service_registry.hpp b/include/boost/asio/detail/service_registry.hpp
index ab97fdb6..e62c2482 100644
--- a/include/boost/asio/detail/service_registry.hpp
+++ b/include/boost/asio/detail/service_registry.hpp
@@ -52,8 +52,10 @@ class service_registry
: private noncopyable
{
public:
- // Constructor.
- BOOST_ASIO_DECL service_registry(boost::asio::io_service& o);
+ // Constructor. Adds the initial service.
+ template <typename Service, typename Arg>
+ service_registry(boost::asio::io_service& o,
+ Service* initial_service, Arg arg);
// Destructor.
BOOST_ASIO_DECL ~service_registry();
@@ -61,6 +63,11 @@ public:
// Notify all services of a fork event.
BOOST_ASIO_DECL void notify_fork(boost::asio::io_service::fork_event fork_ev);
+ // Get the first service object cast to the specified type. Called during
+ // io_service construction and so performs no locking or type checking.
+ template <typename Service>
+ Service& first_service();
+
// Get the service object corresponding to the specified service type. Will
// create a new service object automatically if no such object already
// exists. Ownership of the service object is not transferred to the caller.
@@ -123,8 +130,8 @@ private:
const boost::asio::io_service::service::key& key,
factory_type factory);
- // Add a service object. Returns false on error, in which case ownership of
- // the object is retained by the caller.
+ // Add a service object. Throws on error, in which case ownership of the
+ // object is retained by the caller.
BOOST_ASIO_DECL void do_add_service(
const boost::asio::io_service::service::key& key,
boost::asio::io_service::service* new_service);
diff --git a/include/boost/asio/detail/signal_handler.hpp b/include/boost/asio/detail/signal_handler.hpp
index 31480a12..3e3ad44d 100644
--- a/include/boost/asio/detail/signal_handler.hpp
+++ b/include/boost/asio/detail/signal_handler.hpp
@@ -40,7 +40,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
signal_handler* h(static_cast<signal_handler*>(base));
@@ -62,7 +63,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/signal_set_service.hpp b/include/boost/asio/detail/signal_set_service.hpp
index d91650f3..ce025682 100644
--- a/include/boost/asio/detail/signal_set_service.hpp
+++ b/include/boost/asio/detail/signal_set_service.hpp
@@ -17,8 +17,8 @@
#include
-#include
#include
+#include
#include
#include
#include
diff --git a/include/boost/asio/detail/socket_ops.hpp b/include/boost/asio/detail/socket_ops.hpp
index 9d1644cd..45640c7a 100644
--- a/include/boost/asio/detail/socket_ops.hpp
+++ b/include/boost/asio/detail/socket_ops.hpp
@@ -254,9 +254,11 @@ BOOST_ASIO_DECL int ioctl(socket_type s, state_type& state,
BOOST_ASIO_DECL int select(int nfds, fd_set* readfds, fd_set* writefds,
fd_set* exceptfds, timeval* timeout, boost::system::error_code& ec);
-BOOST_ASIO_DECL int poll_read(socket_type s, boost::system::error_code& ec);
+BOOST_ASIO_DECL int poll_read(socket_type s,
+ state_type state, boost::system::error_code& ec);
-BOOST_ASIO_DECL int poll_write(socket_type s, boost::system::error_code& ec);
+BOOST_ASIO_DECL int poll_write(socket_type s,
+ state_type state, boost::system::error_code& ec);
BOOST_ASIO_DECL int poll_connect(socket_type s, boost::system::error_code& ec);
diff --git a/include/boost/asio/detail/solaris_fenced_block.hpp b/include/boost/asio/detail/solaris_fenced_block.hpp
index 0b117ad6..0f5fd89c 100644
--- a/include/boost/asio/detail/solaris_fenced_block.hpp
+++ b/include/boost/asio/detail/solaris_fenced_block.hpp
@@ -31,8 +31,16 @@ class solaris_fenced_block
: private noncopyable
{
public:
- // Constructor.
- solaris_fenced_block()
+ enum half_t { half };
+ enum full_t { full };
+
+ // Constructor for a half fenced block.
+ explicit solaris_fenced_block(half_t)
+ {
+ }
+
+ // Constructor for a full fenced block.
+ explicit solaris_fenced_block(full_t)
{
membar_consumer();
}
diff --git a/include/boost/asio/detail/strand_service.hpp b/include/boost/asio/detail/strand_service.hpp
index ae111845..654132f9 100644
--- a/include/boost/asio/detail/strand_service.hpp
+++ b/include/boost/asio/detail/strand_service.hpp
@@ -95,7 +95,7 @@ private:
BOOST_ASIO_DECL void do_post(implementation_type& impl, operation* op);
BOOST_ASIO_DECL static void do_complete(io_service_impl* owner,
- operation* base, boost::system::error_code ec,
+ operation* base, const boost::system::error_code& ec,
std::size_t bytes_transferred);
// The io_service implementation used to post completions.
diff --git a/include/boost/asio/detail/task_io_service.hpp b/include/boost/asio/detail/task_io_service.hpp
index 08e7a572..b6b2587c 100644
--- a/include/boost/asio/detail/task_io_service.hpp
+++ b/include/boost/asio/detail/task_io_service.hpp
@@ -22,6 +22,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -40,11 +41,10 @@ class task_io_service
public:
typedef task_io_service_operation operation;
- // Constructor.
- BOOST_ASIO_DECL task_io_service(boost::asio::io_service& io_service);
-
- // How many concurrent threads are likely to run the io_service.
- BOOST_ASIO_DECL void init(std::size_t concurrency_hint);
+ // Constructor. Specifies the number of concurrent threads that are likely to
+ // run the io_service. If set to 1, certain optimisations are performed.
+ BOOST_ASIO_DECL task_io_service(boost::asio::io_service& io_service,
+ std::size_t concurrency_hint = 0);
// Destroy all user-defined handler objects owned by the service.
BOOST_ASIO_DECL void shutdown_service();
@@ -86,6 +86,12 @@ public:
stop();
}
+ // Return whether a handler can be dispatched immediately.
+ bool can_dispatch()
+ {
+ return thread_call_stack::contains(this);
+ }
+
// Request invocation of the given handler.
template <typename Handler>
void dispatch(Handler handler);
@@ -112,11 +118,17 @@ public:
private:
// Structure containing information about an idle thread.
- struct idle_thread_info;
+ struct thread_info;
// Run at most one operation. Blocks only if this_idle_thread is non-null.
- BOOST_ASIO_DECL std::size_t do_one(mutex::scoped_lock& lock,
- idle_thread_info* this_idle_thread);
+ BOOST_ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock,
+ thread_info& this_thread, op_queue<operation>& private_op_queue,
+ const boost::system::error_code& ec);
+
+ // Poll for at most one operation.
+ BOOST_ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock,
+ op_queue<operation>& private_op_queue,
+ const boost::system::error_code& ec);
// Stop the task and all idle threads.
BOOST_ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock);
@@ -135,8 +147,12 @@ private:
struct task_cleanup;
friend struct task_cleanup;
- // Helper class to call work_finished() on block exit.
- struct work_finished_on_block_exit;
+ // Helper class to call work-related operations on block exit.
+ struct work_cleanup;
+ friend struct work_cleanup;
+
+ // Whether to optimise for single-threaded use cases.
+ const bool one_thread_;
// Mutex to protect access to internal data.
mutable mutex mutex_;
@@ -165,8 +181,11 @@ private:
// Flag to indicate that the dispatcher has been shut down.
bool shutdown_;
+ // Per-thread call stack to track the state of each thread in the io_service.
+ typedef call_stack<task_io_service, thread_info> thread_call_stack;
+
// The threads that are currently idle.
- idle_thread_info* first_idle_thread_;
+ thread_info* first_idle_thread_;
};
} // namespace detail
diff --git a/include/boost/asio/detail/task_io_service_operation.hpp b/include/boost/asio/detail/task_io_service_operation.hpp
index 72cbefd6..dbb9a246 100644
--- a/include/boost/asio/detail/task_io_service_operation.hpp
+++ b/include/boost/asio/detail/task_io_service_operation.hpp
@@ -31,9 +31,10 @@ namespace detail {
class task_io_service_operation BOOST_ASIO_INHERIT_TRACKED_HANDLER
{
public:
- void complete(task_io_service& owner)
+ void complete(task_io_service& owner,
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
{
- func_(&owner, this, boost::system::error_code(), 0);
+ func_(&owner, this, ec, bytes_transferred);
}
void destroy()
@@ -44,11 +45,12 @@ public:
protected:
typedef void (*func_type)(task_io_service*,
task_io_service_operation*,
- boost::system::error_code, std::size_t);
+ const boost::system::error_code&, std::size_t);
task_io_service_operation(func_type func)
: next_(0),
- func_(func)
+ func_(func),
+ task_result_(0)
{
}
@@ -61,6 +63,9 @@ private:
friend class op_queue_access;
task_io_service_operation* next_;
func_type func_;
+protected:
+ friend class task_io_service;
+ unsigned int task_result_; // Passed into bytes transferred.
};
} // namespace detail
diff --git a/include/boost/asio/detail/timer_queue.hpp b/include/boost/asio/detail/timer_queue.hpp
index 78974a45..64d2cb7f 100644
--- a/include/boost/asio/detail/timer_queue.hpp
+++ b/include/boost/asio/detail/timer_queue.hpp
@@ -159,12 +159,15 @@ public:
// Dequeue all timers not later than the current time.
virtual void get_ready_timers(op_queue<operation>& ops)
{
- const time_type now = Time_Traits::now();
- while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_))
+ if (!heap_.empty())
{
- per_timer_data* timer = heap_[0].timer_;
- ops.push(timer->op_queue_);
- remove_timer(*timer);
+ const time_type now = Time_Traits::now();
+ while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_))
+ {
+ per_timer_data* timer = heap_[0].timer_;
+ ops.push(timer->op_queue_);
+ remove_timer(*timer);
+ }
}
}
diff --git a/include/boost/asio/detail/wait_handler.hpp b/include/boost/asio/detail/wait_handler.hpp
index 77fedf70..ef1d7701 100644
--- a/include/boost/asio/detail/wait_handler.hpp
+++ b/include/boost/asio/detail/wait_handler.hpp
@@ -40,7 +40,8 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
wait_handler* h(static_cast<wait_handler*>(base));
@@ -62,7 +63,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_fd_set_adapter.hpp b/include/boost/asio/detail/win_fd_set_adapter.hpp
index 8636a91c..4e4f9652 100644
--- a/include/boost/asio/detail/win_fd_set_adapter.hpp
+++ b/include/boost/asio/detail/win_fd_set_adapter.hpp
@@ -19,6 +19,7 @@
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+#include
#include
#include
@@ -28,39 +29,67 @@ namespace asio {
namespace detail {
// Adapts the FD_SET type to meet the Descriptor_Set concept's requirements.
-class win_fd_set_adapter
+class win_fd_set_adapter : noncopyable
{
public:
- enum { win_fd_set_size = 1024 };
+ enum { default_fd_set_size = 1024 };
win_fd_set_adapter()
- : max_descriptor_(invalid_socket)
+ : capacity_(default_fd_set_size),
+ max_descriptor_(invalid_socket)
{
- fd_set_.fd_count = 0;
+ fd_set_ = static_cast<win_fd_set*>(::operator new(
+ sizeof(win_fd_set) - sizeof(SOCKET)
+ + sizeof(SOCKET) * (capacity_)));
+ fd_set_->fd_count = 0;
+ }
+
+ ~win_fd_set_adapter()
+ {
+ ::operator delete(fd_set_);
+ }
+
+ void reset()
+ {
+ fd_set_->fd_count = 0;
+ max_descriptor_ = invalid_socket;
}
bool set(socket_type descriptor)
{
- for (u_int i = 0; i < fd_set_.fd_count; ++i)
- if (fd_set_.fd_array[i] == descriptor)
+ for (u_int i = 0; i < fd_set_->fd_count; ++i)
+ if (fd_set_->fd_array[i] == descriptor)
return true;
- if (fd_set_.fd_count < win_fd_set_size)
+
+ if (fd_set_->fd_count == capacity_)
{
- fd_set_.fd_array[fd_set_.fd_count++] = descriptor;
- return true;
+ u_int new_capacity = capacity_ + capacity_ / 2;
+ win_fd_set* new_fd_set = static_cast<win_fd_set*>(::operator new(
+ sizeof(win_fd_set) - sizeof(SOCKET)
+ + sizeof(SOCKET) * (new_capacity)));
+
+ new_fd_set->fd_count = fd_set_->fd_count;
+ for (u_int i = 0; i < fd_set_->fd_count; ++i)
+ new_fd_set->fd_array[i] = fd_set_->fd_array[i];
+
+ ::operator delete(fd_set_);
+ fd_set_ = new_fd_set;
+ capacity_ = new_capacity;
}
- return false;
+
+ fd_set_->fd_array[fd_set_->fd_count++] = descriptor;
+ return true;
}
bool is_set(socket_type descriptor) const
{
return !!__WSAFDIsSet(descriptor,
- const_cast<fd_set*>(reinterpret_cast<const fd_set*>(&fd_set_)));
+ const_cast<fd_set*>(reinterpret_cast<const fd_set*>(fd_set_)));
}
operator fd_set*()
{
- return reinterpret_cast<fd_set*>(&fd_set_);
+ return reinterpret_cast<fd_set*>(fd_set_);
}
socket_type max_descriptor() const
@@ -69,15 +98,19 @@ public:
}
private:
+
// This structure is defined to be compatible with the Windows API fd_set
- // structure, but without being dependent on the value of FD_SETSIZE.
+ // structure, but without being dependent on the value of FD_SETSIZE. We use
+ // the "struct hack" to allow the number of descriptors to be varied at
+ // runtime.
struct win_fd_set
{
u_int fd_count;
- SOCKET fd_array[win_fd_set_size];
+ SOCKET fd_array[1];
};
- win_fd_set fd_set_;
+ win_fd_set* fd_set_;
+ u_int capacity_;
socket_type max_descriptor_;
};
diff --git a/include/boost/asio/detail/win_fenced_block.hpp b/include/boost/asio/detail/win_fenced_block.hpp
index 769db2eb..071e8a8d 100644
--- a/include/boost/asio/detail/win_fenced_block.hpp
+++ b/include/boost/asio/detail/win_fenced_block.hpp
@@ -31,8 +31,16 @@ class win_fenced_block
: private noncopyable
{
public:
- // Constructor.
- win_fenced_block()
+ enum half_t { half };
+ enum full_t { full };
+
+ // Constructor for a half fenced block.
+ explicit win_fenced_block(half_t)
+ {
+ }
+
+ // Constructor for a full fenced block.
+ explicit win_fenced_block(full_t)
{
#if defined(__BORLANDC__)
LONG barrier = 0;
diff --git a/include/boost/asio/detail/win_iocp_handle_read_op.hpp b/include/boost/asio/detail/win_iocp_handle_read_op.hpp
index c7c24569..d1bc0d26 100644
--- a/include/boost/asio/detail/win_iocp_handle_read_op.hpp
+++ b/include/boost/asio/detail/win_iocp_handle_read_op.hpp
@@ -50,8 +50,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& result_ec,
+ std::size_t bytes_transferred)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_handle_read_op* o(static_cast<win_iocp_handle_read_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -85,7 +88,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_handle_write_op.hpp b/include/boost/asio/detail/win_iocp_handle_write_op.hpp
index 510b0924..e1158eeb 100644
--- a/include/boost/asio/detail/win_iocp_handle_write_op.hpp
+++ b/include/boost/asio/detail/win_iocp_handle_write_op.hpp
@@ -49,7 +49,7 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
{
// Take ownership of the operation object.
win_iocp_handle_write_op* o(static_cast<win_iocp_handle_write_op*>(base));
@@ -80,7 +80,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_io_service.hpp b/include/boost/asio/detail/win_iocp_io_service.hpp
index 09ee6123..422fada7 100644
--- a/include/boost/asio/detail/win_iocp_io_service.hpp
+++ b/include/boost/asio/detail/win_iocp_io_service.hpp
@@ -21,6 +21,7 @@
#include
#include
+#include
#include
#include
#include
@@ -45,10 +46,11 @@ class win_iocp_io_service
: public boost::asio::detail::service_base
{
public:
- // Constructor.
- BOOST_ASIO_DECL win_iocp_io_service(boost::asio::io_service& io_service);
- BOOST_ASIO_DECL void init(size_t concurrency_hint);
+ // Constructor. Specifies a concurrency hint that is passed through to the
+ // underlying I/O completion port.
+ BOOST_ASIO_DECL win_iocp_io_service(boost::asio::io_service& io_service,
+ size_t concurrency_hint = 0);
// Destroy all user-defined handler objects owned by the service.
BOOST_ASIO_DECL void shutdown_service();
@@ -102,6 +104,12 @@ public:
stop();
}
+ // Return whether a handler can be dispatched immediately.
+ bool can_dispatch()
+ {
+ return call_stack::contains(this) != 0;
+ }
+
// Request invocation of the given handler.
template <typename Handler>
void dispatch(Handler handler);
diff --git a/include/boost/asio/detail/win_iocp_null_buffers_op.hpp b/include/boost/asio/detail/win_iocp_null_buffers_op.hpp
index ee0646ae..74e50c21 100644
--- a/include/boost/asio/detail/win_iocp_null_buffers_op.hpp
+++ b/include/boost/asio/detail/win_iocp_null_buffers_op.hpp
@@ -56,8 +56,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& result_ec,
+ std::size_t bytes_transferred)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_null_buffers_op* o(static_cast<win_iocp_null_buffers_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -95,7 +98,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_operation.hpp b/include/boost/asio/detail/win_iocp_operation.hpp
index 2893d897..102e509e 100644
--- a/include/boost/asio/detail/win_iocp_operation.hpp
+++ b/include/boost/asio/detail/win_iocp_operation.hpp
@@ -38,8 +38,8 @@ class win_iocp_operation
{
public:
void complete(win_iocp_io_service& owner,
- const boost::system::error_code& ec = boost::system::error_code(),
- std::size_t bytes_transferred = 0)
+ const boost::system::error_code& ec,
+ std::size_t bytes_transferred)
{
func_(&owner, this, ec, bytes_transferred);
}
@@ -50,8 +50,9 @@ public:
}
protected:
- typedef void (*func_type)(win_iocp_io_service*,
- win_iocp_operation*, boost::system::error_code, std::size_t);
+ typedef void (*func_type)(
+ win_iocp_io_service*, win_iocp_operation*,
+ const boost::system::error_code&, std::size_t);
win_iocp_operation(func_type func)
: next_(0),
diff --git a/include/boost/asio/detail/win_iocp_overlapped_op.hpp b/include/boost/asio/detail/win_iocp_overlapped_op.hpp
index f22c3820..f8409cef 100644
--- a/include/boost/asio/detail/win_iocp_overlapped_op.hpp
+++ b/include/boost/asio/detail/win_iocp_overlapped_op.hpp
@@ -46,7 +46,7 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
{
// Take ownership of the operation object.
win_iocp_overlapped_op* o(static_cast<win_iocp_overlapped_op*>(base));
@@ -68,7 +68,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_socket_accept_op.hpp b/include/boost/asio/detail/win_iocp_socket_accept_op.hpp
index c15d06fa..b53fc777 100644
--- a/include/boost/asio/detail/win_iocp_socket_accept_op.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_accept_op.hpp
@@ -73,8 +73,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t /*bytes_transferred*/)
+ const boost::system::error_code& result_ec,
+ std::size_t /*bytes_transferred*/)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_socket_accept_op* o(static_cast<win_iocp_socket_accept_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -134,7 +137,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_socket_recv_op.hpp b/include/boost/asio/detail/win_iocp_socket_recv_op.hpp
index cae85fda..98c7baf4 100644
--- a/include/boost/asio/detail/win_iocp_socket_recv_op.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_recv_op.hpp
@@ -53,8 +53,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& result_ec,
+ std::size_t bytes_transferred)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_socket_recv_op* o(static_cast<win_iocp_socket_recv_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -89,7 +92,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_socket_recvfrom_op.hpp b/include/boost/asio/detail/win_iocp_socket_recvfrom_op.hpp
index f2c38fa4..a7a8f780 100644
--- a/include/boost/asio/detail/win_iocp_socket_recvfrom_op.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_recvfrom_op.hpp
@@ -59,8 +59,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& result_ec,
+ std::size_t bytes_transferred)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_socket_recvfrom_op* o(
static_cast<win_iocp_socket_recvfrom_op*>(base));
@@ -96,7 +99,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_socket_recvmsg_op.hpp b/include/boost/asio/detail/win_iocp_socket_recvmsg_op.hpp
index 6ee61132..b3130646 100644
--- a/include/boost/asio/detail/win_iocp_socket_recvmsg_op.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_recvmsg_op.hpp
@@ -55,8 +55,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& result_ec,
+ std::size_t bytes_transferred)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_socket_recvmsg_op* o(
static_cast<win_iocp_socket_recvmsg_op*>(base));
@@ -90,7 +93,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_socket_send_op.hpp b/include/boost/asio/detail/win_iocp_socket_send_op.hpp
index c8a49a37..9b434788 100644
--- a/include/boost/asio/detail/win_iocp_socket_send_op.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_send_op.hpp
@@ -51,8 +51,11 @@ public:
}
static void do_complete(io_service_impl* owner, operation* base,
- boost::system::error_code ec, std::size_t bytes_transferred)
+ const boost::system::error_code& result_ec,
+ std::size_t bytes_transferred)
{
+ boost::system::error_code ec(result_ec);
+
// Take ownership of the operation object.
win_iocp_socket_send_op* o(static_cast<win_iocp_socket_send_op*>(base));
ptr p = { boost::addressof(o->handler_), o, o };
@@ -84,7 +87,7 @@ public:
// Make the upcall if required.
if (owner)
{
- boost::asio::detail::fenced_block b;
+ fenced_block b(fenced_block::half);
BOOST_ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
boost_asio_handler_invoke_helpers::invoke(handler, handler.handler_);
BOOST_ASIO_HANDLER_INVOCATION_END;
diff --git a/include/boost/asio/detail/win_iocp_socket_service.hpp b/include/boost/asio/detail/win_iocp_socket_service.hpp
index a7dfbfba..22d0141c 100644
--- a/include/boost/asio/detail/win_iocp_socket_service.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_service.hpp
@@ -281,7 +281,7 @@ public:
boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_write(impl.socket_, ec);
+ socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -358,7 +358,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_read(impl.socket_, ec);
+ socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();
diff --git a/include/boost/asio/detail/win_iocp_socket_service_base.hpp b/include/boost/asio/detail/win_iocp_socket_service_base.hpp
index 21062ee2..edb28e2d 100644
--- a/include/boost/asio/detail/win_iocp_socket_service_base.hpp
+++ b/include/boost/asio/detail/win_iocp_socket_service_base.hpp
@@ -206,7 +206,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_write(impl.socket_, ec);
+ socket_ops::poll_write(impl.socket_, impl.state_, ec);
return 0;
}
@@ -273,7 +273,7 @@ public:
socket_base::message_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_read(impl.socket_, ec);
+ socket_ops::poll_read(impl.socket_, impl.state_, ec);
return 0;
}
@@ -343,7 +343,7 @@ public:
socket_base::message_flags& out_flags, boost::system::error_code& ec)
{
// Wait for socket to become ready.
- socket_ops::poll_read(impl.socket_, ec);
+ socket_ops::poll_read(impl.socket_, impl.state_, ec);
// Clear out_flags, since we cannot give it any other sensible value when
// performing a null_buffers operation.
diff --git a/include/boost/asio/impl/io_service.hpp b/include/boost/asio/impl/io_service.hpp
index dbdd2941..d1fabc39 100644
--- a/include/boost/asio/impl/io_service.hpp
+++ b/include/boost/asio/impl/io_service.hpp
@@ -33,6 +33,13 @@ inline Service& use_service(io_service& ios)
return ios.service_registry_->template use_service<Service>();
}
+template <>
+inline detail::io_service_impl& use_service<detail::io_service_impl>(
+ io_service& ios)
+{
+ return ios.impl_;
+}
+
template <typename Service>
inline void add_service(io_service& ios, Service* svc)
{
@@ -102,25 +109,25 @@ io_service::wrap(Handler handler)
}
inline io_service::work::work(boost::asio::io_service& io_service)
- : io_service_(io_service)
+ : io_service_impl_(io_service.impl_)
{
- io_service_.impl_.work_started();
+ io_service_impl_.work_started();
}
inline io_service::work::work(const work& other)
- : io_service_(other.io_service_)
+ : io_service_impl_(other.io_service_impl_)
{
- io_service_.impl_.work_started();
+ io_service_impl_.work_started();
}
inline io_service::work::~work()
{
- io_service_.impl_.work_finished();
+ io_service_impl_.work_finished();
}
inline boost::asio::io_service& io_service::work::get_io_service()
{
- return io_service_;
+ return io_service_impl_.get_io_service();
}
inline boost::asio::io_service& io_service::service::get_io_service()
diff --git a/include/boost/asio/impl/io_service.ipp b/include/boost/asio/impl/io_service.ipp
index 60ad28c8..5bac64c3 100644
--- a/include/boost/asio/impl/io_service.ipp
+++ b/include/boost/asio/impl/io_service.ipp
@@ -18,6 +18,7 @@
#include
#include
#include
+#include
#include
#include
@@ -33,17 +34,18 @@ namespace boost {
namespace asio {
io_service::io_service()
- : service_registry_(new boost::asio::detail::service_registry(*this)),
- impl_(service_registry_->use_service())
+ : service_registry_(new boost::asio::detail::service_registry(
+ *this, static_cast(0),
+ (std::numeric_limits<std::size_t>::max)())),
+ impl_(service_registry_->first_service())
{
- impl_.init((std::numeric_limits<std::size_t>::max)());
}
io_service::io_service(std::size_t concurrency_hint)
- : service_registry_(new boost::asio::detail::service_registry(*this)),
- impl_(service_registry_->use_service())
+ : service_registry_(new boost::asio::detail::service_registry(
+ *this, static_cast(0), concurrency_hint)),
+ impl_(service_registry_->first_service())
{
- impl_.init(concurrency_hint);
}
io_service::~io_service()
diff --git a/include/boost/asio/impl/read.hpp b/include/boost/asio/impl/read.hpp
index 83518102..16cf5f0f 100644
--- a/include/boost/asio/impl/read.hpp
+++ b/include/boost/asio/impl/read.hpp
@@ -18,9 +18,11 @@
#include
#include
#include
+#include
#include
#include
#include
+#include
#include
#include
#include
@@ -280,6 +282,168 @@ namespace detail
ReadHandler handler_;
};
+ template
+ class read_op,
+ CompletionCondition, ReadHandler>
+ : detail::base_from_completion_cond
+ {
+ public:
+ read_op(AsyncReadStream& stream, const boost::array& buffers,
+ CompletionCondition completion_condition, ReadHandler& handler)
+ : detail::base_from_completion_cond<
+ CompletionCondition>(completion_condition),
+ stream_(stream),
+ buffers_(buffers),
+ total_transferred_(0),
+ handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(handler))
+ {
+ }
+
+#if defined(BOOST_ASIO_HAS_MOVE)
+ read_op(const read_op& other)
+ : detail::base_from_completion_cond(other),
+ stream_(other.stream_),
+ buffers_(other.buffers_),
+ total_transferred_(other.total_transferred_),
+ handler_(other.handler_)
+ {
+ }
+
+ read_op(read_op&& other)
+ : detail::base_from_completion_cond(other),
+ stream_(other.stream_),
+ buffers_(other.buffers_),
+ total_transferred_(other.total_transferred_),
+ handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(other.handler_))
+ {
+ }
+#endif // defined(BOOST_ASIO_HAS_MOVE)
+
+ void operator()(const boost::system::error_code& ec,
+ std::size_t bytes_transferred, int start = 0)
+ {
+ typename boost::asio::detail::dependent_type >::type bufs = {{
+ boost::asio::mutable_buffer(buffers_[0]),
+ boost::asio::mutable_buffer(buffers_[1]) }};
+ std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
+ std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
+ std::size_t n = 0;
+ switch (start)
+ {
+ case 1:
+ n = this->check_for_completion(ec, total_transferred_);
+ for (;;)
+ {
+ bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
+ bufs[1] = boost::asio::buffer(
+ bufs[1] + (total_transferred_ < buffer_size0
+ ? 0 : total_transferred_ - buffer_size0),
+ n - boost::asio::buffer_size(bufs[0]));
+ stream_.async_read_some(bufs, BOOST_ASIO_MOVE_CAST(read_op)(*this));
+ return; default:
+ total_transferred_ += bytes_transferred;
+ if ((!ec && bytes_transferred == 0)
+ || (n = this->check_for_completion(ec, total_transferred_)) == 0
+ || total_transferred_ == buffer_size0 + buffer_size1)
+ break;
+ }
+
+ handler_(ec, static_cast(total_transferred_));
+ }
+ }
+
+ //private:
+ AsyncReadStream& stream_;
+ boost::array buffers_;
+ std::size_t total_transferred_;
+ ReadHandler handler_;
+ };
+
+#if defined(BOOST_ASIO_HAS_STD_ARRAY)
+
+ template
+ class read_op,
+ CompletionCondition, ReadHandler>
+ : detail::base_from_completion_cond
+ {
+ public:
+ read_op(AsyncReadStream& stream, const std::array& buffers,
+ CompletionCondition completion_condition, ReadHandler& handler)
+ : detail::base_from_completion_cond<
+ CompletionCondition>(completion_condition),
+ stream_(stream),
+ buffers_(buffers),
+ total_transferred_(0),
+ handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(handler))
+ {
+ }
+
+#if defined(BOOST_ASIO_HAS_MOVE)
+ read_op(const read_op& other)
+ : detail::base_from_completion_cond(other),
+ stream_(other.stream_),
+ buffers_(other.buffers_),
+ total_transferred_(other.total_transferred_),
+ handler_(other.handler_)
+ {
+ }
+
+ read_op(read_op&& other)
+ : detail::base_from_completion_cond(other),
+ stream_(other.stream_),
+ buffers_(other.buffers_),
+ total_transferred_(other.total_transferred_),
+ handler_(BOOST_ASIO_MOVE_CAST(ReadHandler)(other.handler_))
+ {
+ }
+#endif // defined(BOOST_ASIO_HAS_MOVE)
+
+ void operator()(const boost::system::error_code& ec,
+ std::size_t bytes_transferred, int start = 0)
+ {
+ typename boost::asio::detail::dependent_type >::type bufs = {{
+ boost::asio::mutable_buffer(buffers_[0]),
+ boost::asio::mutable_buffer(buffers_[1]) }};
+ std::size_t buffer_size0 = boost::asio::buffer_size(bufs[0]);
+ std::size_t buffer_size1 = boost::asio::buffer_size(bufs[1]);
+ std::size_t n = 0;
+ switch (start)
+ {
+ case 1:
+ n = this->check_for_completion(ec, total_transferred_);
+ for (;;)
+ {
+ bufs[0] = boost::asio::buffer(bufs[0] + total_transferred_, n);
+ bufs[1] = boost::asio::buffer(
+ bufs[1] + (total_transferred_ < buffer_size0
+ ? 0 : total_transferred_ - buffer_size0),
+ n - boost::asio::buffer_size(bufs[0]));
+ stream_.async_read_some(bufs, BOOST_ASIO_MOVE_CAST(read_op)(*this));
+ return; default:
+ total_transferred_ += bytes_transferred;
+ if ((!ec && bytes_transferred == 0)
+ || (n = this->check_for_completion(ec, total_transferred_)) == 0
+ || total_transferred_ == buffer_size0 + buffer_size1)
+ break;
+ }
+
+ handler_(ec, static_cast(total_transferred_));
+ }
+ }
+
+ //private:
+ AsyncReadStream& stream_;
+ std::array buffers_;
+ std::size_t total_transferred_;
+ ReadHandler handler_;
+ };
+
+#endif // defined(BOOST_ASIO_HAS_STD_ARRAY)
+
template
inline void* asio_handler_allocate(std::size_t size,
diff --git a/include/boost/asio/impl/read_at.hpp b/include/boost/asio/impl/read_at.hpp
index 14fddc49..8fc5b748 100644
--- a/include/boost/asio/impl/read_at.hpp
+++ b/include/boost/asio/impl/read_at.hpp
@@ -18,9 +18,11 @@
#include
#include
#include
+#include
#include
#include
#include
+#include
#include
#include
#include
@@ -300,6 +302,180 @@ namespace detail
ReadHandler handler_;
};
+ template
+ class read_at_op,
+ CompletionCondition, ReadHandler>
+ : detail::base_from_completion_cond
+ {
+ public:
+ read_at_op(AsyncRandomAccessReadDevice& device,
+ boost::uint64_t offset, const boost::array