From edd538944f93ed217045f1121f38643a5a1daa72 Mon Sep 17 00:00:00 2001
From: Marcelo Zimbres
Date: Tue, 19 Jul 2022 21:41:57 +0200
Subject: [PATCH] Uses the correct executor in the exec timer.

---
 CMakeLists.txt                           |  2 ++
 aedis/adapter/detail/response_traits.hpp | 10 ++++------
 aedis/aedis.hpp                          | 13 +++++++++++--
 aedis/connection.hpp                     | 17 +++++++----------
 benchmarks/benchmarks.md                 | 22 ++++++++++++----------
 5 files changed, 36 insertions(+), 28 deletions(-)
 create mode 100644 CMakeLists.txt

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 00000000..5283c8a9
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,2 @@
+# This is ongoing work. At the moment autotools is still the supported
+# build system.
diff --git a/aedis/adapter/detail/response_traits.hpp b/aedis/adapter/detail/response_traits.hpp
index 88d666a3..773526df 100644
--- a/aedis/adapter/detail/response_traits.hpp
+++ b/aedis/adapter/detail/response_traits.hpp
@@ -90,17 +90,15 @@ struct assigner<0> {
    }
 };
 
-// TODO: I am not sure we need the mp_unique below.
 template <class Tuple>
 class static_aggregate_adapter {
 private:
    using adapters_array_type =
       std::array<
-         boost::mp11::mp_unique<
-            boost::mp11::mp_rename<
-               boost::mp11::mp_transform<
-                  adapter_t, Tuple>,
-               boost::variant2::variant>>,
+         boost::mp11::mp_rename<
+            boost::mp11::mp_transform<
+               adapter_t, Tuple>,
+            boost::variant2::variant>,
          std::tuple_size<Tuple>::value>;
 
    std::size_t i_ = 0;
diff --git a/aedis/aedis.hpp b/aedis/aedis.hpp
index bfee82d7..316272d6 100644
--- a/aedis/aedis.hpp
+++ b/aedis/aedis.hpp
@@ -12,8 +12,17 @@
 #include
 #include
 
-// \li Support for Redis [sentinel](https://redis.io/docs/manual/sentinel).
-// TODO: Remove conflicts of the adapt function.
+// TODO: Support for Redis sentinel:
+//
+// - https://redis.io/docs/manual/sentinel
+// - https://redis.io/docs/reference/sentinel-clients
+//
+// TODO: Should we avoid conflicts between the
+//
+// - aedis::adapt and
+// - aedis::resp3::adapt
+//
+// functions?
 
 /** \mainpage Documentation
     \tableofcontents
diff --git a/aedis/connection.hpp b/aedis/connection.hpp
index 5d45ca77..4bc329a3 100644
--- a/aedis/connection.hpp
+++ b/aedis/connection.hpp
@@ -24,8 +24,6 @@
 
 namespace aedis {
 
-// https://redis.io/docs/reference/sentinel-clients
-
 /** \brief A high level Redis connection class.
  * \ingroup any
  *
@@ -298,9 +296,15 @@ public:
    }
 
 private:
+   using channel_type = boost::asio::experimental::channel;
+   using clock_type = std::chrono::steady_clock;
+   using clock_traits_type = boost::asio::wait_traits<clock_type>;
+   using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
+   using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
+
    struct req_info {
       req_info(executor_type ex) : timer{ex} {}
-      boost::asio::steady_timer timer;
+      timer_type timer;
       resp3::request const* req = nullptr;
       std::size_t cmds = 0;
       bool stop = false;
@@ -369,7 +373,6 @@ private:
         >(detail::connect_with_timeout_op{this}, token, resv_);
    }
 
-   // Loops on async_read_with_timeout described above.
   template <class CompletionToken>
   auto reader(CompletionToken&& token)
   {
@@ -440,12 +443,6 @@ private:
      }
   }
 
-   using channel_type = boost::asio::experimental::channel;
-   using clock_type = std::chrono::steady_clock;
-   using clock_traits_type = boost::asio::wait_traits<clock_type>;
-   using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
-   using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
-
   // IO objects
   resolver_type resv_;
   std::shared_ptr socket_;
diff --git a/benchmarks/benchmarks.md b/benchmarks/benchmarks.md
index f6850ac9..696df449 100644
--- a/benchmarks/benchmarks.md
+++ b/benchmarks/benchmarks.md
@@ -1,14 +1,12 @@
 # TCP echo server performance
 
-This document describe benchmarks the performance of TCP echo servers
-I implemented in different languages using different Redis clients.
-The main motivations for choosing a TCP echo server as a benchmark
-program are
+This document benchmarks the performance of TCP echo servers I
+implemented in different languages using different Redis clients. The
+main motivations for choosing an echo server are
 
 * Simple to implement and does not require expertise level in most languages.
 * I/O bound: Echo servers have very low CPU consumption in general
-  and therefore are excelent as a measure of the ability of a
-  program to server concurrent requests.
+  and therefore are excellent to measure how a program handles concurrent requests.
 * It simulates very well a typical backend in regard to concurrency.
 
 I also imposed some constraints on the implementations
@@ -56,8 +54,8 @@ can be seen below
 
 ![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
 
 The tests were performed on a network where latency is 35ms on
-average, otherwise it is equal to the benchmarks above regarding the
-number of TCP connections.
+average; otherwise it uses the same number of TCP connections
+as in the previous benchmark.
 
 ### Remarks
@@ -66,8 +64,8 @@ in the graph, the reasons are
 
 * [redis-rs](https://github.com/redis-rs/redis-rs): This client
   comes so far behind that it can't even be represented together
-  with the other benchmarks without making them look insignificant. I
-  don't know for sure why it is so slow, I suppose it has
+  with the other benchmarks without making them look insignificant.
+  I don't know for sure why it is so slow; I suppose it has
   something to do with its lack of proper
   [pipelining](https://redis.io/docs/manual/pipelining/)
  support. In fact, the more TCP connections I lauch the worst its
@@ -84,6 +82,10 @@ The code used in the benchmarks can be found at
 * [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
 * [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
 
+## Running the benchmarks
+
+Run one of the echo-server programs in one terminal and the [echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp) in another.
+
 ## Contributing
 
 If your spot any performance improvement in any of the example or
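
Editorial note: the standalone sketch below is not aedis code; it only illustrates the pattern the connection.hpp hunks apply, namely declaring the timer type over the connection's executor type and constructing each timer from that executor so its completions go through it. The strand used as `executor_type` here is an assumption for illustration; the actual aedis executor type may differ.

```cpp
// Illustration only -- not aedis code.
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

namespace asio = boost::asio;

int main()
{
   asio::io_context ioc;

   // Hypothetical stand-in for the connection's executor_type.
   using executor_type = asio::strand<asio::io_context::executor_type>;

   // Same alias chain as in the patch: clock, wait traits, then a timer
   // parameterized on the chosen executor type.
   using clock_type        = std::chrono::steady_clock;
   using clock_traits_type = asio::wait_traits<clock_type>;
   using timer_type =
      asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;

   executor_type ex = asio::make_strand(ioc);

   // Construct the timer from the executor, mirroring req_info's
   // timer{ex}; the timer's completion handlers are dispatched through
   // that executor.
   timer_type timer{ex};
   timer.expires_after(std::chrono::milliseconds{50});
   timer.async_wait([](boost::system::error_code ec) {
      std::cout << "timer fired: " << ec.message() << '\n';
   });

   ioc.run();
}
```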
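Editorial note: the sketch below is likewise not aedis code; it shows the Boost.Mp11 composition that the response_traits.hpp hunk keeps after dropping mp_unique: mp_transform maps an adapter alias template over the tuple, mp_rename turns the resulting tuple into a boost::variant2::variant, and the array is sized by std::tuple_size. The `adapter` template and the sample `Tuple` are hypothetical stand-ins.

```cpp
// Illustration only -- "adapter"/"adapter_t" stand in for aedis' adapter_t.
#include <array>
#include <string>
#include <tuple>
#include <type_traits>
#include <boost/mp11.hpp>
#include <boost/variant2/variant.hpp>

template <class T>
struct adapter {
   void operator()(T const&) const {}
};

template <class T>
using adapter_t = adapter<T>;

using Tuple = std::tuple<int, std::string, int>;

// tuple<int, string, int> -> tuple<adapter<int>, adapter<string>, adapter<int>>
//                         -> variant<adapter<int>, adapter<string>, adapter<int>>
// Without mp_unique, duplicate alternatives (adapter<int> twice) are kept, so
// the variant alternatives correspond one-to-one to the tuple elements.
using variant_type =
   boost::mp11::mp_rename<
      boost::mp11::mp_transform<adapter_t, Tuple>,
      boost::variant2::variant>;

// One variant slot per tuple element, as in adapters_array_type.
using adapters_array_type =
   std::array<variant_type, std::tuple_size<Tuple>::value>;

static_assert(std::is_same_v<
   variant_type,
   boost::variant2::variant<adapter<int>, adapter<std::string>, adapter<int>>>);
static_assert(std::tuple_size<Tuple>::value == 3);

int main()
{
   adapters_array_type adapters{};
   (void)adapters;
}
```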