2
0
mirror of https://github.com/boostorg/redis.git synced 2026-01-19 04:42:09 +00:00

Uses the correct executor in the exec timer.

This commit is contained in:
Marcelo Zimbres
2022-07-19 21:41:57 +02:00
parent 42880e788b
commit edd538944f
5 changed files with 36 additions and 28 deletions

2
CMakeLists.txt Normal file
View File

@@ -0,0 +1,2 @@
# This is ongoing work. At the moment autotools is still the supported
# build system.

View File

@@ -90,17 +90,15 @@ struct assigner<0> {
}
};
// TODO: I am not sure we need the mp_unique below.
template <class Tuple>
class static_aggregate_adapter {
private:
using adapters_array_type =
std::array<
boost::mp11::mp_unique<
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>>,
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>,
std::tuple_size<Tuple>::value>;
std::size_t i_ = 0;

View File

@@ -12,8 +12,17 @@
#include <aedis/connection.hpp>
#include <aedis/resp3/request.hpp>
// \li Support for Redis [sentinel](https://redis.io/docs/manual/sentinel).
// TODO: Remove conflicts of the adapt function.
// TODO: Support for Redis sentinel:
//
// - https://redis.io/docs/manual/sentinel and
// - https://redis.io/docs/reference/sentinel-clients
//
// TODO: Should we avoid conflicts of
//
// - aedis::adapt and
// - aedis::resp3::adapt
//
// function.
/** \mainpage Documentation
\tableofcontents

View File

@@ -24,8 +24,6 @@
namespace aedis {
// https://redis.io/docs/reference/sentinel-clients
/** \brief A high level Redis connection class.
* \ingroup any
*
@@ -298,9 +296,15 @@ public:
}
private:
using channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
struct req_info {
req_info(executor_type ex) : timer{ex} {}
boost::asio::steady_timer timer;
timer_type timer;
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
@@ -369,7 +373,6 @@ private:
>(detail::connect_with_timeout_op<connection>{this}, token, resv_);
}
// Loops on async_read_with_timeout described above.
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
@@ -440,12 +443,6 @@ private:
}
}
using channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
// IO objects
resolver_type resv_;
std::shared_ptr<AsyncReadWriteStream> socket_;

View File

@@ -1,14 +1,12 @@
# TCP echo server performance
This document describes benchmarks of the performance of TCP echo servers
I implemented in different languages using different Redis clients.
The main motivations for choosing a TCP echo server as a benchmark
program are
This document benchmarks the performance of TCP echo servers I
implemented in different languages using different Redis clients. The
main motivations for choosing an echo server are
* Simple to implement and does not require expertise level in most languages.
* I/O bound: Echo servers have very low CPU consumption in general
and therefore are excellent as a measure of the ability of a
program to serve concurrent requests.
and therefore are excellent to measure how a program handles concurrent requests.
* It simulates very well a typical backend in regard to concurrency.
I also imposed some constraints on the implementations
@@ -56,8 +54,8 @@ can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average, otherwise it is equal to the benchmarks above regarding the
number of TCP connections.
average, otherwise it uses the same number of TCP connections
as the previous example.
### Remarks
@@ -66,8 +64,8 @@ in the graph, the reasons are
* [redis-rs](https://github.com/redis-rs/redis-rs): This client
comes so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant. I
don't know for sure why it is so slow, I suppose it has
with the other benchmarks without making them look insignificant.
I don't know for sure why it is so slow, I suppose it has
something to do with its lack of proper
[pipelining](https://redis.io/docs/manual/pipelining/) support.
In fact, the more TCP connections I launch the worse its
@@ -84,6 +82,10 @@ The code used in the benchmarks can be found at
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
## Running the benchmarks
Run one of the echo-server programs in one terminal and the [echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp) in another.
## Contributing
If you spot any performance improvement in any of the examples or