mirror of https://github.com/boostorg/redis.git synced 2026-01-23 06:02:09 +00:00

Compare commits


14 Commits

Author SHA1 Message Date
Marcelo Zimbres
e9dab97992 v1.3.0 2022-11-26 22:22:56 +01:00
Marcelo Zimbres
2e8cad858d Improvements in the examples. 2022-11-26 19:42:39 +01:00
Marcelo Zimbres
5a6e426028 Build fix and improvements in the examples. 2022-11-22 22:57:33 +01:00
Marcelo Zimbres
c55978a379 CI fix and improvements in the examples. 2022-11-21 23:41:41 +01:00
Marcelo Zimbres
6f51397e49 Build fix. 2022-11-20 14:06:07 +01:00
Marcelo Zimbres
6b9ba6b2d9 Adds connection typedef and improves docs. 2022-11-19 23:53:26 +01:00
Marcelo Zimbres
d29c03cb38 Changes:
* Uses pmr::string for the connection read and write buffer.
* Improvements in the examples.
2022-11-18 23:15:47 +01:00
Marcelo Zimbres
34cfbaa22f Removes healthy checks from the connection class. 2022-11-13 21:22:50 +01:00
Marcelo Zimbres
c9354fe320 Test improvements. 2022-11-13 18:39:28 +01:00
Marcelo Zimbres
bb555cb509 Remove built-in resolve and connect operation in async_run. 2022-11-13 00:10:26 +01:00
Marcelo Zimbres
5b209afa1d Removes endpoint class. 2022-11-09 23:05:52 +01:00
Marcelo Zimbres
3f5491654d Removes built-in HELLO from the connection. 2022-11-08 00:04:52 +01:00
Marcelo Zimbres
2bdc25752f Simplifications in the low-level tests. 2022-11-06 22:40:00 +01:00
Marcelo Zimbres
faafce1c64 Adds tls test. 2022-11-06 19:12:36 +01:00
43 changed files with 1383 additions and 2326 deletions


@@ -10,7 +10,7 @@ cmake_minimum_required(VERSION 3.14)
project(
Aedis
VERSION 1.2.0
VERSION 1.3.0
DESCRIPTION "A redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
@@ -21,6 +21,7 @@ target_include_directories(aedis INTERFACE
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
target_link_libraries(aedis
INTERFACE
Boost::asio
@@ -50,22 +51,59 @@ find_package(OpenSSL REQUIRED)
enable_testing()
include_directories(include)
# Main function for the examples.
#=======================================================================
add_library(common STATIC examples/common.cpp)
target_compile_features(common PUBLIC cxx_std_20)
# Executables
#=======================================================================
#add_executable(intro_sync examples/intro_sync.cpp) // Uncomment after update to Boost 1.80
add_executable(intro examples/intro.cpp)
target_link_libraries(intro common)
target_compile_features(intro PUBLIC cxx_std_20)
add_test(intro intro)
add_executable(chat_room examples/chat_room.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_link_libraries(chat_room common)
add_executable(containers examples/containers.cpp)
target_compile_features(containers PUBLIC cxx_std_20)
target_link_libraries(containers common)
add_test(containers containers)
add_executable(echo_server examples/echo_server.cpp)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_link_libraries(echo_server common)
add_executable(resolve_with_sentinel examples/resolve_with_sentinel.cpp)
target_compile_features(resolve_with_sentinel PUBLIC cxx_std_20)
target_link_libraries(resolve_with_sentinel common)
add_test(resolve_with_sentinel resolve_with_sentinel)
add_executable(serialization examples/serialization.cpp)
target_compile_features(serialization PUBLIC cxx_std_20)
target_link_libraries(serialization common)
add_test(serialization serialization)
add_executable(subscriber examples/subscriber.cpp)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_link_libraries(subscriber common)
add_executable(intro_tls examples/intro_tls.cpp)
target_compile_features(intro_tls PUBLIC cxx_std_20)
add_test(intro_tls intro_tls)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(intro_tls common)
add_executable(echo_server_client benchmarks/cpp/asio/echo_server_client.cpp)
add_executable(echo_server_direct benchmarks/cpp/asio/echo_server_direct.cpp)
add_executable(intro examples/intro.cpp)
add_executable(intro_tls examples/intro_tls.cpp)
add_executable(low_level_sync examples/low_level_sync.cpp)
add_executable(serialization examples/serialization.cpp)
add_executable(subscriber examples/subscriber.cpp)
add_executable(subscriber_sentinel examples/subscriber_sentinel.cpp)
add_executable(test_conn_connect tests/conn_connect.cpp)
add_executable(low_level_async examples/low_level_async.cpp)
add_executable(test_conn_exec tests/conn_exec.cpp)
add_executable(test_conn_push tests/conn_push.cpp)
add_executable(test_conn_quit tests/conn_quit.cpp)
@@ -78,18 +116,10 @@ add_executable(test_conn_exec_cancel tests/conn_exec_cancel.cpp)
add_executable(test_conn_echo_stress tests/conn_echo_stress.cpp)
add_executable(test_request tests/request.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_compile_features(containers PUBLIC cxx_std_20)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_compile_features(echo_server_client PUBLIC cxx_std_20)
target_compile_features(echo_server_direct PUBLIC cxx_std_20)
target_compile_features(intro PUBLIC cxx_std_17)
target_compile_features(intro_tls PUBLIC cxx_std_17)
target_compile_features(low_level_sync PUBLIC cxx_std_17)
target_compile_features(serialization PUBLIC cxx_std_17)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_compile_features(subscriber_sentinel PUBLIC cxx_std_20)
target_compile_features(test_conn_connect PUBLIC cxx_std_17)
target_compile_features(low_level_async PUBLIC cxx_std_20)
target_compile_features(test_conn_exec PUBLIC cxx_std_20)
target_compile_features(test_conn_push PUBLIC cxx_std_20)
target_compile_features(test_conn_quit PUBLIC cxx_std_17)
@@ -102,21 +132,16 @@ target_compile_features(test_conn_exec_cancel PUBLIC cxx_std_20)
target_compile_features(test_conn_echo_stress PUBLIC cxx_std_20)
target_compile_features(test_request PUBLIC cxx_std_17)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(test_conn_tls OpenSSL::Crypto OpenSSL::SSL)
# Tests
#=======================================================================
add_test(containers containers)
add_test(intro intro)
add_test(intro_tls intro_tls)
#add_test(intro_sync intro_sync)
add_test(serialization serialization)
add_test(low_level_sync low_level_sync)
add_test(low_level_async low_level_async)
add_test(test_low_level test_low_level)
add_test(test_conn_exec test_conn_exec)
add_test(test_conn_connect test_conn_connect)
add_test(test_conn_push test_conn_push)
add_test(test_conn_quit test_conn_quit)
add_test(test_conn_quit_coalesce test_conn_quit_coalesce)

README.md

@@ -13,244 +13,82 @@ Some of its distinctive features are
* Support for the latest version of the Redis communication protocol [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
* Support for STL containers, TLS and Redis sentinel.
* Serialization and deserialization of your own data types.
* Healthy checks, back pressure, cancellation and low latency.
* Back pressure, cancellation and low latency.
In addition to that, Aedis hides most of the low-level Asio code away
from the user, which in the majority of the cases, will interact with
only three library entities
* `aedis::connection`: A connection to the Redis server.
* `aedis::resp3::request`: A container of Redis commands.
* `aedis::adapt()`: A function that adapts data structures to receive Redis responses.
The example below shows for example how to read Redis hashes in an
`std::map` using a coroutine, a short-lived connection and
cancellation
from the user. For example, the coroutine below retrieves Redis hashes
in a `std::map` and quits the connection (see containers.cpp)
```cpp
net::awaitable<std::map<std::string, std::string>> retrieve_hashes(endpoint ep)
auto hgetall(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
connection conn{co_await net::this_coro::executor};
// A request contains multiple Redis commands.
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("HGETALL", "hset-key");
req.push("QUIT");
std::tuple<std::map<std::string, std::string>, aedis::ignore> resp;
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
// Tuple elements will hold the response to each command in the request.
std::tuple<aedis::ignore, std::map<std::string, std::string>, aedis::ignore> resp;
co_return std::move(std::get<0>(resp));
// Executes the request and reads the response.
co_await conn->async_exec(req, adapt(resp));
// Uses the map ...
}
```
In the next section we will see more details about connections,
requests and responses.
<a name="connection"></a>
### Connection
The `aedis::connection` is a class that provides async-only
communication with a Redis server by means of three member
functions
* `aedis::connection::async_run`: Establishes a connection and
completes only when it is lost.
* `aedis::connection::async_exec`: Executes commands.
* `aedis::connection::async_receive`: Receives server-side pushes.
In general, these operations will be running concurrently in the user
application, where, for example
1. **Connect**: One coroutine will call `async_run` in a loop
to reconnect whenever a connection is lost.
2. **Execute**: Multiple coroutines will call `async_exec` independently
and without coordination (e.g. queuing).
3. **Receive**: One coroutine will loop on `async_receive` to receive
server-side pushes (required only if the app expects server pushes).
Each of the operations above can be performed without regard to the
others, as they are independent of each other. Below we will cover
the points above in more detail.
#### Connect
In general, applications will connect to a Redis server and hang
around for as long as possible, until the connection is lost for some
reason. When that happens, simple setups will want to wait for a
short period of time and try to reconnect. The code snippet below
shows how this can be achieved with a coroutine (see echo_server.cpp)
The execution of calls to `connection::async_exec` like above are
triggered by the `connection::async_run` member function, which is
required to be running concurrently for as long as the connection
stands. For example, the code below uses a short-lived connection to
execute the coroutine above
```cpp
net::awaitable<void> reconnect(std::shared_ptr<connection> conn, endpoint ep)
net::awaitable<void> async_main()
{
net::steady_timer timer{co_await net::this_coro::executor};
for (boost::system::error_code ec;;) {
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// Establishes a connection and hangs around until it is lost.
co_await conn->async_run(ep, {}, redir(ec));
conn->reset_stream();
// Resolves and connects (from examples/common.hpp to avoid verbosity)
co_await connect(conn, "127.0.0.1", "6379");
// Waits some time before trying to reestablish the connection.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
// Runs and executes the request.
co_await (conn->async_run() || hgetall(conn));
}
```
Other common scenarios are, for example, performing a failover with
sentinels and re-subscribing to pubsub channels, both are covered in
the `subscriber_sentinel.cpp` example.
Long-lived connections follow the same principle (see the examples
below) and will be discussed in more detail later. The role of the
`async_run` is to coordinate IO and ensure the connection is always
reading from the socket. The rationale behind this design is
#### Execute
* Provide quick reaction to disconnections and hence faster failovers.
* Support server pushes and requests in the same connection object,
concurrently.
The basic idea about `async_exec` was stated above already: execute
Redis commands. One of the most important things about it though is
that it can be called multiple times without coordination, for
example, in a HTTP or Websocket server where each session calls it
independently to communicate with Redis. The benefits of this feature
are manifold
In the following sections we will discuss with more details the main
entities Aedis users are concerned with, namely
* Reduces code complexity as users won't have to implement queues
every time e.g. HTTP sessions want to share a connection to Redis.
* A small number of connections improves the performance associated
with [pipelines](https://redis.io/topics/pipelining). A single
connection will indeed be enough in most cases.
* `aedis::resp3::request`: A container of Redis commands.
* `aedis::adapt()`: A function that adapts data structures to receive Redis responses.
* `aedis::connection`: A connection to the Redis server.
The code below illustrates these concepts in a TCP session of the
`echo_server.cpp` example
before that however, users might find it helpful to skim over the
examples, to gain a better feeling about the library capabilities
```cpp
awaitable_type echo_server_session(tcp_socket socket, std::shared_ptr<connection> db)
{
request req;
std::tuple<std::string> response;
for (std::string buffer;;) {
// Reads a user message.
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
// Echos it through Redis.
req.push("PING", buffer);
co_await db->async_exec(req, adapt(response));
// Writes it back to the user.
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
// Cleanup
std::get<0>(response).clear();
req.clear();
buffer.erase(0, n);
}
}
```
Notice also how the session above provides back-pressure as the
coroutine won't read the next message from the socket until a cycle is
complete.
#### Receive
Point number 3. above is only necessary for servers that expect server
pushes, like, for example, when using Redis pubsub. The example below
was taken from subscriber.cpp
```cpp
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
```
In general, it is advisable for all apps to keep a coroutine calling
`async_receive`, as an unread push will cause the connection to stall
and eventually time out. Notice that the same connection that is being
used to send requests can also be used to receive server-side pushes.
#### Cancellation
Aedis supports both implicit and explicit cancellation of connection
operations. Explicit cancellation is supported by means of the
`aedis::connection::cancel` member function. Implicit cancellation,
like those that may happen when using Asio awaitable operators && and
|| will be discussed with more detail below.
```cpp
co_await (conn.async_run(...) && conn.async_exec(...))
```
* Useful when implementing reconnection on applications that
use pubsub.
* Makes the channel re-subscribe operation simpler when the
connection is reestablished.
```cpp
co_await (conn.async_run(...) || conn.async_exec(...))
```
* Useful for short-lived connections that are meant to be closed after
a command has been executed.
```cpp
co_await (conn.async_exec(...) || time.async_wait(...))
```
* Provides a way to limit how long the execution of a single request
should last.
* Alternatively, for a connection-wide timeout set
`aedis::connection::timeouts::ping_interval` to a proper value. This
will work because all requests use the same queue and is also more
efficient since only one timer will be used.
* The cancellation will be ignored if the request has already
been written to the socket.
```cpp
co_await (conn.async_run(...) || time.async_wait(...))
```
* Set a limit on how long the connection should live (see also
`aedis::connection::timeouts`)
```cpp
co_await (conn.async_exec(...) || conn.async_exec(...) || ... || conn.async_exec(...))
```
* This works but is considered an antipattern. Unless
the user has set `aedis::resp3::request::config::coalesce` to
`false`, and he shouldn't, the connection will automatically merge
the individual requests into a single payload anyway.
#### Timeouts
Aedis high-level API provides built-in support for many of the
timeouts users usually need. For example, the
`aedis::connection::async_run` member function performs the following
operations on behalf of the user
* Resolves Redis address.
* Connects to the resolved endpoint.
* TLS handshake (for TLS endpoints).
* RESP3 handshake and authentication and role check.
* Periodic healthy checks with the PING command.
* Keeps reading from the socket to handle server pushes and command responses.
* Keeps writing requests as it becomes possible e.g. after last response has arrived.
To control the timeout behaviour of these operations, users must
create an `aedis::connection::timeouts` object and pass it as an
argument to `aedis::connection::async_run` (if
the suggested defaults are not suitable).
* intro.cpp: The Aedis hello-world program. It sends one command to Redis and quits the connection.
* intro_tls.cpp: Same as intro.cpp but over TLS.
* containers.cpp: Shows how to send and receive stl containers and how to use transactions.
* serialization.cpp: Shows how to serialize types using Boost.Json.
* subscriber.cpp: Shows how to implement pubsub that reconnects and resubscribes when the connection is lost.
* echo_server.cpp: A simple TCP echo server.
* chat_room.cpp: A command line chat room built on Redis pubsub.
<a name="requests"></a>
### Requests
Redis requests are composed of one of more Redis commands (in
Redis requests are composed of one or more Redis commands (in
Redis documentation they are called
[pipelines](https://redis.io/topics/pipelining)). For example
@@ -263,6 +101,7 @@ req.push("SET", "key", "some value", "EX", "2");
// Pushes a list.
std::list<std::string> list
{"channel1", "channel2", "channel3"};
req.push_range("SUBSCRIBE", list);
// Same as above but as an iterator range.
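The iterator-range call itself is cut off by the hunk above; a minimal sketch of what it presumably looks like, assuming `push_range` also accepts a begin/end pair:

```cpp
// Illustrative sketch: the same SUBSCRIBE, passed as an iterator range.
req.push_range("SUBSCRIBE", std::cbegin(list), std::cend(list));
```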
@@ -284,7 +123,7 @@ Sending a request to Redis is performed with `aedis::connection::async_exec` as
The `push` and `push_range` functions above work with integers
e.g. `int` and `std::string` out of the box. To send your own
data type defined a `to_bulk` function like this
data type define a `to_bulk` function like this
```cpp
// Example struct.
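// The hunk is truncated here, so the following is an illustrative sketch rather
// than the diff's original continuation. It assumes the customization point is a
// free function named to_bulk found by ADL, and that aedis::resp3::to_bulk(payload, str)
// appends a string as a bulk element (as in the library's serialization example).
struct mystruct {
   int a;
   int b;
};

void to_bulk(std::string& payload, mystruct const& obj)
{
   // Serialize the object in any format you like and append it as a bulk string.
   auto const str = std::to_string(obj.a) + "-" + std::to_string(obj.b);
   aedis::resp3::to_bulk(payload, str);
}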
@@ -403,14 +242,14 @@ std::tuple<
Where both are passed to `async_exec` as shown elsewhere
```cpp
co_await db->async_exec(req, adapt(resp));
co_await conn->async_exec(req, adapt(resp));
```
If the intention is to ignore the responses to all commands altogether,
use `adapt()` without arguments instead
```cpp
co_await db->async_exec(req, adapt());
co_await conn->async_exec(req, adapt());
```
Responses that contain nested aggregates or heterogeneous data
@@ -453,7 +292,7 @@ std::tuple<
...
> response;
co_await db->async_exec(req, adapt(response));
co_await conn->async_exec(req, adapt(response));
```
Everything else stays pretty much the same.
@@ -466,11 +305,11 @@ of an array, after the `EXEC` command comes. For example, to read
the response to this request
```cpp
db.send("MULTI");
db.send("GET", "key1");
db.send("LRANGE", "key2", 0, -1);
db.send("HGETALL", "key3");
db.send("EXEC");
req.push("MULTI");
req.push("GET", "key1");
req.push("LRANGE", "key2", 0, -1);
req.push("HGETALL", "key3");
req.push("EXEC");
```
use the following response type
@@ -486,14 +325,14 @@ using exec_resp_type =
>;
std::tuple<
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
exec_resp_type, // exec
> resp;
co_await db->async_exec(req, adapt(resp));
co_await conn->async_exec(req, adapt(resp));
```
For a complete example see containers.cpp.
@@ -560,7 +399,7 @@ using other types
```cpp
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await db->async_exec(req, adapt(resp));
co_await conn->async_exec(req, adapt(resp));
```
For example, suppose we want to retrieve a hash data structure
@@ -575,20 +414,209 @@ In addition to the above users can also use unordered versions of the
containers. The same reasoning also applies to sets e.g. `SMEMBERS`
and other data structures in general.
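As a hedged sketch of that flexibility (only the adapted type changes; `conn`, `request` and `adapt` are used as in the other examples):

```cpp
// Sketch: the same HGETALL adapted into different containers.
request req;
req.push("HGETALL", "hset-key");

// As a hash map.
std::unordered_map<std::string, std::string> hash;
co_await conn->async_exec(req, adapt(hash));

// Or as raw RESP3 nodes, useful when the shape is not known in advance.
std::vector<node<std::string>> nodes;
co_await conn->async_exec(req, adapt(nodes));
```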
### Examples
<a name="connection"></a>
### Connection
To conclude this overview, users are invited to skim over the
examples below
The `aedis::connection` is a class that provides async-only
communication with a Redis server by means of three member
functions
* intro.cpp: The Aedis hello-world program. It sends one command to Redis and quits the connection.
* intro_tls.cpp: Same as intro.cpp but over TLS.
* intro_sync.cpp: Synchronous version of intro.cpp.
* containers.cpp: Shows how to send and receive stl containers and how to use transactions.
* serialization.cpp: Shows how to serialize types using Boost.Json.
* subscriber.cpp: Shows how to implement pubsub that reconnects and resubscribes when the connection is lost.
* subscriber_sentinel.cpp: Same as subscriber.cpp but with failover with sentinels.
* echo_server.cpp: A simple TCP echo server.
* chat_room.cpp: A simple chat room.
* `connection::async_run`: Starts read and write operations and remains suspended until the connection is lost.
* `connection::async_exec`: Executes commands.
* `connection::async_receive`: Receives server-side pushes.
In general, these operations will be running concurrently in the user
application, where, for example
1. **Run**: One coroutine will call `async_run`, perhaps in a loop and
with health checks.
2. **Execute**: Multiple coroutines will call `async_exec` independently
and without coordination (e.g. queuing).
3. **Receive**: One coroutine will loop on `async_receive` to receive
server-side pushes (required only if the app expects server pushes).
Each of these operations can be performed without regard to the
others, as they are independent of each other. Below we cover
these points in more detail.
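A minimal sketch of that separation, reusing the `connect` helper and the `receiver` coroutine from the examples (the `sessions` coroutine is hypothetical and stands for whatever code calls `async_exec`):

```cpp
// Sketch: each operation runs in its own coroutine on the same connection.
auto async_main() -> net::awaitable<void>
{
   auto ex = co_await net::this_coro::executor;
   auto conn = std::make_shared<connection>(ex);

   co_await connect(conn, "127.0.0.1", "6379");        // resolve and connect

   net::co_spawn(ex, receiver(conn), net::detached);   // 3. server-side pushes
   net::co_spawn(ex, sessions(conn), net::detached);   // 2. async_exec callers (hypothetical)

   co_await conn->async_run();                          // 1. reads and writes
}
```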
#### Run
The code snippet in the overview section showed an example that
used `connection::async_run` with a short-lived connection. In the general
case, however, applications will connect to a Redis server and stay
connected for as long as possible, until the connection is lost for some
reason. When that happens, simple setups will want to wait for a
short period of time and try to reconnect. To support this usage
pattern, Aedis connections can be reconnected _while there are pending
requests and receive operations_. The general form of a reconnect
loop looks like this (see subscriber.cpp)
```cpp
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set_type sig{ex, SIGINT, SIGTERM};
timer_type timer{ex};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
// Loop to reconnect on connection lost. To exit type Ctrl-C twice.
for (;;) {
co_await connect(conn, "127.0.0.1", "6379");
// Starts async_run and other operations.
co_await ((conn->async_run() || healthy_checker(conn) || sig.async_wait() ||
receiver(conn)) && conn->async_exec(req));
// Prepare for a reconnect.
conn->reset_stream();
// Waits some time before reconnecting.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
```
It is important to emphasize that Redis servers use the old
communication protocol RESP2 by default; it is therefore necessary to
send a `HELLO 3` command every time a connection is established.
Another common scenario for reconnection is a failover with
sentinels, covered in the `resolve_with_sentinel.cpp` example.
#### Execute
The basic idea behind `async_exec` was already stated above: it executes
Redis commands. One of the most important things about it, however, is
that it can be called multiple times without coordination, for
example in an HTTP or WebSocket server where each session calls it
independently to communicate with Redis. The benefits of this feature
are manifold
* Reduces code complexity as users won't have to implement queues
whenever, for example, different HTTP sessions want to share a connection to Redis.
* A small number of connections improves the performance associated
with [pipelines](https://redis.io/topics/pipelining). A single
connection will indeed be enough in most cases.
There are some important things about `connection::async_exec` that
are worth stating here
* `connection::async_exec` will write a request and read the response
directly into the data structure passed by the user, avoiding
temporaries altogether.
* Requests belonging to different `async_exec` calls will be coalesced into a single payload
(pipelined) and written only once, improving performance massively.
* Users have full control over whether `async_exec` should remain suspended
if a connection is lost, among other things. See
`aedis::resp3::request::config`.
The code below illustrates these concepts in a TCP session of the
`echo_server.cpp` example
```cpp
auto echo_server_session(tcp_socket socket, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
request req;
std::tuple<std::string> response;
for (std::string buffer;;) {
// Reads a user message.
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
// Echos it through Redis.
req.push("PING", buffer);
co_await conn->async_exec(req, adapt(response));
// Writes it back to the user.
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
// Cleanup
std::get<0>(response).clear();
req.clear();
buffer.erase(0, n);
}
}
```
Notice also how the session above provides back-pressure as the
coroutine won't read the next message from the socket until a cycle is
complete.
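To make the no-coordination point concrete, here is a small sketch (not taken from the examples) of two callers issuing their own requests on a shared connection; the connection pipelines the writes whenever possible:

```cpp
// Sketch: independent callers, one connection, writes coalesced automatically.
auto ping(std::shared_ptr<connection> conn, std::string msg) -> net::awaitable<std::string>
{
   request req;
   req.push("PING", msg);

   std::tuple<std::string> resp;
   co_await conn->async_exec(req, adapt(resp));
   co_return std::get<0>(resp);
}

// Inside some coroutine that already holds conn:
//   auto [a, b] = co_await (ping(conn, "a") && ping(conn, "b"));
```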
#### Receive
Receiving Redis pushes works similarly to the `async_exec` discussed
above, but without the request. The example below was taken from
subscriber.cpp
```cpp
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
```
In general, it is advisable for all apps to keep a coroutine calling
`async_receive`, as an unread push will cause the connection to stall
and eventually time out. Notice that the same connection that is being
used to send requests can also be used to receive server-side pushes.
#### Cancellation
Aedis supports both implicit and explicit cancellation of connection
operations. Explicit cancellation is supported by means of the
`aedis::connection::cancel` member function. Implicit cancellation,
like those that may happen when using Asio awaitable operators && and
|| will be discussed with more detail below.
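A hedged sketch of the explicit form (the `aedis::operation` enumerator names below are assumptions inferred from the member functions listed earlier):

```cpp
// Sketch: explicitly cancel per-operation work on a connection.
conn.cancel(aedis::operation::exec);  // requests not yet written
conn.cancel(aedis::operation::run);   // the read/write loop
```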
```cpp
co_await (conn.async_run(...) && conn.async_exec(...))
```
* Useful when implementing reconnection.
* `async_exec` is responsible for sending the `HELLO` command and
optionally for subscribing to channels.
```cpp
co_await (conn.async_run(...) || conn.async_exec(...))
```
* Useful for short-lived connections that are meant to be closed after
a command has been executed.
```cpp
co_await (conn.async_exec(...) || time.async_wait(...))
```
* Provides a way to limit how long the execution of a single request
should last.
* The cancellation will be ignored if the request has already
been written to the socket.
* It is usually a better idea to use a health checker than a
per-request timeout; see subscriber.cpp for an example.
```cpp
co_await (conn.async_run(...) || time.async_wait(...))
```
* Sets a limit on how long the connection should live.
```cpp
co_await (conn.async_exec(...) || conn.async_exec(...) || ... || conn.async_exec(...))
```
* This works but is considered an antipattern. Unless
the user has set `aedis::resp3::request::config::coalesce` to
`false` (and they shouldn't), the connection will automatically merge
the individual requests into a single payload anyway.
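The preferred pattern, sketched below, is simply to push all commands into one request and execute it once; the library then writes them as a single pipeline:

```cpp
// Sketch: one request, one async_exec, a single pipelined write.
request req;
req.push("PING");
req.push("INCR", "counter");
req.push("QUIT");

co_await conn.async_exec(req, adapt());
```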
## Why Aedis
@@ -609,8 +637,8 @@ redis-plus-plus does not support
* The latest version of the communication protocol RESP3. Without it it is impossible to support some important Redis features like client side caching, among other things.
* Coroutines.
* Reading responses directly in user data structures to avoid creating temporaries.
* Proper error handling with support for error-code.
* Healthy checks.
* Error handling with support for error-code.
* Cancellation.
The remaining points will be addressed individually. Let us first
have a look at what sending a command, a pipeline and a transaction
@@ -738,7 +766,7 @@ decrease.
* I did expect nodejs to come a little behind given it is
javascript code. Otherwise I did expect it to have similar
performance to libuv since it is the framework behind it.
* Go did surprise me: faster than nodejs and liuv!
* Go did surprise me: faster than nodejs and libuv!
The code used in the benchmarks can be found at
@@ -768,15 +796,14 @@ in the graph, the reasons are
comes so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant.
I don't know for sure why it is so slow, I suppose it has
something to do with its lack of proper
something to do with its lack of automatic
[pipelining](https://redis.io/docs/manual/pipelining/) support.
In fact, the more TCP connections I launch the worse its
performance gets.
* Libuv: I left it out because it would require too much work to
write it and make it have a good performance. More specifically,
I would have to use hiredis and implement support for pipelines
manually.
* Libuv: I left it out because it would require me to write too much
C code. More specifically, I would have to use hiredis and
implement support for pipelines manually.
The code used in the benchmarks can be found at
@@ -786,6 +813,10 @@ The code used in the benchmarks can be found at
<a name="api-reference"></a>
#### Conclusion
Redis clients have to support automatic pipelining to have competitive performance. For updates to this document follow https://github.com/mzimbres/aedis.
## Reference
* [High-Level](#high-level-api): Covers the topics discussed in this document.
@@ -806,7 +837,7 @@ in no more than one source file in your applications. For example, to
compile one of the examples manually
```cpp
g++ -std=c++20 -pthread -I/opt/boost_1_79_0/include/ -I./aedis/include examples/intro.cpp
g++ -std=c++20 -pthread -I/opt/boost_1_79_0/include/ -Iinclude -Iexamples examples/intro.cpp examples/common.cpp
```
The requirements for using Aedis are
@@ -834,6 +865,31 @@ another.
## Changelog
### v1.3.0
* Removes automatic sending of the `HELLO` command. This can't be
implemented properly without bloating the connection class. It is
now the user's responsibility to send HELLO. Requests that contain it have
priority over other requests and will be moved to the front of the
queue, see `aedis::resp3::request::config`.
* Automatic name resolving and connecting have been removed from
`aedis::connection::async_run`. Users have to do this step manually
now. The reason for this change is that having them built-in doesn't
offer the flexibility needed by Boost users.
* Removes health checks and idle timeout. This functionality must now
be implemented by users; see the examples. This is
part of making Aedis useful to a larger audience and suitable for
the Boost review process.
* `aedis::connection` is now a typedef for a connection over a
`net::ip::tcp::socket`, and `aedis::ssl::connection` for one over a
`net::ssl::stream<net::ip::tcp::socket>`. Users that need
another stream type must now specialize `aedis::basic_connection`.
* Adds a low level example of async code.
### v1.2.0
* `aedis::adapt` supports now tuples created with `std::tie`.
@@ -887,7 +943,7 @@ another.
* Renames `operation::receive_push` to `aedis::operation::receive`.
### v1.1.0...1
### v1.1.0-1
* Removes `coalesce_requests` from the `aedis::connection::config`, it
became a request property now, see `aedis::resp3::request::config::coalesce`.
@@ -1013,7 +1069,7 @@ another.
* Fixes build with the clang compilers and makes some improvements in
the documentation.
### v0.2.0...1
### v0.2.0-1
* Fixes a bug that happens on very high load. (v0.2.1)
* Major rewrite of the high-level API. There is no more need to use the low-level API anymore.
@@ -1021,7 +1077,7 @@ another.
* Support for reconnection: Pending requests are not canceled when a connection is lost and are re-sent when a new one is established.
* The library is not sending HELLO-3 on user behalf anymore. This is important to support AUTH properly.
### v0.1.0...2
### v0.1.0-2
* Adds reconnect coroutine in the `echo_server` example. (v0.1.2)
* Corrects `client::async_wait_for_data` with `make_parallel_group` to launch operation. (v0.1.2)


@@ -4,76 +4,40 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include "unistd.h"
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
#include <unistd.h>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using signal_set_type = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using connection = aedis::connection<tcp_socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
// Chat over redis pubsub. To test, run this program from different
// terminals and type messages to stdin. Use
//
// $ redis-cli monitor
//
// to monitor the message traffic.
// Chat over Redis pubsub. To test, run this program from different
// terminals and type messages to stdin.
// Receives messages from other users.
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
// Receives Redis server-side pushes.
auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
std::cout << resp.at(1).value << " " << resp.at(2).value << " " << resp.at(3).value << std::endl;
resp.clear();
}
}
// Subscribes to the channels when a new connection is established.
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "chat-channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (;;) {
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
// Publishes messages to other users.
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> conn)
// Publishes stdin messages to a Redis channel.
auto publisher(std::shared_ptr<stream_descriptor> in, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(in, net::dynamic_buffer(msg, 1024), "\n");
auto n = co_await net::async_read_until(*in, net::dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "chat-channel", msg);
co_await conn->async_exec(req);
@@ -81,26 +45,26 @@ net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection
}
}
auto main() -> int
auto subscriber(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
try {
net::io_context ioc{1};
stream_descriptor in{ioc, ::dup(STDIN_FILENO)};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "chat-channel");
auto conn = std::make_shared<connection>(ioc);
co_spawn(ioc, publisher(in, conn), net::detached);
co_spawn(ioc, push_receiver(conn), net::detached);
co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
co_await conn->async_exec(req);
}
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto stream = std::make_shared<stream_descriptor>(ex, ::dup(STDIN_FILENO));
signal_set_type sig{ex, SIGINT, SIGTERM};
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || publisher(stream, conn) || receiver(conn) ||
healthy_checker(conn) || sig.async_wait()) && subscriber(conn));
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)

examples/common.cpp (new file)

@@ -0,0 +1,94 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include "common.hpp"
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <iostream>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using aedis::resp3::request;
using aedis::adapt;
using aedis::operation;
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace
{
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
}
auto healthy_checker(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
try {
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
timer_type timer{co_await net::this_coro::executor};
for (boost::system::error_code ec;;) {
timer.expires_after(std::chrono::seconds{1});
co_await (conn->async_exec(req, adapt()) || timer.async_wait(redir(ec)));
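// If the PING wins the race the timer is canceled and ec becomes
// operation_aborted; if the timer wins, ec stays clear, the checker
// returns and (when combined with || as in the examples) the sibling
// operations are canceled.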
if (!ec) {
co_return;
}
// Waits some time before trying the next ping.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
} catch (...) {
}
}
auto
connect(
std::shared_ptr<connection> conn,
std::string const& host,
std::string const& port) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
timer_type timer{ex};
boost::system::error_code ec;
timer.expires_after(std::chrono::seconds{5});
auto const addrs = co_await (resv.async_resolve(host, port) || timer.async_wait(redir(ec)));
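// If the resolver wins the race the timer is canceled and ec becomes
// operation_aborted; a clear ec therefore means the timer fired first,
// i.e. the resolve timed out.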
if (!ec)
throw std::runtime_error("Resolve timeout");
timer.expires_after(std::chrono::seconds{5});
co_await (net::async_connect(conn->next_layer(), std::get<0>(addrs)) || timer.async_wait(redir(ec)));
if (!ec)
throw std::runtime_error("Connect timeout");
}
extern net::awaitable<void> async_main();
// Main function used in our examples.
auto main() -> int
{
try {
net::io_context ioc;
net::co_spawn(ioc, async_main(), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
return 1;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

examples/common.hpp (new file)

@@ -0,0 +1,32 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_EXAMPLES_COMMON_HPP
#define AEDIS_EXAMPLES_COMMON_HPP
#include <boost/asio.hpp>
#include <aedis.hpp>
#include <memory>
#include <iostream>
#include <vector>
#include <map>
#include <set>
#include <string>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
using connection = boost::asio::use_awaitable_t<>::as_default_on_t<aedis::connection>;
auto
connect(
std::shared_ptr<connection> conn,
std::string const& host,
std::string const& port) -> boost::asio::awaitable<void>;
auto healthy_checker(std::shared_ptr<connection> conn) -> boost::asio::awaitable<void>;
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)
#endif // AEDIS_EXAMPLES_COMMON_HPP


@@ -4,38 +4,35 @@
* accompanying file LICENSE.txt)
*/
#include <map>
#include <vector>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
#include <map>
#include <vector>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using connection = aedis::connection<tcp_socket>;
// To avoid verbosity.
auto redir(boost::system::error_code& ec)
void print(std::map<std::string, std::string> const& cont)
{
return net::redirect_error(net::use_awaitable, ec);
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
// Sends some containers.
net::awaitable<void> send(endpoint ep)
void print(std::vector<int> const& cont)
{
auto ex = co_await net::this_coro::executor;
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
// Stores the content of some STL containers in Redis.
auto store(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
@@ -44,38 +41,36 @@ net::awaitable<void> send(endpoint ep)
request req;
req.get_config().cancel_on_connection_lost = true;
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
req.push("HELLO", 3);
req.push_range("RPUSH", "rpush-key", vec);
req.push_range("HSET", "hset-key", map);
req.push("QUIT");
connection conn{ex};
co_await (conn.async_run(ep) || conn.async_exec(req));
co_await conn->async_exec(req);
}
// Retrieves a Redis hash as an std::map.
net::awaitable<std::map<std::string, std::string>> retrieve_hashes(endpoint ep)
auto hgetall(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
connection conn{co_await net::this_coro::executor};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("HGETALL", "hset-key");
req.push("QUIT");
std::map<std::string, std::string> ret;
auto resp = std::tie(ret, std::ignore);
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
std::tuple<aedis::ignore, std::map<std::string, std::string>, aedis::ignore> resp;
co_return std::move(ret);
co_await conn->async_exec(req, adapt(resp));
print(std::get<1>(resp));
}
// Retrieves as a data structure.
net::awaitable<void> transaction(endpoint ep)
// Retrieves in a transaction.
auto transaction(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
connection conn{co_await net::this_coro::executor};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
@@ -83,6 +78,7 @@ net::awaitable<void> transaction(endpoint ep)
req.push("QUIT");
std::tuple<
aedis::ignore, // hello
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
@@ -90,36 +86,26 @@ net::awaitable<void> transaction(endpoint ep)
aedis::ignore // quit
> resp;
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
co_await conn->async_exec(req, adapt(resp));
print(std::get<0>(std::get<3>(resp)).value());
print(std::get<1>(std::get<3>(resp)).value());
print(std::get<0>(std::get<4>(resp)).value());
print(std::get<1>(std::get<4>(resp)).value());
}
net::awaitable<void> async_main()
{
try {
endpoint ep{"127.0.0.1", "6379"};
co_await send(ep);
co_await transaction(ep);
auto const hashes = co_await retrieve_hashes(ep);
print(hashes);
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// Uses short-lived connections to store and retrieve the
// containers.
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || store(conn));
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || hgetall(conn));
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || transaction(conn));
}
auto main() -> int
{
try {
net::io_context ioc;
net::co_spawn(ioc, async_main(), net::detached);
ioc.run();
} catch (...) {
std::cerr << "Error." << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,84 +4,58 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using signal_set_type = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
using connection = aedis::connection<tcp_socket>;
awaitable_type echo_server_session(tcp_socket socket, std::shared_ptr<connection> db)
auto echo_server_session(tcp_socket socket, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
request req;
std::tuple<std::string> response;
std::string resp;
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(response));
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
std::get<0>(response).clear();
auto tmp = std::tie(resp);
co_await conn->async_exec(req, adapt(tmp));
co_await net::async_write(socket, net::buffer(resp));
resp.clear();
req.clear();
buffer.erase(0, n);
}
}
awaitable_type listener(std::shared_ptr<connection> db)
auto listener(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), db), net::detached);
net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), net::detached);
}
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
auto async_main() -> net::awaitable<void>
{
net::steady_timer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (boost::system::error_code ec1;;) {
co_await conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1));
std::clog << "async_run: " << ec1.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait(net::use_awaitable);
}
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set_type sig{ex, SIGINT, SIGTERM};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || listener(conn) || healthy_checker(conn) ||
sig.async_wait()) && conn->async_exec(req));
}
auto main() -> int
{
try {
net::io_context ioc{1};
auto db = std::make_shared<connection>(ioc);
co_spawn(ioc, reconnect(db), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto) {
ioc.stop();
});
co_spawn(ioc, listener(db), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,40 +4,32 @@
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::connection<>;
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto main() -> int
net::awaitable<void> async_main()
{
try {
boost::asio::io_context ioc;
connection conn{ioc};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("PING", "Hello world");
req.push("QUIT");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
std::tuple<std::string, aedis::ignore> resp;
conn.async_exec(req, adapt(resp), logger);
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
ioc.run();
std::cout << std::get<0>(resp) << std::endl;
} catch (...) {
std::cerr << "Error" << std::endl;
}
std::cout << "PING: " << std::get<1>(resp) << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -10,13 +10,14 @@
#include <boost/asio.hpp>
#include <aedis.hpp>
// TODO: Fix this after updating to 1.80.
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
template <class Adapter>
@@ -38,12 +39,13 @@ int main()
connection conn{ioc};
std::thread t{[&]() {
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
conn.async_run(logger);
ioc.run();
}};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");


@@ -6,21 +6,22 @@
#include <tuple>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <boost/asio/ssl.hpp>
#include <aedis.hpp>
#include <aedis/ssl/connection.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::ssl::connection<net::ssl::stream<net::ip::tcp::socket>>;
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
using connection = net::use_awaitable_t<>::as_default_on_t<aedis::ssl::connection>;
auto verify_certificate(bool, net::ssl::verify_context&) -> bool
{
@@ -28,30 +29,31 @@ auto verify_certificate(bool, net::ssl::verify_context&) -> bool
return true;
}
auto main() -> int
net::awaitable<void> async_main()
{
try {
net::io_context ioc;
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3, "AUTH", "aedis", "aedis");
req.push("PING");
req.push("QUIT");
net::ssl::context ctx{net::ssl::context::sslv23};
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
connection conn{ioc, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
// Resolve
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
auto const endpoints = co_await resv.async_resolve("db.occase.de", "6380");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");
net::ssl::context ctx{net::ssl::context::sslv23};
connection conn{ex, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
std::tuple<std::string, aedis::ignore> resp;
conn.async_exec(req, adapt(resp), logger);
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
co_await net::async_connect(conn.lowest_layer(), endpoints);
co_await conn.next_layer().async_handshake(net::ssl::stream_base::client);
co_await (conn.async_run() || conn.async_exec(req, adapt(resp)));
ioc.run();
std::cout << "Response: " << std::get<0>(resp) << std::endl;
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
std::cout << "Response: " << std::get<1>(resp) << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,63 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using endpoints = net::ip::tcp::resolver::results_type;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using aedis::resp3::request;
using aedis::adapter::adapt2;
using net::ip::tcp;
net::awaitable<void> ping(endpoints const& addrs)
{
tcp_socket socket{co_await net::this_coro::executor};
net::connect(socket, addrs);
// Creates the request and writes to the socket.
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
co_await resp3::async_write(socket, req);
// Responses
std::string buffer, resp;
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
co_await resp3::async_read(socket, dbuffer);
co_await resp3::async_read(socket, dbuffer, adapt2(resp));
co_await resp3::async_read(socket, dbuffer);
std::cout << "Ping: " << resp << std::endl;
}
int main()
{
try {
net::io_context ioc;
net::ip::tcp::resolver resv{ioc};
auto const addrs = resv.resolve("127.0.0.1", "6379");
net::co_spawn(ioc, ping(addrs), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

examples/main.cpp (new, empty file)


@@ -1,65 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <map>
#include <set>
#include <vector>
#include <string>
#include <iostream>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/node.hpp>
// Some functions to make the examples less repetitive.
namespace net = boost::asio;
using aedis::resp3::node;
void print_aggr(std::vector<aedis::resp3::node<std::string>>& v)
{
if (std::empty(v))
return;
auto const m = aedis::resp3::element_multiplicity(v.front().data_type);
for (auto i = 0lu; i < m * v.front().aggregate_size; ++i)
std::cout << v[i + 1].value << " ";
std::cout << "\n";
v.clear();
}
template <class T>
void print(std::vector<T> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
template <class T>
void print(std::set<T> const& cont)
{
for (auto const& e: cont) std::cout << e << "\n";
}
template <class T, class U>
void print(std::map<T, U> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
void print(std::string const& e)
{
std::cout << e << std::endl;
}
void print_push(std::vector<aedis::resp3::node<std::string>>& resp)
{
std::cout
<< "Push type: " << resp.at(1).value << "\n"
<< "Channel: " << resp.at(2).value << "\n"
<< "Message: " << resp.at(3).value << "\n"
<< std::endl;
}

View File

@@ -0,0 +1,70 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common.hpp"
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using endpoints = net::ip::tcp::resolver::results_type;
using aedis::adapt;
using aedis::resp3::request;
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
struct endpoint {
std::string host;
std::string port;
};
// For more info see
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
auto resolve_master_address(std::vector<endpoint> const& endpoints) -> net::awaitable<endpoint>
{
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec;
co_await connect(conn, ep.host, ep.port);
co_await (conn->async_run() && conn->async_exec(req, adapt(addr), redir(ec)));
conn->reset_stream();
if (std::get<0>(addr))
co_return endpoint{std::get<0>(addr).value().at(0), std::get<0>(addr).value().at(1)};
}
co_return endpoint{};
}
auto async_main() -> net::awaitable<void>
{
   // A list of sentinel addresses, only one of which is responsive,
   // to simulate sentinels that are down.
std::vector<endpoint> const endpoints
{ {"foo", "26379"}
, {"bar", "26379"}
, {"127.0.0.1", "26379"}
};
auto const ep = co_await resolve_master_address(endpoints);
std::clog
<< "Host: " << ep.host << "\n"
<< "Port: " << ep.port << "\n"
<< std::flush;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -4,25 +4,26 @@
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <boost/json.hpp>
#include <aedis.hpp>
#include "common.hpp"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>
#include <iterator>
#include <string>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include <boost/json/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using connection = aedis::connection<>;
using namespace boost::json;
struct user {
@@ -43,17 +44,17 @@ void tag_invoke(value_from_tag, value& jv, user const& u)
template<class T>
void extract(object const& obj, T& t, boost::string_view key)
{
t = value_to<T>(obj.at(key));
t = value_to<T>(obj.at(key));
}
auto tag_invoke(value_to_tag<user>, value const& jv)
{
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
}
// Serializes
@@ -83,35 +84,26 @@ auto operator<(user const& a, user const& b)
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto main() -> int
net::awaitable<void> async_main()
{
try {
net::io_context ioc;
connection conn{ioc};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
endpoint ep{"127.0.0.1", "6379"};
conn.async_exec(req, adapt(resp),logger);
conn.async_run(ep, {}, logger);
ioc.run();
// Print
print(std::get<2>(resp));
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
for (auto const& e: std::get<2>(resp))
std::cout << e << "\n";
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -4,29 +4,21 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using signal_set_type = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
/* This example will subscribe and read pushes indefinitely.
*
@@ -45,59 +37,36 @@ using connection = aedis::connection<tcp_socket>;
*/
// Receives pushes.
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
std::cout << resp.at(1).value << " " << resp.at(2).value << " " << resp.at(3).value << std::endl;
resp.clear();
}
}
// See
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set_type sig{ex, SIGINT, SIGTERM};
timer_type timer{ex};
request req;
req.get_config().cancel_if_not_connected = false;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
   // The loop reconnects if the connection is lost. To exit, type Ctrl-C twice.
for (;;) {
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || healthy_checker(conn) || sig.async_wait() ||
receiver(conn)) && conn->async_exec(req));
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
auto main() -> int
{
try {
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, push_receiver(conn), net::detached);
net::co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)
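
The reconnect loop above also awaits healthy_checker(conn), which is defined in the shared examples/common.cpp rather than in this hunk. Since health checks were removed from the connection class, such a checker now lives in user code. What follows is only a sketch of what that helper might look like, assuming the connection typedef and the namespace aliases used by the example above:

// Hypothetical sketch of the healthy_checker helper; the real one lives in
// examples/common.cpp. It pings the server once per second and returns when
// a PING fails, so the awaitable-operator group above unwinds and the outer
// loop reconnects.
auto healthy_checker(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
   request req;
   req.push("PING");

   net::steady_timer timer{co_await net::this_coro::executor};
   for (;;) {
      boost::system::error_code ec;
      co_await conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec));
      if (ec)
         co_return; // Unhealthy: let async_main reconnect.

      timer.expires_after(std::chrono::seconds{1});
      co_await timer.async_wait(net::use_awaitable);
   }
}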

View File

@@ -1,139 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
auto is_valid(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.host) && !std::empty(ep.port);
}
// Connects to a Redis instance over sentinel and performs failover in
// case of disconnection, see
// https://redis.io/docs/reference/sentinel-clients. This example
// assumes a sentinel and a redis server running on localhost.
net::awaitable<void> receive_pushes(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
net::awaitable<endpoint> resolve()
{
   // A list of sentinel addresses, only one of which is responsive,
   // to simulate sentinels that are down.
std::vector<endpoint> const endpoints
{ {"foo", "26379"}
, {"bar", "26379"}
, {"127.0.0.1", "26379"}
};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
connection conn{co_await net::this_coro::executor};
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec1, ec2;
co_await (
conn.async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn.async_exec(req, adapt(addr), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn.reset_stream();
if (std::get<0>(addr))
break;
}
endpoint ep;
if (std::get<0>(addr)) {
ep.host = std::get<0>(addr).value().at(0);
ep.port = std::get<0>(addr).value().at(1);
}
co_return ep;
}
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
auto ex = co_await net::this_coro::executor;
stimer timer{ex};
for (;;) {
auto ep = co_await net::co_spawn(ex, resolve(), net::use_awaitable);
if (!is_valid(ep)) {
std::clog << "Can't resolve master name" << std::endl;
co_return;
}
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << "\n"
<< "Starting the failover." << std::endl;
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
int main()
{
try {
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, receive_pushes(conn), net::detached);
net::co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
int main() {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -25,38 +25,31 @@ namespace aedis {
* @tparam AsyncReadWriteStream A stream that supports reading and
* writing.
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection :
template <class AsyncReadWriteStream>
class basic_connection :
private detail::connection_base<
typename AsyncReadWriteStream::executor_type,
connection<AsyncReadWriteStream>> {
basic_connection<AsyncReadWriteStream>> {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
using base_type = detail::connection_base<executor_type, connection<AsyncReadWriteStream>>;
/** \brief Connection configuration parameters.
*/
struct timeouts {
/// Timeout of the resolve operation.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the resp3-handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval with which PING commands are sent to Redis.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The socket type when rebound to the specified executor.
using other = basic_connection<typename next_layer_type::template rebind_executor<Executor1>::other>;
};
using base_type = detail::connection_base<executor_type, basic_connection<AsyncReadWriteStream>>;
/// Constructor
explicit
connection(
basic_connection(
executor_type ex,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
@@ -64,10 +57,10 @@ public:
{}
explicit
connection(
basic_connection(
boost::asio::io_context& ioc,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: connection(ioc.get_executor(), resource)
: basic_connection(ioc.get_executor(), resource)
{ }
/// Returns the associated executor.
@@ -91,44 +84,10 @@ public:
/** @brief Establishes a connection with the Redis server asynchronously.
*
* This function performs the following steps
 * This function starts reading from the socket and executes all
 * requests that have been started prior to this call.
*
* @li Resolves the Redis host as of `async_resolve` with the
* timeout passed in the base class `connection::timeouts::resolve_timeout`.
*
* @li Connects to one of the endpoints returned by the resolve
* operation with the timeout passed in the base class
* `connection::timeouts::connect_timeout`.
*
* @li Performs a RESP3 handshake by sending a
* [HELLO](https://redis.io/commands/hello/) command with protocol
* version 3 and the credentials contained in the
* `aedis::endpoint` object. The timeout used is the one specified
* in `connection::timeouts::resp3_handshake_timeout`.
*
* @li Erases any password that may be contained in
* `endpoint::password`.
*
* @li Checks whether the server role corresponds to the one
* specified in the `endpoint`. If `endpoint::role` is left empty,
* no check is performed. If the role is different than the
* expected `async_run` will complete with
* `error::unexpected_server_role`.
*
* @li Starts healthy checks with a timeout twice the value of
* `connection::timeouts::ping_interval`. If no data is received during that
* time interval `connection::async_run` completes with
* `error::idle_timeout`.
*
* @li Starts the healthy check operation that sends the
* [PING](https://redis.io/commands/ping/) to Redis with a
* frequency equal to `connection::timeouts::ping_interval`.
*
* @li Starts reading from the socket and executes all requests
* that have been started prior to this function call.
*
* @param ep Redis endpoint.
* @param ts Timeouts used by the operations.
* @param token Completion token.
*
* The completion token must have the following signature
@@ -142,13 +101,9 @@ public:
* will complete without error.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
async_run(
endpoint ep,
timeouts ts = timeouts{},
CompletionToken token = CompletionToken{})
auto async_run(CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, ts, std::move(token));
return base_type::async_run(std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
@@ -236,33 +191,15 @@ public:
{ return base_type::cancel(op); }
private:
using this_type = connection<next_layer_type>;
using this_type = basic_connection<next_layer_type>;
template <class, class> friend class detail::connection_base;
template <class, class> friend struct detail::exec_read_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::receive_op;
template <class> friend struct detail::check_idle_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class, class> friend struct detail::connect_with_timeout_op;
template <class, class> friend struct detail::run_op;
template <class> friend struct detail::ping_op;
template <class Timer, class CompletionToken>
auto
async_connect(
boost::asio::ip::tcp::resolver::results_type const& endpoints,
timeouts ts,
Timer& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_with_timeout_op<this_type, Timer>{this, &endpoints, ts, &timer},
token, stream_);
}
template <class> friend struct detail::run_op;
void close() { stream_.close(); }
auto is_open() const noexcept { return stream_.is_open(); }
@@ -271,6 +208,11 @@ private:
AsyncReadWriteStream stream_;
};
/** \brief A connection that uses a boost::asio::ip::tcp::socket.
* \ingroup high-level-api
*/
using connection = basic_connection<boost::asio::ip::tcp::socket>;
} // aedis
#endif // AEDIS_CONNECTION_HPP
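
With resolve, connect and the RESP3 HELLO removed from async_run, the caller now establishes the socket itself and puts HELLO in its own request. A minimal usage sketch, assuming the connect() helper and connection typedef from examples/common.hpp and the awaitable-operator includes used by the other examples in this diff:

// Sketch only: connect by hand, then run the connection in parallel with a
// request that carries HELLO explicitly.
auto ping_once() -> net::awaitable<void>
{
   auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
   co_await connect(conn, "127.0.0.1", "6379");

   request req;
   req.push("HELLO", 3);   // Sent by the user now, not by async_run.
   req.push("PING");
   req.push("QUIT");

   std::tuple<aedis::ignore, std::string, aedis::ignore> resp;

   // async_run reads and writes until the connection is lost; async_exec
   // completes once the three responses above have arrived.
   co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));

   std::cout << "PING: " << std::get<1>(resp) << "\n";
}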

View File

@@ -23,7 +23,6 @@
#include <aedis/adapt.hpp>
#include <aedis/operation.hpp>
#include <aedis/endpoint.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
@@ -46,22 +45,18 @@ public:
explicit
connection_base(executor_type ex, std::pmr::memory_resource* resource)
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
: writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, read_buffer_{resource}
, write_buffer_{resource}
, reqs_{resource}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
{
req_.get_config().cancel_if_not_connected = true;
req_.get_config().cancel_on_connection_lost = true;
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
auto get_executor() {return resv_.get_executor();}
auto get_executor() {return writer_timer_.get_executor();}
auto cancel(operation op) -> std::size_t
{
@@ -72,13 +67,9 @@ public:
}
case operation::run:
{
resv_.cancel();
derived().close();
read_timer_.cancel();
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
cancel_on_conn_lost();
return 1U;
@@ -152,7 +143,7 @@ public:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<Derived, Adapter>{&derived(), &req, adapter}, token, resv_);
>(detail::exec_op<Derived, Adapter>{&derived(), &req, adapter}, token, writer_timer_);
}
template <
@@ -166,18 +157,16 @@ public:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::receive_op<Derived, decltype(f)>{&derived(), f}, token, resv_);
>(detail::receive_op<Derived, decltype(f)>{&derived(), f}, token, writer_timer_);
}
template <class Timeouts, class CompletionToken>
auto
async_run(endpoint ep, Timeouts ts, CompletionToken token)
template <class CompletionToken>
auto async_run(CompletionToken token)
{
ep_ = std::move(ep);
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<Derived, Timeouts>{&derived(), ts}, token, resv_);
>(detail::run_op<Derived>{&derived()}, token, writer_timer_);
}
private:
@@ -290,13 +279,9 @@ private:
template <class, class> friend struct detail::receive_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class> friend struct detail::ping_op;
template <class, class> friend struct detail::run_op;
template <class> friend struct detail::run_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::exec_read_op;
template <class> friend struct detail::resolve_with_timeout_op;
template <class> friend struct detail::check_idle_op;
template <class, class> friend struct detail::start_op;
template <class> friend struct detail::send_receive_op;
void cancel_push_requests()
@@ -315,6 +300,15 @@ private:
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (info->get_request().has_hello_priority()) {
auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
return !e->is_written() && !e->is_staged();
});
std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
}
if (derived().is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
@@ -322,26 +316,13 @@ private:
auto make_dynamic_buffer(std::size_t max_read_size = 512)
{ return boost::asio::dynamic_buffer(read_buffer_, max_read_size); }
template <class CompletionToken>
auto
async_resolve_with_timeout(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_with_timeout_op<this_type>{this, d},
token, resv_);
}
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<Derived>{&derived()}, token, resv_.get_executor());
>(detail::reader_op<Derived>{&derived()}, token, writer_timer_);
}
template <class CompletionToken>
@@ -350,42 +331,7 @@ private:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<Derived>{&derived()}, token, resv_.get_executor());
}
template <
class Timeouts,
class CompletionToken>
auto async_start(Timeouts ts, CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::start_op<this_type, Timeouts>{this, ts}, token, resv_);
}
template <class CompletionToken>
auto
async_ping(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_op<Derived>{&derived(), d}, token, resv_);
}
template <class CompletionToken>
auto
async_check_idle(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<Derived>{&derived(), d}, token, check_idle_timer_);
>(detail::writer_op<Derived>{&derived()}, token, writer_timer_);
}
template <class Adapter, class CompletionToken>
@@ -394,7 +340,7 @@ private:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<Derived, Adapter>{&derived(), adapter, cmds}, token, resv_);
>(detail::exec_read_op<Derived, Adapter>{&derived(), adapter, cmds}, token, writer_timer_);
}
void stage_request(req_info& ri)
@@ -422,57 +368,15 @@ private:
}
}
void prepare_hello(endpoint const& ep)
{
req_.clear();
if (requires_auth(ep)) {
req_.push("HELLO", "3", "AUTH", ep.username, ep.password);
} else {
req_.push("HELLO", "3");
}
}
auto expect_role(std::string const& expected) -> bool
{
if (std::empty(expected))
return true;
resp3::node<std::string> role_node;
role_node.data_type = resp3::type::blob_string;
role_node.aggregate_size = 1;
role_node.depth = 1;
role_node.value = "role";
auto iter = std::find(std::cbegin(response_), std::cend(response_), role_node);
if (iter == std::end(response_))
return false;
++iter;
BOOST_ASSERT(iter != std::cend(response_));
return iter->value == expected;
}
// IO objects
resolver_type resv_;
timer_type ping_timer_;
timer_type check_idle_timer_;
timer_type writer_timer_;
timer_type read_timer_;
push_channel_type push_channel_;
std::string read_buffer_;
std::string write_buffer_;
std::pmr::string read_buffer_;
std::pmr::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
// Last time we received data.
time_point_type last_data_;
resp3::request req_;
std::vector<resp3::node<std::string>> response_;
endpoint ep_;
// The result of async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
};
} // aedis
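
With the read and write buffers now being std::pmr::string (and the request queue using the same resource), the memory_resource passed to the constructor decides where they allocate. A self-contained sketch, assuming only the constructor signature shown above; the arena size and the monotonic resource are illustrative choices, not part of the library:

#include <array>
#include <cstddef>
#include <memory_resource>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp> // In exactly one .cpp file.

namespace net = boost::asio;

int main()
{
   net::io_context ioc;

   // Caller-owned arena backing the connection's internal buffers.
   std::array<std::byte, 64 * 1024> arena;
   std::pmr::monotonic_buffer_resource mr{arena.data(), arena.size()};

   aedis::connection conn{ioc.get_executor(), &mr};

   // ... stage requests with conn.async_exec(...) and start conn.async_run()
   // as in the examples, then:
   ioc.run();
}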

View File

@@ -20,7 +20,6 @@
#include <aedis/error.hpp>
#include <aedis/detail/net.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/exec.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
@@ -30,54 +29,6 @@
namespace aedis::detail {
template <class Conn, class Timer>
struct connect_with_timeout_op {
Conn* conn = nullptr;
boost::asio::ip::tcp::resolver::results_type const* endpoints = nullptr;
typename Conn::timeouts ts;
Timer* timer = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield detail::async_connect(conn->next_layer(), *timer, *endpoints, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
template <class Conn>
struct resolve_with_timeout_op {
Conn* conn = nullptr;
std::chrono::steady_clock::duration resolve_timeout{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::resolver::results_type const& res = {})
{
reenter (coro)
{
conn->ping_timer_.expires_after(resolve_timeout);
yield
aedis::detail::async_resolve(
conn->resv_, conn->ping_timer_,
conn->ep_.host, conn->ep_.port, std::move(self));
AEDIS_CHECK_OP0();
conn->endpoints_ = res;
self.complete({});
}
}
};
template <class Conn, class Adapter>
struct receive_op {
Conn* conn = nullptr;
@@ -209,14 +160,11 @@ struct exec_op {
{
// Check whether the user wants to wait for the connection to
// be established.
// TODO: is_open below reflects only whether a TCP connection
// has been established. We need a variable that informs
// whether HELLO was successful and we are connected to
// Redis.
if (req->get_config().cancel_if_not_connected && !conn->is_open())
if (req->get_config().cancel_if_not_connected && !conn->is_open()) {
return self.complete(error::not_connected, 0);
}
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), *req, conn->resv_.get_executor());
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), *req, conn->get_executor());
conn->add_request_info(info);
EXEC_OP_WAIT:
@@ -224,6 +172,8 @@ EXEC_OP_WAIT:
BOOST_ASSERT(ec == boost::asio::error::operation_aborted);
if (info->get_action() == Conn::req_info::action::stop) {
// Don't have to call remove_request as it has already
// been removed by cancel(exec).
return self.complete(ec, 0);
}
@@ -240,15 +190,25 @@ EXEC_OP_WAIT:
BOOST_ASSERT(conn->is_open());
if (req->size() == 0)
if (req->size() == 0) {
// Don't have to call remove_request as it has already
// been removed.
return self.complete({}, 0);
}
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->get_number_of_commands(), std::move(self));
AEDIS_CHECK_OP1();
if (is_cancelled(self)) {
conn->remove_request(info);
return self.complete(boost::asio::error::operation_aborted, {});
}
if (ec) {
return self.complete(ec, {});
}
read_size = n;
@@ -270,94 +230,25 @@ EXEC_OP_WAIT:
};
template <class Conn>
struct ping_op {
Conn* conn{};
std::chrono::steady_clock::duration ping_interval{};
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro) for (;;)
{
conn->ping_timer_.expires_after(ping_interval);
yield conn->ping_timer_.async_wait(std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
// Checking for is_open is necessary because the timer can
// complete with success although cancel has been called.
self.complete({});
return;
}
conn->req_.clear();
conn->req_.push("PING");
yield conn->async_exec(conn->req_, adapt(), std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
// Checking for is_open is necessary to avoid
// looping back on the timer although cancel has been
// called.
return self.complete({});
}
}
}
};
template <class Conn>
struct check_idle_op {
Conn* conn{};
std::chrono::steady_clock::duration ping_interval{};
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro) for (;;)
{
conn->check_idle_timer_.expires_after(2 * ping_interval);
yield conn->check_idle_timer_.async_wait(std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
// Checking for is_open is necessary because the timer can
// complete with success although cancel has been called.
return self.complete({});
}
auto const now = std::chrono::steady_clock::now();
if (conn->last_data_ + (2 * ping_interval) < now) {
conn->cancel(operation::run);
self.complete(error::idle_timeout);
return;
}
conn->last_data_ = now;
}
}
};
template <class Conn, class Timeouts>
struct start_op {
Conn* conn;
Timeouts ts;
struct run_op {
Conn* conn = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 4> order = {}
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec0 = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, boost::system::error_code ec3 = {})
, boost::system::error_code ec1 = {})
{
reenter (coro)
{
conn->write_buffer_.clear();
conn->cmds_ = 0;
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(token);},
[this](auto token) { return conn->writer(token);},
[this](auto token) { return conn->async_check_idle(ts.ping_interval, token);},
[this](auto token) { return conn->async_ping(ts.ping_interval, token);}
[this](auto token) { return conn->writer(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
@@ -370,83 +261,12 @@ struct start_op {
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
case 2: self.complete(ec2); break;
case 3: self.complete(ec3); break;
default: BOOST_ASSERT(false);
}
}
}
};
inline
auto check_resp3_handshake_failed(std::vector<resp3::node<std::string>> const& resp) -> bool
{
return std::size(resp) == 1 &&
(resp.front().data_type == resp3::type::simple_error ||
resp.front().data_type == resp3::type::blob_error ||
resp.front().data_type == resp3::type::null);
}
template <class Conn, class Timeouts>
struct run_op {
Conn* conn = nullptr;
Timeouts ts;
boost::asio::coroutine coro{};
template <class Self>
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro)
{
yield conn->async_resolve_with_timeout(ts.resolve_timeout, std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
yield conn->derived().async_connect(conn->endpoints_, ts, conn->ping_timer_, std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->prepare_hello(conn->ep_);
conn->ping_timer_.expires_after(ts.resp3_handshake_timeout);
conn->response_.clear();
yield
resp3::detail::async_exec(
conn->next_layer(),
conn->ping_timer_,
conn->req_,
adapter::adapt2(conn->response_),
conn->make_dynamic_buffer(),
std::move(self)
);
AEDIS_CHECK_OP0(conn->cancel(operation::run));
if (check_resp3_handshake_failed(conn->response_)) {
conn->cancel(operation::run);
self.complete(error::resp3_handshake_error);
return;
}
conn->ep_.password.clear();
if (!conn->expect_role(conn->ep_.role)) {
conn->cancel(operation::run);
self.complete(error::unexpected_server_role);
return;
}
conn->write_buffer_.clear();
conn->cmds_ = 0;
yield conn->async_start(ts, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
template <class Conn>
struct writer_op {
Conn* conn;
@@ -517,8 +337,6 @@ struct reader_op {
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->last_data_ = std::chrono::steady_clock::now();
// We handle unsolicited events in the following way
//
// 1. Its resp3 type is a push.

View File

@@ -19,112 +19,6 @@
namespace aedis::detail {
template <class Executor>
using conn_timer_t = boost::asio::basic_waitable_timer<std::chrono::steady_clock, boost::asio::wait_traits<std::chrono::steady_clock>, Executor>;
template <
class Stream,
class EndpointSequence
>
struct connect_op {
Stream* socket;
conn_timer_t<typename Stream::executor_type>* timer;
EndpointSequence* endpoints;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, typename Stream::protocol_type::endpoint const& ep = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
auto f = [](boost::system::error_code const&, auto const&) { return true; };
return boost::asio::async_connect(*socket, *endpoints, f, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, {});
return;
}
switch (order[0]) {
case 0: self.complete(ec1, ep); return;
case 1:
{
if (ec2) {
self.complete(ec2, {});
} else {
self.complete(error::connect_timeout, ep);
}
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <class Resolver, class Timer>
struct resolve_op {
Resolver* resv;
Timer* timer;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::asio::ip::tcp::resolver::results_type res = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resv->async_resolve(host.data(), port.data(), token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, {});
return;
}
switch (order[0]) {
case 0: self.complete(ec1, res); return;
case 1:
{
if (ec2) {
self.complete(ec2, {});
} else {
self.complete(error::resolve_timeout, {});
}
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <class Channel>
struct send_receive_op {
Channel* channel;
@@ -150,43 +44,6 @@ struct send_receive_op {
}
};
template <
class Stream,
class EndpointSequence,
class CompletionToken
>
auto async_connect(
Stream& socket,
conn_timer_t<typename Stream::executor_type>& timer,
EndpointSequence ep,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, typename Stream::protocol_type::endpoint const&)
>(connect_op<Stream, EndpointSequence>
{&socket, &timer, &ep}, token, socket, timer);
}
template <
class Resolver,
class Timer,
class CompletionToken =
boost::asio::default_completion_token_t<typename Resolver::executor_type>
>
auto async_resolve(
Resolver& resv,
Timer& timer,
boost::string_view host,
boost::string_view port,
CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, boost::asio::ip::tcp::resolver::results_type)
>(resolve_op<Resolver, Timer>{&resv, &timer, host, port}, token, resv, timer);
}
template <
class Channel,
class CompletionToken =

View File

@@ -1,38 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ENDPOINT_HPP
#define AEDIS_ENDPOINT_HPP
#include <string>
namespace aedis {
/** \brief A Redis endpoint.
* \ingroup high-level-api
*/
struct endpoint {
/// Redis server address.
std::string host;
/// Redis server port.
std::string port;
/// Expected role if any.
std::string role{};
/// Username if authentication is required.
std::string username{};
/// Password if authentication is required.
std::string password{};
};
auto requires_auth(endpoint const& ep) noexcept -> bool;
} // aedis
#endif // AEDIS_ENDPOINT_HPP

View File

@@ -16,20 +16,8 @@ namespace aedis {
*/
enum class error
{
/// Resolve timeout.
resolve_timeout = 1,
/// Connect timeout.
connect_timeout,
/// Idle timeout.
idle_timeout,
/// Exec timeout.
exec_timeout,
/// Invalid RESP3 type.
invalid_data_type,
invalid_data_type = 1,
/// Can't parse the string as a number.
not_a_number,
@@ -73,17 +61,8 @@ enum class error
/// Got RESP3 null.
resp3_null,
/// Unexpected server role.
unexpected_server_role,
/// SSL handshake timeout.
ssl_handshake_timeout,
/// There is no established connection.
not_connected,
/// RESP3 handshake error (HELLO command).
resp3_handshake_error,
};
/** \internal

View File

@@ -1,18 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/endpoint.hpp>
#include <string>
namespace aedis {
auto requires_auth(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.username) && !std::empty(ep.password);
}
} // aedis

View File

@@ -21,10 +21,6 @@ struct error_category_impl : boost::system::error_category {
auto message(int ev) const -> std::string override
{
switch(static_cast<error>(ev)) {
case error::resolve_timeout: return "Resolve operation timeout.";
case error::connect_timeout: return "Connect operation timeout.";
case error::idle_timeout: return "Idle timeout.";
case error::exec_timeout: return "Exec timeout.";
case error::invalid_data_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number.";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
@@ -40,10 +36,7 @@ struct error_category_impl : boost::system::error_category {
case error::incompatible_size: return "Aggregate container has incompatible size.";
case error::not_a_double: return "Not a double.";
case error::resp3_null: return "Got RESP3 null.";
case error::unexpected_server_role: return "Unexpected server role.";
case error::ssl_handshake_timeout: return "SSL handshake timeout.";
case error::not_connected: return "Not connected.";
case error::resp3_handshake_error: return "RESP3 handshake error (HELLO command).";
default: BOOST_ASSERT(false); return "Aedis error.";
}
}

View File

@@ -1,173 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_EXEC_HPP
#define AEDIS_RESP3_EXEC_HPP
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/request.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::resp3::detail {
template <
class AsyncStream,
class Adapter,
class DynamicBuffer
>
struct exec_op {
AsyncStream* socket = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
std::size_t n_cmds = 0;
std::size_t size = 0;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro) for (;;)
{
if (req) {
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
AEDIS_CHECK_OP1();
if (n_cmds == 0) {
return self.complete({}, n);
}
req = nullptr;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
AEDIS_CHECK_OP1();
size += n;
if (--n_cmds == 0) {
return self.complete(ec, size);
}
}
}
};
template <
class AsyncStream,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &req, adapter, dbuf, req.size()}, token, socket);
}
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer
>
struct exec_with_timeout_op {
AsyncStream* socket = nullptr;
Timer* timer = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return detail::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, 0);
return;
}
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1:
{
if (ec2) {
self.complete(ec2, 0);
} else {
self.complete(aedis::error::exec_timeout, 0);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
Timer& timer,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_with_timeout_op<AsyncStream, Timer, Adapter, DynamicBuffer>
{&socket, &timer, &req, adapter, dbuf}, token, socket, timer);
}
} // aedis::resp3::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_RESP3_EXEC_HPP

View File

@@ -16,4 +16,9 @@ auto has_push_response(boost::string_view cmd) -> bool
return false;
}
auto is_hello(boost::string_view cmd) -> bool
{
return cmd == "HELLO";
}
} // aedis::resp3::detail

View File

@@ -68,6 +68,8 @@ namespace detail {
auto has_push_response(boost::string_view cmd) -> bool;
auto is_hello(boost::string_view cmd) -> bool;
template <class T>
struct add_bulk_impl {
template <class Request>
@@ -200,6 +202,14 @@ public:
* cancel_on_connection_lost is true.
*/
bool retry = true;
/** \brief If this request has a HELLO command and this flag is
* set to true, the `aedis::connection` will move it to the
* front of the queue of awaiting requests. This makes it
* possible to send HELLO and authenticate before other
* commands are sent.
*/
bool hello_with_priority = true;
};
/** \brief Constructor
@@ -208,17 +218,19 @@ public:
* \param resource Memory resource.
*/
explicit
request(config cfg = config{false, true, false, true},
request(config cfg = config{false, true, false, true, true},
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: payload_(resource), cfg_{cfg}
{}
: cfg_{cfg}, payload_(resource) {}
/// Returns the number of commands contained in this request.
[[nodiscard]] auto size() const noexcept -> std::size_t { return commands_;};
[[nodiscard]] auto size() const noexcept -> std::size_t
{ return commands_;};
// Returns the request payload.
[[nodiscard]] auto payload() const noexcept -> auto const& { return payload_;}
[[nodiscard]] auto payload() const noexcept -> auto const&
{ return payload_;}
[[nodiscard]] auto has_hello_priority() const noexcept -> auto const&
{ return has_hello_priority_;}
/// Clears the request preserving allocated memory.
void clear()
@@ -227,6 +239,16 @@ public:
commands_ = 0;
}
/// Calls std::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
/** @brief Appends a new command to the end of the request.
*
* For example
@@ -254,8 +276,7 @@ public:
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, make_tuple(args...));
if (!detail::has_push_response(cmd))
++commands_;
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
@@ -298,8 +319,7 @@ public:
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
@@ -337,8 +357,7 @@ public:
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
@@ -374,20 +393,19 @@ public:
push_range(cmd, begin(range), end(range));
}
/// Calls std::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
private:
void check_cmd(boost::string_view cmd)
{
if (!detail::has_push_response(cmd))
++commands_;
has_hello_priority_ = detail::is_hello(cmd) && cfg_.hello_with_priority;
}
config cfg_;
std::pmr::string payload_;
std::size_t commands_ = 0;
config cfg_;
bool has_hello_priority_ = false;
};
} // aedis::resp3
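
Because hello_with_priority defaults to true, a request containing HELLO is rotated to the front of the queue of requests that have not been written or staged yet, so authentication runs before commands queued earlier. A short sketch of the per-request configuration; the credentials and key below are hypothetical:

request req;
req.get_config().cancel_on_connection_lost = true;
req.get_config().hello_with_priority = true; // Default, shown for clarity.

// Even if other requests were added to the connection first, this one is
// moved ahead of every request that has not been written or staged yet.
req.push("HELLO", 3, "AUTH", "myuser", "mypassword");
req.push("GET", "mykey");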

View File

@@ -5,7 +5,6 @@
*/
#include <aedis/impl/error.ipp>
#include <aedis/impl/endpoint.ipp>
#include <aedis/resp3/impl/request.ipp>
#include <aedis/resp3/impl/type.ipp>
#include <aedis/resp3/detail/impl/parser.ipp>

View File

@@ -12,12 +12,11 @@
#include <boost/asio/io_context.hpp>
#include <aedis/detail/connection_base.hpp>
#include <aedis/ssl/detail/connection_ops.hpp>
namespace aedis::ssl {
template <class>
class connection;
class basic_connection;
/** \brief A SSL connection to the Redis server.
* \ingroup high-level-api
@@ -31,55 +30,44 @@ class connection;
*
*/
template <class AsyncReadWriteStream>
class connection<boost::asio::ssl::stream<AsyncReadWriteStream>> :
class basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>> :
private aedis::detail::connection_base<
typename boost::asio::ssl::stream<AsyncReadWriteStream>::executor_type,
connection<boost::asio::ssl::stream<AsyncReadWriteStream>>> {
basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>>> {
public:
/// Type of the next layer
using next_layer_type = boost::asio::ssl::stream<AsyncReadWriteStream>;
/// Executor type.
using executor_type = typename next_layer_type::executor_type;
using base_type = aedis::detail::connection_base<executor_type, connection<boost::asio::ssl::stream<AsyncReadWriteStream>>>;
/** \brief Connection configuration parameters.
*/
struct timeouts {
/// Timeout of the resolve operation.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the ssl handshake operation.
std::chrono::steady_clock::duration handshake_timeout = std::chrono::seconds{10};
/// Timeout of the resp3 handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval of ping operations.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The socket type when rebound to the specified executor.
using other = basic_connection<boost::asio::ssl::stream<typename AsyncReadWriteStream::template rebind_executor<Executor1>::other>>;
};
using base_type = aedis::detail::connection_base<executor_type, basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>>>;
/// Constructor
explicit
connection(
basic_connection(
executor_type ex,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex, ctx}
{
}
{ }
/// Constructor
explicit
connection(
basic_connection(
boost::asio::io_context& ioc,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: connection(ioc.get_executor(), ctx, resource)
: basic_connection(ioc.get_executor(), ctx, resource)
{ }
/// Returns the associated executor.
@@ -102,13 +90,9 @@ public:
* See aedis::connection::async_run for more information.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
async_run(
endpoint ep,
timeouts ts = timeouts{},
CompletionToken token = CompletionToken{})
auto async_run(CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, ts, std::move(token));
return base_type::async_run(std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
@@ -147,40 +131,30 @@ public:
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
private:
using this_type = connection<next_layer_type>;
using this_type = basic_connection<next_layer_type>;
template <class, class> friend class aedis::detail::connection_base;
template <class, class> friend struct aedis::detail::exec_op;
template <class, class> friend struct detail::ssl_connect_with_timeout_op;
template <class, class> friend struct aedis::detail::run_op;
template <class> friend struct aedis::detail::writer_op;
template <class> friend struct aedis::detail::check_idle_op;
template <class> friend struct aedis::detail::reader_op;
template <class, class> friend struct aedis::detail::exec_read_op;
template <class> friend struct aedis::detail::ping_op;
template <class, class> friend struct detail::receive_op;
template <class> friend struct aedis::detail::run_op;
template <class> friend struct aedis::detail::writer_op;
template <class> friend struct aedis::detail::reader_op;
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
auto is_open() const noexcept { return stream_.next_layer().is_open(); }
void close() { stream_.next_layer().close(); }
template <class Timer, class CompletionToken>
auto
async_connect(
boost::asio::ip::tcp::resolver::results_type const& endpoints,
timeouts ts,
Timer& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ssl_connect_with_timeout_op<this_type, Timer>{this, &endpoints, ts, &timer}, token, stream_);
}
next_layer_type stream_;
};
/** \brief A connection that uses a boost::asio::ssl::stream<boost::asio::ip::tcp::socket>.
* \ingroup high-level-api
*/
using connection = basic_connection<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>>;
} // aedis::ssl
#endif // AEDIS_SSL_CONNECTION_HPP
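
Like the plain connection, the TLS variant no longer resolves, connects or handshakes on its own; the caller drives the lowest layer and the SSL handshake before running it, as the intro example at the top of this diff does. A condensed sketch using the same aliases as the examples, assuming coroutine support and a TLS-enabled server on 127.0.0.1:6379 (SSL context setup omitted):

// Sketch only: TCP connect, TLS handshake, then run and execute in parallel.
auto tls_ping(net::ssl::context& ctx) -> net::awaitable<void>
{
   auto ex = co_await net::this_coro::executor;

   net::ip::tcp::resolver resv{ex};
   auto const addrs = co_await resv.async_resolve("127.0.0.1", "6379", net::use_awaitable);

   aedis::ssl::connection conn{ex, ctx};
   co_await net::async_connect(conn.lowest_layer(), addrs, net::use_awaitable);
   co_await conn.next_layer().async_handshake(net::ssl::stream_base::client, net::use_awaitable);

   request req;
   req.push("HELLO", 3);
   req.push("PING");
   req.push("QUIT");

   std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
   co_await (conn.async_run(net::use_awaitable) ||
             conn.async_exec(req, adapt(resp), net::use_awaitable));

   std::cout << "PING: " << std::get<1>(resp) << "\n";
}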

View File

@@ -1,113 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SSL_CONNECTION_OPS_HPP
#define AEDIS_SSL_CONNECTION_OPS_HPP
#include <array>
#include <boost/assert.hpp>
#include <boost/system.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::ssl::detail
{
template <class Stream>
struct handshake_op {
Stream* stream;
aedis::detail::conn_timer_t<typename Stream::executor_type>* timer;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
return stream->async_handshake(boost::asio::ssl::stream_base::client, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec1); return;
case 1:
{
BOOST_ASSERT_MSG(!ec2, "handshake_op: Incompatible state.");
self.complete(error::ssl_handshake_timeout);
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <
class Stream,
class CompletionToken
>
auto async_handshake(
Stream& stream,
aedis::detail::conn_timer_t<typename Stream::executor_type>& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(handshake_op<Stream>{&stream, &timer}, token, stream, timer);
}
template <class Conn, class Timer>
struct ssl_connect_with_timeout_op {
Conn* conn = nullptr;
boost::asio::ip::tcp::resolver::results_type const* endpoints = nullptr;
typename Conn::timeouts ts;
Timer* timer = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield
aedis::detail::async_connect(
conn->lowest_layer(), *timer, *endpoints, std::move(self));
AEDIS_CHECK_OP0();
timer->expires_after(ts.handshake_timeout);
yield
async_handshake(conn->next_layer(), *timer, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
} // aedis::ssl::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_SSL_CONNECTION_OPS_HPP

17
tests/common.hpp Normal file
View File

@@ -0,0 +1,17 @@
#pragma once
#include <boost/asio.hpp>
#include <chrono>
namespace net = boost::asio;
using endpoints = net::ip::tcp::resolver::results_type;
auto
resolve(
std::string const& host = "127.0.0.1",
std::string const& port = "6379") -> endpoints
{
net::io_context ioc;
net::ip::tcp::resolver resv{ioc};
return resv.resolve(host, port);
}

View File

@@ -1,164 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
bool is_host_not_found(error_code ec)
{
if (ec == net::error::netdb_errors::host_not_found) return true;
if (ec == net::error::netdb_errors::host_not_found_try_again) return true;
return false;
}
error_code test_async_run(endpoint ep, connection::timeouts cfg = {})
{
net::io_context ioc;
connection db{ioc};
error_code ret;
db.async_run(ep, cfg, [&](auto ec) { ret = ec; });
ioc.run();
return ret;
}
BOOST_AUTO_TEST_CASE(resolve_bad_host)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "Atibaia";
ep.port = "6379";
connection::timeouts cfg;
cfg.resolve_timeout = std::chrono::seconds{100};
auto const ec = test_async_run(ep, cfg);
BOOST_TEST(is_host_not_found(ec));
}
BOOST_AUTO_TEST_CASE(resolve_with_timeout)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "Atibaia";
ep.port = "6379";
connection::timeouts cfg;
// Low enough to always cause a timeout.
cfg.resolve_timeout = std::chrono::milliseconds{1};
auto const ec = test_async_run(ep, cfg);
BOOST_CHECK_EQUAL(ec, aedis::error::resolve_timeout);
}
BOOST_AUTO_TEST_CASE(connect_bad_port)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "1";
connection::timeouts cfg;
cfg.connect_timeout = std::chrono::seconds{100};
auto const ec = test_async_run(ep, cfg);
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::connection_refused);
}
BOOST_AUTO_TEST_CASE(connect_with_timeout)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "example.com";
ep.port = "1";
connection::timeouts cfg;
cfg.connect_timeout = std::chrono::milliseconds{1};
auto const ec = test_async_run(ep, cfg);
BOOST_CHECK_EQUAL(ec, aedis::error::connect_timeout);
}
BOOST_AUTO_TEST_CASE(bad_hello_response)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
// Succeeds with the tcp connection but fails the hello.
endpoint ep;
ep.host = "google.com";
ep.port = "80";
auto const ec = test_async_run(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::invalid_data_type);
}
BOOST_AUTO_TEST_CASE(plain_conn_on_tls_endpoint)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "google.com";
ep.port = "443";
auto const ec = test_async_run(ep);
BOOST_TEST(!!ec);
}
auto auth_fail_error(boost::system::error_code ec)
{
return ec == aedis::error::resp3_handshake_error ||
ec == aedis::error::exec_timeout;
}
BOOST_AUTO_TEST_CASE(auth_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
// Should cause an error in the authentication as our redis server
// has no authentication configured.
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "6379";
ep.username = "caboclo-do-mato";
ep.password = "jabuticaba";
auto const ec = test_async_run(ep);
BOOST_TEST(auth_fail_error(ec));
}
auto wrong_role_error(boost::system::error_code ec)
{
return ec == aedis::error::unexpected_server_role ||
ec == aedis::error::exec_timeout;
}
BOOST_AUTO_TEST_CASE(wrong_role)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
// Should cause an error in the authentication as our redis server
// has no authentication configured.
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "6379";
ep.role = "errado";
auto const ec = test_async_run(ep);
BOOST_TEST(wrong_role_error(ec));
}

View File

@@ -15,14 +15,15 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using error_code = boost::system::error_code;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
using connection = net::use_awaitable_t<>::as_default_on_t<aedis::connection>;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
@@ -37,8 +38,9 @@ net::awaitable<void> push_consumer(std::shared_ptr<connection> conn, int expecte
}
request req;
req.push("HELLO", 3);
req.push("QUIT");
co_await conn->async_exec(req, adapt(), net::use_awaitable);
co_await conn->async_exec(req, adapt());
}
auto echo_session(std::shared_ptr<connection> conn, std::string id, int n) -> net::awaitable<void>
@@ -46,19 +48,20 @@ auto echo_session(std::shared_ptr<connection> conn, std::string id, int n) -> ne
auto ex = co_await net::this_coro::executor;
request req;
std::tuple<std::string> resp;
std::tuple<aedis::ignore, std::string> resp;
for (auto i = 0; i < n; ++i) {
auto const msg = id + "/" + std::to_string(i);
//std::cout << msg << std::endl;
req.push("HELLO", 3);
req.push("PING", msg);
req.push("SUBSCRIBE", "channel");
boost::system::error_code ec;
co_await conn->async_exec(req, adapt(resp), net::redirect_error(net::use_awaitable, ec));
BOOST_TEST(!ec);
BOOST_CHECK_EQUAL(msg, std::get<0>(resp));
BOOST_CHECK_EQUAL(msg, std::get<1>(resp));
req.clear();
std::get<0>(resp).clear();
std::get<1>(resp).clear();
}
}
@@ -76,14 +79,15 @@ auto async_echo_stress() -> net::awaitable<void>
for (int i = 0; i < sessions; ++i)
net::co_spawn(ex, echo_session(conn, std::to_string(i), msgs), net::detached);
endpoint ep{"127.0.0.1", "6379"};
co_await conn->async_run(ep, {}, net::use_awaitable);
auto const addrs = resolve();
co_await net::async_connect(conn->next_layer(), addrs);
co_await conn->async_run();
}
BOOST_AUTO_TEST_CASE(echo_stress)
{
net::io_context ioc;
net::co_spawn(ioc.get_executor(), async_echo_stress(), net::detached);
net::co_spawn(ioc, async_echo_stress(), net::detached);
ioc.run();
}
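The typedef at the top of this test installs use_awaitable as the connection's default completion token, so awaited operations can omit it; a condensed sketch under that assumption (HELLO is pushed explicitly because the connection no longer sends it):
// Sketch only, mirroring the calls used in this test.
using connection = net::use_awaitable_t<>::as_default_on_t<aedis::connection>;
auto ping_once(connection& conn) -> net::awaitable<void>
{
   request req;
   req.push("HELLO", 3);
   req.push("PING", "hello");
   std::tuple<aedis::ignore, std::string> resp;   // one element per command
   co_await conn.async_exec(req, adapt(resp));    // token defaults to use_awaitable
   std::cout << std::get<1>(resp) << std::endl;   // the PING reply
}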

View File

@@ -14,32 +14,36 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
// TODO: Test whether HELLO won't be inserted past commands that have
// been already written.
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using connection = aedis::connection;
using error_code = boost::system::error_code;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
#endif
BOOST_AUTO_TEST_CASE(wrong_response_data_type)
{
request req;
req.push("HELLO", 3);
req.push("QUIT");
// Wrong data type.
std::tuple<int> resp;
std::tuple<aedis::ignore, int> resp;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(resp), [](auto ec, auto){
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req, adapt(resp), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_a_number);
});
db->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
});
@@ -50,11 +54,12 @@ BOOST_AUTO_TEST_CASE(cancel_request_if_not_connected)
{
request req;
req.get_config().cancel_if_not_connected = true;
req.push("HELLO", 3);
req.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(), [](auto ec, auto){
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_connected);
});
@@ -65,6 +70,7 @@ BOOST_AUTO_TEST_CASE(request_retry)
{
request req1;
req1.get_config().cancel_on_connection_lost = true;
req1.push("HELLO", 3);
req1.push("CLIENT", "PAUSE", 7000);
request req2;
@@ -73,18 +79,30 @@ BOOST_AUTO_TEST_CASE(request_retry)
req2.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
connection conn{ioc};
db->async_exec(req1, adapt(), [](auto ec, auto){
net::steady_timer st{ioc};
st.expires_after(std::chrono::seconds{1});
st.async_wait([&](auto){
// Cancels the request before receiving the response. This
// should cause the second request to complete with error
// although it has cancel_on_connection_lost = false.
conn.cancel(aedis::operation::run);
});
auto const endpoints = resolve();
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req2, adapt(), [](auto ec, auto){
conn.async_exec(req2, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
db->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
ioc.run();
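The two flags exercised above are per-request settings; a brief sketch of how they are set, with the behavior as asserted by the tests in this file:
// Sketch: request configuration used by these tests.
request req;
req.get_config().cancel_if_not_connected = true;     // async_exec fails with aedis::error::not_connected
                                                      // while the socket is not connected
req.get_config().cancel_on_connection_lost = true;    // pending execs are canceled when the run
                                                       // operation stops, instead of being retried
req.push("PING");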

View File

@@ -15,28 +15,27 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using connection = aedis::connection;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
auto async_run(std::shared_ptr<connection> conn) -> net::awaitable<void>
auto async_run(std::shared_ptr<connection> conn, error_code expected) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
connection::timeouts tms;
tms.ping_interval = std::chrono::seconds{10};
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec;
co_await conn->async_run(ep, tms, net::redirect_error(net::use_awaitable, ec));
BOOST_TEST(!ec);
co_await conn->async_run(net::redirect_error(net::use_awaitable, ec));
std::cout << ec.message() << std::endl;
BOOST_CHECK_EQUAL(ec, expected);
}
auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
@@ -50,6 +49,7 @@ auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
request req1;
req1.get_config().coalesce = false;
req1.push("HELLO", 3);
req1.push("BLPOP", "any", 3);
// Should not be canceled.
@@ -63,7 +63,7 @@ auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
// Should be canceled.
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::operation_aborted);
});
// Will complete while BLPOP is pending.
@@ -84,10 +84,14 @@ auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
BOOST_AUTO_TEST_CASE(cancel_exec_with_timer)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto const endpoints = resolve();
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), async_run(conn), net::detached);
net::connect(conn->next_layer(), endpoints);
net::co_spawn(ioc.get_executor(), async_run(conn, {}), net::detached);
net::co_spawn(ioc.get_executor(), async_cancel_exec(conn), net::detached);
ioc.run();
}
@@ -106,6 +110,7 @@ auto async_ignore_cancel_of_written_req(std::shared_ptr<connection> conn) -> net
request req1; // Will be cancelled after it has been written.
req1.get_config().coalesce = false;
req1.push("HELLO", 3);
req1.push("BLPOP", "any", 3);
request req2; // Will be cancelled.
@@ -117,23 +122,21 @@ auto async_ignore_cancel_of_written_req(std::shared_ptr<connection> conn) -> net
st.async_wait(net::redirect_error(net::use_awaitable, ec3))
);
BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, boost::asio::error::basic_errors::operation_aborted);
BOOST_CHECK_EQUAL(ec1, net::error::basic_errors::operation_aborted);
BOOST_CHECK_EQUAL(ec2, net::error::basic_errors::operation_aborted);
BOOST_TEST(!ec3);
request req3;
req3.push("PING");
req3.push("QUIT");
co_await conn->async_exec(req3, adapt(), net::redirect_error(net::use_awaitable, ec1));
BOOST_TEST(!ec1);
}
BOOST_AUTO_TEST_CASE(ignore_cancel_of_written_req)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
auto const endpoints = resolve();
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), async_run(conn), net::detached);
net::connect(conn->next_layer(), endpoints);
error_code expected = net::error::operation_aborted;
net::co_spawn(ioc.get_executor(), async_run(conn, expected), net::detached);
net::co_spawn(ioc.get_executor(), async_ignore_cancel_of_written_req(conn), net::detached);
ioc.run();
}

View File

@@ -6,6 +6,7 @@
#include <iostream>
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
@@ -14,94 +15,84 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using aedis::operation;
using connection = aedis::connection<>;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
BOOST_AUTO_TEST_CASE(push_filtered_out)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
std::tuple<std::string, std::string> resp;
conn->async_exec(req, adapt(resp), [](auto ec, auto){
std::tuple<aedis::ignore, std::string, std::string> resp;
conn.async_exec(req, adapt(resp), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_receive(adapt(), [](auto ec, auto){
conn.async_receive(adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
conn.async_run([](auto ec){
std::cout << "===> " << ec.message() << std::endl;
BOOST_TEST(!ec);
});
ioc.run();
BOOST_CHECK_EQUAL(std::get<0>(resp), "PONG");
BOOST_CHECK_EQUAL(std::get<1>(resp), "OK");
BOOST_CHECK_EQUAL(std::get<1>(resp), "PONG");
BOOST_CHECK_EQUAL(std::get<2>(resp), "OK");
}
// Checks whether we get idle timeout when no push reader is set.
void test_missing_push_reader1(bool coalesce)
void receive_wrong_syntax(request const& req)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req{{false, coalesce}};
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
conn->async_exec(req, adapt(), [](auto ec, auto){
conn.async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
});
ioc.run();
}
void test_missing_push_reader2(request const& req)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto){
conn.async_receive(adapt(), [&](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
conn.cancel(aedis::operation::run);
});
ioc.run();
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> push_consumer1(std::shared_ptr<connection> conn, bool& push_received)
net::awaitable<void> push_consumer1(connection& conn, bool& push_received)
{
{
auto [ec, ev] = co_await conn->async_receive(adapt(), as_tuple(net::use_awaitable));
auto [ec, ev] = co_await conn.async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await conn->async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
auto [ec, ev] = co_await conn.async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, net::experimental::channel_errc::channel_cancelled);
}
push_received = true;
@@ -127,22 +118,25 @@ struct adapter_error {
BOOST_AUTO_TEST_CASE(test_push_adapter)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("PING");
conn->async_receive(adapter_error{}, [](auto ec, auto) {
conn.async_receive(adapter_error{}, [](auto ec, auto) {
BOOST_CHECK_EQUAL(ec, aedis::error::incompatible_size);
});
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::error::channel_errors::channel_cancelled);
conn.async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::experimental::error::channel_errors::channel_cancelled);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
@@ -155,19 +149,23 @@ BOOST_AUTO_TEST_CASE(test_push_adapter)
void test_push_is_received1(bool coalesce)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req{{false, coalesce}};
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
conn->async_exec(req, adapt(), [](auto ec, auto){
conn.async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
conn.async_run([&](auto ec){
BOOST_TEST(!ec);
conn->cancel(operation::receive);
conn.cancel(operation::receive);
});
bool push_received = false;
@@ -184,6 +182,7 @@ void test_push_is_received1(bool coalesce)
void test_push_is_received2(bool coalesce)
{
request req1{{false, coalesce}};
req1.push("HELLO", 3);
req1.push("PING", "Message1");
request req2{{false, coalesce}};
@@ -195,21 +194,23 @@ void test_push_is_received2(bool coalesce)
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req3, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req3, adapt(), handler);
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec) {
conn.async_run([&](auto ec) {
BOOST_TEST(!ec);
conn->cancel(operation::receive);
conn.cancel(operation::receive);
});
bool push_received = false;
@@ -223,10 +224,10 @@ void test_push_is_received2(bool coalesce)
BOOST_TEST(push_received);
}
net::awaitable<void> push_consumer3(std::shared_ptr<connection> conn)
net::awaitable<void> push_consumer3(connection& conn)
{
for (;;)
co_await conn->async_receive(adapt(), net::use_awaitable);
co_await conn.async_receive(adapt(), net::use_awaitable);
}
// Test many subscribe requests.
@@ -250,24 +251,26 @@ void test_push_many_subscribes(bool coalesce)
};
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req0, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req3, adapt(), handler);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec) {
conn.async_exec(req0, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req3, adapt(), handler);
conn.async_run([&](auto ec) {
BOOST_TEST(!ec);
conn->cancel(operation::receive);
conn.cancel(operation::receive);
});
net::co_spawn(ioc.get_executor(), push_consumer3(conn), net::detached);
@@ -293,38 +296,33 @@ BOOST_AUTO_TEST_CASE(many_subscribers)
}
#endif
BOOST_AUTO_TEST_CASE(missing_reader1_coalesce)
{
test_missing_push_reader1(true);
}
BOOST_AUTO_TEST_CASE(missing_reader1_no_coalesce)
{
test_missing_push_reader1(false);
}
BOOST_AUTO_TEST_CASE(missing_reader2a)
BOOST_AUTO_TEST_CASE(receive_wrong_syntax1)
{
request req1{{false}};
req1.push("HELLO", 3);
req1.push("PING", "Message");
req1.push("SUBSCRIBE"); // Wrong command synthax.
req1.get_config().coalesce = true;
test_missing_push_reader2(req1);
receive_wrong_syntax(req1);
req1.get_config().coalesce = false;
test_missing_push_reader2(req1);
receive_wrong_syntax(req1);
}
BOOST_AUTO_TEST_CASE(missing_reader2b)
BOOST_AUTO_TEST_CASE(receive_wrong_syntax2)
{
request req2{{false}};
req2.push("HELLO", 3);
req2.push("SUBSCRIBE"); // Wrong command syntax.
req2.get_config().coalesce = true;
test_missing_push_reader2(req2);
receive_wrong_syntax(req2);
req2.get_config().coalesce = false;
test_missing_push_reader2(req2);
receive_wrong_syntax(req2);
}
#else
int main() {}
#endif
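Server pushes (e.g. SUBSCRIBE traffic) are delivered only through async_receive, and a pending receive has to be canceled once the run operation finishes, as the tests above do; a condensed sketch:
// Sketch: consume pushes until the receive operation is canceled.
auto reader(connection& conn) -> net::awaitable<void>
{
   for (;;)
      co_await conn.async_receive(adapt(), net::use_awaitable);   // one push per iteration
}
// In the async_run completion handler, once the connection stops:
//    conn.cancel(operation::receive);   // unblocks the pending async_receive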

View File

@@ -13,13 +13,13 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::adapt;
using aedis::endpoint;
using aedis::resp3::request;
using connection = aedis::connection<>;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using operation = aedis::operation;
@@ -27,34 +27,37 @@ using operation = aedis::operation;
BOOST_AUTO_TEST_CASE(test_quit_no_coalesce)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req1{{false, false}};
req1.push("HELLO", 3);
req1.push("PING");
request req2{{false, false}};
req2.push("QUIT");
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_exec(req2, adapt(), [](auto ec, auto) {
conn.async_exec(req2, adapt(), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec){
conn.async_run([&](auto ec){
BOOST_TEST(!ec);
conn->cancel(operation::exec);
conn.cancel(operation::exec);
});
ioc.run();
@@ -63,15 +66,20 @@ BOOST_AUTO_TEST_CASE(test_quit_no_coalesce)
void test_quit2(bool coalesce)
{
request req{{false, coalesce}};
req.push("HELLO", 3);
req.push("QUIT");
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto) {
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req, adapt(), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec) {
conn.async_run([](auto ec) {
BOOST_TEST(!ec);
});

View File

@@ -13,44 +13,46 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::adapt;
using aedis::endpoint;
using aedis::resp3::request;
using connection = aedis::connection<>;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using operation = aedis::operation;
BOOST_AUTO_TEST_CASE(test_quit_coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req1{{false, true}};
req1.push("HELLO", 3);
req1.push("PING");
request req2{{false, true}};
req2.push("QUIT");
db->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req2, adapt(), [](auto ec, auto){
conn.async_exec(req2, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [db](auto ec){
conn.async_run([&](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
db->cancel(operation::exec);
conn.cancel(operation::exec);
});
ioc.run();

View File

@@ -13,35 +13,40 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using connection = aedis::connection;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace boost::asio::experimental::awaitable_operators;
net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
net::awaitable<void> test_reconnect_impl()
{
auto ex = co_await net::this_coro::executor;
request req;
req.push("QUIT");
auto const endpoints = resolve();
connection conn{ex};
int i = 0;
endpoint ep{"127.0.0.1", "6379"};
for (; i < 5; ++i) {
boost::system::error_code ec1, ec2;
net::connect(conn.next_layer(), endpoints);
co_await (
db->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
db->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
conn.async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn.async_run(net::redirect_error(net::use_awaitable, ec2))
);
BOOST_TEST(!ec1);
BOOST_TEST(!ec2);
db->reset_stream();
conn.reset_stream();
}
BOOST_CHECK_EQUAL(i, 5);
@@ -51,44 +56,56 @@ net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
// Test whether the client works after a reconnect.
BOOST_AUTO_TEST_CASE(test_reconnect)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, test_reconnect_impl(db), net::detached);
net::co_spawn(ioc, test_reconnect_impl(), net::detached);
ioc.run();
}
auto async_test_reconnect_timeout() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec1, ec2;
auto ex = co_await net::this_coro::executor;
net::steady_timer st{ex};
auto conn = std::make_shared<connection>(ex);
auto const endpoints = resolve();
boost::system::error_code ec1, ec2, ec3;
request req1;
req1.get_config().cancel_if_not_connected = false;
req1.get_config().cancel_on_connection_lost = true;
req1.push("HELLO", 3);
req1.push("CLIENT", "PAUSE", 7000);
net::connect(conn->next_layer(), endpoints);
st.expires_after(std::chrono::seconds{1});
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) ||
conn->async_run(net::redirect_error(net::use_awaitable, ec2)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec3))
);
BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, aedis::error::idle_timeout);
BOOST_CHECK_EQUAL(ec2, boost::system::errc::errc_t::operation_canceled);
//BOOST_TEST(!ec3);
request req2;
req2.get_config().cancel_if_not_connected = false;
req2.get_config().cancel_on_connection_lost = true;
req2.push("HELLO", 3);
req2.push("QUIT");
net::connect(conn->next_layer(), endpoints);
st.expires_after(std::chrono::seconds{1});
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) ||
conn->async_run(net::redirect_error(net::use_awaitable, ec2)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec3))
);
BOOST_CHECK_EQUAL(ec1, boost::system::errc::errc_t::operation_canceled);
BOOST_CHECK_EQUAL(ec2, aedis::error::exec_timeout);
BOOST_CHECK_EQUAL(ec2, boost::asio::error::basic_errors::operation_aborted);
}
BOOST_AUTO_TEST_CASE(test_reconnect_and_idle)

View File

@@ -15,14 +15,14 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
@@ -32,14 +32,16 @@ using namespace net::experimental::awaitable_operators;
auto async_cancel_run_with_timer() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto const endpoints = resolve();
connection conn{ex};
net::connect(conn.next_layer(), endpoints);
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) ||
conn.async_run(net::redirect_error(net::use_awaitable, ec1)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec2))
);
@@ -54,27 +56,21 @@ BOOST_AUTO_TEST_CASE(cancel_run_with_timer)
ioc.run();
}
net::awaitable<void>
async_check_cancellation_not_missed(
std::shared_ptr<connection> conn,
int n,
std::chrono::milliseconds ms)
auto
async_check_cancellation_not_missed(int n, std::chrono::milliseconds ms) -> net::awaitable<void>
{
net::steady_timer timer{co_await net::this_coro::executor};
auto ex = co_await net::this_coro::executor;
auto const endpoints = resolve();
connection conn{ex};
connection::timeouts tms;
tms.resolve_timeout = std::chrono::seconds{10};
tms.connect_timeout = std::chrono::seconds{10};
tms.resp3_handshake_timeout = std::chrono::seconds{2};
tms.ping_interval = std::chrono::seconds{1};
endpoint ep{"127.0.0.1", "6379"};
net::steady_timer timer{ex};
for (auto i = 0; i < n; ++i) {
timer.expires_after(ms);
net::connect(conn.next_layer(), endpoints);
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) ||
conn.async_run(net::redirect_error(net::use_awaitable, ec1)) ||
timer.async_wait(net::redirect_error(net::use_awaitable, ec2))
);
BOOST_CHECK_EQUAL(ec1, boost::asio::error::basic_errors::operation_aborted);
@@ -86,98 +82,92 @@ async_check_cancellation_not_missed(
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_0)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 10, std::chrono::milliseconds{0}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(10, std::chrono::milliseconds{0}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_2)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{2}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{2}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_8)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{8}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{8}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_16)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{16}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{16}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_32)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{32}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{32}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_64)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{64}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{64}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_128)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{128}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{128}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_256)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{256}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{256}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_512)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{512}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{512}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_1024)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{1024}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{1024}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(reset_before_run_completes)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
// Sends a ping just as a means of waiting until we are connected.
request req;
req.push("HELLO", 3);
req.push("PING");
conn->async_exec(req, adapt(), [conn](auto ec, auto){
conn.async_exec(req, adapt(), [&](auto ec, auto){
BOOST_TEST(!ec);
conn->reset_stream();
conn.reset_stream();
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
conn.async_run([&](auto ec){
BOOST_CHECK_EQUAL(ec, net::error::operation_aborted);
});
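reset_stream() discards the current socket so the same connection object can be connected and run again; the reconnect tests earlier in this change do exactly that in a loop. A minimal synchronous sketch of one such cycle (the QUIT makes each run complete so the loop can proceed):
// Sketch of a reconnect cycle, mirroring test_reconnect above.
net::io_context ioc;
connection conn{ioc};
request req;
req.push("QUIT");
for (int i = 0; i < 5; ++i) {
   net::connect(conn.next_layer(), resolve());
   conn.async_exec(req, adapt(), [](auto ec, auto) { BOOST_TEST(!ec); });
   conn.async_run([](auto ec) { /* completes once the server closes the socket */ });
   ioc.run();
   ioc.restart();        // allow the io_context to run again in the next iteration
   conn.reset_stream();  // drop the closed socket before reconnecting
}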

View File

@@ -14,12 +14,18 @@
#include <aedis.hpp>
#include <aedis/ssl/connection.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::adapt;
using connection = aedis::ssl::connection<net::ssl::stream<net::ip::tcp::socket>>;
using endpoint = aedis::endpoint;
using aedis::resp3::request;
using connection = aedis::ssl::connection;
struct endpoint {
std::string host;
std::string port;
};
bool verify_certificate(bool, net::ssl::verify_context&)
{
@@ -27,54 +33,40 @@ bool verify_certificate(bool, net::ssl::verify_context&)
return true;
}
boost::system::error_code hello_fail(endpoint ep)
BOOST_AUTO_TEST_CASE(ping)
{
net::io_context ioc;
std::string const in = "Kabuf";
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3, "AUTH", "aedis", "aedis");
req.push("PING", in);
req.push("QUIT");
std::string out;
auto resp = std::tie(std::ignore, out, std::ignore);
auto const endpoints = resolve("db.occase.de", "6380");
net::io_context ioc;
net::ssl::context ctx{net::ssl::context::sslv23};
auto conn = std::make_shared<connection>(ioc.get_executor(), ctx);
conn->next_layer().set_verify_mode(net::ssl::verify_peer);
conn->next_layer().set_verify_callback(verify_certificate);
boost::system::error_code ret;
conn->async_run(ep, {}, [&](auto ec) {
ret = ec;
connection conn{ioc, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
net::connect(conn.lowest_layer(), endpoints);
conn.next_layer().handshake(net::ssl::stream_base::client);
conn.async_exec(req, adapt(resp), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn.async_run([](auto ec) {
BOOST_TEST(!ec);
});
ioc.run();
return ret;
}
BOOST_AUTO_TEST_CASE(test_tls_handshake_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "google.com";
ep.port = "80";
auto const ec = hello_fail(ep);
BOOST_TEST(!!ec);
std::cout << "-----> " << ec.message() << std::endl;
}
BOOST_AUTO_TEST_CASE(test_tls_handshake_fail2)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "6379";
auto const ec = hello_fail(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::ssl_handshake_timeout);
}
BOOST_AUTO_TEST_CASE(test_hello_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "google.com";
ep.port = "443";
auto const ec = hello_fail(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::invalid_data_type);
BOOST_CHECK_EQUAL(in, out);
}

View File

@@ -119,8 +119,6 @@ void test_async(net::any_io_executor ex, expect<Result> e)
std::optional<int> op_int_ok = 11;
std::optional<bool> op_bool_ok = true;
std::string const streamed_string_wire = "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
std::string const streamed_string_wire_error = "$?\r\n;b\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
// TODO: Test a streamed string that is not finished with a string of
// size 0 but other command comes in.
std::vector<node_type> streamed_string_e1
@@ -132,40 +130,68 @@ std::vector<node_type> streamed_string_e1
std::vector<node_type> streamed_string_e2 { {resp3::type::streamed_string_part, 1UL, 1UL, {}} };
#define S01 "#11\r\n"
#define S02 "#f\r\n"
#define S03 "#t\r\n"
#define S04 "$?\r\n;0\r\n"
#define S05 "%11\r\n"
#define S06 "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n"
#define S07 "$?\r\n;b\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n"
#define S08 "*1\r\n:11\r\n"
#define S09 ":-3\r\n"
#define S10 ":11\r\n"
#define S11 ":3\r\n"
#define S12 "_\r\n"
#define S13 ">4\r\n+pubsub\r\n+message\r\n+some-channel\r\n+some message\r\n"
#define S14 ">0\r\n"
#define S15 "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n"
#define S16 "%4\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n"
#define S17 "*1\r\n" S16
#define S18 "|1\r\n+key-popularity\r\n%2\r\n$1\r\na\r\n,0.1923\r\n$1\r\nb\r\n,0.0012\r\n"
#define S19 "|0\r\n"
#define S20 "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n"
#define S21 "*1\r\n*1\r\n$2\r\nab\r\n"
#define S22 "*1\r\n*1\r\n*1\r\n*1\r\n*1\r\n*1\r\na\r\n"
#define S23 "*0\r\n"
#define S24 "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n"
#define S25 "~6\r\n+orange\r\n+apple\r\n+one\r\n+two\r\n+three\r\n+orange\r\n"
#define S26 "*1\r\n" S25
#define S27 "~0\r\n"
#define S28 "-Error\r\n"
#define S29 "-\r\n"
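// Reading aid (not in the original file): the RESP3 type prefixes behind the wire
// strings above, as exercised by the expectations below.
//
//   S10  ":11\r\n"         -> number             (':' introduces an integer)
//   S03  "#t\r\n"          -> boolean            ('#' followed by 't' or 'f')
//   S12  "_\r\n"           -> null
//   S28  "-Error\r\n"      -> simple error       ('-' followed by the message)
//   S16  "%4\r\n..."       -> map of 4 pairs     ('%' + pair count, entries as blob
//                                                 strings "$<len>\r\n<data>\r\n")
//   S25  "~6\r\n..."       -> set of 6 elements  ('~' + element count)
//   S13  ">4\r\n..."       -> push of 4 elements ('>' + count, delivered out of band)
//   S06  "$?\r\n;4\r\n..." -> streamed string    ('$?' then ';<len>' chunks, ended by ";0")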
#define NUMBER_TEST_CONDITIONS(test) \
test(ex, make_expected("#11\r\n", std::optional<bool>{}, "bool.error", aedis::error::unexpected_bool_value)); \
test(ex, make_expected("#f\r\n", bool{false}, "bool.bool (true)")); \
test(ex, make_expected("#f\r\n", node_type{resp3::type::boolean, 1UL, 0UL, {"f"}}, "bool.node (false)")); \
test(ex, make_expected("#t\r\n", bool{true}, "bool.bool (true)")); \
test(ex, make_expected("#t\r\n", node_type{resp3::type::boolean, 1UL, 0UL, {"t"}}, "bool.node (true)")); \
test(ex, make_expected("#t\r\n", op_bool_ok, "optional.int")); \
test(ex, make_expected("#t\r\n", std::map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected("#t\r\n", std::set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected("#t\r\n", std::unordered_map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected("#t\r\n", std::unordered_set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected("$?\r\n;0\r\n", streamed_string_e2, "streamed_string.node.empty")); \
test(ex, make_expected("%11\r\n", std::optional<int>{}, "number.optional.int.error", aedis::error::expects_resp3_simple_type));; \
test(ex, make_expected("*1\r\n:11\r\n", std::tuple<int>{11}, "number.tuple.int")); \
test(ex, make_expected(":-3\r\n", node_type{resp3::type::number, 1UL, 0UL, {"-3"}}, "number.node (negative)")); \
test(ex, make_expected(":11\r\n", int{11}, "number.int")); \
test(ex, make_expected(":11\r\n", op_int_ok, "number.optional.int")); \
test(ex, make_expected(":11\r\n", std::list<std::string>{}, "number.optional.int", aedis::error::expects_resp3_aggregate)); \
test(ex, make_expected(":11\r\n", std::map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(":11\r\n", std::set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(":11\r\n", std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(":11\r\n", std::unordered_set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(":3\r\n", node_type{resp3::type::number, 1UL, 0UL, {"3"}}, "number.node (positive)")); \
test(ex, make_expected("_\r\n", int{0}, "number.int.error.null", aedis::error::resp3_null)); \
test(ex, make_expected(streamed_string_wire, std::string{"Hello word"}, "streamed_string.string")); \
test(ex, make_expected(streamed_string_wire, int{}, "streamed_string.string", aedis::error::not_a_number)); \
test(ex, make_expected(streamed_string_wire, streamed_string_e1, "streamed_string.node")); \
test(ex, make_expected(streamed_string_wire_error, std::string{}, "streamed_string.error", aedis::error::not_a_number)); \
test(ex, make_expected(S01, std::optional<bool>{}, "bool.error", aedis::error::unexpected_bool_value)); \
test(ex, make_expected(S02, bool{false}, "bool.bool (true)")); \
test(ex, make_expected(S02, node_type{resp3::type::boolean, 1UL, 0UL, {"f"}}, "bool.node (false)")); \
test(ex, make_expected(S03, bool{true}, "bool.bool (true)")); \
test(ex, make_expected(S03, node_type{resp3::type::boolean, 1UL, 0UL, {"t"}}, "bool.node (true)")); \
test(ex, make_expected(S03, op_bool_ok, "optional.int")); \
test(ex, make_expected(S03, std::map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S03, std::set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S03, std::unordered_map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S03, std::unordered_set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S04, streamed_string_e2, "streamed_string.node.empty")); \
test(ex, make_expected(S05, std::optional<int>{}, "number.optional.int.error", aedis::error::expects_resp3_simple_type));; \
test(ex, make_expected(S06, int{}, "streamed_string.string", aedis::error::not_a_number)); \
test(ex, make_expected(S06, std::string{"Hello word"}, "streamed_string.string")); \
test(ex, make_expected(S06, streamed_string_e1, "streamed_string.node")); \
test(ex, make_expected(S07, std::string{}, "streamed_string.error", aedis::error::not_a_number)); \
test(ex, make_expected(S08, std::tuple<int>{11}, "number.tuple.int")); \
test(ex, make_expected(S09, node_type{resp3::type::number, 1UL, 0UL, {"-3"}}, "number.node (negative)")); \
test(ex, make_expected(S10, int{11}, "number.int")); \
test(ex, make_expected(S10, op_int_ok, "number.optional.int")); \
test(ex, make_expected(S10, std::list<std::string>{}, "number.optional.int", aedis::error::expects_resp3_aggregate)); \
test(ex, make_expected(S10, std::map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S10, std::set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S10, std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S10, std::unordered_set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S11, node_type{resp3::type::number, 1UL, 0UL, {"3"}}, "number.node (positive)")); \
test(ex, make_expected(S12, int{0}, "number.int.error.null", aedis::error::resp3_null)); \
BOOST_AUTO_TEST_CASE(test_push)
{
net::io_context ioc;
std::string const wire = ">4\r\n+pubsub\r\n+message\r\n+some-channel\r\n+some message\r\n";
std::vector<node_type> e1a
{ {resp3::type::push, 4UL, 0UL, {}}
@@ -177,8 +203,8 @@ BOOST_AUTO_TEST_CASE(test_push)
std::vector<node_type> e1b { {resp3::type::push, 0UL, 0UL, {}} };
auto const in01 = expect<std::vector<node_type>>{wire, e1a, "push.node"};
auto const in02 = expect<std::vector<node_type>>{">0\r\n", e1b, "push.node.empty"};
auto const in01 = expect<std::vector<node_type>>{S13, e1a, "push.node"};
auto const in02 = expect<std::vector<node_type>>{S14, e1b, "push.node.empty"};
auto ex = ioc.get_executor();
@@ -202,9 +228,6 @@ BOOST_AUTO_TEST_CASE(test_map)
using op_vec_type = std::optional<std::vector<std::string>>;
using tuple_type = std::tuple<std::string, std::string, std::string, std::string, std::string, std::string, std::string, std::string>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "%4\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n";
std::vector<node_type> expected_1a
{ {resp3::type::map, 4UL, 0UL, {}}
, {resp3::type::blob_string, 1UL, 1UL, {"key1"}}
@@ -263,20 +286,20 @@ BOOST_AUTO_TEST_CASE(test_map)
, std::string{"key3"}, std::string{"value3"}
};
auto const in00 = expect<std::vector<node_type>>{wire, expected_1a, "map.node"};
auto const in00 = expect<std::vector<node_type>>{S16, expected_1a, "map.node"};
auto const in01 = expect<map_type>{"%0\r\n", map_type{}, "map.map.empty"};
auto const in02 = expect<map_type>{wire, expected_1b, "map.map"};
auto const in03 = expect<mmap_type>{wire, e1k, "map.multimap"};
auto const in04 = expect<umap_type>{wire, e1g, "map.unordered_map"};
auto const in05 = expect<mumap_type>{wire, e1l, "map.unordered_multimap"};
auto const in06 = expect<vec_type>{wire, expected_1c, "map.vector"};
auto const in07 = expect<op_map_type>{wire, expected_1d, "map.optional.map"};
auto const in08 = expect<op_vec_type>{wire, expected_1e, "map.optional.vector"};
auto const in09 = expect<std::tuple<op_map_type>>{"*1\r\n" + wire, std::tuple<op_map_type>{expected_1d}, "map.transaction.optional.map"};
auto const in02 = expect<map_type>{S16, expected_1b, "map.map"};
auto const in03 = expect<mmap_type>{S16, e1k, "map.multimap"};
auto const in04 = expect<umap_type>{S16, e1g, "map.unordered_map"};
auto const in05 = expect<mumap_type>{S16, e1l, "map.unordered_multimap"};
auto const in06 = expect<vec_type>{S16, expected_1c, "map.vector"};
auto const in07 = expect<op_map_type>{S16, expected_1d, "map.optional.map"};
auto const in08 = expect<op_vec_type>{S16, expected_1e, "map.optional.vector"};
auto const in09 = expect<std::tuple<op_map_type>>{S17, std::tuple<op_map_type>{expected_1d}, "map.transaction.optional.map"};
auto const in10 = expect<int>{"%11\r\n", int{}, "map.invalid.int", aedis::error::expects_resp3_simple_type};
auto const in11 = expect<tuple_type>{wire, e1f, "map.tuple"};
auto const in12 = expect<map_type>{wire2, map_type{}, "map.error", aedis::error::expects_resp3_map};
auto const in13 = expect<map_type>{"_\r\n", map_type{}, "map.null", aedis::error::resp3_null};
auto const in11 = expect<tuple_type>{S16, e1f, "map.tuple"};
auto const in12 = expect<map_type>{S15, map_type{}, "map.error", aedis::error::expects_resp3_map};
auto const in13 = expect<map_type>{S12, map_type{}, "map.null", aedis::error::resp3_null};
auto ex = ioc.get_executor();
@@ -314,8 +337,6 @@ BOOST_AUTO_TEST_CASE(test_map)
void test_attribute(net::io_context& ioc)
{
char const* wire = "|1\r\n+key-popularity\r\n%2\r\n$1\r\na\r\n,0.1923\r\n$1\r\nb\r\n,0.0012\r\n";
std::vector<node_type> e1a
{ {resp3::type::attribute, 1UL, 0UL, {}}
, {resp3::type::simple_string, 1UL, 1UL, "key-popularity"}
@@ -328,8 +349,8 @@ void test_attribute(net::io_context& ioc)
std::vector<node_type> e1b;
auto const in01 = expect<std::vector<node_type>>{wire, e1a, "attribute.node"};
auto const in02 = expect<std::vector<node_type>>{"|0\r\n", e1b, "attribute.node.empty"};
auto const in01 = expect<std::vector<node_type>>{S18, e1a, "attribute.node"};
auto const in02 = expect<std::vector<node_type>>{S19, e1b, "attribute.node.empty"};
auto ex = ioc.get_executor();
@@ -347,9 +368,6 @@ BOOST_AUTO_TEST_CASE(test_array)
using array_type2 = std::array<int, 1>;
net::io_context ioc;
char const* wire = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
char const* wire_nested = "*1\r\n*1\r\n$2\r\nab\r\n";
char const* wire_nested2 = "*1\r\n*1\r\n*1\r\n*1\r\n*1\r\n*1\r\na\r\n";
std::vector<node_type> e1a
{ {resp3::type::array, 3UL, 0UL, {}}
@@ -366,22 +384,22 @@ BOOST_AUTO_TEST_CASE(test_array)
std::list<int> const e1g{11, 22, 3};
std::deque<int> const e1h{11, 22, 3};
auto const in01 = expect<std::vector<node_type>>{wire, e1a, "array.node"};
auto const in02 = expect<std::vector<int>>{wire, e1b, "array.int"};
auto const in03 = expect<std::vector<node_type>>{"*0\r\n", e1e, "array.node.empty"};
auto const in04 = expect<std::vector<std::string>>{"*0\r\n", e1d, "array.string.empty"};
auto const in05 = expect<std::vector<std::string>>{wire, e1c, "array.string"};
auto const in06 = expect<array_type>{wire, e1f, "array.array"};
auto const in07 = expect<std::list<int>>{wire, e1g, "array.list"};
auto const in08 = expect<std::deque<int>>{wire, e1h, "array.deque"};
auto const in09 = expect<std::vector<int>>{"_\r\n", std::vector<int>{}, "array.vector", aedis::error::resp3_null};
auto const in10 = expect<std::list<int>>{"_\r\n", std::list<int>{}, "array.list", aedis::error::resp3_null};
auto const in11 = expect<array_type>{"_\r\n", array_type{}, "array.null", aedis::error::resp3_null};
auto const in12 = expect<tuple_type>{wire, tuple_type{}, "array.list", aedis::error::incompatible_size};
auto const in13 = expect<array_type2>{wire_nested, array_type2{}, "array.nested", aedis::error::nested_aggregate_not_supported};
auto const in14 = expect<array_type2>{wire, array_type2{}, "array.null", aedis::error::incompatible_size};
auto const in15 = expect<array_type2>{":3\r\n", array_type2{}, "array.array", aedis::error::expects_resp3_aggregate};
auto const in16 = expect<vec_node_type>{wire_nested2, vec_node_type{}, "array.depth.exceeds", aedis::error::exceeeds_max_nested_depth};
auto const in01 = expect<std::vector<node_type>>{S20, e1a, "array.node"};
auto const in02 = expect<std::vector<int>>{S20, e1b, "array.int"};
auto const in03 = expect<std::vector<node_type>>{S23, e1e, "array.node.empty"};
auto const in04 = expect<std::vector<std::string>>{S23, e1d, "array.string.empty"};
auto const in05 = expect<std::vector<std::string>>{S20, e1c, "array.string"};
auto const in06 = expect<array_type>{S20, e1f, "array.array"};
auto const in07 = expect<std::list<int>>{S20, e1g, "array.list"};
auto const in08 = expect<std::deque<int>>{S20, e1h, "array.deque"};
auto const in09 = expect<std::vector<int>>{S12, std::vector<int>{}, "array.vector", aedis::error::resp3_null};
auto const in10 = expect<std::list<int>>{S12, std::list<int>{}, "array.list", aedis::error::resp3_null};
auto const in11 = expect<array_type>{S12, array_type{}, "array.null", aedis::error::resp3_null};
auto const in12 = expect<tuple_type>{S20, tuple_type{}, "array.list", aedis::error::incompatible_size};
auto const in13 = expect<array_type2>{S21, array_type2{}, "array.nested", aedis::error::nested_aggregate_not_supported};
auto const in14 = expect<array_type2>{S20, array_type2{}, "array.null", aedis::error::incompatible_size};
auto const in15 = expect<array_type2>{S11, array_type2{}, "array.array", aedis::error::expects_resp3_aggregate};
auto const in16 = expect<vec_node_type>{S22, vec_node_type{}, "array.depth.exceeds", aedis::error::exceeeds_max_nested_depth};
auto ex = ioc.get_executor();
@@ -432,9 +450,6 @@ BOOST_AUTO_TEST_CASE(test_set)
using vec_type = std::vector<std::string>;
using op_vec_type = std::optional<std::vector<std::string>>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "~6\r\n+orange\r\n+apple\r\n+one\r\n+two\r\n+three\r\n+orange\r\n";
std::vector<node_type> const expected1a
{ {resp3::type::set, 6UL, 0UL, {}}
, {resp3::type::simple_string, 1UL, 1UL, {"orange"}}
@@ -452,16 +467,16 @@ BOOST_AUTO_TEST_CASE(test_set)
op_vec_type expected_1e;
expected_1e = e1d;
auto const in00 = expect<std::vector<node_type>>{wire, expected1a, "set.node"};
auto const in01 = expect<std::vector<node_type>>{"~0\r\n", std::vector<node_type>{ {resp3::type::set, 0UL, 0UL, {}} }, "set.node (empty)"};
auto const in02 = expect<set_type>{wire, set_type{"apple", "one", "orange", "three", "two"}, "set.set"};
auto const in03 = expect<mset_type>{wire, e1f, "set.multiset"};
auto const in04 = expect<vec_type>{wire, e1d, "set.vector "};
auto const in05 = expect<op_vec_type>{wire, expected_1e, "set.vector "};
auto const in06 = expect<uset_type>{wire, e1c, "set.unordered_set"};
auto const in07 = expect<muset_type>{wire, e1g, "set.unordered_multiset"};
auto const in08 = expect<std::tuple<uset_type>>{"*1\r\n" + wire, std::tuple<uset_type>{e1c}, "set.tuple"};
auto const in09 = expect<set_type>{wire2, set_type{}, "set.error", aedis::error::expects_resp3_set};
auto const in00 = expect<std::vector<node_type>>{S25, expected1a, "set.node"};
auto const in01 = expect<std::vector<node_type>>{S27, std::vector<node_type>{ {resp3::type::set, 0UL, 0UL, {}} }, "set.node (empty)"};
auto const in02 = expect<set_type>{S25, set_type{"apple", "one", "orange", "three", "two"}, "set.set"};
auto const in03 = expect<mset_type>{S25, e1f, "set.multiset"};
auto const in04 = expect<vec_type>{S25, e1d, "set.vector "};
auto const in05 = expect<op_vec_type>{S25, expected_1e, "set.vector "};
auto const in06 = expect<uset_type>{S25, e1c, "set.unordered_set"};
auto const in07 = expect<muset_type>{S25, e1g, "set.unordered_multiset"};
auto const in08 = expect<std::tuple<uset_type>>{S26, std::tuple<uset_type>{e1c}, "set.tuple"};
auto const in09 = expect<set_type>{S24, set_type{}, "set.error", aedis::error::expects_resp3_set};
auto ex = ioc.get_executor();
@@ -492,9 +507,9 @@ BOOST_AUTO_TEST_CASE(test_set)
BOOST_AUTO_TEST_CASE(test_simple_error)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"-Error\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::error::resp3_simple_error};
auto const in02 = expect<node_type>{"-\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::error::resp3_simple_error};
auto const in03 = expect<aedis::ignore>{"-Error\r\n", aedis::ignore{}, "simple_error.not.ignore.error", aedis::error::resp3_simple_error};
auto const in01 = expect<node_type>{S28, node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::error::resp3_simple_error};
auto const in02 = expect<node_type>{S29, node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::error::resp3_simple_error};
auto const in03 = expect<aedis::ignore>{S28, aedis::ignore{}, "simple_error.not.ignore.error", aedis::error::resp3_simple_error};
auto ex = ioc.get_executor();
@@ -696,15 +711,15 @@ BOOST_AUTO_TEST_CASE(test_null)
using op_type_08 = std::optional<std::set<std::string>>;
using op_type_09 = std::optional<std::unordered_set<std::string>>;
auto const in01 = expect<op_type_01>{"_\r\n", op_type_01{}, "null.optional.bool"};
auto const in02 = expect<op_type_02>{"_\r\n", op_type_02{}, "null.optional.int"};
auto const in03 = expect<op_type_03>{"_\r\n", op_type_03{}, "null.optional.string"};
auto const in04 = expect<op_type_04>{"_\r\n", op_type_04{}, "null.optional.vector"};
auto const in05 = expect<op_type_05>{"_\r\n", op_type_05{}, "null.optional.list"};
auto const in06 = expect<op_type_06>{"_\r\n", op_type_06{}, "null.optional.map"};
auto const in07 = expect<op_type_07>{"_\r\n", op_type_07{}, "null.optional.unordered_map"};
auto const in08 = expect<op_type_08>{"_\r\n", op_type_08{}, "null.optional.set"};
auto const in09 = expect<op_type_09>{"_\r\n", op_type_09{}, "null.optional.unordered_set"};
auto const in01 = expect<op_type_01>{S12, op_type_01{}, "null.optional.bool"};
auto const in02 = expect<op_type_02>{S12, op_type_02{}, "null.optional.int"};
auto const in03 = expect<op_type_03>{S12, op_type_03{}, "null.optional.string"};
auto const in04 = expect<op_type_04>{S12, op_type_04{}, "null.optional.vector"};
auto const in05 = expect<op_type_05>{S12, op_type_05{}, "null.optional.list"};
auto const in06 = expect<op_type_06>{S12, op_type_06{}, "null.optional.map"};
auto const in07 = expect<op_type_07>{S12, op_type_07{}, "null.optional.unordered_map"};
auto const in08 = expect<op_type_08>{S12, op_type_08{}, "null.optional.set"};
auto const in09 = expect<op_type_09>{S12, op_type_09{}, "null.optional.unordered_set"};
auto ex = ioc.get_executor();
@@ -803,11 +818,6 @@ void check_error(char const* name, aedis::error ev)
BOOST_AUTO_TEST_CASE(error)
{
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::connect_timeout);
check_error("aedis", aedis::error::idle_timeout);
check_error("aedis", aedis::error::exec_timeout);
check_error("aedis", aedis::error::invalid_data_type);
check_error("aedis", aedis::error::not_a_number);
check_error("aedis", aedis::error::exceeeds_max_nested_depth);
@@ -823,9 +833,7 @@ BOOST_AUTO_TEST_CASE(error)
check_error("aedis", aedis::error::incompatible_size);
check_error("aedis", aedis::error::not_a_double);
check_error("aedis", aedis::error::resp3_null);
check_error("aedis", aedis::error::unexpected_server_role);
check_error("aedis", aedis::error::not_connected);
check_error("aedis", aedis::error::resp3_handshake_error);
}
std::string get_type_as_str(aedis::resp3::type t)