mirror of https://github.com/boostorg/redis.git synced 2026-01-24 18:22:09 +00:00

Compare commits


28 Commits

Author SHA1 Message Date
Marcelo Zimbres
562075230f v1.2.0 2022-11-05 19:06:56 +01:00
Marcelo Zimbres
5dc677c6d8 Changes:
* Adds allocator support for the internal connection queue.
* Support for std::tie in aedis::adapt.
* Docs.
2022-11-05 12:07:22 +01:00
Marcelo Zimbres
395a167d48 Improvements in the coverage. 2022-11-01 14:13:33 +01:00
Marcelo Zimbres
f93f3cab58 Merge branch 'klemens-morgenstern-allocator-nonsense' 2022-10-31 22:20:54 +01:00
Marcelo Zimbres
df68fb0235 Changes:
- Ports from boost::container::pmr to std::pmr.
- Fixes clang-tidy issues.
- Adds resp3::request unit-tests.
2022-10-31 22:17:58 +01:00
Klemens Morgenstern
15e6883bc1 Added boost.container.pmr to request. 2022-10-31 07:37:03 +01:00
Marcelo Zimbres
3816d1d358 Documentation and stress test. 2022-10-30 19:48:04 +01:00
Marcelo
bb15c70723 Merge pull request #36 from klemens-morgenstern/sfinae
Added sfinae to push_range.
2022-10-30 19:47:16 +01:00
Klemens Morgenstern
297b7f15eb Added sfinae to push_range. 2022-10-30 23:11:35 +08:00
Marcelo Zimbres
ec6e99d99a Docs and example improvements. 2022-10-29 22:49:53 +02:00
Marcelo Zimbres
8dc6db069b Docs and examples. 2022-10-27 23:09:59 +02:00
Marcelo Zimbres
bac27c1770 Fixes cancellation. 2022-10-25 20:58:16 +02:00
Marcelo Zimbres
feaaedc6c0 Improvements in the cancellation support. 2022-10-23 22:32:58 +02:00
Marcelo Zimbres
000ebddf44 Fixes bug that caused unwritten request to be closed if write fails. 2022-10-22 22:34:10 +02:00
Marcelo Zimbres
268ea2c10f Improvements in the writer cancellation. 2022-10-22 21:23:33 +02:00
Marcelo Zimbres
d8b67f6e23 Improves async_exec cancellation support. 2022-10-22 20:43:37 +02:00
Marcelo Zimbres
ce1fa6a683 Implements per-op cancelation of async_exec. 2022-10-18 20:52:18 +02:00
Marcelo Zimbres
b8ede6ccb7 Fixes bug in conn.cancel(exec). 2022-10-16 22:44:44 +02:00
Marcelo Zimbres
6dce1a9226 Marks function inline. 2022-10-15 13:08:50 +02:00
Marcelo Zimbres
8566745d83 Changes the behaviour of adapt() with vector<node>. 2022-10-13 21:45:01 +02:00
Marcelo Zimbres
0b4906fcba Test improvements. 2022-10-13 20:51:35 +02:00
Marcelo Zimbres
2c8bb92071 Improvements in the docs. 2022-10-10 23:14:54 +02:00
Marcelo Zimbres
770e224917 Changes:
- CI fix.
- Renames request::fail_* to request::cancel_*.
- Adds a second parameter to async_run.
- Adds request::retry flag.
2022-10-09 22:45:42 +02:00
Marcelo Zimbres
4fb2b20954 Build fixes. 2022-10-08 22:24:20 +02:00
Marcelo Zimbres
c01a57b6cb Adds cancelation test. 2022-10-08 22:07:51 +02:00
Marcelo Zimbres
ea0b333c4d Removes the second async_run overload. 2022-10-08 17:07:28 +02:00
Marcelo Zimbres
ba82c6cd84 Progresses removing the second async_run overload. 2022-10-07 22:43:32 +02:00
Marcelo Zimbres
4c298ddc6b Adds doxygen output to the preset. 2022-10-03 14:32:36 +02:00
52 changed files with 2364 additions and 1440 deletions

View File

@@ -9,6 +9,7 @@ Checks: "*,\
-google-readability-braces-around-statements,\
-hicpp-braces-around-statements,\
-hicpp-named-parameter,\
-hicpp-avoid-goto,\
-google-build-using-namespace,\
-altera-*,\
-fuchsia-*,\
@@ -21,6 +22,12 @@ Checks: "*,\
-bugprone-use-after-move,\
-hicpp-invalid-access-moved,\
-misc-no-recursion,\
-cppcoreguidelines-pro-bounds-pointer-arithmetic,\
-cppcoreguidelines-avoid-magic-numbers,\
-cppcoreguidelines-pro-bounds-constant-array-index,\
-cppcoreguidelines-interfaces-global-init,\
-cppcoreguidelines-macro-usage,\
-cppcoreguidelines-avoid-goto,\
-cppcoreguidelines-non-private-member-variables-in-classes"
WarningsAsErrors: ''
CheckOptions:

View File

@@ -10,7 +10,7 @@ cmake_minimum_required(VERSION 3.14)
project(
Aedis
VERSION 1.1.0
VERSION 1.2.0
DESCRIPTION "A redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
@@ -53,45 +53,57 @@ include_directories(include)
# Executables
#=======================================================================
#add_executable(intro_sync examples/intro_sync.cpp) // Uncomment after update to Boost 1.80
add_executable(chat_room examples/chat_room.cpp)
add_executable(containers examples/containers.cpp)
add_executable(echo_server examples/echo_server.cpp)
add_executable(echo_server_client benchmarks/cpp/asio/echo_server_client.cpp)
add_executable(echo_server_direct benchmarks/cpp/asio/echo_server_direct.cpp)
add_executable(intro examples/intro.cpp)
add_executable(intro_tls examples/intro_tls.cpp)
#add_executable(intro_sync examples/intro_sync.cpp) // Uncomment after update to Boost 1.80
add_executable(low_level_sync examples/low_level_sync.cpp)
add_executable(serialization examples/serialization.cpp)
add_executable(subscriber examples/subscriber.cpp)
add_executable(subscriber_sentinel examples/subscriber_sentinel.cpp)
add_executable(test_conn_connect tests/conn_connect.cpp)
add_executable(test_conn_exec tests/conn_exec.cpp)
add_executable(test_conn_push tests/conn_push.cpp)
add_executable(test_conn_quit tests/conn_quit.cpp)
add_executable(test_conn_quit_coalesce tests/conn_quit_coalesce.cpp)
add_executable(test_conn_reconnect tests/conn_reconnect.cpp)
add_executable(test_conn_tls tests/conn_tls.cpp)
add_executable(test_low_level tests/low_level.cpp)
add_executable(low_level_sync examples/low_level_sync.cpp)
add_executable(test_connection_other tests/connection_other.cpp)
add_executable(test_connection_connect tests/connection_connect.cpp)
add_executable(test_connection_push tests/connection_push.cpp)
add_executable(test_connection_quit tests/connection_quit.cpp)
add_executable(test_connection_quit_coalesce tests/connection_quit_coalesce.cpp)
add_executable(test_connection_reconnect tests/connection_reconnect.cpp)
add_executable(test_connection_tls tests/connection_tls.cpp)
add_executable(test_conn_run_cancel tests/conn_run_cancel.cpp)
add_executable(test_conn_exec_cancel tests/conn_exec_cancel.cpp)
add_executable(test_conn_echo_stress tests/conn_echo_stress.cpp)
add_executable(test_request tests/request.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_compile_features(containers PUBLIC cxx_std_20)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_compile_features(echo_server_client PUBLIC cxx_std_20)
target_compile_features(echo_server_direct PUBLIC cxx_std_20)
target_compile_features(intro PUBLIC cxx_std_17)
target_compile_features(intro_tls PUBLIC cxx_std_17)
target_compile_features(serialization PUBLIC cxx_std_17)
target_compile_features(containers PUBLIC cxx_std_17)
target_compile_features(test_low_level PUBLIC cxx_std_17)
target_compile_features(low_level_sync PUBLIC cxx_std_17)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_compile_features(serialization PUBLIC cxx_std_17)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_compile_features(subscriber_sentinel PUBLIC cxx_std_20)
target_compile_features(test_connection_other PUBLIC cxx_std_20)
target_compile_features(test_connection_push PUBLIC cxx_std_20)
target_compile_features(test_connection_connect PUBLIC cxx_std_17)
target_compile_features(test_connection_quit PUBLIC cxx_std_17)
target_compile_features(test_connection_quit_coalesce PUBLIC cxx_std_17)
target_compile_features(test_connection_reconnect PUBLIC cxx_std_20)
target_compile_features(test_connection_tls PUBLIC cxx_std_17)
target_compile_features(test_conn_connect PUBLIC cxx_std_17)
target_compile_features(test_conn_exec PUBLIC cxx_std_20)
target_compile_features(test_conn_push PUBLIC cxx_std_20)
target_compile_features(test_conn_quit PUBLIC cxx_std_17)
target_compile_features(test_conn_quit_coalesce PUBLIC cxx_std_17)
target_compile_features(test_conn_reconnect PUBLIC cxx_std_20)
target_compile_features(test_conn_tls PUBLIC cxx_std_17)
target_compile_features(test_low_level PUBLIC cxx_std_17)
target_compile_features(test_conn_run_cancel PUBLIC cxx_std_20)
target_compile_features(test_conn_exec_cancel PUBLIC cxx_std_20)
target_compile_features(test_conn_echo_stress PUBLIC cxx_std_20)
target_compile_features(test_request PUBLIC cxx_std_17)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(test_connection_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(test_conn_tls OpenSSL::Crypto OpenSSL::SSL)
# Tests
#=======================================================================
@@ -103,13 +115,17 @@ add_test(intro_tls intro_tls)
add_test(serialization serialization)
add_test(low_level_sync low_level_sync)
add_test(test_low_level test_low_level)
add_test(test_connection_other test_connection_other)
add_test(test_connection_connect test_connection_connect)
add_test(test_connection_push test_connection_push)
add_test(test_connection_quit test_connection_quit)
add_test(test_connection_quit_coalesce test_connection_quit_coalesce)
add_test(test_connection_reconnect test_connection_reconnect)
add_test(test_connection_tls test_connection_tls)
add_test(test_conn_exec test_conn_exec)
add_test(test_conn_connect test_conn_connect)
add_test(test_conn_push test_conn_push)
add_test(test_conn_quit test_conn_quit)
add_test(test_conn_quit_coalesce test_conn_quit_coalesce)
add_test(test_conn_reconnect test_conn_reconnect)
add_test(test_conn_tls test_conn_tls)
add_test(test_conn_run_cancel test_conn_run_cancel)
add_test(test_conn_exec_cancel test_conn_exec_cancel)
add_test(test_conn_echo_stress test_conn_echo_stress)
add_test(test_request test_request)
# Install
#=======================================================================
@@ -138,7 +154,6 @@ install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/ DESTINATION include)
#=======================================================================
set(DOXYGEN_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/doc")
configure_file(doc/Doxyfile.in doc/Doxyfile @ONLY)
add_custom_target(

View File

@@ -51,7 +51,8 @@
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/dev"
"PROJECT_BINARY_DIR": "${sourceDir}/build/dev",
"DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/dev/doc/"
}
},
{

README.md
View File

@@ -13,222 +13,242 @@ Some of its distinctive features are
* Support for the latest version of the Redis communication protocol [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
* Support for STL containers, TLS and Redis sentinel.
* Serialization and deserialization of your own data types.
* Health checks, back pressure and low latency.
* Health checks, back pressure, cancellation and low latency.
In addition to that, Aedis hides most of the low-level Asio code away
from the user, which in the majority of the use cases will interact
with only three library entities
from the user, who, in the majority of cases, will interact with
only three library entities
* `aedis::connection`: A healthy long-lasting connection to the Redis server.
* `aedis::connection`: A connection to the Redis server.
* `aedis::resp3::request`: A container of Redis commands.
* `aedis::adapt()`: Adapts user data structures like STL containers to
receive Redis responses.
* `aedis::adapt()`: A function that adapts data structures to receive Redis responses.
Let us see how this works in more detail.
The example below shows how to read a Redis hash into an
`std::map` using a coroutine, a short-lived connection and
cancellation
```cpp
net::awaitable<std::map<std::string, std::string>> retrieve_hashes(endpoint ep)
{
connection conn{co_await net::this_coro::executor};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HGETALL", "hset-key");
req.push("QUIT");
std::tuple<std::map<std::string, std::string>, aedis::ignore> resp;
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
co_return std::move(std::get<0>(resp));
}
```
In the next section we will see more details about connections,
requests and responses.
<a name="connection"></a>
### Connection
The code below will establish a connection with a Redis
server where users can send commands (see intro.cpp)
The `aedis::connection` is a class that provides async-only
communication with a Redis server by means of three member
functions
* `aedis::connection::async_run`: Establishes a connection and
completes only when it is lost.
* `aedis::connection::async_exec`: Executes commands.
* `aedis::connection::async_receive`: Receives server-side pushes.
In general, these operations will be running concurrently in the user's
application, where, for example
1. **Connect**: One coroutine will call `async_run` in a loop
to reconnect whenever a connection is lost.
2. **Execute**: Multiple coroutines will call `async_exec` independently
and without coordination (e.g. queuing).
3. **Receive**: One coroutine will loop on `async_receive` to receive
server-side pushes (required only if the app expects server pushes).
Each of the operations above can be performed without regard to the
others, as they are independent of each other. Below we cover
these points in more detail.
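As a rough sketch (the coroutine names below are placeholders that mirror the subscriber and echo-server examples discussed later), the three operations are typically spawned side by side on the same connection
```cpp
// Sketch only: run_loop, sessions and push_receiver stand for
// coroutines like the ones shown in the sections below.
int main()
{
   net::io_context ioc;
   auto conn = std::make_shared<connection>(ioc);

   net::co_spawn(ioc, run_loop(conn), net::detached);      // 1. connect/reconnect loop
   net::co_spawn(ioc, sessions(conn), net::detached);      // 2. calls async_exec
   net::co_spawn(ioc, push_receiver(conn), net::detached); // 3. calls async_receive

   ioc.run();
}
```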
#### Connect
In general, applications will connect to a Redis server and hang
around for as long as possible, until the connection is lost for some
reason. When that happens, simple setups will want to wait for a
short period of time and try to reconnect. The code snippet below
shows how this can be achieved with a coroutine (see echo_server.cpp)
```cpp
int main()
{
net::io_context ioc;
connection conn{ioc};
conn.async_run({"127.0.0.1", "6379"}, {}, [](auto ec) { ... });
// Pass conn to other operations ...
ioc.run();
}
```
Requests, on the other hand, can be sent at any time, whether before or
after a connection has been established. For example, the code below sends
the `PING` and `QUIT` commands, waits for the response and exits
```cpp
net::awaitable<void> ping(std::shared_ptr<connection> conn)
{
// Request
request req;
req.push("PING", "some message");
req.push("QUIT");
// Response
std::tuple<std::string, aedis::ignore> resp;
// Execution
co_await conn->async_exec(req, adapt(resp));
std::cout << "Response: " << std::get<0>(resp) << std::endl;
}
```
The general structure of how to send commands is evident from the
code above
* Create a `aedis::resp3::request` object and add commands.
* Declare responses as elements of a `std::tuple`.
* Execute the request.
Multiple calls to `connection::async_exec` are synchronized
automatically so that different operations (or coroutines) don't have
to be aware of each other. Server side pushes can be received on the
same connection object that is being used to execute commands, for
example (see subscriber.cpp)
```cpp
net::awaitable<void> receive_pushes(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive_push(adapt(resp));
// Process the push in resp.
resp.clear();
}
}
```
@note Users should make sure any pushes sent by the server are
consumed, otherwise the connection will eventually time out.
### Reconnect
The `aedis::connection` class also supports reconnection. In the
simplest scenario, after a connection is lost users will want to
reconnect to the same server; the loop below shows how to do it
```cpp
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
net::awaitable<void> reconnect(std::shared_ptr<connection> conn, endpoint ep)
{
net::steady_timer timer{co_await net::this_coro::executor};
for (;;) {
boost::system::error_code ec;
co_await conn->async_run({"127.0.0.1", "6379"}, {}, net::redirect_error(net::use_awaitable, ec));
for (boost::system::error_code ec;;) {
// Establishes a connection and hangs around until it is lost.
co_await conn->async_run(ep, {}, redir(ec));
conn->reset_stream();
// Waits some time before trying to re-establish the connection.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
```
More complex scenarios, like performing a failover with sentinel, can
be found in the examples. To aid proper failover, calls to
`connection::async_exec` won't automatically fail as a result of a
lost connection; rather, they will remain suspended until a new
connection is established, at which point all awaiting requests are sent
automatically. This behaviour can be changed per request by setting
`aedis::resp3::request::config::close_on_connection_lost` or by
calling `connection::cancel()` with `connection::operation::exec`,
which will cause all pending requests to be canceled.
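For instance, a minimal sketch of both options, assuming the `cancel_on_connection_lost` flag used by the examples in this README and the `cancel` overload named above
```cpp
// Sketch: opt a single request out of the suspend-and-resend behaviour
// described above (flag name taken from the examples in this README).
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");

// Alternatively, cancel all pending executions on the connection.
conn->cancel(connection::operation::exec);
```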
### Timeouts
Other common scenarios are, for example, performing a failover with
sentinels and re-subscribing to pubsub channels; both are covered in
the `subscriber_sentinel.cpp` example.
Aedis high-level API provides built-in support for most timeouts users
might need. For example, the `aedis::connection::async_run` member
function performs the following operations on behalf of the user
#### Execute
The basic idea of `async_exec` was already stated above: it executes
Redis commands. One of the most important things about it, though, is
that it can be called multiple times without coordination, for
example in an HTTP or WebSocket server where each session calls it
independently to communicate with Redis. The benefits of this feature
are manifold
* Reduces code complexity as users won't have to implement queues
every time, e.g. when HTTP sessions want to share a connection to Redis.
* A small number of connections improves the performance associated
with [pipelines](https://redis.io/topics/pipelining). A single
connection will indeed be enough in most cases.
The code below illustrates these concepts in a TCP session of the
`echo_server.cpp` example
```cpp
awaitable_type echo_server_session(tcp_socket socket, std::shared_ptr<connection> db)
{
request req;
std::tuple<std::string> response;
for (std::string buffer;;) {
// Reads a user message.
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
// Echos it through Redis.
req.push("PING", buffer);
co_await db->async_exec(req, adapt(response));
// Writes it back to the user.
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
// Cleanup
std::get<0>(response).clear();
req.clear();
buffer.erase(0, n);
}
}
```
Notice also how the session above provides back-pressure as the
coroutine won't read the next message from the socket until a cycle is
complete.
#### Receive
Point 3 above is only necessary for applications that expect server
pushes, for example when using Redis pubsub. The example below
was taken from subscriber.cpp
```cpp
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
```
In general, it is advisable for all apps to keep a coroutine calling
`async_receive`, as an unread push will cause the connection to stall
and eventually time out. Notice that the same connection that is being
used to send requests can also be used to receive server-side pushes.
#### Cancellation
Aedis supports both implicit and explicit cancellation of connection
operations. Explicit cancellation is supported by means of the
`aedis::connection::cancel` member function. Implicit cancellation,
like that which may happen when using the Asio awaitable operators `&&` and
`||`, is discussed in more detail below.
```cpp
co_await (conn.async_run(...) && conn.async_exec(...))
```
* Useful when implementing reconnection on applications that
use pubsub.
* Makes the channel re-subscribe operation simpler when the
connection is reestablished.
```cpp
co_await (conn.async_run(...) || conn.async_exec(...))
```
* Useful for short-lived connections that are meant to be closed after
a command has been executed.
```cpp
co_await (conn.async_exec(...) || time.async_wait(...))
```
* Provides a way to limit how long the execution of a single request
should last.
* Alternatively, for a connection-wide timeout set
`aedis::connection::timeouts::ping_interval` to a proper value. This
works because all requests use the same queue and is also more
efficient, since only one timer is used.
* The cancellation will be ignored if the request has already
been written to the socket.
```cpp
co_await (conn.async_run(...) || time.async_wait(...))
```
* Set a limit on how long the connection should live (see also
`aedis::connection::timeouts`)
```cpp
co_await (conn.async_exec(...) || conn.async_exec(...) || ... || conn.async_exec(...))
```
* This works but is considered an antipattern. Unless
the user has set `aedis::resp3::request::config::coalesce` to
`false` (which they shouldn't), the connection will automatically merge
the individual requests into a single payload anyway.
#### Timeouts
The Aedis high-level API provides built-in support for many of the
timeouts users usually need. For example, the
`aedis::connection::async_run` member function performs the following
operations on behalf of the user
* Resolves Redis address.
* Connects to the endpoint.
* TLS handshake (for TLS endpoints).
* RESP3 handshake, authentication and role check.
* Keeps sending PING commands to check for unresponsive servers.
* Connects to the resolved endpoint.
* TLS handshake (for TLS endpoints).
* RESP3 handshake, authentication and role check.
* Periodic health checks with the PING command.
* Keeps reading from the socket to handle server pushes and command responses.
* Keeps writing requests as it becomes possible, e.g. after the last response has arrived.
To control the timeout behaviour of the operations above users must
To control the timeout behaviour of these operations users must
create an `aedis::connection::timeouts` object and pass it as an
argument to the `aedis::connection::async_run` member function (or use
the suggested defaults).
argument to `aedis::connection::async_run` (if
the suggested defaults are not suitable).
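A minimal sketch, assuming the `ping_interval` member mentioned earlier and that the timeouts object is the second parameter of `async_run`, as in the other snippets of this README
```cpp
// Sketch: customise the built-in timeouts (member name and parameter
// position assumed from the surrounding text; check the reference).
connection::timeouts tms;
tms.ping_interval = std::chrono::seconds{5};

conn.async_run({"127.0.0.1", "6379"}, tms, [](auto ec) {
   std::cout << ec.message() << std::endl;
});
```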
Another related topic is the cancellation of
`aedis::connection::async_exec`. With the introduction of awaitable
operators in Asio it is very simple to implement timeouts on either
individual operations or groups of them; for example, users may be
tempted to write code like
<a name="requests"></a>
```cpp
co_await (conn.async_exec(...) || timer.async_wait(...))
```
The problem with this approach in Aedis is twofold
* Aedis has a built-in health check that sends `PING` commands and
checks whether responses are being received on time. Since user
commands use the same queue as the built-in `PING`, they are also
subject to the idle timeout, rendering cancellation like the above
unnecessary.
* To improve performance Redis encourages the use of pipelines, where
many requests are sent in a single chunk to the server. In this
scenario it is harder to cancel individual operations without
causing all other (independent) requests in the same pipeline to
fail too.
### Installation
Download the latest Aedis release from github
```bash
$ wget https://github.com/mzimbres/aedis/releases/download/v1.1.0/aedis-1.1.0.tar.gz
```
and unpack it in your preferred location. Aedis is a header-only
library, so you can start using it right away. To do so, include the
following header
```cpp
#include <aedis/src.hpp>
```
in no more than one source file in your application (see intro.cpp
for an example). To build the examples and run the tests, CMake is also
supported
```bash
$ BOOST_ROOT=/opt/boost_1_79_0/ cmake
$ make
$ make test
```
These are the requirements for using Aedis
- Boost 1.79 or greater.
- C++17. Some examples require C++20 with coroutine support.
- Redis 6 or higher. Optionally also redis-cli and Redis Sentinel.
The following compilers are supported
- Tested with gcc: 10, 11, 12.
- Tested with clang: 11, 13, 14.
### Examples
Users are encouraged to skim over the examples below before proceeding
to the next sections
* intro.cpp: The Aedis hello-world program. It sends one command to Redis and quits the connection.
* intro_tls.cpp: Same as intro.cpp but over TLS.
* intro_sync.cpp: Synchronous version of intro.cpp.
* containers.cpp: Shows how to send and receive STL containers and how to use transactions.
* serialization.cpp: Shows how to serialize types using Boost.Json.
* subscriber.cpp: Shows how to implement pubsub that reconnects and resubscribes when the connection is lost.
* subscriber_sentinel.cpp: Same as subscriber.cpp but with failover with sentinels.
* echo_server.cpp: A simple TCP echo server.
* chat_room.cpp: A simple chat room.
<a name="api-reference"></a>
### API Reference
* [High-Level](#high-level-api): Recommended for all users
* [Low-Level](#low-level-api): For users with needs yet to be imagined by the author.
In the next sections we will see how to create requests and receive
responses in more detail
## Requests
### Requests
Redis requests are composed of one or more Redis commands (in the
Redis documentation they are called
@@ -238,7 +258,7 @@ Redis documentation they are called
request req;
// Command with variable length of arguments.
req.push("SET", "key", "some value", value, "EX", "2");
req.push("SET", "key", "some value", "EX", "2");
// Pushes a list.
std::list<std::string> list
@@ -246,7 +266,7 @@ std::list<std::string> list
req.push_range("SUBSCRIBE", list);
// Same as above but as an iterator range.
req.push_range2("SUBSCRIBE", std::cbegin(list), std::cend(list));
req.push_range("SUBSCRIBE", std::cbegin(list), std::cend(list));
// Pushes a map.
std::map<std::string, mystruct> map
@@ -256,33 +276,29 @@ std::map<std::string, mystruct> map
req.push_range("HSET", "key", map);
```
Sending a request to Redis is then performed with the following function
```cpp
co_await db->async_exec(req, adapt(resp));
```
Sending a request to Redis is performed with `aedis::connection::async_exec` as already stated.
<a name="serialization"></a>
### Serialization
#### Serialization
The `push` and `push_range` functions above work with integers,
e.g. `int`, and `std::string` out of the box. To send your own
data type, define a `to_bulk` function like this
```cpp
struct mystruct {
// Example struct.
};
// Example struct.
struct mystruct {...};
void to_bulk(std::string& to, mystruct const& obj)
// Serialize your data structure here.
void to_bulk(std::pmr::string& to, mystruct const& obj)
{
std::string dummy = "Dummy serializaiton string.";
aedis::resp3::to_bulk(to, dummy);
}
```
Once `to_bulk` is defined and accessible over ADL `mystruct` can
Once `to_bulk` is defined and visible over ADL `mystruct` can
be passed to the `request`
```cpp
@@ -297,11 +313,41 @@ Example serialization.cpp shows how store json string in Redis.
<a name="responses"></a>
## Responses
### Responses
To read responses effectively, users must know their RESP3 type;
this can be found in the Redis documentation for each command
(https://redis.io/commands). For example
Aedis uses the following strategy to support Redis responses
* **Static**: For an `aedis::resp3::request` whose size is known at compile time,
`std::tuple` is supported.
* **Dynamic**: Otherwise use `std::vector<aedis::resp3::node<std::string>>`.
For example, below is a request with a compile time size
```cpp
request req;
req.push("PING");
req.push("INCR", "key");
req.push("QUIT");
```
To read the response to this request users can use the following tuple
```cpp
std::tuple<std::string, int, std::string>
```
The pattern should now be apparent: the tuple must have
the same size as the request (exceptions below) and each element must
be able to store the response to the command it refers to. To ignore
the responses to individual commands in the request, use the tag
`aedis::ignore`
```cpp
// Ignore the second and last responses.
std::tuple<std::string, aedis::ignore, std::string, aedis::ignore>
```
The following table provides the response types of some commands
Command | RESP3 type | Documentation
---------|-------------------------------------|--------------
@@ -312,9 +358,7 @@ get | Blob-string | https://redis.io/commands/get
smembers | Set | https://redis.io/commands/smembers
hgetall | Map | https://redis.io/commands/hgetall
Once the RESP3 type of a given response is known we can choose a
proper C++ data structure to receive it in. Fortunately, this is a
simple task for most types. The table below summarises the options
To map these RESP3 types into a C++ data structure, use the table below
RESP3 type | Possible C++ type | Type
---------------|--------------------------------------------------------------|------------------
@@ -330,7 +374,7 @@ Map | `std::vector`, `std::map`, `std::unordered_map` | Ag
Set | `std::vector`, `std::set`, `std::unordered_set` | Aggregate
Push | `std::vector`, `std::map`, `std::unordered_map` | Aggregate
For example
For example, the response to the request
```cpp
request req;
@@ -341,6 +385,11 @@ req.push("LRANGE", "key3", 0, -1);
req.push("HGETALL", "key4");
req.push("QUIT");
```
can be read in the tuple below
```cpp
std::tuple<
aedis::ignore, // hello
int, // rpush
@@ -349,13 +398,16 @@ std::tuple<
std::map<U, V>, // hgetall
std::string // quit
> resp;
```
Both are passed to `async_exec` as shown elsewhere
```cpp
co_await db->async_exec(req, adapt(resp));
```
The tag `aedis::ignore` can be used to ignore individual
elements in the responses. If the intention is to ignore the
response to all commands in the request use @c adapt()
If the intention is to ignore the responses to all commands altogether,
use `adapt()` without arguments instead
```cpp
co_await db->async_exec(req, adapt());
@@ -367,7 +419,27 @@ of this writing, not all RESP3 types are used by the Redis server,
which means in practice users will be concerned with a reduced
subset of the RESP3 specification.
### Null
#### Push
Commands that have a push response, like
* `"SUBSCRIBE"`
* `"PSUBSCRIBE"`
* `"UNSUBSCRIBE"`
must not be included in the tuple. For example, the request below
```cpp
request req;
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
```
must be read into the tuple `std::tuple<std::string, std::string>`,
which has size two.
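As a rough sketch of the full flow under this rule, the tuple covers only `PING` and `QUIT`, while the subscribe confirmation arrives as a server push and is read separately
```cpp
// Sketch: the SUBSCRIBE confirmation is delivered as a push, so it is
// not part of the tuple passed to async_exec.
std::tuple<std::string, std::string> resp; // ping, quit
co_await conn->async_exec(req, adapt(resp));

// The push itself is consumed by the receive loop (see the Receive section).
std::vector<node<std::string>> push;
co_await conn->async_receive(adapt(push));
```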
#### Null
It is not uncommon for apps to access keys that do not exist or
that have already expired in the Redis server; to deal with these
@@ -375,18 +447,23 @@ cases Aedis provides support for `std::optional`. To use it,
wrap your type in a `std::optional` like this
```cpp
std::optional<std::unordered_map<T, U>> resp;
co_await db->async_exec(req, adapt(resp));
std::tuple<
std::optional<A>,
std::optional<B>,
...
> response;
co_await db->async_exec(req, adapt(response));
```
Everything else stays the same.
Everything else stays pretty much the same.
### Transactions
#### Transactions
To read the response to transactions we have to observe that Redis
queues the commands as they arrive and sends the responses back to
the user as an array, in the response to the @c exec command.
For example, to read the response to this request
To read responses to transactions we must first observe that Redis will
queue the commands as they arrive and send their responses to the user as
elements of an array, in the response to the `EXEC` command. For example,
to read the response to this request
```cpp
db.send("MULTI");
@@ -409,28 +486,26 @@ using exec_resp_type =
>;
std::tuple<
ignore, // multi
ignore, // get
ignore, // lrange
ignore, // hgetall
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
exec_resp_type, // exec
> resp;
co_await db->async_exec(req, adapt(resp));
```
Note that above we are not ignoring the responses to the commands
themselves, only the acknowledgement that they have been successfully
queued. For a complete example see containers.cpp.
For a complete example see containers.cpp.
### Deserialization
#### Deserialization
As mentioned in \ref serialization, it is common to
serialize data before sending it to Redis e.g. to json strings.
As mentioned in \ref serialization, it is common practice to
serialize data before sending it to Redis e.g. as json strings.
For performance and convenience reasons, we may also want to
deserialize it directly in its final data structure. Aedis
supports this use case by calling a user provided `from_bulk`
function while parsing the response. For example
deserialize it directly into its final data structure when reading it
back from Redis. Aedis supports this use case by calling a user-provided
`from_bulk` function while parsing the response. For example
```cpp
void from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
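{
   // Sketch of a possible body (assumed, not part of the original README):
   // parse the buffer [p, p + size) into obj, setting ec on failure, for
   // example by delegating to a JSON parser as in serialization.cpp.
}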
@@ -444,7 +519,7 @@ types e.g. `mystruct`, `std::map<std::string, mystruct>` etc.
<a name="the-general-case"></a>
### The general case
#### The general case
There are cases where responses to Redis
commands won't fit the model presented above; some examples are
@@ -455,9 +530,10 @@ will result in error.
* RESP3 aggregates that contain nested aggregates can't be read in STL containers.
* Transactions with a dynamic number of commands can't be read in a `std::tuple`.
To deal with these cases Aedis provides the `resp3::node`
type, which is the most general form of an element in a response,
be it a simple RESP3 type or an aggregate. It is defined like this
To deal with these cases Aedis provides the `aedis::resp3::node` type
abstraction, which is the most general form of an element in a
response, be it a simple RESP3 type or the element of an aggregate. It
is defined like this
```cpp
template <class String>
@@ -478,14 +554,10 @@ struct node {
Any response to a Redis command can be received in a
`std::vector<node<std::string>>`. The vector can be seen as a
pre-order view of the response tree. Using it is no different than
pre-order view of the response tree. Using it is not different than
using other types
```cpp
// Receives any RESP3 simple data type.
node<std::string> resp;
co_await db->async_exec(req, adapt(resp));
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await db->async_exec(req, adapt(resp));
@@ -499,38 +571,50 @@ from Redis with `HGETALL`, some of the options are
* `std::map<std::string, std::string>`: Efficient if you need the data as a `std::map`.
* `std::map<U, V>`: Efficient if you are storing serialized data. Avoids temporaries and requires `from_bulk` for `U` and `V`.
In addition to the above users can also use unordered versions of the containers. The same reasoning also applies to sets e.g. `SMEMBERS`.
In addition to the above, users can also use unordered versions of the
containers. The same reasoning also applies to sets, e.g. `SMEMBERS`,
and other data structures in general.
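For instance, a minimal sketch of the last option, assuming `from_bulk` is defined for `mystruct` as shown in the Deserialization section
```cpp
// Sketch: read an HGETALL reply directly into deserialized values,
// avoiding intermediate temporaries.
request req;
req.push("HGETALL", "hset-key");

std::tuple<std::map<std::string, mystruct>> resp;
co_await conn->async_exec(req, adapt(resp));
```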
### Examples
To conclude this overview users are invited to skim over the
examples below
* intro.cpp: The Aedis hello-world program. It sends one command to Redis and quits the connection.
* intro_tls.cpp: Same as intro.cpp but over TLS.
* intro_sync.cpp: Synchronous version of intro.cpp.
* containers.cpp: Shows how to send and receive STL containers and how to use transactions.
* serialization.cpp: Shows how to serialize types using Boost.Json.
* subscriber.cpp: Shows how to implement pubsub that reconnects and resubscribes when the connection is lost.
* subscriber_sentinel.cpp: Same as subscriber.cpp but with failover with sentinels.
* echo_server.cpp: A simple TCP echo server.
* chat_room.cpp: A simple chat room.
## Why Aedis
At the time of this writing there are seventeen Redis clients
listed in the [official](https://redis.io/docs/clients/#cpp) list.
With so many clients available it is not unlikely that users are
asking themselves why yet another one. In this section I will try
to compare Aedis with the most popular clients and explain why we need
Aedis. Notice however that this is ongoing work, as comparing
clients objectively is difficult and time consuming.
### Redis-plus-plus
The most popular client at the moment of this writing ranked by
github stars is
The main reason why I started writing Aedis was to have a client
compatible with the Asio asynchronous model. As I made progress I could
also address what I considered weaknesses in other libraries. Due to
time constraints I won't be able to give a detailed comparison with
each client listed in the
[official](https://redis.io/docs/clients/#cpp) list;
instead I will focus on the most popular C++ client on github by number of
stars, namely
* https://github.com/sewenew/redis-plus-plus
Before we start it is worth mentioning some of the things it does
not support
Before we start it is important to mention some of the things
redis-plus-plus does not support
* RESP3. Without RESP3 it is impossible to support some important Redis features like client-side caching, among other things.
* The latest version of the communication protocol, RESP3. Without it, it is impossible to support some important Redis features like client-side caching, among other things.
* Coroutines.
* Reading responses directly in user data structures avoiding temporaries.
* Reading responses directly in user data structures to avoid creating temporaries.
* Proper error handling with support for error-code.
* Health checks.
The remaining points will be addressed individually.
Let us first have a look at what sending a command, a pipeline and a
transaction looks like
The remaining points will be addressed individually. Let us first
have a look at what sending a command, a pipeline and a transaction
looks like
```cpp
auto redis = Redis("tcp://127.0.0.1:6379");
@@ -579,7 +663,7 @@ the following characteristics
> NOTE: By default, creating a Pipeline object is NOT cheap, since
> it creates a new connection.
This is clearly a downside of the API as pipelines should be the
This is clearly a downside in the API as pipelines should be the
default way of communicating and not an exception; paying such a
high price for each pipeline imposes a severe cost in performance.
Transactions also suffer from the very same problem.
@@ -612,10 +696,10 @@ problem however with this async design is that it makes it
impossible to write asynchronous programs correctly since it
starts an async operation on every command sent instead of
enqueueing a message and triggering a write when it can be sent.
It is also not clear how pipelines are realised with the design
It is also not clear how pipelines are realised with this design
(if at all).
### Benchmark: Echo server
### Echo server benchmark
This document benchmarks the performance of TCP echo servers I
implemented in different languages using different Redis clients. The
@@ -700,9 +784,110 @@ The code used in the benchmarks can be found at
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
<a name="api-reference"></a>
## Reference
* [High-Level](#high-level-api): Covers the topics discussed in this document.
* [Low-Level](#low-level-api): Covers low-level building blocks. Provided mostly for developers, most users won't need any information provided here.
## Installation
Download the latest release on
https://github.com/mzimbres/aedis/releases. Aedis is a header-only
library, so you can start using it right away by adding the
`include` subdirectory to your project and including
```cpp
#include <aedis/src.hpp>
```
in no more than one source file in your applications. For example, to
compile one of the examples manually
```bash
g++ -std=c++20 -pthread -I/opt/boost_1_79_0/include/ -I./aedis/include examples/intro.cpp
```
The requirements for using Aedis are
- Boost 1.79 or greater.
- C++17 minimum.
- Redis 6 or higher (must support RESP3).
- Optionally also redis-cli and Redis Sentinel.
The following compilers are supported
- Tested with gcc: 10, 11, 12.
- Tested with clang: 11, 13, 14.
## Acknowledgement
Acknowledgement to people that helped shape Aedis in one way or
another.
* Richard Hodges ([madmongo1](https://github.com/madmongo1)): For very helpful support with Asio, the design of asynchronous programs, etc.
* Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation.
* Petr Dannhofer ([Eddie-cz](https://github.com/Eddie-cz)): For helping me understand how the `AUTH` and `HELLO` command can influence each other.
* Mohammad Nejati ([ashtum](https://github.com/ashtum)): For pointing out scenarios where calls to `async_exec` should fail when the connection is lost.
* Klemens Morgenstern ([klemens-morgenstern](https://github.com/klemens-morgenstern)): For useful discussion about timeouts, cancellation, synchronous interfaces and general help with Asio.
## Changelog
### v1.1.0
### v1.2.0
* `aedis::adapt` now supports tuples created with `std::tie`.
`aedis::ignore` is now an alias to the type of `std::ignore`.
* Provides allocator support for the internal queue used in the
`aedis::connection` class.
* Changes the behaviour of `async_run` to complete with success if
asio::error::eof is received. This makes it easier to write
composed operations with awaitable operators.
* Adds allocator support in the `aedis::resp3::request` (a
contribution from Klemens Morgenstern).
* Renames `aedis::resp3::request::push_range2` to `push_range`. The
suffix 2 was used for disambiguation. Klemens fixed it with SFINAE.
* Renames `fail_on_connection_lost` to
`aedis::resp3::request::config::cancel_on_connection_lost`. Now, it will
only cause connections to be canceled when `async_run` completes.
* Introduces `aedis::resp3::request::config::cancel_if_not_connected` which will
cause a request to be canceled if `async_exec` is called before a
connection has been established.
* Introduces a new request flag `aedis::resp3::request::config::retry` which, if
set to true, prevents the request from being canceled when it was
sent to Redis but remained unanswered after `async_run` completed.
This provides a way to avoid executing commands twice.
* Removes the `aedis::connection::async_run` overload that takes
request and adapter as parameters.
* Changes the way `aedis::adapt()` behaves with
`std::vector<aedis::resp3::node<T>>`. Receiving RESP3 simple errors,
blob errors or null won't cause an error but will be treated as a
normal response. It is the user's responsibility to check the content
of the vector.
* Fixes a bug in `connection::cancel(operation::exec)`. Now this
call will only cancel non-written requests.
* Implements per-operation implicit cancellation support for
`aedis::connection::async_exec`. The call `co_await (conn.async_exec(...) || timer.async_wait(...))`
will cancel the request as long as it has not been written.
* Changes `aedis::connection::async_run` completion signature to
`f(error_code)`. This is how it was in the past; the second
parameter was not helpful.
* Renames `operation::receive_push` to `aedis::operation::receive`.
### v1.1.0...1
* Removes `coalesce_requests` from the `aedis::connection::config`, it
became a request property now, see `aedis::resp3::request::config::coalesce`.
@@ -828,44 +1013,24 @@ The code used in the benchmarks can be found at
* Fixes the build with clang compilers and makes some improvements in
the documentation.
### v0.2.1
* Fixes a bug that happens on very high load.
### v0.2.0
### v0.2.0...1
* Fixes a bug that happens on very high load. (v0.2.1)
* Major rewrite of the high-level API. There is no need to use the low-level API anymore.
* No more callbacks: Sending requests follows the ASIO asynchronous model.
* Support for reconnection: Pending requests are not canceled when a connection is lost and are re-sent when a new one is established.
* The library is not sending HELLO-3 on the user's behalf anymore. This is important to support AUTH properly.
### v0.1.2
* Adds reconnect coroutine in the `echo_server` example.
* Corrects `client::async_wait_for_data` with `make_parallel_group` to launch operation.
* Improvements in the documentation.
* Avoids dynamic memory allocation in the client class after reconnection.
### v0.1.1
* Improves the documentation and adds some features to the high-level client.
### v0.1.0
### v0.1.0...2
* Adds reconnect coroutine in the `echo_server` example. (v0.1.2)
* Corrects `client::async_wait_for_data` with `make_parallel_group` to launch operation. (v0.1.2)
* Improvements in the documentation. (v0.1.2)
* Avoids dynamic memory allocation in the client class after reconnection. (v0.1.2)
* Improves the documentation and adds some features to the high-level client. (v.0.1.1)
* Improvements in the design and documentation.
### v0.0.1
* First release to collect design feedback.
## Acknowledgement
Acknowledgement to people that helped shape Aedis in one way or
another.
* Richard Hodges ([madmongo1](https://github.com/madmongo1)): For very helpful support with Asio, the design of asynchronous programs, etc.
* Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation.
* Petr Dannhofer ([Eddie-cz](https://github.com/Eddie-cz)): For helping me understand how the `AUTH` and `HELLO` command can influence each other.
* Mohammad Nejati ([ashtum](https://github.com/ashtum)): For pointing out scenarios where calls to `async_exec` should fail when the connection is lost.
* Klemens Morgenstern ([klemens-morgenstern](https://github.com/klemens-morgenstern)): For useful discussion about timeouts, the synchronous interface and general help with Asio.

View File

@@ -24,7 +24,7 @@
nodes near coords align={horizontal},
]
\addplot coordinates {
(31.1,Asio)
(29.5,Asio)
(30.7,Tokio)
(35.6,Go)
(43.6,Libuv)

View File

@@ -6,6 +6,7 @@
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace net = boost::asio;
@@ -62,3 +63,6 @@ int main(int argc, char* argv[])
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -9,7 +9,9 @@
//
#include <cstdio>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace net = boost::asio;
namespace this_coro = net::this_coro;
@@ -56,3 +58,6 @@ int main()
std::printf("Exception: %s\n", e.what());
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -907,10 +907,7 @@ WARN_LOGFILE =
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = include \
benchmarks/benchmarks.md \
CHANGELOG.md \
examples README.md
INPUT = include examples README.md
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses

View File

@@ -10,6 +10,7 @@
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
@@ -17,6 +18,7 @@
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
@@ -35,41 +37,46 @@ using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
// to monitor the message traffic.
// Receives messages from other users.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
// Subscribes to the channels when a new connection is established.
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "chat-channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (;;) {
boost::system::error_code ec;
co_await db->async_run(ep, req, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
db->reset_stream();
std::cout << ec.message() << std::endl;
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
// Publishes messages to other users.
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> db)
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> conn)
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(in, net::dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "chat-channel", msg);
co_await db->async_exec(req);
co_await conn->async_exec(req);
msg.erase(0, n);
}
}
@@ -80,10 +87,10 @@ auto main() -> int
net::io_context ioc{1};
stream_descriptor in{ioc, ::dup(STDIN_FILENO)};
auto db = std::make_shared<connection>(ioc);
co_spawn(ioc, publisher(in, db), net::detached);
co_spawn(ioc, push_receiver(db), net::detached);
co_spawn(ioc, reconnect(db), net::detached);
auto conn = std::make_shared<connection>(ioc);
co_spawn(ioc, publisher(in, conn), net::detached);
co_spawn(ioc, push_receiver(conn), net::detached);
co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
@@ -95,5 +102,5 @@ auto main() -> int
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)

View File

@@ -7,6 +7,10 @@
#include <map>
#include <vector>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
@@ -14,50 +18,108 @@
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using connection = aedis::connection<tcp_socket>;
// To avoid verbosity.
auto redir(boost::system::error_code& ec)
{
return net::redirect_error(net::use_awaitable, ec);
}
// Sends some containers.
net::awaitable<void> send(endpoint ep)
{
auto ex = co_await net::this_coro::executor;
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
std::map<std::string, std::string> map
{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
req.push("QUIT");
connection conn{ex};
co_await (conn.async_run(ep) || conn.async_exec(req));
}
// Retrieves a Redis hash as an std::map.
net::awaitable<std::map<std::string, std::string>> retrieve_hashes(endpoint ep)
{
connection conn{co_await net::this_coro::executor};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HGETALL", "hset-key");
req.push("QUIT");
std::map<std::string, std::string> ret;
auto resp = std::tie(ret, std::ignore);
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
co_return std::move(ret);
}
// Retrieves as a data structure.
net::awaitable<void> transaction(endpoint ep)
{
connection conn{co_await net::this_coro::executor};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
req.push("QUIT");
std::tuple<
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
std::tuple<std::optional<std::vector<int>>, std::optional<std::map<std::string, std::string>>>, // exec
aedis::ignore // quit
> resp;
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
print(std::get<0>(std::get<3>(resp)).value());
print(std::get<1>(std::get<3>(resp)).value());
}
net::awaitable<void> async_main()
{
try {
endpoint ep{"127.0.0.1", "6379"};
co_await send(ep);
co_await transaction(ep);
auto const hashes = co_await retrieve_hashes(ep);
print(hashes);
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
auto main() -> int
{
try {
std::vector<int> vec{1, 2, 3, 4, 5, 6};
std::map<std::string, int> map{{"key1", 10}, {"key2", 20}, {"key3", 30}};
// Sends and retrieves containers in the same request for
// simplification.
request req;
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
req.push("QUIT");
std::tuple<
aedis::ignore, // rpush
aedis::ignore, // hset
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
std::tuple<std::optional<std::vector<int>>, std::optional<std::map<std::string, int>>>, // exec
aedis::ignore // quit
> resp;
net::io_context ioc;
connection db{ioc};
endpoint ep{"127.0.0.1", "6379"};
db.async_run(ep, req, adapt(resp), {}, [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
net::co_spawn(ioc, async_main(), net::detached);
ioc.run();
print(std::get<0>(std::get<5>(resp)).value());
print(std::get<1>(std::get<5>(resp)).value());
} catch (...) {
std::cerr << "Error." << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -26,17 +26,17 @@ using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<accept
using awaitable_type = net::awaitable<void, executor_type>;
using connection = aedis::connection<tcp_socket>;
awaitable_type echo_loop(tcp_socket socket, std::shared_ptr<connection> db)
awaitable_type echo_server_session(tcp_socket socket, std::shared_ptr<connection> db)
{
request req;
std::tuple<std::string> resp;
std::tuple<std::string> response;
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(resp));
co_await net::async_write(socket, net::buffer(std::get<0>(resp)));
std::get<0>(resp).clear();
co_await db->async_exec(req, adapt(response));
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
std::get<0>(response).clear();
req.clear();
buffer.erase(0, n);
}
@@ -47,22 +47,33 @@ awaitable_type listener(std::shared_ptr<connection> db)
auto ex = co_await net::this_coro::executor;
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
net::co_spawn(ex, echo_loop(co_await acc.async_accept(), db), net::detached);
net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), db), net::detached);
}
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
net::steady_timer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (boost::system::error_code ec1;;) {
co_await conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1));
std::clog << "async_run: " << ec1.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait(net::use_awaitable);
}
}
auto main() -> int
{
try {
net::io_context ioc{BOOST_ASIO_CONCURRENCY_HINT_UNSAFE_IO};
net::io_context ioc{1};
auto db = std::make_shared<connection>(ioc);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [&](auto const& ec) {
std::clog << ec.message() << std::endl;
ioc.stop();
});
co_spawn(ioc, reconnect(db), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
signals.async_wait([&](auto, auto) {
ioc.stop();
});
co_spawn(ioc, listener(db), net::detached);
ioc.run();
@@ -72,5 +83,5 @@ auto main() -> int
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -12,27 +12,27 @@
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto main() -> int
{
try {
net::io_context ioc;
connection db{ioc};
boost::asio::io_context ioc;
connection conn{ioc};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
db.async_run({"127.0.0.1", "6379"}, req, adapt(resp), {}, [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
conn.async_exec(req, adapt(resp), logger);
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
ioc.run();

View File

@@ -43,6 +43,7 @@ int main()
}};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");

View File

@@ -17,10 +17,12 @@
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::ssl::connection<net::ssl::stream<net::ip::tcp::socket>>;
bool verify_certificate(bool, net::ssl::verify_context&)
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto verify_certificate(bool, net::ssl::verify_context&) -> bool
{
std::cout << "set_verify_callback" << std::endl;
return true;
@@ -38,13 +40,13 @@ auto main() -> int
conn.next_layer().set_verify_callback(verify_certificate);
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
conn.async_run({"127.0.0.1", "6379"}, req, adapt(resp), {}, [&](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
conn.async_exec(req, adapt(resp), logger);
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
ioc.run();

View File

@@ -46,7 +46,7 @@ void extract(object const& obj, T& t, boost::string_view key)
t = value_to<T>(obj.at(key));
}
user tag_invoke(value_to_tag<user>, value const& jv)
auto tag_invoke(value_to_tag<user>, value const& jv)
{
user u;
object const& obj = jv.as_object();
@@ -57,7 +57,7 @@ user tag_invoke(value_to_tag<user>, value const& jv)
}
// Serializes
void to_bulk(std::string& to, user const& u)
void to_bulk(std::pmr::string& to, user const& u)
{
aedis::resp3::to_bulk(to, serialize(value_from(u)));
}
@@ -69,7 +69,7 @@ void from_bulk(user& u, boost::string_view sv, boost::system::error_code&)
u = value_to<user>(jv);
}
std::ostream& operator<<(std::ostream& os, user const& u)
auto operator<<(std::ostream& os, user const& u) -> std::ostream&
{
os << "Name: " << u.name << "\n"
<< "Age: " << u.age << "\n"
@@ -78,34 +78,40 @@ std::ostream& operator<<(std::ostream& os, user const& u)
return os;
}
bool operator<(user const& a, user const& b)
auto operator<(user const& a, user const& b)
{
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
int main()
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto main() -> int
{
net::io_context ioc;
connection db{ioc};
try {
net::io_context ioc;
connection conn{ioc};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
request req;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
endpoint ep{"127.0.0.1", "6379"};
db.async_run(ep, req, adapt(resp), {}, [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
endpoint ep{"127.0.0.1", "6379"};
conn.async_exec(req, adapt(resp),logger);
conn.async_run(ep, {}, logger);
ioc.run();
ioc.run();
// Print
print(std::get<2>(resp));
// Print
print(std::get<2>(resp));
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
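
The hunk above also changes the user-provided `to_bulk` to take a `std::pmr::string&`. As a minimal sketch, the same pair of ADL customisation points for a hypothetical `my_type` (not part of this changeset) would look as follows, using only the signatures shown above:

// Sketch: serialization hooks for a hypothetical my_type, found via ADL.
struct my_type { int value = 0; };

void to_bulk(std::pmr::string& to, my_type const& obj)
{
   // Delegate to the string overload provided by aedis::resp3.
   aedis::resp3::to_bulk(to, std::to_string(obj.value));
}

void from_bulk(my_type& obj, boost::string_view sv, boost::system::error_code&)
{
   // A real implementation should set the error code on parse failure.
   obj.value = std::stoi(std::string{sv});
}

With this pair visible through ADL, `req.push("SET", "key", my_type{42})` and reading the value back through `from_bulk` work the same way as for `user` above.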

View File

@@ -11,6 +11,7 @@
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
@@ -18,6 +19,7 @@
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
@@ -43,10 +45,10 @@ using connection = aedis::connection<tcp_socket>;
*/
// Receives pushes.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
@@ -55,31 +57,37 @@ net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
// See
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req;
req.get_config().cancel_if_not_connected = false;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (;;) {
boost::system::error_code ec;
co_await db->async_run(ep, req, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
db->reset_stream();
std::cout << ec.message() << std::endl;
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
int main()
auto main() -> int
{
try {
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, push_receiver(db), net::detached);
net::co_spawn(ioc, reconnect(db), net::detached);
net::co_spawn(ioc, push_receiver(conn), net::detached);
net::co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
@@ -91,5 +99,5 @@ int main()
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
int main() {std::cout << "Requires coroutine support." << std::endl; return 1;}
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -11,6 +11,7 @@
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
@@ -18,6 +19,7 @@
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
@@ -26,15 +28,20 @@ using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
auto is_valid(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.host) && !std::empty(ep.port);
}
// Connects to a Redis instance over sentinel and performs failover in
// case of disconnection, see
// https://redis.io/docs/reference/sentinel-clients. This example
// assumes a sentinel and a redis server running on localhost.
net::awaitable<void> receive_pushes(std::shared_ptr<connection> db)
net::awaitable<void> receive_pushes(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
@@ -50,19 +57,25 @@ net::awaitable<endpoint> resolve()
, {"127.0.0.1", "26379"}
};
request req1;
req1.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req1.push("QUIT");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
auto ex = co_await net::this_coro::executor;
connection conn{ex};
connection conn{co_await net::this_coro::executor};
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec;
co_await conn.async_run(ep, req1, adapt(addr), {}, net::redirect_error(net::use_awaitable, ec));
boost::system::error_code ec1, ec2;
co_await (
conn.async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn.async_exec(req, adapt(addr), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn.reset_stream();
std::cout << ec.message() << std::endl;
if (std::get<0>(addr))
break;
}
@@ -76,24 +89,31 @@ net::awaitable<endpoint> resolve()
co_return ep;
}
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req2;
req2.push("SUBSCRIBE", "channel");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
auto ex = co_await net::this_coro::executor;
stimer timer{ex};
for (;;) {
auto ep = co_await net::co_spawn(ex, resolve(), net::use_awaitable);
if (!aedis::is_valid(ep)) {
if (!is_valid(ep)) {
std::clog << "Can't resolve master name" << std::endl;
co_return;
}
boost::system::error_code ec;
co_await db->async_run(ep, req2, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
std::cout << ec.message() << std::endl;
std::cout << "Starting the failover." << std::endl;
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << "\n"
<< "Starting the failover." << std::endl;
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
@@ -103,9 +123,9 @@ int main()
{
try {
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, receive_pushes(db), net::detached);
net::co_spawn(ioc, reconnect(db), net::detached);
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, receive_pushes(conn), net::detached);
net::co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
@@ -115,5 +135,5 @@ int main()
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
int main() {std::cout << "Requires coroutine support." << std::endl; return 1;}
int main() {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -204,8 +204,13 @@ inline auto adapt(std::size_t max_read_size = (std::numeric_limits<std::size_t>:
/** @brief Adapts a type to be used as a response.
* @ingroup high-level-api
*
* The type T can be any STL container, any integer type and
* \c std::string
* The type T must be either
*
* 1. a std::tuple<T1, T2, T3, ...> or
* 2. std::vector<node<String>>
*
 * The types T1, T2, etc. can be any STL container, any integer type
 * and \c std::string.
*
* @param t Tuple containing the responses.
* @param max_read_size Specifies the maximum size of the read
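
As a usage sketch of the two accepted response types (relying only on `adapt()` and `connection::async_exec` as shown elsewhere in this changeset, and assuming an already connected `conn`):

// Sketch: the two forms of response accepted by adapt().
aedis::resp3::request req;
req.push("PING");
req.push("LRANGE", "list-key", 0, -1);

// 1. A tuple with one element per command in the request.
std::tuple<std::string, std::vector<std::string>> resp;
conn.async_exec(req, aedis::adapt(resp), [](auto ec, auto) { /* ... */ });

// 2. Alternatively, the raw response tree as a vector of nodes.
std::vector<aedis::resp3::node<std::string>> nodes;
conn.async_exec(req, aedis::adapt(nodes), [](auto ec, auto) { /* ... */ });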

View File

@@ -49,11 +49,10 @@ auto parse_double(
// Serialization.
template <class T>
typename std::enable_if<std::is_integral<T>::value, void>::type
from_bulk(
auto from_bulk(
T& i,
boost::string_view sv,
boost::system::error_code& ec)
boost::system::error_code& ec) -> typename std::enable_if<std::is_integral<T>::value, void>::type
{
i = resp3::detail::parse_uint(sv.data(), sv.size(), ec);
}
@@ -106,10 +105,9 @@ private:
public:
explicit general_aggregate(Result* c = nullptr): result_(c) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code& ec)
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code&)
{
result_->push_back({n.data_type, n.aggregate_size, n.depth, std::string{std::cbegin(n.value), std::cend(n.value)}});
set_on_resp3_error(n.data_type, ec);
}
};

View File

@@ -20,7 +20,7 @@
namespace aedis::adapter::detail {
struct ignore {};
using ignore = std::decay_t<decltype(std::ignore)>;
/* Traits class for response objects.
*
@@ -29,18 +29,15 @@ struct ignore {};
*/
template <class ResponseType>
struct response_traits {
using adapter_type = adapter::detail::wrapper<ResponseType>;
using adapter_type = adapter::detail::wrapper<typename std::decay<ResponseType>::type>;
static auto adapt(ResponseType& r) noexcept { return adapter_type{&r}; }
};
template <class T>
using adapter_t = typename response_traits<T>::adapter_type;
template <>
struct response_traits<ignore> {
using response_type = ignore;
using adapter_type = resp3::detail::ignore_response;
static auto adapt(response_type&) noexcept { return adapter_type{}; }
static auto adapt(response_type) noexcept { return adapter_type{}; }
};
template <class T>
@@ -64,10 +61,13 @@ struct response_traits<void> {
static auto adapt() noexcept { return adapter_type{}; }
};
template <class T>
using adapter_t = typename response_traits<std::decay_t<T>>::adapter_type;
// Duplicated here to avoid circular include dependency.
template<class T>
auto internal_adapt(T& t) noexcept
{ return response_traits<T>::adapt(t); }
{ return response_traits<std::decay_t<T>>::adapt(t); }
template <std::size_t N>
struct assigner {

View File

@@ -22,12 +22,8 @@ namespace aedis {
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @remarks This class exposes only asynchronous member functions,
* synchronous communications with the Redis server is provided by
* the `aedis::sync` class.
*
* @tparam Derived class.
*
* @tparam AsyncReadWriteStream A stream that supports reading and
* writing.
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection :
@@ -51,21 +47,27 @@ public:
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the resp3 handshake operation.
/// Timeout of the resp3-handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval of ping operations.
/// Time interval with which PING commands are sent to Redis.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
};
/// Constructor
explicit connection(executor_type ex)
: base_type{ex}
explicit
connection(
executor_type ex,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex}
{}
explicit connection(boost::asio::io_context& ioc)
: connection(ioc.get_executor())
explicit
connection(
boost::asio::io_context& ioc,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: connection(ioc.get_executor(), resource)
{ }
/// Returns the associated executor.
@@ -87,7 +89,7 @@ public:
/// Returns a const reference to the next layer.
auto next_layer() const noexcept -> auto const& { return stream_; }
/** @brief Starts communication with the Redis server asynchronously.
/** @brief Establishes a connection with the Redis server asynchronously.
*
* This function performs the following steps
*
@@ -108,8 +110,8 @@ public:
* `endpoint::password`.
*
* @li Checks whether the server role corresponds to the one
* specifed in the `endpoint`. If `endpoint::role` is left empty,
* no check is performed. If the role role is different than the
* specified in the `endpoint`. If `endpoint::role` is left empty,
* no check is performed. If the role is different than the
* expected `async_run` will complete with
* `error::unexpected_server_role`.
*
@@ -134,6 +136,10 @@ public:
* @code
* void f(boost::system::error_code);
* @endcode
*
    * This function completes when the connection is lost. If the
    * underlying error is boost::asio::error::eof it completes
    * without error.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
@@ -145,39 +151,6 @@ public:
return base_type::async_run(ep, ts, std::move(token));
}
/** @brief Connects and executes a request asynchronously.
*
* Combines the other `async_run` overload with `async_exec` in a
* single function. This function is useful for users that want to
* send a single request to the server and close it.
*
* @param ep Redis endpoint.
* @param req Request object.
* @param adapter Response adapter.
* @param ts Timeouts used by the operation.
* @param token Asio completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in bytes.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(
endpoint ep,
resp3::request const& req,
Adapter adapter,
timeouts ts,
CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, req, adapter, ts, std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
*
* This function will send a request to the Redis server and
@@ -234,18 +207,18 @@ public:
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
auto async_receive(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_receive_push(adapter, std::move(token));
return base_type::async_receive(adapter, std::move(token));
}
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with
* `async_exec`. Has precedence over
* `request::config::close_on_connection_lost`
* `async_exec`. Affects only requests that haven't been written
* yet.
* @li operation::run: Cancels the `async_run` operation. Notice
* that the preferred way to close a connection is to send a
* [QUIT](https://redis.io/commands/quit/) command to the server.
@@ -253,8 +226,8 @@ public:
* timeout and lead to `connection::async_run` completing with
* `error::idle_timeout`. Calling `cancel(operation::run)`
* directly should be seen as the last option.
* @li operation::receive_push: Cancels any ongoing callto
* `async_receive_push`.
    * @li operation::receive: Cancels any ongoing call to
* `async_receive`.
*
* @param op: The operation to be cancelled.
* @returns The number of operations that have been canceled.
@@ -268,7 +241,7 @@ private:
template <class, class> friend class detail::connection_base;
template <class, class> friend struct detail::exec_read_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::receive_push_op;
template <class, class> friend struct detail::receive_op;
template <class> friend struct detail::check_idle_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
@@ -293,7 +266,7 @@ private:
void close() { stream_.close(); }
auto is_open() const noexcept { return stream_.is_open(); }
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
auto lowest_layer() noexcept -> auto& { return stream_.lowest_layer(); }
AsyncReadWriteStream stream_;
};
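
Both constructors above now take a `std::pmr::memory_resource*` defaulting to `std::pmr::get_default_resource()`. A hedged sketch of plugging in a custom resource (the monotonic buffer is only an illustration, not something the library requires; aedis and Asio headers are assumed included as in the examples):

// Sketch: one memory resource backing both the connection's internal
// request queue and a request's payload storage.
#include <array>
#include <memory_resource>

std::array<std::byte, 4096> arena;
std::pmr::monotonic_buffer_resource resource{arena.data(), arena.size()};

boost::asio::io_context ioc;
aedis::connection<> conn{ioc, &resource};
aedis::resp3::request req{{}, &resource};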

View File

@@ -13,6 +13,7 @@
#include <chrono>
#include <memory>
#include <type_traits>
#include <memory_resource>
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
@@ -43,16 +44,19 @@ public:
using executor_type = Executor;
using this_type = connection_base<Executor, Derived>;
explicit connection_base(executor_type ex)
explicit
connection_base(executor_type ex, std::pmr::memory_resource* resource)
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, reqs_{resource}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
, req_{{true}}
{
req_.get_config().cancel_if_not_connected = true;
req_.get_config().cancel_on_connection_lost = true;
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
@@ -64,14 +68,7 @@ public:
switch (op) {
case operation::exec:
{
for (auto& e: reqs_) {
e->stop = true;
e->timer.cancel_one();
}
auto const ret = reqs_.size();
reqs_ = {};
return ret;
return cancel_unwritten_requests();
}
case operation::run:
{
@@ -82,21 +79,11 @@ public:
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
cancel_on_conn_lost();
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !ptr->req->get_config().close_on_connection_lost;
});
// Cancel own pings if there are any waiting.
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop = true;
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
return 1U;
}
case operation::receive_push:
case operation::receive:
{
push_channel_.cancel();
return 1U;
@@ -105,6 +92,53 @@ public:
}
}
auto cancel_unwritten_requests() -> std::size_t
{
auto f = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
return ptr->is_written();
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
return ret;
}
auto cancel_on_conn_lost() -> std::size_t
{
auto cond = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
if (ptr->get_request().get_config().cancel_on_connection_lost)
return false;
return !(!ptr->get_request().get_config().retry && ptr->is_written());
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return ptr->reset_status();
});
return ret;
}
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
@@ -124,7 +158,7 @@ public:
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
auto async_receive(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
@@ -132,7 +166,7 @@ public:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::receive_push_op<Derived, decltype(f)>{&derived(), f}, token, resv_);
>(detail::receive_op<Derived, decltype(f)>{&derived(), f}, token, resv_);
}
template <class Timeouts, class CompletionToken>
@@ -146,21 +180,6 @@ public:
>(detail::run_op<Derived, Timeouts>{&derived(), ts}, token, resv_);
}
template <class Adapter, class Timeouts, class CompletionToken>
auto async_run(
endpoint ep,
resp3::request const& req,
Adapter adapter,
Timeouts ts,
CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::runexec_op<Derived, Adapter, Timeouts>
{&derived(), ep, &req, adapter, ts}, token, resv_);
}
private:
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
@@ -171,25 +190,110 @@ private:
auto derived() -> Derived& { return static_cast<Derived&>(*this); }
void on_write()
{
// We have to clear the payload right after writing it to use it
// as a flag that informs there is no ongoing write.
write_buffer_.clear();
// Notice this must come before the for-each below.
cancel_push_requests();
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
if (ptr->is_staged())
ptr->mark_written();
});
}
struct req_info {
explicit req_info(executor_type ex) : timer{ex} {}
timer_type timer;
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
bool written = false;
public:
enum class action
{
stop,
proceed,
none,
};
explicit req_info(resp3::request const& req, executor_type ex)
: timer_{ex}
, action_{action::none}
, req_{&req}
, cmds_{std::size(req)}
, status_{status::none}
{
timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
auto proceed()
{
timer_.cancel();
action_ = action::proceed;
}
void stop()
{
timer_.cancel();
action_ = action::stop;
}
[[nodiscard]] auto is_written() const noexcept
{ return status_ == status::written; }
[[nodiscard]] auto is_staged() const noexcept
{ return status_ == status::staged; }
void mark_written() noexcept
{ status_ = status::written; }
void mark_staged() noexcept
{ status_ = status::staged; }
void reset_status() noexcept
{ status_ = status::none; }
[[nodiscard]] auto get_number_of_commands() const noexcept
{ return cmds_; }
[[nodiscard]] auto get_request() const noexcept -> auto const&
{ return *req_; }
[[nodiscard]] auto get_action() const noexcept
{ return action_;}
template <class CompletionToken>
auto async_wait(CompletionToken token)
{
return timer_.async_wait(std::move(token));
}
private:
enum class status
{ none
, staged
, written
};
timer_type timer_;
action action_;
resp3::request const* req_;
std::size_t cmds_;
status status_;
};
using reqs_type = std::deque<std::shared_ptr<req_info>>;
void remove_request(std::shared_ptr<req_info> const& info)
{
reqs_.erase(std::remove(std::begin(reqs_), std::end(reqs_), info));
}
template <class, class> friend struct detail::receive_push_op;
using reqs_type = std::pmr::deque<std::shared_ptr<req_info>>;
template <class, class> friend struct detail::receive_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class> friend struct detail::ping_op;
template <class, class> friend struct detail::run_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::exec_read_op;
template <class, class, class> friend struct detail::runexec_op;
template <class> friend struct detail::resolve_with_timeout_op;
template <class> friend struct detail::check_idle_op;
template <class, class> friend struct detail::start_op;
@@ -198,11 +302,11 @@ private:
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->written && ptr->req->size() == 0);
return !(ptr->is_staged() && ptr->get_request().size() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->timer.cancel();
ptr->proceed();
});
reqs_.erase(point, std::end(reqs_));
@@ -295,21 +399,23 @@ private:
void stage_request(req_info& ri)
{
write_buffer_ += ri.req->payload();
cmds_ += ri.req->size();
ri.written = true;
write_buffer_ += ri.get_request().payload();
cmds_ += ri.get_request().size();
ri.mark_staged();
}
void coalesce_requests()
{
      // Coalesces the requests and marks them as staged. After a
      // successful write, staged requests will be marked as written.
BOOST_ASSERT(write_buffer_.empty());
BOOST_ASSERT(!reqs_.empty());
stage_request(*reqs_.at(0));
for (std::size_t i = 1; i < std::size(reqs_); ++i) {
if (!reqs_.at(i - 1)->req->get_config().coalesce ||
!reqs_.at(i - 0)->req->get_config().coalesce) {
if (!reqs_.at(i - 1)->get_request().get_config().coalesce ||
!reqs_.at(i - 0)->get_request().get_config().coalesce) {
break;
}
stage_request(*reqs_.at(i));

View File

@@ -46,10 +46,9 @@ struct connect_with_timeout_op {
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield
detail::async_connect(
conn->next_layer(), *timer, *endpoints, std::move(self));
self.complete(ec);
yield detail::async_connect(conn->next_layer(), *timer, *endpoints, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
@@ -57,7 +56,7 @@ struct connect_with_timeout_op {
template <class Conn>
struct resolve_with_timeout_op {
Conn* conn = nullptr;
std::chrono::steady_clock::duration resolve_timeout;
std::chrono::steady_clock::duration resolve_timeout{};
boost::asio::coroutine coro{};
template <class Self>
@@ -72,14 +71,15 @@ struct resolve_with_timeout_op {
aedis::detail::async_resolve(
conn->resv_, conn->ping_timer_,
conn->ep_.host, conn->ep_.port, std::move(self));
AEDIS_CHECK_OP0();
conn->endpoints_ = res;
self.complete(ec);
self.complete({});
}
}
};
template <class Conn, class Adapter>
struct receive_push_op {
struct receive_op {
Conn* conn = nullptr;
Adapter adapter;
std::size_t read_size = 0;
@@ -93,34 +93,26 @@ struct receive_push_op {
{
reenter (coro)
{
yield
conn->push_channel_.async_receive(std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield conn->push_channel_.async_receive(std::move(self));
AEDIS_CHECK_OP1();
yield
resp3::async_read(
conn->next_layer(),
conn->make_dynamic_buffer(adapter.get_max_read_size(0)),
adapter, std::move(self));
if (ec) {
conn->cancel(operation::run);
// Needed to cancel the channel, otherwise the read
// operation will be blocked forever see
// test_push_adapter.
conn->cancel(operation::receive_push);
self.complete(ec, 0);
return;
}
// cancel(receive) is needed to cancel the channel, otherwise
         // the read operation will be blocked forever, see
// test_push_adapter.
AEDIS_CHECK_OP1(conn->cancel(operation::run); conn->cancel(operation::receive));
read_size = n;
yield
conn->push_channel_.async_send({}, 0, std::move(self));
self.complete(ec, read_size);
yield conn->push_channel_.async_send({}, 0, std::move(self));
AEDIS_CHECK_OP1();
self.complete({}, read_size);
return;
}
}
@@ -158,25 +150,15 @@ struct exec_read_op {
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1(conn->cancel(operation::run));
}
// If the next request is a push we have to handle it to
// the receive_push_op wait for it to be done and continue.
// the receive_op wait for it to be done and continue.
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) {
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
// Notice we don't call cancel_run() as that is the
// responsability of the receive_push_op.
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1(conn->cancel(operation::run));
continue;
}
//-----------------------------------
@@ -190,11 +172,7 @@ struct exec_read_op {
++index;
if (ec) {
conn->cancel(operation::run);
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1(conn->cancel(operation::run));
read_size += n;
@@ -229,50 +207,48 @@ struct exec_op {
{
reenter (coro)
{
if (req->get_config().close_on_connection_lost && !conn->is_open()) {
// The user doesn't want to wait for the connection to be
// stablished.
self.complete(error::not_connected, 0);
return;
}
// Check whether the user wants to wait for the connection to
         // be established.
         // TODO: is_open below reflects only whether a TCP connection
         // has been established. We need a variable that informs
         // whether HELLO was successful and we are connected with
// Redis.
if (req->get_config().cancel_if_not_connected && !conn->is_open())
return self.complete(error::not_connected, 0);
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), conn->resv_.get_executor());
info->timer.expires_at(std::chrono::steady_clock::time_point::max());
info->req = req;
info->cmds = req->size();
info->stop = false;
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), *req, conn->resv_.get_executor());
conn->add_request_info(info);
yield
info->timer.async_wait(std::move(self));
BOOST_ASSERT(!!ec);
if (ec != boost::asio::error::operation_aborted) {
self.complete(ec, 0);
return;
EXEC_OP_WAIT:
yield info->async_wait(std::move(self));
BOOST_ASSERT(ec == boost::asio::error::operation_aborted);
if (info->get_action() == Conn::req_info::action::stop) {
return self.complete(ec, 0);
}
// null can happen for example when resolve fails.
if (!conn->is_open() || info->stop) {
self.complete(ec, 0);
return;
if (is_cancelled(self)) {
if (info->is_written()) {
self.get_cancellation_state().clear();
goto EXEC_OP_WAIT; // Too late, can't cancel.
} else {
conn->remove_request(info);
self.complete(ec, 0);
return;
}
}
BOOST_ASSERT(conn->is_open());
if (req->size() == 0) {
self.complete({}, 0);
return;
}
if (req->size() == 0)
return self.complete({}, 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->cmds, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
conn->async_exec_read(adapter, conn->reqs_.front()->get_number_of_commands(), std::move(self));
AEDIS_CHECK_OP1();
read_size = n;
@@ -285,7 +261,7 @@ struct exec_op {
conn->writer_timer_.cancel_one();
} else {
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.front()->timer.cancel_one();
conn->reqs_.front()->proceed();
}
self.complete({}, read_size);
@@ -295,8 +271,8 @@ struct exec_op {
template <class Conn>
struct ping_op {
Conn* conn;
std::chrono::steady_clock::duration ping_interval;
Conn* conn{};
std::chrono::steady_clock::duration ping_interval{};
boost::asio::coroutine coro{};
template <class Self>
@@ -308,22 +284,22 @@ struct ping_op {
reenter (coro) for (;;)
{
conn->ping_timer_.expires_after(ping_interval);
yield
conn->ping_timer_.async_wait(std::move(self));
if (ec || !conn->is_open()) {
conn->cancel(operation::run);
self.complete(ec);
yield conn->ping_timer_.async_wait(std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
               // Checking for is_open is necessary because the timer can
// complete with success although cancel has been called.
self.complete({});
return;
}
conn->req_.clear();
conn->req_.push("PING");
yield
conn->async_exec(conn->req_, adapt(), std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete({});
return;
yield conn->async_exec(conn->req_, adapt(), std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
// Checking for is_open is necessary to avoid
// looping back on the timer although cancel has been
// called.
return self.complete({});
}
}
}
@@ -331,8 +307,8 @@ struct ping_op {
template <class Conn>
struct check_idle_op {
Conn* conn;
std::chrono::steady_clock::duration ping_interval;
Conn* conn{};
std::chrono::steady_clock::duration ping_interval{};
boost::asio::coroutine coro{};
template <class Self>
@@ -341,18 +317,11 @@ struct check_idle_op {
reenter (coro) for (;;)
{
conn->check_idle_timer_.expires_after(2 * ping_interval);
yield
conn->check_idle_timer_.async_wait(std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete({});
return;
}
if (!conn->is_open()) {
// Notice this is not an error, it was requested from an
// external op.
self.complete({});
return;
yield conn->check_idle_timer_.async_wait(std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
               // Checking for is_open is necessary because the timer can
// complete with success although cancel has been called.
return self.complete({});
}
auto const now = std::chrono::steady_clock::now();
@@ -393,6 +362,11 @@ struct start_op {
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
@@ -404,6 +378,15 @@ struct start_op {
}
};
inline
auto check_resp3_handshake_failed(std::vector<resp3::node<std::string>> const& resp) -> bool
{
return std::size(resp) == 1 &&
(resp.front().data_type == resp3::type::simple_error ||
resp.front().data_type == resp3::type::blob_error ||
resp.front().data_type == resp3::type::null);
}
template <class Conn, class Timeouts>
struct run_op {
Conn* conn = nullptr;
@@ -418,24 +401,15 @@ struct run_op {
{
reenter (coro)
{
yield
conn->async_resolve_with_timeout(ts.resolve_timeout, std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
yield conn->async_resolve_with_timeout(ts.resolve_timeout, std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
yield
conn->derived().async_connect(conn->endpoints_, ts, conn->ping_timer_, std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
yield conn->derived().async_connect(conn->endpoints_, ts, conn->ping_timer_, std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->prepare_hello(conn->ep_);
conn->ping_timer_.expires_after(ts.resp3_handshake_timeout);
conn->response_.clear();
yield
resp3::detail::async_exec(
@@ -447,9 +421,11 @@ struct run_op {
std::move(self)
);
if (ec) {
AEDIS_CHECK_OP0(conn->cancel(operation::run));
if (check_resp3_handshake_failed(conn->response_)) {
conn->cancel(operation::run);
self.complete(ec);
self.complete(error::resp3_handshake_error);
return;
}
@@ -463,12 +439,10 @@ struct run_op {
conn->write_buffer_.clear();
conn->cmds_ = 0;
std::for_each(std::begin(conn->reqs_), std::end(conn->reqs_), [](auto const& ptr) {
return ptr->written = false;
});
yield conn->async_start(ts, std::move(self));
self.complete(ec);
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
@@ -491,33 +465,21 @@ struct writer_op {
conn->coalesce_requests();
yield
boost::asio::async_write(conn->next_layer(), boost::asio::buffer(conn->write_buffer_), std::move(self));
if (ec) {
self.complete(ec);
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->on_write();
// A socket.close() may have been called while a
            // successful write might have already been queued, so we
// have to check here before proceeding.
if (!conn->is_open()) {
self.complete({});
return;
}
// We have to clear the payload right after the read op in
// order to to use it as a flag that informs there is no
// ongoing write.
conn->write_buffer_.clear();
conn->cancel_push_requests();
}
if (conn->is_open()) {
yield
conn->writer_timer_.async_wait(std::move(self));
if (ec != boost::asio::error::operation_aborted) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
// The timer may be canceled either to stop the write op
            // or to proceed to the next write; the difference between
// the two is that for the former the socket will be
// closed first. We check for that below.
}
if (!conn->is_open()) {
yield conn->writer_timer_.async_wait(std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
               // Notice this is not an error of the op, stopping was
// requested from the outside, so we complete with
// success.
@@ -547,12 +509,14 @@ struct reader_op {
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
if (ec) {
if (ec == boost::asio::error::eof) {
conn->cancel(operation::run);
self.complete(ec);
return;
return self.complete({}); // EOFINAE: EOF is not an error.
}
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->last_data_ = std::chrono::steady_clock::now();
// We handle unsolicited events in the following way
@@ -575,25 +539,24 @@ struct reader_op {
BOOST_ASSERT(!conn->read_buffer_.empty());
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push
|| conn->reqs_.empty()
|| (!conn->reqs_.empty() && conn->reqs_.front()->cmds == 0)) {
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
|| (!conn->reqs_.empty() && conn->reqs_.front()->get_number_of_commands() == 0)) {
yield async_send_receive(conn->push_channel_, std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
conn->cancel(operation::run);
self.complete(ec);
self.complete(boost::asio::error::basic_errors::operation_aborted);
return;
}
} else {
BOOST_ASSERT(conn->cmds_ != 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front()->cmds != 0);
conn->reqs_.front()->timer.cancel_one();
yield
conn->read_timer_.async_wait(std::move(self));
if (ec != boost::asio::error::operation_aborted ||
!conn->is_open()) {
BOOST_ASSERT(conn->reqs_.front()->get_number_of_commands() != 0);
conn->reqs_.front()->proceed();
yield conn->read_timer_.async_wait(std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
// Added this cancel here to make sure any outstanding
// ping is cancelled.
conn->cancel(operation::run);
self.complete(ec);
self.complete(boost::asio::error::basic_errors::operation_aborted);
return;
}
}
@@ -601,48 +564,6 @@ struct reader_op {
}
};
template <class Conn, class Adapter, class Timeouts>
struct runexec_op {
Conn* conn = nullptr;
endpoint ep;
resp3::request const* req = nullptr;
Adapter adapter;
Timeouts ts;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this, ep2 = ep](auto token) { return conn->async_run(ep2, ts, token);},
[this](auto token) { return conn->async_exec(*req, adapter, token);}
).async_wait(
boost::asio::experimental::wait_for_one_error(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, n); return;
case 1: {
if (ec2)
self.complete(ec2, n);
else
self.complete(ec1, n);
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
} // aedis::detail
#include <boost/asio/unyield.hpp>
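
The new `exec_op` above honours per-operation cancellation: a cancellation that arrives before the request has been written removes it from the queue and completes with `operation_aborted`, while a request that was already written keeps waiting for its response. One way to drive this from user code is Asio's cancellation slots; a sketch (not taken from the repository) assuming `conn`, `req` and `resp` as in the examples:

// Sketch: per-operation cancellation of async_exec via a cancellation slot.
boost::asio::cancellation_signal sig;

conn.async_exec(req, aedis::adapt(resp),
   boost::asio::bind_cancellation_slot(sig.slot(),
      [](boost::system::error_code ec, std::size_t) {
         std::cout << "async_exec: " << ec.message() << std::endl;
      }));

// Emitted before the request is written: completes with operation_aborted.
// Emitted after it has been written: ignored, the response is still awaited.
sig.emit(boost::asio::cancellation_type::terminal);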

View File

@@ -53,6 +53,11 @@ struct connect_op {
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, {});
return;
}
switch (order[0]) {
case 0: self.complete(ec1, ep); return;
case 1:
@@ -96,6 +101,11 @@ struct resolve_op {
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, {});
return;
}
switch (order[0]) {
case 0: self.complete(ec1, res); return;
@@ -129,14 +139,13 @@ struct send_receive_op {
{
yield
channel->async_send(boost::system::error_code{}, 0, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1();
yield
channel->async_receive(std::move(self));
self.complete(ec, 0);
AEDIS_CHECK_OP1();
self.complete({}, 0);
}
}
};

View File

@@ -31,9 +31,7 @@ struct endpoint {
std::string password{};
};
auto is_valid(endpoint const& ep) noexcept -> bool;
auto requires_auth(endpoint const& ep) noexcept -> bool;
auto operator<<(std::ostream& os, endpoint const& ep) -> std::ostream&;
} // aedis

View File

@@ -81,6 +81,9 @@ enum class error
   /// There is no established connection.
not_connected,
/// RESP3 handshake error (HELLO command).
resp3_handshake_error,
};
/** \internal

View File

@@ -10,20 +10,9 @@
namespace aedis {
auto is_valid(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.host) && !std::empty(ep.port);
}
auto requires_auth(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.username) && !std::empty(ep.password);
}
auto operator<<(std::ostream& os, endpoint const& ep) -> std::ostream&
{
os << ep.host << ":" << ep.port << " (" << ep.username << "," << ep.password << ")";
return os;
}
} // aedis

View File

@@ -43,6 +43,7 @@ struct error_category_impl : boost::system::error_category {
case error::unexpected_server_role: return "Unexpected server role.";
case error::ssl_handshake_timeout: return "SSL handshake timeout.";
case error::not_connected: return "Not connected.";
case error::resp3_handshake_error: return "RESP3 handshake error (HELLO command).";
default: BOOST_ASSERT(false); return "Aedis error.";
}
}
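
For reference, the new code can be tested for in the `async_run` completion handler; a minimal sketch assuming a connection `db` and an `endpoint ep` as in the examples earlier in this changeset:

// Sketch: distinguishing a failed RESP3 handshake (HELLO) from other errors.
db->async_run(ep, {}, [](boost::system::error_code ec) {
   if (ec == aedis::error::resp3_handshake_error)
      std::cerr << "HELLO failed: " << ec.message() << std::endl;
   else if (ec)
      std::cerr << "async_run: " << ec.message() << std::endl;
});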

View File

@@ -20,8 +20,8 @@ enum class operation {
exec,
/// Refers to `connection::async_run` operations.
run,
/// Refers to `connection::async_receive_push` operations.
receive_push,
/// Refers to `connection::async_receive` operations.
receive,
};
} // aedis
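
A short sketch of how the renamed enumerator is used with `connection::cancel`, with the semantics documented in connection.hpp above:

// Sketch: cancelling pending operations on a connection object `conn`.
conn.cancel(aedis::operation::exec);     // Only requests not written yet.
conn.cancel(aedis::operation::receive);  // Any ongoing async_receive.
conn.cancel(aedis::operation::run);      // Last resort; prefer sending QUIT.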

View File

@@ -51,25 +51,21 @@ struct exec_op {
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
AEDIS_CHECK_OP1();
if (ec || n_cmds == 0) {
self.complete(ec, n);
return;
if (n_cmds == 0) {
return self.complete({}, n);
}
req = nullptr;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1();
size += n;
if (--n_cmds == 0) {
self.complete(ec, size);
return;
return self.complete(ec, size);
}
}
}
@@ -126,6 +122,11 @@ struct exec_with_timeout_op {
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, 0);
return;
}
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1:

View File

@@ -212,7 +212,7 @@ public:
// The bulk type expected in the next read. If none is expected returns
// type::invalid.
auto bulk() const noexcept { return bulk_; }
[[nodiscard]] auto bulk() const noexcept { return bulk_; }
// The length expected in the the next bulk.
[[nodiscard]] auto bulk_length() const noexcept { return bulk_length_; }

View File

@@ -17,6 +17,29 @@
#include <boost/asio/yield.hpp>
namespace aedis::detail
{
template <class T>
auto is_cancelled(T const& self)
{
return self.get_cancellation_state().cancelled() != boost::asio::cancellation_type_t::none;
}
}
#define AEDIS_CHECK_OP0(X)\
if (ec || aedis::detail::is_cancelled(self)) {\
X;\
self.complete(!!ec ? ec : boost::asio::error::operation_aborted);\
return;\
}
#define AEDIS_CHECK_OP1(X)\
if (ec || aedis::detail::is_cancelled(self)) {\
X;\
self.complete(!!ec ? ec : boost::asio::error::operation_aborted, {});\
return;\
}
namespace aedis::resp3::detail {
struct ignore_response {
@@ -59,12 +82,7 @@ public:
if (parser_.bulk() == type::invalid) {
yield
boost::asio::async_read_until(stream_, buf_, "\r\n", std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1();
} else {
// On a bulk read we can't read until delimiter since the
// payload may contain the delimiter itself so we have to
@@ -83,11 +101,7 @@ public:
buf_.data(buffer_size_, parser_.bulk_length() + 2 - buffer_size_),
boost::asio::transfer_all(),
std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
AEDIS_CHECK_OP1();
}
n = parser_.bulk_length() + 2;

View File

@@ -26,16 +26,16 @@ namespace aedis::resp3 {
template <class String>
struct node {
/// The RESP3 type of the data in this node.
resp3::type data_type;
type data_type = type::invalid;
/// The number of elements of an aggregate.
std::size_t aggregate_size;
std::size_t aggregate_size{};
/// The depth of this node in the response tree.
std::size_t depth;
std::size_t depth{};
/// The actual data. For aggregate types this is usually empty.
String value;
String value{};
};
/** @brief Converts the node to a string.

View File

@@ -9,6 +9,7 @@
#include <string>
#include <tuple>
#include <memory_resource>
#include <boost/hana.hpp>
#include <boost/utility/string_view.hpp>
@@ -148,8 +149,8 @@ void add_separator(Request& to)
}
} // detail
/** @brief Creates Redis requests.
* @ingroup high-level-api
/** \brief Creates Redis requests.
* \ingroup high-level-api
*
* A request is composed of one or more Redis commands and is
* referred to in the redis documentation as a pipeline, see
@@ -162,49 +163,62 @@ void add_separator(Request& to)
* r.push("PING");
* r.push("PING", "key");
* r.push("QUIT");
* co_await async_write(socket, buffer(r));
* @endcode
*
* @remarks
* \remarks
*
* @li Non-string types will be converted to string by using \c
* \li Non-string types will be converted to string by using \c
* to_bulk, which must be made available over ADL.
* @li Uses std::string as internal storage.
* \li Uses std::string as internal storage.
*/
class request {
public:
/// Request configuration options.
struct config {
/** @brief If set to true, requests started with
* `connection::async_exe` will fail either if the connection is
* lost while the request is pending or if `async_exec` is
* called while there is no connection with Redis. The default
/** \brief If set to true, requests started with
* `aedis::connection::async_exec` will fail if the connection is
* lost while the request is pending. The default
* behaviour is not to close requests.
*/
bool close_on_connection_lost = false;
bool cancel_on_connection_lost = false;
/** @brief Coalesce this with other requests.
*
* If true this request will be coalesced with other requests,
/** \brief If true this request will be coalesced with other requests,
* see https://redis.io/topics/pipelining. If false, this
* request will be sent individually.
*/
bool coalesce = true;
/** \brief If set to true, requests started with
* `aedis::connection::async_exec` will fail if the call happens
      * before the connection with Redis was established.
*/
bool cancel_if_not_connected = false;
/** \brief If true, the implementation will resend this
      * request if it was left unanswered when
* `aedis::connection::async_run` completed. Has effect only if
* cancel_on_connection_lost is true.
*/
bool retry = true;
};
/** @brief Constructor
/** \brief Constructor
*
* @param cfg Configuration options.
* \param cfg Configuration options.
* \param resource Memory resource.
*/
explicit request(config cfg = config{false, true})
: cfg_{cfg}
{}
explicit
request(config cfg = config{false, true, false, true},
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: payload_(resource), cfg_{cfg}
{}
//// Returns the number of commands contained in this request.
auto size() const noexcept -> std::size_t { return commands_;};
//// Returns the number of commands contained in this request.
[[nodiscard]] auto size() const noexcept -> std::size_t { return commands_;};
// Returns the request payload.
auto payload() const noexcept -> auto const& { return payload_;}
[[nodiscard]] auto payload() const noexcept -> auto const& { return payload_;}
/// Clears the request preserving allocated memory.
void clear()
@@ -257,7 +271,7 @@ public:
* };
*
* request req;
* req.push_range2("HSET", "key", std::cbegin(map), std::cend(map));
* req.push_range("HSET", "key", std::cbegin(map), std::cend(map));
* @endcode
*
* \param cmd The command e.g. Redis or Sentinel command.
@@ -266,7 +280,8 @@ public:
* \param end Iterator to the end of the range.
*/
template <class Key, class ForwardIterator>
void push_range2(boost::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end)
void push_range(boost::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
@@ -297,7 +312,7 @@ public:
* { "channel1" , "channel2" , "channel3" }
*
* request req;
* req.push("SUBSCRIBE", std::cbegin(channels), std::cedn(channels));
* req.push("SUBSCRIBE", std::cbegin(channels), std::cend(channels));
* \endcode
*
* \param cmd The Redis command
@@ -305,7 +320,8 @@ public:
* \param end Iterator to the end of the range.
*/
template <class ForwardIterator>
void push_range2(boost::string_view cmd, ForwardIterator begin, ForwardIterator end)
void push_range(boost::string_view cmd, ForwardIterator begin, ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
@@ -334,11 +350,12 @@ public:
   * \param range Range to send, e.g. a \c std::map.
*/
template <class Key, class Range>
void push_range(boost::string_view cmd, Key const& key, Range const& range)
void push_range(boost::string_view cmd, Key const& key, Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
using std::end;
push_range2(cmd, key, begin(range), end(range));
push_range(cmd, key, begin(range), end(range));
}
/** @brief Appends a new command to the end of the request.
@@ -349,21 +366,26 @@ public:
   * \param range Range to send, e.g. a \c std::map.
*/
template <class Range>
void push_range(boost::string_view cmd, Range const& range)
void push_range(boost::string_view cmd, Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
using std::end;
push_range2(cmd, begin(range), end(range));
push_range(cmd, begin(range), end(range));
}
/// Calls std::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
private:
std::string payload_;
std::pmr::string payload_;
std::size_t commands_ = 0;
config cfg_;
};
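
Putting the new configuration flags together, a request might be set up as below; a sketch only, with the flag semantics as documented above and the ranges resolved through the SFINAE overloads of `push_range` added in this changeset:

// Sketch: configuring a request with the flags introduced here.
aedis::resp3::request req;
req.get_config().cancel_on_connection_lost = true; // Fail pending commands on disconnect.
req.get_config().cancel_if_not_connected = false;  // Otherwise wait for the connection.
req.get_config().retry = true;                     // Resend if left unanswered when async_run completes.
req.get_config().coalesce = true;                  // Pipeline with other requests.

std::map<std::string, std::string> map{{"field1", "value1"}, {"field2", "value2"}};
req.push("HELLO", 3);
req.push_range("HSET", "hset-key", map);
req.push_range("HSET", "hset-key2", std::cbegin(map), std::cend(map));
req.push("QUIT");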

View File

@@ -13,6 +13,9 @@ namespace aedis::resp3 {
/** \brief Writes a request synchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
*/
template<
class SyncWriteStream,
@@ -37,6 +40,10 @@ auto write(
/** \brief Writes a request asynchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
* \param token Asio completion token.
*/
template<
class AsyncWriteStream,

View File

@@ -26,11 +26,8 @@ class connection;
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @remarks This class exposes only asynchronous member functions,
* synchronous communications with the Redis server is provided by
* the `aedis::sync` class.
*
* @tparam Derived class.
* @tparam AsyncReadWriteStream A stream that supports reading and
* writing.
*
*/
template <class AsyncReadWriteStream>
@@ -66,15 +63,23 @@ public:
};
/// Constructor
explicit connection(executor_type ex, boost::asio::ssl::context& ctx)
: base_type{ex}
explicit
connection(
executor_type ex,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex, ctx}
{
}
/// Constructor
explicit connection(boost::asio::io_context& ioc, boost::asio::ssl::context& ctx)
: connection(ioc.get_executor(), ctx)
explicit
connection(
boost::asio::io_context& ioc,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: connection(ioc.get_executor(), ctx, resource)
{ }
/// Returns the associated executor.
@@ -92,9 +97,9 @@ public:
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept { return stream_; }
/** @brief Connects and executes a request asynchronously.
/** @brief Establishes a connection with the Redis server asynchronously.
*
* See aedis::connection::async_run for detailed information.
* See aedis::connection::async_run for more information.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
@@ -106,26 +111,9 @@ public:
return base_type::async_run(ep, ts, std::move(token));
}
/** @brief Connects and executes a request asynchronously.
*
* See aedis::connection::async_run for detailed information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(
endpoint ep,
resp3::request const& req,
Adapter adapter,
timeouts ts,
CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, req, adapter, ts, std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
*
* See aedis::connection::async_exec for detailed information.
* See aedis::connection::async_exec for more information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
@@ -140,21 +128,21 @@ public:
/** @brief Receives server side pushes asynchronously.
*
* See aedis::connection::async_receive_push for detailed information.
* See aedis::connection::async_receive for detailed information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
auto async_receive(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_receive_push(adapter, std::move(token));
return base_type::async_receive(adapter, std::move(token));
}
/** @brief Cancel operations.
*
* See aedis::connection::cancel for detailed information.
* See aedis::connection::cancel for more information.
*/
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
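Tying the renamed members together, here is a small usage sketch modelled on the test programs later in this compare (plain aedis::connection<>, Redis at 127.0.0.1:6379). Note that, per the constructors above, the TLS connection now also accepts an optional std::pmr::memory_resource*, defaulting to std::pmr::get_default_resource(); error handling below is illustrative only.

#include <aedis.hpp>
#include <aedis/src.hpp>
#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <tuple>

namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::connection<>;

int main()
{
   net::io_context ioc;
   auto conn = std::make_shared<connection>(ioc);

   request req;
   req.push("PING");
   req.push("SUBSCRIBE", "channel");
   req.push("QUIT");

   // The SUBSCRIBE confirmation arrives as a server push, so only the PING
   // and QUIT replies land in this response tuple.
   std::tuple<std::string, std::string> resp;
   conn->async_exec(req, adapt(resp), [](auto ec, auto) {
      if (ec) std::cerr << "exec: " << ec.message() << '\n';
   });

   // Server pushes are consumed with async_receive (formerly async_receive_push).
   conn->async_receive(adapt(), [](auto ec, auto) {
      if (ec) std::cerr << "receive: " << ec.message() << '\n';
   });

   // async_run now takes the endpoint plus a timeouts object.
   conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec) {
      if (ec) std::cerr << "run: " << ec.message() << '\n';
   });

   ioc.run();
}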


@@ -42,6 +42,11 @@ struct handshake_op {
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec1); return;
case 1:
@@ -88,21 +93,16 @@ struct ssl_connect_with_timeout_op {
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield
aedis::detail::async_connect(
conn->lowest_layer(), *timer, *endpoints, std::move(self));
if (ec) {
self.complete(ec);
return;
}
AEDIS_CHECK_OP0();
timer->expires_after(ts.handshake_timeout);
yield
async_handshake(conn->next_layer(), *timer, std::move(self));
self.complete(ec);
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
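The op above implements the usual connect-or-time-out race (and then applies a separate handshake timeout). For readers unfamiliar with the pattern, here is a sketch of the same idea at the application level, using the Asio awaitable operators that the tests below also rely on; the function name is illustrative and this is not the library's internal implementation.

#include <boost/asio.hpp>
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <chrono>

namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using tcp = net::ip::tcp;

// Sketch only: connects or fails with timed_out, whichever happens first.
auto connect_with_timeout(
   tcp::socket& socket,
   tcp::resolver::results_type const& endpoints,
   std::chrono::steady_clock::duration timeout) -> net::awaitable<boost::system::error_code>
{
   net::steady_timer timer{co_await net::this_coro::executor};
   timer.expires_after(timeout);

   boost::system::error_code ec;
   auto which = co_await (
      net::async_connect(socket, endpoints, net::redirect_error(net::use_awaitable, ec)) ||
      timer.async_wait(net::use_awaitable)
   );

   // Index 1 means the timer won the race: the connect attempt timed out and
   // was cancelled by the awaitable operator.
   if (which.index() == 1)
      co_return net::error::timed_out;

   co_return ec;
}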


@@ -37,8 +37,7 @@ error_code test_async_run(endpoint ep, connection::timeouts cfg = {})
return ret;
}
// Tests whether resolve fails with the correct error.
BOOST_AUTO_TEST_CASE(test_resolve)
BOOST_AUTO_TEST_CASE(resolve_bad_host)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -53,7 +52,7 @@ BOOST_AUTO_TEST_CASE(test_resolve)
BOOST_TEST(is_host_not_found(ec));
}
BOOST_AUTO_TEST_CASE(test_resolve_with_timeout)
BOOST_AUTO_TEST_CASE(resolve_with_timeout)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -68,7 +67,7 @@ BOOST_AUTO_TEST_CASE(test_resolve_with_timeout)
BOOST_CHECK_EQUAL(ec, aedis::error::resolve_timeout);
}
BOOST_AUTO_TEST_CASE(test_connect)
BOOST_AUTO_TEST_CASE(connect_bad_port)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -82,7 +81,7 @@ BOOST_AUTO_TEST_CASE(test_connect)
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::connection_refused);
}
BOOST_AUTO_TEST_CASE(test_connect_timeout)
BOOST_AUTO_TEST_CASE(connect_with_timeout)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -96,7 +95,7 @@ BOOST_AUTO_TEST_CASE(test_connect_timeout)
BOOST_CHECK_EQUAL(ec, aedis::error::connect_timeout);
}
BOOST_AUTO_TEST_CASE(test_hello_fail)
BOOST_AUTO_TEST_CASE(bad_hello_response)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -109,7 +108,7 @@ BOOST_AUTO_TEST_CASE(test_hello_fail)
BOOST_CHECK_EQUAL(ec, aedis::error::invalid_data_type);
}
BOOST_AUTO_TEST_CASE(test_hello_tls_over_plain_fail)
BOOST_AUTO_TEST_CASE(plain_conn_on_tls_endpoint)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -118,10 +117,16 @@ BOOST_AUTO_TEST_CASE(test_hello_tls_over_plain_fail)
ep.port = "443";
auto const ec = test_async_run(ep);
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
BOOST_TEST(!!ec);
}
BOOST_AUTO_TEST_CASE(test_auth_fail)
auto auth_fail_error(boost::system::error_code ec)
{
return ec == aedis::error::resp3_handshake_error ||
ec == aedis::error::exec_timeout;
}
BOOST_AUTO_TEST_CASE(auth_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -134,10 +139,16 @@ BOOST_AUTO_TEST_CASE(test_auth_fail)
ep.password = "jabuticaba";
auto const ec = test_async_run(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::resp3_simple_error);
BOOST_TEST(auth_fail_error(ec));
}
BOOST_AUTO_TEST_CASE(test_wrong_role)
auto wrong_role_error(boost::system::error_code ec)
{
return ec == aedis::error::unexpected_server_role ||
ec == aedis::error::exec_timeout;
}
BOOST_AUTO_TEST_CASE(wrong_role)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
@@ -149,5 +160,5 @@ BOOST_AUTO_TEST_CASE(test_wrong_role)
ep.role = "errado";
auto const ec = test_async_run(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::unexpected_server_role);
BOOST_TEST(wrong_role_error(ec));
}


@@ -0,0 +1,92 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
net::awaitable<void> push_consumer(std::shared_ptr<connection> conn, int expected)
{
int c = 0;
for (;;) {
co_await conn->async_receive(adapt(), net::use_awaitable);
if (++c == expected)
break;
}
request req;
req.push("QUIT");
co_await conn->async_exec(req, adapt(), net::use_awaitable);
}
auto echo_session(std::shared_ptr<connection> conn, std::string id, int n) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
request req;
std::tuple<std::string> resp;
for (auto i = 0; i < n; ++i) {
auto const msg = id + "/" + std::to_string(i);
//std::cout << msg << std::endl;
req.push("PING", msg);
req.push("SUBSCRIBE", "channel");
boost::system::error_code ec;
co_await conn->async_exec(req, adapt(resp), net::redirect_error(net::use_awaitable, ec));
BOOST_TEST(!ec);
BOOST_CHECK_EQUAL(msg, std::get<0>(resp));
req.clear();
std::get<0>(resp).clear();
}
}
auto async_echo_stress() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
int const sessions = 1000;
int const msgs = 100;
int total = sessions * msgs;
net::co_spawn(ex, push_consumer(conn, total), net::detached);
for (int i = 0; i < sessions; ++i)
net::co_spawn(ex, echo_session(conn, std::to_string(i), msgs), net::detached);
endpoint ep{"127.0.0.1", "6379"};
co_await conn->async_run(ep, {}, net::use_awaitable);
}
BOOST_AUTO_TEST_CASE(echo_stress)
{
net::io_context ioc;
net::co_spawn(ioc.get_executor(), async_echo_stress(), net::detached);
ioc.run();
}
#else
int main(){}
#endif

tests/conn_exec.cpp Normal file

@@ -0,0 +1,91 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
#endif
BOOST_AUTO_TEST_CASE(wrong_response_data_type)
{
request req;
req.push("QUIT");
// Wrong data type.
std::tuple<int> resp;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(resp), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_a_number);
});
db->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(cancel_request_if_not_connected)
{
request req;
req.get_config().cancel_if_not_connected = true;
req.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_connected);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(request_retry)
{
request req1;
req1.get_config().cancel_on_connection_lost = true;
req1.push("CLIENT", "PAUSE", 7000);
request req2;
req2.get_config().cancel_on_connection_lost = false;
req2.get_config().retry = false;
req2.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req2, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
db->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}

tests/conn_exec_cancel.cpp Normal file

@@ -0,0 +1,142 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
auto async_run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
connection::timeouts tms;
tms.ping_interval = std::chrono::seconds{10};
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec;
co_await conn->async_run(ep, tms, net::redirect_error(net::use_awaitable, ec));
BOOST_TEST(!ec);
}
auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
boost::system::error_code ec1;
request req1;
req1.get_config().coalesce = false;
req1.push("BLPOP", "any", 3);
// Should not be canceled.
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
request req2;
req2.get_config().coalesce = false;
req2.push("PING", "second");
// Should be canceled.
conn->async_exec(req2, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
});
// Will complete while BLPOP is pending.
co_await st.async_wait(net::redirect_error(net::use_awaitable, ec1));
conn->cancel(operation::exec);
BOOST_TEST(!ec1);
request req3;
req3.push("QUIT");
// Test whether the connection remains usable after a call to
// cancel(exec).
co_await conn->async_exec(req3, adapt(), net::redirect_error(net::use_awaitable, ec1));
BOOST_TEST(!ec1);
}
BOOST_AUTO_TEST_CASE(cancel_exec_with_timer)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), async_run(conn), net::detached);
net::co_spawn(ioc.get_executor(), async_cancel_exec(conn), net::detached);
ioc.run();
}
auto async_ignore_cancel_of_written_req(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
net::steady_timer st2{ex};
st2.expires_after(std::chrono::seconds{3});
boost::system::error_code ec1, ec2, ec3;
request req1; // Will be cancelled after it has been written.
req1.get_config().coalesce = false;
req1.push("BLPOP", "any", 3);
request req2; // Will be cancelled.
req2.push("PING");
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) ||
conn->async_exec(req2, adapt(), net::redirect_error(net::use_awaitable, ec2)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec3))
);
BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, boost::asio::error::basic_errors::operation_aborted);
BOOST_TEST(!ec3);
request req3;
req3.push("PING");
req3.push("QUIT");
co_await conn->async_exec(req3, adapt(), net::redirect_error(net::use_awaitable, ec1));
BOOST_TEST(!ec1);
}
BOOST_AUTO_TEST_CASE(ignore_cancel_of_written_req)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), async_run(conn), net::detached);
net::co_spawn(ioc.get_executor(), async_ignore_cancel_of_written_req(conn), net::detached);
ioc.run();
}
#else
int main(){}
#endif

tests/conn_push.cpp Normal file

@@ -0,0 +1,330 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using aedis::operation;
using connection = aedis::connection<>;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
BOOST_AUTO_TEST_CASE(push_filtered_out)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
request req;
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
std::tuple<std::string, std::string> resp;
conn->async_exec(req, adapt(resp), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_receive(adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
BOOST_TEST(!ec);
});
ioc.run();
BOOST_CHECK_EQUAL(std::get<0>(resp), "PONG");
BOOST_CHECK_EQUAL(std::get<1>(resp), "OK");
}
// Checks whether we get idle timeout when no push reader is set.
void test_missing_push_reader1(bool coalesce)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
request req{{false, coalesce}};
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
void test_missing_push_reader2(request const& req)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> push_consumer1(std::shared_ptr<connection> conn, bool& push_received)
{
{
auto [ec, ev] = co_await conn->async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await conn->async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
}
push_received = true;
}
struct adapter_error {
void
operator()(
std::size_t, aedis::resp3::node<boost::string_view> const&, boost::system::error_code& ec)
{
ec = aedis::error::incompatible_size;
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return static_cast<std::size_t>(-1);}
};
BOOST_AUTO_TEST_CASE(test_push_adapter)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
request req;
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("PING");
conn->async_receive(adapter_error{}, [](auto ec, auto) {
BOOST_CHECK_EQUAL(ec, aedis::error::incompatible_size);
});
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::error::channel_errors::channel_cancelled);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
ioc.run();
// TODO: Reset the ioc, reconnect and send a QUIT to ensure
// reconnection is possible after an error.
}
void test_push_is_received1(bool coalesce)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
request req{{false, coalesce}};
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
BOOST_TEST(!ec);
conn->cancel(operation::receive);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(conn, push_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
}
void test_push_is_received2(bool coalesce)
{
request req1{{false, coalesce}};
req1.push("PING", "Message1");
request req2{{false, coalesce}};
req2.push("SUBSCRIBE", "channel");
request req3{{false, coalesce}};
req3.push("PING", "Message2");
req3.push("QUIT");
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req3, adapt(), handler);
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec) {
BOOST_TEST(!ec);
conn->cancel(operation::receive);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(conn, push_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
}
net::awaitable<void> push_consumer3(std::shared_ptr<connection> conn)
{
for (;;)
co_await conn->async_receive(adapt(), net::use_awaitable);
}
// Test many subscribe requests.
void test_push_many_subscribes(bool coalesce)
{
request req0{{false, coalesce}};
req0.push("HELLO", 3);
request req1{{false, coalesce}};
req1.push("PING", "Message1");
request req2{{false, coalesce}};
req2.push("SUBSCRIBE", "channel");
request req3{{false, coalesce}};
req3.push("QUIT");
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req0, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req3, adapt(), handler);
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec) {
BOOST_TEST(!ec);
conn->cancel(operation::receive);
});
net::co_spawn(ioc.get_executor(), push_consumer3(conn), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(push_received1)
{
test_push_is_received1(true);
test_push_is_received1(false);
}
BOOST_AUTO_TEST_CASE(push_received2)
{
test_push_is_received2(true);
test_push_is_received2(false);
}
BOOST_AUTO_TEST_CASE(many_subscribers)
{
test_push_many_subscribes(true);
test_push_many_subscribes(false);
}
#endif
BOOST_AUTO_TEST_CASE(missing_reader1_coalesce)
{
test_missing_push_reader1(true);
}
BOOST_AUTO_TEST_CASE(missing_reader1_no_coalesce)
{
test_missing_push_reader1(false);
}
BOOST_AUTO_TEST_CASE(missing_reader2a)
{
request req1{{false}};
req1.push("PING", "Message");
req1.push("SUBSCRIBE"); // Wrong command synthax.
req1.get_config().coalesce = true;
test_missing_push_reader2(req1);
req1.get_config().coalesce = false;
test_missing_push_reader2(req1);
}
BOOST_AUTO_TEST_CASE(missing_reader2b)
{
request req2{{false}};
req2.push("SUBSCRIBE"); // Wrong command syntax.
req2.get_config().coalesce = true;
test_missing_push_reader2(req2);
req2.get_config().coalesce = false;
test_missing_push_reader2(req2);
}


@@ -27,7 +27,7 @@ using operation = aedis::operation;
BOOST_AUTO_TEST_CASE(test_quit_no_coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
auto conn = std::make_shared<connection>(ioc);
request req1{{false, false}};
req1.push("PING");
@@ -35,26 +35,26 @@ BOOST_AUTO_TEST_CASE(test_quit_no_coalesce)
request req2{{false, false}};
req2.push("QUIT");
db->async_exec(req1, adapt(), [](auto ec, auto){
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req2, adapt(), [](auto ec, auto) {
conn->async_exec(req2, adapt(), [](auto ec, auto) {
BOOST_TEST(!ec);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [db](auto ec){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(operation::exec);
conn->async_run(ep, {}, [conn](auto ec){
BOOST_TEST(!ec);
conn->cancel(operation::exec);
});
ioc.run();
@@ -66,10 +66,13 @@ void test_quit2(bool coalesce)
req.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), {}, [](auto ec, auto) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec) {
BOOST_TEST(!ec);
});
ioc.run();
@@ -77,7 +80,6 @@ void test_quit2(bool coalesce)
BOOST_AUTO_TEST_CASE(test_quit)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
test_quit2(true);
test_quit2(false);
}

tests/conn_reconnect.cpp Normal file

@@ -0,0 +1,102 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace boost::asio::experimental::awaitable_operators;
net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
{
request req;
req.push("QUIT");
int i = 0;
endpoint ep{"127.0.0.1", "6379"};
for (; i < 5; ++i) {
boost::system::error_code ec1, ec2;
co_await (
db->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
db->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
);
BOOST_TEST(!ec1);
BOOST_TEST(!ec2);
db->reset_stream();
}
BOOST_CHECK_EQUAL(i, 5);
co_return;
}
// Test whether the client works after a reconnect.
BOOST_AUTO_TEST_CASE(test_reconnect)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, test_reconnect_impl(db), net::detached);
ioc.run();
}
auto async_test_reconnect_timeout() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec1, ec2;
request req1;
req1.get_config().cancel_if_not_connected = false;
req1.get_config().cancel_on_connection_lost = true;
req1.push("CLIENT", "PAUSE", 7000);
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
);
BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, aedis::error::idle_timeout);
request req2;
req2.get_config().cancel_if_not_connected = false;
req2.get_config().cancel_on_connection_lost = true;
req2.push("QUIT");
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
);
BOOST_CHECK_EQUAL(ec1, boost::system::errc::errc_t::operation_canceled);
BOOST_CHECK_EQUAL(ec2, aedis::error::exec_timeout);
}
BOOST_AUTO_TEST_CASE(test_reconnect_and_idle)
{
net::io_context ioc;
net::co_spawn(ioc, async_test_reconnect_timeout(), net::detached);
ioc.run();
}
#else
int main(){}
#endif

tests/conn_run_cancel.cpp Normal file

@@ -0,0 +1,189 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
auto async_cancel_run_with_timer() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec2))
);
BOOST_CHECK_EQUAL(ec1, boost::asio::error::basic_errors::operation_aborted);
BOOST_TEST(!ec2);
}
BOOST_AUTO_TEST_CASE(cancel_run_with_timer)
{
net::io_context ioc;
net::co_spawn(ioc.get_executor(), async_cancel_run_with_timer(), net::detached);
ioc.run();
}
net::awaitable<void>
async_check_cancellation_not_missed(
std::shared_ptr<connection> conn,
int n,
std::chrono::milliseconds ms)
{
net::steady_timer timer{co_await net::this_coro::executor};
connection::timeouts tms;
tms.resolve_timeout = std::chrono::seconds{10};
tms.connect_timeout = std::chrono::seconds{10};
tms.resp3_handshake_timeout = std::chrono::seconds{2};
tms.ping_interval = std::chrono::seconds{1};
endpoint ep{"127.0.0.1", "6379"};
for (auto i = 0; i < n; ++i) {
timer.expires_after(ms);
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) ||
timer.async_wait(net::redirect_error(net::use_awaitable, ec2))
);
BOOST_CHECK_EQUAL(ec1, boost::asio::error::basic_errors::operation_aborted);
std::cout << "Counter: " << i << std::endl;
}
}
// See PR #29
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_0)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 10, std::chrono::milliseconds{0}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_2)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{2}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_8)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{8}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_16)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{16}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_32)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{32}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_64)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{64}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_128)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{128}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_256)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{256}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_512)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{512}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_1024)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{1024}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(reset_before_run_completes)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
// Sends a ping just as a means of waiting until we are connected.
request req;
req.push("PING");
conn->async_exec(req, adapt(), [conn](auto ec, auto){
BOOST_TEST(!ec);
conn->reset_stream();
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
BOOST_CHECK_EQUAL(ec, net::error::operation_aborted);
});
ioc.run();
}
#else
int main(){}
#endif


@@ -1,152 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
net::awaitable<void> send_after(std::shared_ptr<connection> db, std::chrono::milliseconds ms)
{
net::steady_timer st{co_await net::this_coro::executor};
st.expires_after(ms);
co_await st.async_wait(net::use_awaitable);
request req;
req.push("CLIENT", "PAUSE", ms.count());
auto [ec, n] = co_await db->async_exec(req, adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
BOOST_AUTO_TEST_CASE(test_idle)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
std::chrono::milliseconds ms{5000};
{
std::cout << "test_idle" << std::endl;
connection::timeouts cfg;
cfg.resolve_timeout = std::chrono::seconds{1};
cfg.connect_timeout = std::chrono::seconds{1};
cfg.ping_interval = std::chrono::seconds{1};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), send_after(db, ms), net::detached);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, cfg, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
//----------------------------------------------------------------
// Since we have paused the server above, we have to wait until the
// server is responsive again, so as not to cause other tests to
// fail.
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
connection::timeouts cfg;
cfg.ping_interval = 2 * ms;
cfg.resolve_timeout = 2 * ms;
cfg.connect_timeout = 2 * ms;
cfg.ping_interval = 2 * ms;
cfg.resp3_handshake_timeout = 2 * ms;
request req;
req.push("QUIT");
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), cfg, [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
ioc.run();
}
}
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
{
net::steady_timer timer{co_await net::this_coro::executor};
for (auto i = 0; i < 1000; ++i) {
timer.expires_after(std::chrono::milliseconds{10});
endpoint ep{"127.0.0.1", "6379"};
co_await (
db->async_run(ep, {}, net::use_awaitable) ||
timer.async_wait(net::use_awaitable)
);
std::cout << i << ": Retrying" << std::endl;
}
std::cout << "Finished" << std::endl;
}
BOOST_AUTO_TEST_CASE(test_cancelation)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, reconnect(db), net::detached);
ioc.run();
}
#endif
BOOST_AUTO_TEST_CASE(test_wrong_data_type)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
request req;
req.push("QUIT");
// Wrong data type.
std::tuple<int> resp;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(resp), {}, [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_a_number);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(test_not_connected)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
request req{{true}};
req.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_connected);
});
ioc.run();
}


@@ -1,277 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using aedis::operation;
using connection = aedis::connection<>;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
// Checks whether we get idle timeout when no push reader is set.
void test_missing_push_reader1(bool coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req{{false, coalesce}};
req.push("SUBSCRIBE", "channel");
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), {}, [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
void test_missing_push_reader2(bool coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req{{false, coalesce}}; // Wrong command syntax.
req.push("SUBSCRIBE");
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), {}, [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
void test_missing_push_reader3(bool coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req{{false, coalesce}}; // Wrong command syntax.
req.push("PING", "Message");
req.push("SUBSCRIBE");
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), {}, [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> push_consumer1(std::shared_ptr<connection> db, bool& push_received)
{
{
auto [ec, ev] = co_await db->async_receive_push(adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_push(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
}
push_received = true;
}
struct adapter_error {
void
operator()(
std::size_t, aedis::resp3::node<boost::string_view> const&, boost::system::error_code& ec)
{
ec = aedis::error::incompatible_size;
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return static_cast<std::size_t>(-1);}
};
BOOST_AUTO_TEST_CASE(test_push_adapter)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req;
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("PING");
db->async_receive_push(adapter_error{}, [](auto ec, auto) {
BOOST_CHECK_EQUAL(ec, aedis::error::incompatible_size);
});
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), {}, [db](auto, auto){
//BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
ioc.run();
// TODO: Reset the ioc, reconnect and send a QUIT to ensure
// reconnection is possible after an error.
}
void test_push_is_received1(bool coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req{{false, coalesce}};
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req, adapt(), {}, [db](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(operation::receive_push);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(db, push_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
}
void test_push_is_received2(bool coalesce)
{
request req1{{false, coalesce}};
req1.push("PING", "Message1");
request req2{{false, coalesce}};
req2.push("SUBSCRIBE", "channel");
request req3{{false, coalesce}};
req3.push("PING", "Message2");
req3.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req3, adapt(), handler);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [db](auto ec, auto...) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(operation::receive_push);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(db, push_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
}
net::awaitable<void> push_consumer3(std::shared_ptr<connection> db)
{
for (;;)
co_await db->async_receive_push(adapt(), net::use_awaitable);
}
// Test many subscribe requests.
void test_push_many_subscribes(bool coalesce)
{
request req0{{false, coalesce}};
req0.push("HELLO", 3);
request req1{{false, coalesce}};
req1.push("PING", "Message1");
request req2{{false, coalesce}};
req2.push("SUBSCRIBE", "channel");
request req3{{false, coalesce}};
req3.push("QUIT");
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req0, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req3, adapt(), handler);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [db](auto ec, auto...) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(operation::receive_push);
});
net::co_spawn(ioc.get_executor(), push_consumer3(db), net::detached);
ioc.run();
}
#endif
BOOST_AUTO_TEST_CASE(test_push)
{
#ifdef BOOST_ASIO_HAS_CO_AWAIT
test_push_is_received1(true);
test_push_is_received2(true);
test_push_many_subscribes(true);
#endif
test_missing_push_reader1(true);
test_missing_push_reader2(false);
test_missing_push_reader3(true);
#ifdef BOOST_ASIO_HAS_CO_AWAIT
test_push_is_received1(true);
test_push_is_received2(false);
test_push_many_subscribes(false);
#endif
test_missing_push_reader1(true);
test_missing_push_reader2(false);
test_missing_push_reader3(false);
}


@@ -1,77 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
{
request req;
req.push("QUIT");
int i = 0;
endpoint ep{"127.0.0.1", "6379"};
for (; i < 5; ++i) {
boost::system::error_code ec;
co_await db->async_run(ep, req, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
db->reset_stream();
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
}
BOOST_CHECK_EQUAL(i, 5);
co_return;
}
// Test whether the client works after a reconnect.
BOOST_AUTO_TEST_CASE(test_reconnect)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, test_reconnect_impl(db), net::detached);
ioc.run();
}
#endif
BOOST_AUTO_TEST_CASE(test_reconnect_timeout)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req1;
req1.push("CLIENT", "PAUSE", 7000);
request req2;
req2.push("QUIT");
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, req1, adapt(), {}, [db, &req2, &ep](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
db->reset_stream();
db->async_run(ep, req2, adapt(), {}, [db](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::exec_timeout);
});
});
ioc.run();
}


@@ -24,6 +24,13 @@
// TODO: Test with empty strings.
namespace std
{
auto operator==(aedis::ignore, aedis::ignore) noexcept {return true;}
auto operator!=(aedis::ignore, aedis::ignore) noexcept {return false;}
}
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
@@ -113,6 +120,9 @@ std::optional<int> op_int_ok = 11;
std::optional<bool> op_bool_ok = true;
std::string const streamed_string_wire = "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
std::string const streamed_string_wire_error = "$?\r\n;b\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
// TODO: Test a streamed string that is not finished with a string of
// size 0 but where another command comes in.
std::vector<node_type> streamed_string_e1
{ {aedis::resp3::type::streamed_string_part, 1, 1, "Hell"}
, {aedis::resp3::type::streamed_string_part, 1, 1, "o wor"}
@@ -150,6 +160,7 @@ std::vector<node_type> streamed_string_e2 { {resp3::type::streamed_string_part,
test(ex, make_expected(streamed_string_wire, std::string{"Hello word"}, "streamed_string.string")); \
test(ex, make_expected(streamed_string_wire, int{}, "streamed_string.string", aedis::error::not_a_number)); \
test(ex, make_expected(streamed_string_wire, streamed_string_e1, "streamed_string.node")); \
test(ex, make_expected(streamed_string_wire_error, std::string{}, "streamed_string.error", aedis::error::not_a_number)); \
BOOST_AUTO_TEST_CASE(test_push)
{
@@ -483,14 +494,17 @@ BOOST_AUTO_TEST_CASE(test_simple_error)
net::io_context ioc;
auto const in01 = expect<node_type>{"-Error\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::error::resp3_simple_error};
auto const in02 = expect<node_type>{"-\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::error::resp3_simple_error};
auto const in03 = expect<aedis::ignore>{"-Error\r\n", aedis::ignore{}, "simple_error.not.ignore.error", aedis::error::resp3_simple_error};
auto ex = ioc.get_executor();
test_sync(ex, in01);
test_sync(ex, in02);
test_sync(ex, in03);
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
ioc.run();
}
@@ -557,14 +571,17 @@ BOOST_AUTO_TEST_CASE(test_blob_error)
net::io_context ioc;
auto const in01 = expect<node_type>{"!21\r\nSYNTAX invalid syntax\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {"SYNTAX invalid syntax"}}, "blob_error", aedis::error::resp3_blob_error};
auto const in02 = expect<node_type>{"!0\r\n\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {}}, "blob_error.empty", aedis::error::resp3_blob_error};
auto const in03 = expect<aedis::ignore>{"!3\r\nfoo\r\n", aedis::ignore{}, "blob_error.ignore.adapter.error", aedis::error::resp3_blob_error};
auto ex = ioc.get_executor();
test_sync(ex, in01);
test_sync(ex, in02);
test_sync(ex, in03);
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
ioc.run();
}
@@ -713,6 +730,45 @@ BOOST_AUTO_TEST_CASE(test_null)
ioc.run();
}
BOOST_AUTO_TEST_CASE(ignore_adapter_simple_error)
{
net::io_context ioc;
std::string rbuffer;
boost::system::error_code ec;
test_stream ts {ioc};
ts.append("-Error\r\n");
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt2(), ec);
BOOST_CHECK_EQUAL(ec, aedis::error::resp3_simple_error);
BOOST_TEST(!rbuffer.empty());
}
BOOST_AUTO_TEST_CASE(ignore_adapter_blob_error)
{
net::io_context ioc;
std::string rbuffer;
boost::system::error_code ec;
test_stream ts {ioc};
ts.append("!21\r\nSYNTAX invalid syntax\r\n");
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt2(), ec);
BOOST_CHECK_EQUAL(ec, aedis::error::resp3_blob_error);
BOOST_TEST(!rbuffer.empty());
}
BOOST_AUTO_TEST_CASE(ignore_adapter_no_error)
{
net::io_context ioc;
std::string rbuffer;
boost::system::error_code ec;
test_stream ts {ioc};
ts.append(":10\r\n");
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt2(), ec);
BOOST_TEST(!ec);
BOOST_TEST(rbuffer.empty());
}
BOOST_AUTO_TEST_CASE(all_tests)
{
net::io_context ioc;
@@ -768,6 +824,8 @@ BOOST_AUTO_TEST_CASE(error)
check_error("aedis", aedis::error::not_a_double);
check_error("aedis", aedis::error::resp3_null);
check_error("aedis", aedis::error::unexpected_server_role);
check_error("aedis", aedis::error::not_connected);
check_error("aedis", aedis::error::resp3_handshake_error);
}
std::string get_type_as_str(aedis::resp3::type t)
@@ -824,3 +882,13 @@ BOOST_AUTO_TEST_CASE(type_convert)
#undef CHECK_CASE
}
BOOST_AUTO_TEST_CASE(adapter)
{
using aedis::adapt;
std::string s;
auto resp = std::tie(s, std::ignore);
auto f = adapt(resp);
(void)f;
}
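For context, the std::tie support exercised in the test above lets a response be adapted into pre-existing variables rather than a freshly declared tuple. A hedged usage sketch with async_exec (the host, port and overall structure mirror the other tests in this compare):

#include <aedis.hpp>
#include <aedis/src.hpp>
#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <tuple>

namespace net = boost::asio;

int main()
{
   net::io_context ioc;
   auto conn = std::make_shared<aedis::connection<>>(ioc);

   aedis::resp3::request req;
   req.push("PING");
   req.push("QUIT");

   std::string pong;
   auto resp = std::tie(pong, std::ignore); // the QUIT reply is discarded

   conn->async_exec(req, aedis::adapt(resp), [](auto ec, auto) {
      if (ec) std::cerr << "exec: " << ec.message() << '\n';
   });

   conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec) {
      if (ec) std::cerr << "run: " << ec.message() << '\n';
   });

   ioc.run();
   std::cout << "PING reply: " << pong << '\n';
}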

tests/request.cpp Normal file

@@ -0,0 +1,57 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <memory_resource>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
using aedis::resp3::request;
// TODO: Serialization.
BOOST_AUTO_TEST_CASE(single_arg_allocator)
{
char buf[4096];
std::pmr::monotonic_buffer_resource resource{buf, 4096};
request req1{{}, &resource};
req1.push("PING");
BOOST_CHECK_EQUAL(req1.payload(), std::pmr::string{"*1\r\n$4\r\nPING\r\n"});
}
BOOST_AUTO_TEST_CASE(arg_int)
{
request req;
req.push("PING", 42);
BOOST_CHECK_EQUAL(req.payload(), std::pmr::string{"*2\r\n$4\r\nPING\r\n$2\r\n42\r\n"});
}
BOOST_AUTO_TEST_CASE(multiple_args)
{
char const* res = "*5\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n$2\r\nEX\r\n$1\r\n2\r\n";
request req;
req.push("SET", "key", "value", "EX", "2");
BOOST_CHECK_EQUAL(req.payload(), std::pmr::string{res});
}
BOOST_AUTO_TEST_CASE(container_and_range)
{
std::map<std::string, std::string> in{{"key1", "value1"}, {"key2", "value2"}};
char const* res = "*6\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n";
request req1;
req1.push_range("HSET", "key", in);
BOOST_CHECK_EQUAL(req1.payload(), std::pmr::string{res});
request req2;
req2.push_range("HSET", "key", std::cbegin(in), std::cend(in));
BOOST_CHECK_EQUAL(req2.payload(), std::pmr::string{res});
}