mirror of https://github.com/boostorg/redis.git synced 2026-01-24 06:22:07 +00:00

Compare commits


30 Commits

Author SHA1 Message Date
Marcelo Zimbres
63f9b74502 Improves the stress test. 2022-12-18 09:20:29 +01:00
Marcelo Zimbres
801f60a026 Readme improvements. 2022-12-17 22:59:44 +01:00
Marcelo Zimbres
c37fcb641c Documentation improvements. 2022-12-17 16:59:06 +01:00
Marcelo Zimbres
48c3f37168 Test improvements and bugfix in send-retry. 2022-12-11 22:19:37 +01:00
Marcelo Zimbres
3c63911802 Removes some boost dependencies. 2022-12-10 19:42:51 +01:00
Marcelo Zimbres
1645881a44 Doc improvements and add guarded_op class. 2022-12-07 22:32:49 +01:00
Marcelo Zimbres
730e06c38d Adds bigobj and other vs flags. 2022-12-04 20:36:17 +01:00
Marcelo Zimbres
cf3a79737d Removes mingw from windows builds. 2022-12-04 16:15:12 +01:00
Marcelo Zimbres
edb384c843 Build fix on windows. 2022-12-04 15:13:14 +01:00
Marcelo Zimbres
f745faddf8 Replaces boost::string_view with std::string_view. 2022-12-04 13:53:44 +01:00
Marcelo Zimbres
927117568e Build fix. 2022-12-04 13:53:44 +01:00
Marcelo Zimbres
1e7c176f92 Removes dependency on Boost.Hana. 2022-12-03 22:29:04 +01:00
Marcelo Zimbres
449b5f7e7c First steps with windows CI. 2022-12-03 18:14:10 +01:00
Marcelo Zimbres
75f91f3b11 v1.3.1 and build fixes. 2022-12-03 14:34:15 +01:00
Marcelo Zimbres
b9a23568e3 Many improvements in the examples. 2022-12-02 22:58:39 +01:00
Marcelo Zimbres
4ac2509afa Improvements in the docs and examples. 2022-11-27 21:59:02 +01:00
Marcelo Zimbres
e9dab97992 v1.3.0 2022-11-26 22:22:56 +01:00
Marcelo Zimbres
2e8cad858d Improvements in the examples. 2022-11-26 19:42:39 +01:00
Marcelo Zimbres
5a6e426028 Build fix and improvements in the examples. 2022-11-22 22:57:33 +01:00
Marcelo Zimbres
c55978a379 CI fix and improvements in the examples. 2022-11-21 23:41:41 +01:00
Marcelo Zimbres
6f51397e49 Build fix. 2022-11-20 14:06:07 +01:00
Marcelo Zimbres
6b9ba6b2d9 Adds connection typedef and improves docs. 2022-11-19 23:53:26 +01:00
Marcelo Zimbres
d29c03cb38 Changes:
* Uses pmr::string for the connection read and write buffer.
* Improvements in the examples.
2022-11-18 23:15:47 +01:00
Marcelo Zimbres
34cfbaa22f Removes healthy checks from the connection class. 2022-11-13 21:22:50 +01:00
Marcelo Zimbres
c9354fe320 Test improvements. 2022-11-13 18:39:28 +01:00
Marcelo Zimbres
bb555cb509 Remove built-in resolve and connect operation in async_run. 2022-11-13 00:10:26 +01:00
Marcelo Zimbres
5b209afa1d Removes endpoint class. 2022-11-09 23:05:52 +01:00
Marcelo Zimbres
3f5491654d Removes built-in HELLO from the connection. 2022-11-08 00:04:52 +01:00
Marcelo Zimbres
2bdc25752f Simplifications in the low-level tests. 2022-11-06 22:40:00 +01:00
Marcelo Zimbres
faafce1c64 Adds tls test. 2022-11-06 19:12:36 +01:00
64 changed files with 3908 additions and 3034 deletions

View File

@@ -3,6 +3,92 @@ name: CI
on: [push, pull_request]
jobs:
windows:
name: "${{matrix.generator}} ${{matrix.toolset}} Boost ${{matrix.boost_version}} ${{matrix.build_type}} ${{matrix.name_args}}"
runs-on: ${{matrix.os}}
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
boost_version: ["1.80.0"]
os: [windows-2019, windows-2022]
toolset: [v142, v143]
build_type: [Release]
generator: ["Visual Studio 16 2019", "Visual Studio 17 2022"]
config_args: [""]
build_args: [""]
name_args: [""]
exclude:
- { os: windows-2019, toolset: v143 }
- { os: windows-2019, generator: "Visual Studio 17 2022" }
- { os: windows-2022, generator: "Visual Studio 16 2019" }
# The following combinations are not available through install-boost
- { boost_version: "1.80.0", toolset: v143 }
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Add boost toolset to environment
if: contains(fromJson('["1.80.0"]'), matrix.boost_version)
run: echo BOOST_TOOLSET=$(echo "msvc") >> $GITHUB_ENV
# The platform_version passed to boost-install determines the msvc toolset version for which static libs are installed.
- name: Add boost platform version to environment
run: |
declare -A toolset_to_platform_version=( [v142]="2019" [v143]="2022" )
key=$(echo "${{matrix.toolset}}")
echo BOOST_PLATFORM_VERSION="${toolset_to_platform_version[$key]}" >> $GITHUB_ENV
- name: Add boost install path to environment
run: echo BOOST_INSTALL_PATH="${GITHUB_WORKSPACE}/boost-${{matrix.boost_version}}${BOOST_TOOLSET}${BOOST_PLATFORM_VERSION}" >> $GITHUB_ENV
- name: Add build type configuration to environment
run: echo BUILD_CONFIG_ARG="--config ${{matrix.build_type}}" >> $GITHUB_ENV
- name: Cache Boost installation
id: cache-boost
uses: actions/cache@v3
with:
path: ${{env.BOOST_INSTALL_PATH}}
key: ${{matrix.boost_version}}${{env.BOOST_TOOLSET}}${{env.BOOST_PLATFORM_VERSION}}
- name: Install Boost
if: steps.cache-boost.outputs.cache-hit != 'true'
uses: MarkusJx/install-boost@v2.4.1
with:
boost_version: ${{matrix.boost_version}}
toolset: ${{env.BOOST_TOOLSET}}
boost_install_dir: ${{env.BOOST_INSTALL_PATH}}
platform_version: ${{env.BOOST_PLATFORM_VERSION}}
arch: null
- name: Install packages
run: cinst openssl
- name: Create build directory
run: mkdir build
- name: Configure
working-directory: build
run: |
cmake -T "${{matrix.toolset}}" \
-G "${{matrix.generator}}" \
${{matrix.config_args}} \
${BOOST_COMPILER_ARG}\
"${GITHUB_WORKSPACE}"
env:
BOOST_ROOT: ${{env.BOOST_INSTALL_PATH}}/boost
- name: Build
working-directory: build
run: |
cmake --build . ${BUILD_CONFIG_ARG} ${{matrix.build_args}}
posix:
defaults:
run:
@@ -12,7 +98,6 @@ jobs:
fail-fast: false
matrix:
include:
- { toolset: gcc, compiler: g++-10, install: g++-10, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++17' }
@@ -35,7 +120,7 @@ jobs:
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
boost_version: 1.80.0
platform_version: 22.04
- name: Run CMake
run: |

View File

@@ -30,7 +30,7 @@ jobs:
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
boost_version: 1.80.0
platform_version: 22.04
- name: Run CMake
run: |

View File

@@ -10,7 +10,7 @@ cmake_minimum_required(VERSION 3.14)
project(
Aedis
VERSION 1.2.0
VERSION 1.4.0
DESCRIPTION "A redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
@@ -21,28 +21,34 @@ target_include_directories(aedis INTERFACE
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
target_link_libraries(aedis
target_link_libraries(
aedis
INTERFACE
Boost::asio
Boost::assert
Boost::config
Boost::core
Boost::mp11
Boost::optional
Boost::system
Boost::utility
Boost::winapi
)
target_compile_features(aedis INTERFACE cxx_std_17)
# Asio bases C++ feature detection on __cplusplus. Make MSVC
# define it correctly
if (MSVC)
target_compile_options(aedis INTERFACE /Zc:__cplusplus)
endif()
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${PROJECT_BINARY_DIR}/AedisConfigVersion.cmake"
COMPATIBILITY AnyNewerVersion
)
find_package(Boost 1.79 REQUIRED)
find_package(Boost 1.80 REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
find_package(OpenSSL REQUIRED)
@@ -50,82 +56,224 @@ find_package(OpenSSL REQUIRED)
enable_testing()
include_directories(include)
# Main function for the examples.
#=======================================================================
add_library(common STATIC
examples/common/common.cpp
examples/common/main.cpp
examples/common/aedis.cpp
)
target_compile_features(common PUBLIC cxx_std_20)
if (MSVC)
target_compile_options(common PRIVATE /bigobj)
target_compile_definitions(common PRIVATE _WIN32_WINNT=0x0601)
endif()
# Executables
#=======================================================================
#add_executable(intro_sync examples/intro_sync.cpp) // Uncomment after update to Boost 1.80
add_executable(chat_room examples/chat_room.cpp)
add_executable(containers examples/containers.cpp)
add_executable(echo_server examples/echo_server.cpp)
add_executable(echo_server_client benchmarks/cpp/asio/echo_server_client.cpp)
add_executable(echo_server_direct benchmarks/cpp/asio/echo_server_direct.cpp)
add_executable(intro examples/intro.cpp)
add_executable(intro_tls examples/intro_tls.cpp)
add_executable(low_level_sync examples/low_level_sync.cpp)
add_executable(serialization examples/serialization.cpp)
add_executable(subscriber examples/subscriber.cpp)
add_executable(subscriber_sentinel examples/subscriber_sentinel.cpp)
add_executable(test_conn_connect tests/conn_connect.cpp)
add_executable(test_conn_exec tests/conn_exec.cpp)
add_executable(test_conn_push tests/conn_push.cpp)
add_executable(test_conn_quit tests/conn_quit.cpp)
add_executable(test_conn_quit_coalesce tests/conn_quit_coalesce.cpp)
add_executable(test_conn_reconnect tests/conn_reconnect.cpp)
add_executable(test_conn_tls tests/conn_tls.cpp)
add_executable(test_low_level tests/low_level.cpp)
add_executable(test_conn_run_cancel tests/conn_run_cancel.cpp)
add_executable(test_conn_exec_cancel tests/conn_exec_cancel.cpp)
add_executable(test_conn_echo_stress tests/conn_echo_stress.cpp)
add_executable(test_request tests/request.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_compile_features(containers PUBLIC cxx_std_20)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_compile_features(echo_server_client PUBLIC cxx_std_20)
target_compile_features(echo_server_direct PUBLIC cxx_std_20)
target_compile_features(intro PUBLIC cxx_std_17)
target_compile_features(intro_tls PUBLIC cxx_std_17)
target_compile_features(low_level_sync PUBLIC cxx_std_17)
target_compile_features(serialization PUBLIC cxx_std_17)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_compile_features(subscriber_sentinel PUBLIC cxx_std_20)
target_compile_features(test_conn_connect PUBLIC cxx_std_17)
target_compile_features(test_conn_exec PUBLIC cxx_std_20)
target_compile_features(test_conn_push PUBLIC cxx_std_20)
target_compile_features(test_conn_quit PUBLIC cxx_std_17)
target_compile_features(test_conn_quit_coalesce PUBLIC cxx_std_17)
target_compile_features(test_conn_reconnect PUBLIC cxx_std_20)
target_compile_features(test_conn_tls PUBLIC cxx_std_17)
target_compile_features(test_low_level PUBLIC cxx_std_17)
target_compile_features(test_conn_run_cancel PUBLIC cxx_std_20)
target_compile_features(test_conn_exec_cancel PUBLIC cxx_std_20)
target_compile_features(test_conn_echo_stress PUBLIC cxx_std_20)
target_compile_features(test_request PUBLIC cxx_std_17)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(test_conn_tls OpenSSL::Crypto OpenSSL::SSL)
# Tests
#=======================================================================
add_test(containers containers)
target_link_libraries(intro common)
target_compile_features(intro PUBLIC cxx_std_20)
add_test(intro intro)
add_test(intro_tls intro_tls)
#add_test(intro_sync intro_sync)
if (MSVC)
target_compile_options(intro PRIVATE /bigobj)
target_compile_definitions(intro PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(intro_sync examples/intro_sync.cpp)
target_compile_features(intro_sync PUBLIC cxx_std_20)
add_test(intro_sync intro_sync)
add_test(intro_sync intro_sync)
if (MSVC)
target_compile_options(intro_sync PRIVATE /bigobj)
target_compile_definitions(intro_sync PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(chat_room examples/chat_room.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_link_libraries(chat_room common)
if (MSVC)
target_compile_options(chat_room PRIVATE /bigobj)
target_compile_definitions(chat_room PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(containers examples/containers.cpp)
target_compile_features(containers PUBLIC cxx_std_20)
target_link_libraries(containers common)
add_test(containers containers)
if (MSVC)
target_compile_options(containers PRIVATE /bigobj)
target_compile_definitions(containers PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(echo_server examples/echo_server.cpp)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_link_libraries(echo_server common)
if (MSVC)
target_compile_options(echo_server PRIVATE /bigobj)
target_compile_definitions(echo_server PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(resolve_with_sentinel examples/resolve_with_sentinel.cpp)
target_compile_features(resolve_with_sentinel PUBLIC cxx_std_20)
target_link_libraries(resolve_with_sentinel common)
#add_test(resolve_with_sentinel resolve_with_sentinel)
if (MSVC)
target_compile_options(resolve_with_sentinel PRIVATE /bigobj)
target_compile_definitions(resolve_with_sentinel PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(serialization examples/serialization.cpp)
target_compile_features(serialization PUBLIC cxx_std_20)
target_link_libraries(serialization common)
add_test(serialization serialization)
if (MSVC)
target_compile_options(serialization PRIVATE /bigobj)
target_compile_definitions(serialization PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(subscriber examples/subscriber.cpp)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_link_libraries(subscriber common)
if (MSVC)
target_compile_options(subscriber PRIVATE /bigobj)
target_compile_definitions(subscriber PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(intro_tls examples/intro_tls.cpp)
target_compile_features(intro_tls PUBLIC cxx_std_20)
add_test(intro_tls intro_tls)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(intro_tls common)
if (MSVC)
target_compile_options(intro_tls PRIVATE /bigobj)
target_compile_definitions(intro_tls PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(low_level_async examples/low_level_async.cpp)
target_compile_features(low_level_async PUBLIC cxx_std_20)
add_test(low_level_async low_level_async)
target_link_libraries(low_level_async common)
if (MSVC)
target_compile_options(low_level_async PRIVATE /bigobj)
target_compile_definitions(low_level_async PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(echo_server_client benchmarks/cpp/asio/echo_server_client.cpp)
target_compile_features(echo_server_client PUBLIC cxx_std_20)
if (MSVC)
target_compile_options(echo_server_client PRIVATE /bigobj)
target_compile_definitions(echo_server_client PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(echo_server_direct benchmarks/cpp/asio/echo_server_direct.cpp)
target_compile_features(echo_server_direct PUBLIC cxx_std_20)
if (MSVC)
target_compile_options(echo_server_direct PRIVATE /bigobj)
target_compile_definitions(echo_server_direct PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(low_level_sync examples/low_level_sync.cpp)
target_compile_features(low_level_sync PUBLIC cxx_std_17)
add_test(low_level_sync low_level_sync)
add_test(test_low_level test_low_level)
if (MSVC)
target_compile_options(low_level_sync PRIVATE /bigobj)
target_compile_definitions(low_level_sync PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_exec tests/conn_exec.cpp)
target_compile_features(test_conn_exec PUBLIC cxx_std_20)
add_test(test_conn_exec test_conn_exec)
add_test(test_conn_connect test_conn_connect)
if (MSVC)
target_compile_options(test_conn_exec PRIVATE /bigobj)
target_compile_definitions(test_conn_exec PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_push tests/conn_push.cpp)
target_compile_features(test_conn_push PUBLIC cxx_std_20)
add_test(test_conn_push test_conn_push)
if (MSVC)
target_compile_options(test_conn_push PRIVATE /bigobj)
target_compile_definitions(test_conn_push PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_quit tests/conn_quit.cpp)
target_compile_features(test_conn_quit PUBLIC cxx_std_17)
add_test(test_conn_quit test_conn_quit)
if (MSVC)
target_compile_options(test_conn_quit PRIVATE /bigobj)
target_compile_definitions(test_conn_quit PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_quit_coalesce tests/conn_quit_coalesce.cpp)
add_test(test_conn_quit_coalesce test_conn_quit_coalesce)
target_compile_features(test_conn_quit_coalesce PUBLIC cxx_std_17)
if (MSVC)
target_compile_options(test_conn_quit_coalesce PRIVATE /bigobj)
target_compile_definitions(test_conn_quit_coalesce PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_reconnect tests/conn_reconnect.cpp)
target_compile_features(test_conn_reconnect PUBLIC cxx_std_20)
target_link_libraries(test_conn_reconnect common)
add_test(test_conn_reconnect test_conn_reconnect)
if (MSVC)
target_compile_options(test_conn_reconnect PRIVATE /bigobj)
target_compile_definitions(test_conn_reconnect PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_tls tests/conn_tls.cpp)
add_test(test_conn_tls test_conn_tls)
target_compile_features(test_conn_tls PUBLIC cxx_std_17)
target_link_libraries(test_conn_tls OpenSSL::Crypto OpenSSL::SSL)
if (MSVC)
target_compile_options(test_conn_tls PRIVATE /bigobj)
target_compile_definitions(test_conn_tls PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_low_level tests/low_level.cpp)
target_compile_features(test_low_level PUBLIC cxx_std_17)
add_test(test_low_level test_low_level)
if (MSVC)
target_compile_options(test_low_level PRIVATE /bigobj)
target_compile_definitions(test_low_level PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_run_cancel tests/conn_run_cancel.cpp)
target_compile_features(test_conn_run_cancel PUBLIC cxx_std_20)
add_test(test_conn_run_cancel test_conn_run_cancel)
if (MSVC)
target_compile_options(test_conn_run_cancel PRIVATE /bigobj)
target_compile_definitions(test_conn_run_cancel PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_exec_cancel tests/conn_exec_cancel.cpp)
target_compile_features(test_conn_exec_cancel PUBLIC cxx_std_20)
target_link_libraries(test_conn_exec_cancel common)
add_test(test_conn_exec_cancel test_conn_exec_cancel)
if (MSVC)
target_compile_options(test_conn_exec_cancel PRIVATE /bigobj)
target_compile_definitions(test_conn_exec_cancel PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_echo_stress tests/conn_echo_stress.cpp)
target_compile_features(test_conn_echo_stress PUBLIC cxx_std_20)
target_link_libraries(test_conn_echo_stress common)
add_test(test_conn_echo_stress test_conn_echo_stress)
if (MSVC)
target_compile_options(test_conn_echo_stress PRIVATE /bigobj)
target_compile_definitions(test_conn_echo_stress PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_request tests/request.cpp)
target_compile_features(test_request PUBLIC cxx_std_17)
add_test(test_request test_request)
if (MSVC)
target_compile_options(test_request PRIVATE /bigobj)
target_compile_definitions(test_request PRIVATE _WIN32_WINNT=0x0601)
endif()
# Install
#=======================================================================

View File

@@ -49,6 +49,7 @@
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_CXX_COMPILER": "g++-11",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/dev",

682
README.md
View File

@@ -1,178 +1,165 @@
# Documentation
[TOC]
# Aedis
## Overview
Aedis is a high-level [Redis](https://redis.io/) client library
built on top of
[Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html).
Some of its distinctive features are
* Support for the latest version of the Redis communication protocol [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
* Support for STL containers, TLS and Redis sentinel.
* Serialization and deserialization of your own data types.
* Health checks, back pressure, cancellation and low latency.
In addition to that, Aedis hides most of the low-level Asio code away
from the user, which in the majority of the cases, will interact with
only three library entities
Aedis is a [Redis](https://redis.io/) client library built on top of
[Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html)
that implements the latest version of the Redis communication
protocol
[RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
It makes communication with a Redis server easy by hiding most of
the low-level Asio-related code away from the user, which in the majority of
the cases will be concerned with only three library entities
* `aedis::connection`: A connection to the Redis server.
* `aedis::resp3::request`: A container of Redis commands.
* `aedis::adapt()`: A function that adapts data structures to receive Redis responses.
* `aedis::adapt()`: Adapts data structures to receive responses.
The example below shows for example how to read Redis hashes in an
`std::map` using a coroutine, a short-lived connection and
cancellation
For example, the coroutine below uses a short-lived connection to read Redis
[hashes](https://redis.io/docs/data-types/hashes/)
in a `std::map` (see intro.cpp and containers.cpp)
```cpp
net::awaitable<std::map<std::string, std::string>> retrieve_hashes(endpoint ep)
auto async_main() -> net::awaitable<void>
{
connection conn{co_await net::this_coro::executor};
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
request req;
req.get_config().cancel_on_connection_lost = true;
// From examples/common.hpp to avoid verbosity
co_await connect(conn, "127.0.0.1", "6379");
// A request contains multiple commands.
resp3::request req;
req.push("HELLO", 3);
req.push("HGETALL", "hset-key");
req.push("QUIT");
std::tuple<std::map<std::string, std::string>, aedis::ignore> resp;
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
// Responses as tuple elements.
std::tuple<aedis::ignore, std::map<std::string, std::string>, aedis::ignore> resp;
co_return std::move(std::get<0>(resp));
// Executes the request and reads the response.
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
// Use the map from std::get<1>(resp) ...
}
```
In the next section we will see more details about connections,
requests and responses.
The execution of `connection::async_exec` above is composed with
`connection::async_run` with the aid of the Asio awaitable operator ||,
which ensures that one operation is cancelled as soon as the other
completes. These functions play the following roles
* `connection::async_exec`: Executes commands (i.e. writes the request and reads the response).
* `connection::async_run`: Coordinates read and write operations and remains suspended until the connection is lost.
Let us dig in.
<a name="connection"></a>
## Connection
### Connection
The `aedis::connection` is a class that provides async-only
communication with a Redis server by means of three member
functions
* `aedis::connection::async_run`: Establishes a connection and
completes only when it is lost.
* `aedis::connection::async_exec`: Executes commands.
* `aedis::connection::async_receive`: Receives server-side pushes.
In general, these operations will be running concurrently in a user
application where, for example
1. **Connect**: One coroutine will call `async_run` in a loop
to reconnect whenever a connection is lost.
2. **Execute**: Multiple coroutines will call `async_exec` independently
and without coordination (e.g. queuing).
3. **Receive**: One coroutine will loop on `async_receive` to receive
server-side pushes (required only if the app expects server pushes).
Each of the operations above can be performed without regard to the
others as they are independent from each other. Below we will cover
the points above in more detail.
#### Connect
In general, applications will connect to a Redis server and hang
around for as long as possible, until the connection is lost for some
reason. When that happens, simple setups will want to wait for a
short period of time and try to reconnect. The code snippet below
shows how this can be achieved with a coroutine (see echo_server.cpp)
In general we will want to reuse the same connection for multiple
requests. We can do this with the example above by decoupling the
HELLO command and the call to `async_run` into a separate coroutine
```cpp
net::awaitable<void> reconnect(std::shared_ptr<connection> conn, endpoint ep)
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
net::steady_timer timer{co_await net::this_coro::executor};
for (boost::system::error_code ec;;) {
co_await connect(conn, "127.0.0.1", "6379");
resp3::request req;
req.push("HELLO", 3); // Upgrade to RESP3
// Notice we use && instead of || so async_run is not cancelled
// when the response to HELLO comes.
co_await (conn->async_run() && conn->async_exec(req));
}
```
We can now let `run` run detached in the background while other
coroutines perform requests on the connection
```cpp
auto async_main() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// Calls async_run detached.
net::co_spawn(ex, run(conn), net::detached);
// Here we can pass conn around to other coroutines so they can make requests.
...
}
```
With this separation, it is now easy to incorporate other operations
in our application. For example, to cancel the connection on `SIGINT`
and `SIGTERM` we can extend `run` as follows
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
co_await connect(conn, "127.0.0.1", "6379");
signal_set sig{ex, SIGINT, SIGTERM};
resp3::request req;
req.push("HELLO", 3);
co_await ((conn->async_run() || sig.async_wait()) && conn->async_exec(req));
}
```
Likewise we can incorporate support for server pushes, health checks and pubsub
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
co_await connect(conn, "127.0.0.1", "6379");
signal_set sig{ex, SIGINT, SIGTERM};
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel1", "channel2");
co_await ((conn->async_run() || sig.async_wait() || receiver(conn) || healthy_checker(conn))
&& conn->async_exec(req));
}
```
The definition of `receiver` and `healthy_checker` above can be found
in subscriber.cpp. Adding a loop around `async_run` produces a simple
way to support reconnection _while there are pending operations on the connection_,
for example, to reconnect to the same address
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
steady_timer timer{ex};
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel1", "channel2");
for (;;) {
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || healthy_checker(conn) || receiver(conn))
&& conn->async_exec(req));
// Establishes a connection and hangs around until it is lost.
co_await conn->async_run(ep, {}, redir(ec));
conn->reset_stream();
// Waits some time before trying to restablish the connection.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
```
Other common scenarios are, for example, performing a failover with
sentinels and re-subscribing to pubsub channels, both are covered in
the `subscriber_sentinel.cpp` example.
For failover with sentinels see `resolve_with_sentinel.cpp`. At
this point the reasons why `async_run` was introduced in Aedis
might have become apparent to the reader
#### Execute
* Provide quick reaction to disconnections and hence faster failover.
* Support server pushes and requests in the same connection object, concurrently.
* Separate requests, handling of server pushes and reconnection operations.
The basic idea about `async_exec` was stated above already: execute
Redis commands. One of the most important things about it though is
that it can be called multiple times without coordination, for
example, in an HTTP or WebSocket server where each session calls it
independently to communicate with Redis. The benefits of this feature
are manifold
* Reduces code complexity as users won't have to implement queues
every time e.g. HTTP sessions want to share a connection to Redis.
* A small number of connections improves the performance associated
with [pipelines](https://redis.io/topics/pipelining). A single
connection will indeed be enough in most cases.
The code below illustrates these concepts in a TCP session of the
`echo_server.cpp` example
```cpp
awaitable_type echo_server_session(tcp_socket socket, std::shared_ptr<connection> db)
{
request req;
std::tuple<std::string> response;
for (std::string buffer;;) {
// Reads a user message.
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
// Echoes it through Redis.
req.push("PING", buffer);
co_await db->async_exec(req, adapt(response));
// Writes it back to the user.
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
// Cleanup
std::get<0>(response).clear();
req.clear();
buffer.erase(0, n);
}
}
```
Notice also how the session above provides back-pressure as the
coroutine won't read the next message from the socket until a cycle is
complete.
#### Receive
Point number 3 above is only necessary for apps that expect server
pushes, for example, when using Redis pubsub. The example below
was taken from subscriber.cpp
```cpp
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
```
In general, it is advisable for all apps to keep a coroutine calling
`async_receive` as an unread push will cause the connection to stall
and eventually time out. Notice that the same connection that is being
used to send requests can also be used to receive server-side pushes.
#### Cancellation
### Cancellation
Aedis supports both implicit and explicit cancellation of connection
operations. Explicit cancellation is supported by means of the
@@ -184,10 +171,7 @@ like those that may happen when using Asio awaitable operators && and
co_await (conn.async_run(...) && conn.async_exec(...))
```
* Useful when implementing reconnection on applications that
use pubsub.
* Makes the channel re-subscribe operation simpler when the
connection is reestablished.
* Provide a simple way to send HELLO and perform channel subscription.
```cpp
co_await (conn.async_run(...) || conn.async_exec(...))
@@ -202,77 +186,50 @@ co_await (conn.async_exec(...) || time.async_wait(...))
* Provides a way to limit how long the execution of a single request
should last.
* Alternatively, for a connection-wide timeout set
`aedis::connection::timeouts::ping_interval` to a proper value. This
will work because all requests use the same queue and is also more
efficient since only one timer will be used.
* The cancellation will be ignored if the request has already
been written to the socket.
* It is usually a better idea to have a health checker than adding a
per-request timeout, see subscriber.cpp for an example and the sketch below.
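
Spelled out, the per-request timeout pattern above could look like the
sketch below (assuming the connection and timer type aliases used in the
examples, i.e. with `use_awaitable` as the default completion token; the
five-second limit and the `req`/`resp` variables are illustrative)

```cpp
steady_timer timer{co_await net::this_coro::executor};
timer.expires_after(std::chrono::seconds{5});

// Whichever operation completes first cancels the other. If the timer
// fires first the execution of req is cancelled, unless the request has
// already been written to the socket (see the note above).
co_await (conn.async_exec(req, adapt(resp)) || timer.async_wait());
```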
```cpp
co_await (conn.async_run(...) || time.async_wait(...))
```
* Set a limit on how long the connection should live (see also
`aedis::connection::timeouts`)
* Sets a limit on how long the connection should live.
```cpp
co_await (conn.async_exec(...) || conn.async_exec(...) || ... || conn.async_exec(...))
```
* This works but is considered an antipattern. Unless
the user has set `aedis::resp3::request::config::coalesce` to
`false`, and he shouldn't, the connection will automatically merge
the individual requests into a single payload anyway.
#### Timeouts
Aedis high-level API provides built-in support for many of the
timeouts users usually need. For example, the
`aedis::connection::async_run` member function performs the following
operations on behalf of the user
* Resolves Redis address.
* Connects to the resolved endpoint.
* TLS handshake (for TLS endpoints).
* RESP3 handshake and authentication and role check.
* Periodic healthy checks with the PING command.
* Keeps reading from the socket to handle server pushes and command responses.
* Keeps writing requests as it becomes possible e.g. after last response has arrived.
To control the timeout behaviour of these operations users must
create an `aedis::connection::timeouts` object and pass it as an
argument to `aedis::connection::async_run` (if
the suggested defaults are not suitable).
* This works but is unnecessary. Unless the user has set
`aedis::resp3::request::config::coalesce` to `false`, and he
shouldn't, the connection will automatically merge the individual
requests into a single payload anyway.
<a name="requests"></a>
## Requests
### Requests
Redis requests are composed of one or more Redis commands (in
Redis requests are composed of one or more commands (in the
Redis documentation they are called
[pipelines](https://redis.io/topics/pipelining)). For example
```cpp
// Some example containers.
std::list<std::string> list {...};
std::map<std::string, mystruct> map { ...};
request req;
// Command with variable length of arguments.
req.push("SET", "key", "some value", "EX", "2");
// Pushes a list.
std::list<std::string> list
{"channel1", "channel2", "channel3"};
req.push_range("SUBSCRIBE", list);
// Same as above but as an iterator range.
req.push_range("SUBSCRIBE", std::cbegin(list), std::cend(list));
// Pushes a map.
std::map<std::string, mystruct> map
{ {"key1", "value1"}
, {"key2", "value2"}
, {"key3", "value3"}};
req.push_range("HSET", "key", map);
```
@@ -280,11 +237,11 @@ Sending a request to Redis is performed with `aedis::connection::async_exec` as
<a name="serialization"></a>
#### Serialization
### Serialization
The `push` and `push_range` functions above work with integers
e.g. `int` and `std::string` out of the box. To send your own
data type define a `to_bulk` function like this
The `resp3::request::push` and `resp3::request::push_range` member functions work
with basic data types such as `int` and `std::string` out of the box.
To send your own data type define a `to_bulk` function like this
```cpp
// Example struct.
@@ -309,11 +266,17 @@ std::map<std::string, mystruct> map {...};
req.push_range("HSET", "key", map);
```
Example serialization.cpp shows how to store json string in Redis.
Example serialization.cpp shows how to store json strings in Redis.
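
The snippet above is partly elided by the diff. A minimal sketch of such a
customization point, assuming a user-provided `serialize()` helper and the
`aedis::resp3::to_bulk` free function (both assumptions here; see
serialization.cpp for the authoritative version), could look like

```cpp
// Hypothetical user type.
struct mystruct {
   std::string name;
   int value;
};

// Assumed helper that turns mystruct into the string stored in Redis,
// e.g. a JSON payload.
std::string serialize(mystruct const& obj);

// Customization point found when the request is assembled. The exact
// signature is an assumption based on this document.
void to_bulk(std::string& payload, mystruct const& obj)
{
   aedis::resp3::to_bulk(payload, serialize(obj));
}
```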
<a name="responses"></a>
### Responses
### Config flags
The `aedis::resp3::request::config` object inside the request dictates how the
`aedis::connection` should handle the request in some important situations. The
reader is advised to read it carefully.
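
For illustration, the two flags mentioned elsewhere in this document can be
set as in the sketch below (the config object has further members not shown
here)

```cpp
resp3::request req;

// Cancel this request if the connection is lost before it completes
// (same flag used in the intro example above).
req.get_config().cancel_on_connection_lost = true;

// Keep the default behaviour of merging this request with others into
// a single pipeline payload (see the note on coalesce further down).
req.get_config().coalesce = true;

req.push("HELLO", 3);
req.push("GET", "key");
```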
## Responses
Aedis uses the following strategy to support Redis responses
@@ -336,10 +299,11 @@ To read the response to this request users can use the following tuple
std::tuple<std::string, int, std::string>
```
The pattern may have become apparent to the user, the tuple must have
the same size as the request (exceptions below) and each element must
be able to store the response to the command it refers to. To ignore
responses to individual commands in the request use the tag
The pattern might have become apparent to the reader: the tuple must
have as many elements as the request has commands (exceptions below).
It is also necessary that each tuple element is capable of storing the
response to the command it refers to, otherwise an error will occur.
To ignore responses to individual commands in the request use the tag
`aedis::ignore`
```cpp
@@ -347,7 +311,8 @@ responses to individual commands in the request use the tag
std::tuple<std::string, aedis::ignore, std::string, aedis::ignore>
```
The following table provides the response types of some commands
The following table provides the resp3-types returned by some Redis
commands
Command | RESP3 type | Documentation
---------|-------------------------------------|--------------
@@ -403,14 +368,18 @@ std::tuple<
Where both are passed to `async_exec` as shown elsewhere
```cpp
co_await db->async_exec(req, adapt(resp));
co_await conn->async_exec(req, adapt(resp));
```
If the intention is to ignore the response to all commands altogether
use `adapt()` without arguments instead
```cpp
co_await db->async_exec(req, adapt());
// Uses the ignore adapter explicitly.
co_await conn->async_exec(req, adapt());
// Ignore adapter is also the default argument.
co_await conn->async_exec(req);
```
Responses that contain nested aggregates or heterogeneous data
@@ -419,7 +388,7 @@ of this writing, not all RESP3 types are used by the Redis server,
which means in practice users will be concerned with a reduced
subset of the RESP3 specification.
#### Push
### Pushes
Commands that have push response like
@@ -427,7 +396,7 @@ Commands that have push response like
* `"PSUBSCRIBE"`
* `"UNSUBSCRIBE"`
must not be included in the tuple. For example, the request below
must **not** be included in the tuple. For example, the request below
```cpp
request req;
@@ -439,7 +408,7 @@ req.push("QUIT");
must be read in this tuple `std::tuple<std::string, std::string>`,
that has size two.
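
A minimal sketch consistent with this rule (the exact commands of the elided
request are not shown in the diff; the ones below are illustrative)

```cpp
request req;
req.push("PING");                  // Regular response.
req.push("SUBSCRIBE", "channel");  // Push response: no tuple element.
req.push("QUIT");                  // Regular response.

// Two elements for the two non-push commands; the SUBSCRIBE
// confirmation is delivered through async_receive instead.
std::tuple<std::string, std::string> resp;
co_await conn->async_exec(req, adapt(resp));
```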
#### Null
### Null
It is not uncommon for apps to access keys that do not exist or
that have already expired in the Redis server, to deal with these
@@ -451,26 +420,26 @@ std::tuple<
std::optional<A>,
std::optional<B>,
...
> response;
> resp;
co_await db->async_exec(req, adapt(response));
co_await conn->async_exec(req, adapt(resp));
```
Everything else stays pretty much the same.
#### Transactions
### Transactions
To read responses to transactions we must first observe that Redis will
queue its commands and send their responses to the user as elements
of an array, after the `EXEC` command comes. For example, to read
the response to this request
queue the transaction commands and send their individual responses as elements
of an array; the array itself is the response to the `EXEC` command.
For example, to read the response to this request
```cpp
db.send("MULTI");
db.send("GET", "key1");
db.send("LRANGE", "key2", 0, -1);
db.send("HGETALL", "key3");
db.send("EXEC");
req.push("MULTI");
req.push("GET", "key1");
req.push("LRANGE", "key2", 0, -1);
req.push("HGETALL", "key3");
req.push("EXEC");
```
use the following response type
@@ -486,26 +455,26 @@ using exec_resp_type =
>;
std::tuple<
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
exec_resp_type, // exec
> resp;
co_await db->async_exec(req, adapt(resp));
co_await conn->async_exec(req, adapt(resp));
```
For a complete example see containers.cpp.
#### Deserialization
### Deserialization
As mentioned in \ref serialization, it is common practice to
serialize data before sending it to Redis e.g. as json strings.
For performance and convenience reasons, we may also want to
deserialize it directly in its final data structure when reading them
back from Redis. Aedis supports this use case by calling a user
provided `from_bulk` function while parsing the response. For example
As mentioned in the serialization section, it is common practice to
serialize data before sending it to Redis e.g. as json strings. For
performance and convenience reasons, we may also want to deserialize
responses directly in their final data structure. Aedis supports this
use case by calling a user provided `from_bulk` function while parsing
the response. For example
```cpp
void from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
@@ -519,7 +488,7 @@ types e.g. `mystruct`, `std::map<std::string, mystruct>` etc.
<a name="the-general-case"></a>
#### The general case
### The general case
There are cases where responses to Redis
commands won't fit in the model presented above; some examples are
@@ -560,7 +529,7 @@ using other types
```cpp
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await db->async_exec(req, adapt(resp));
co_await conn->async_exec(req, adapt(resp));
```
For example, suppose we want to retrieve a hash data structure
@@ -575,22 +544,115 @@ In addition to the above users can also use unordered versions of the
containers. The same reasoning also applies to sets e.g. `SMEMBERS`
and other data structures in general.
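
For instance, a short sketch (the keys `hset-key` and `set-key` are
illustrative) reading a hash into an `std::unordered_map` and a set into an
`std::unordered_set`

```cpp
resp3::request req;
req.push("HGETALL", "hset-key");
req.push("SMEMBERS", "set-key");

// Unordered containers work the same way as their ordered counterparts.
std::tuple<
   std::unordered_map<std::string, std::string>,
   std::unordered_set<std::string>
> resp;

co_await conn->async_exec(req, adapt(resp));
```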
### Examples
## Examples
To conclude this overview users are invited to skim over the
examples below
These examples demonstrate what has been discussed so far.
* intro.cpp: The Aedis hello-world program. It sends one command to Redis and quits the connection.
* intro.cpp: The Aedis hello-world program. Sends one command and quits the connection.
* intro_tls.cpp: Same as intro.cpp but over TLS.
* intro_sync.cpp: Synchronous version of intro.cpp.
* containers.cpp: Shows how to send and receive stl containers and how to use transactions.
* intro_sync.cpp: Shows how to use the connection class synchronously.
* containers.cpp: Shows how to send and receive STL containers and how to use transactions.
* serialization.cpp: Shows how to serialize types using Boost.Json.
* subscriber.cpp: Shows how to implement pubsub that reconnects and resubscribes when the connection is lost.
* subscriber_sentinel.cpp: Same as subscriber.cpp but with failover with sentinels.
* resolve_with_sentinel.cpp: Shows how to resolve a master address using sentinels.
* subscriber.cpp: Shows how to implement pubsub with reconnection and re-subscription.
* echo_server.cpp: A simple TCP echo server.
* chat_room.cpp: A simple chat room.
* chat_room.cpp: A command line chat built on Redis pubsub.
* low_level_sync.cpp: Sends a ping synchronously using the low-level API.
* low_level_async.cpp: Sends a ping asynchronously using the low-level API.
## Why Aedis
To avoid repetition, code that is common to all examples has been
grouped in common.hpp. The main function used in some async examples
has been factored out into the main.cpp file.
## Echo server benchmark
This document benchmarks the performance of TCP echo servers I
implemented in different languages using different Redis clients. The
main motivations for choosing an echo server are
* Simple to implement and does not require expert-level knowledge of most languages.
* I/O bound: Echo servers have very low CPU consumption in general
and therefore are excellent for measuring how a program handles concurrent requests.
* It simulates very well a typical backend in regard to concurrency.
I also imposed some constraints on the implementations
* It should be simple enough and not require writing too much code.
* Favor the use of standard idioms and avoid optimizations that require expert-level knowledge.
* Avoid the use of complex things like connection and thread pools.
To reproduce these results run one of the echo-server programs in one
terminal and the
[echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp)
in another.
### Without Redis
First I tested a pure TCP echo server, i.e. one that sends the messages
directly to the client without interacting with Redis. The result can
be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-direct.png)
The tests were performed with 1000 concurrent TCP connections on the
localhost where latency is 0.07ms on average on my machine. On higher
latency networks the difference among libraries is expected to
decrease.
* I expected Libuv to have similar performance to Asio and Tokio.
* I did expect nodejs to come a little behind given it is
JavaScript code. Otherwise I did expect it to have similar
performance to libuv since it is the framework behind it.
* Go did surprise me: faster than nodejs and libuv!
The code used in the benchmarks can be found at
* [Asio](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/mzimbres/aedis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [here](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) Libuv example .
* [Tokio](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
### With Redis
This is similar to the echo server described above but messages are
echoed by Redis and not by the echo-server itself, which acts
as a proxy between the client and the Redis server. The results
can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average, otherwise it uses the same number of TCP connections
as the previous example.
As the reader can see, the Libuv and the Rust tests are not depicted
in the graph; the reasons are
* [redis-rs](https://github.com/redis-rs/redis-rs): This client
comes so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant.
I don't know for sure why it is so slow; I suppose it has
something to do with its lack of automatic
[pipelining](https://redis.io/docs/manual/pipelining/) support.
In fact, the more TCP connections I launch the worse its
performance gets.
* Libuv: I left it out because it would require me to write too much
C code. More specifically, I would have to use hiredis and
implement support for pipelines manually.
The code used in the benchmarks can be found at
* [Aedis](https://github.com/mzimbres/aedis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
### Conclusion
Redis clients have to support automatic pipelining to have competitive performance. For updates to this document follow https://github.com/mzimbres/aedis.
## Comparison
The main reason why I started writing Aedis was to have a client
compatible with the Asio asynchronous model. As I made progress I could
@@ -603,14 +665,16 @@ stars, namely
* https://github.com/sewenew/redis-plus-plus
### Aedis vs Redis-plus-plus
Before we start it is important to mention some of the things
redis-plus-plus does not support
* The latest version of the communication protocol RESP3. Without it, it is impossible to support some important Redis features like client-side caching, among other things.
* Coroutines.
* Reading responses directly in user data structures to avoid creating temporaries.
* Proper error handling with support for error-code.
* Health checks.
* Error handling with support for error-code.
* Cancellation.
The remaining points will be addressed individually. Let us first
have a look at what sending a command, a pipeline and a transaction
@@ -699,93 +763,7 @@ enqueueing a message and triggering a write when it can be sent.
It is also not clear how pipelines are realised with this design
(if at all).
### Echo server benchmark
This document benchmarks the performance of TCP echo servers I
implemented in different languages using different Redis clients. The
main motivations for choosing an echo server are
* Simple to implement and does not require expert-level knowledge of most languages.
* I/O bound: Echo servers have very low CPU consumption in general
and therefore are excellent for measuring how a program handles concurrent requests.
* It simulates very well a typical backend in regard to concurrency.
I also imposed some constraints on the implementations
* It should be simple enough and not require writing too much code.
* Favor the use of standard idioms and avoid optimizations that require expert-level knowledge.
* Avoid the use of complex things like connection and thread pools.
To reproduce these results run one of the echo-server programs in one
terminal and the
[echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp)
in another.
#### Without Redis
First I tested a pure TCP echo server, i.e. one that sends the messages
directly to the client without interacting with Redis. The result can
be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-direct.png)
The tests were performed with 1000 concurrent TCP connections on the
localhost where latency is 0.07ms on average on my machine. On higher
latency networks the difference among libraries is expected to
decrease.
* I expected Libuv to have similar performance to Asio and Tokio.
* I did expect nodejs to come a little behind given it is
JavaScript code. Otherwise I did expect it to have similar
performance to libuv since it is the framework behind it.
* Go did surprise me: faster than nodejs and libuv!
The code used in the benchmarks can be found at
* [Asio](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/mzimbres/aedis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [here](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) Libuv example .
* [Tokio](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
#### With Redis
This is similar to the echo server described above but messages are
echoed by Redis and not by the echo-server itself, which acts
as a proxy between the client and the Redis server. The results
can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average, otherwise it uses the same number of TCP connections
as the previous example.
As the reader can see, the Libuv and the Rust tests are not depicted
in the graph; the reasons are
* [redis-rs](https://github.com/redis-rs/redis-rs): This client
comes so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant.
I don't know for sure why it is so slow; I suppose it has
something to do with its lack of proper
[pipelining](https://redis.io/docs/manual/pipelining/) support.
In fact, the more TCP connections I launch the worse its
performance gets.
* Libuv: I left it out because it would require too much work to
write it and make it perform well. More specifically,
I would have to use hiredis and implement support for pipelines
manually.
The code used in the benchmarks can be found at
* [Aedis](https://github.com/mzimbres/aedis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
<a name="api-reference"></a>
## Reference
* [High-Level](#high-level-api): Covers the topics discussed in this document.
library, so you can start using it right away by adding the
```cpp
#include <aedis/src.hpp>
```
in no more than one source file in your applications. For example, to
compile one of the examples manually
in no more than one source file in your applications. To build the
examples and tests, cmake is supported, for example
```cpp
g++ -std=c++20 -pthread -I/opt/boost_1_79_0/include/ -I./aedis/include examples/intro.cpp
BOOST_ROOT=/opt/boost_1_80_0 cmake --preset dev
```
The requirements for using Aedis are
- Boost 1.79 or greater.
- Boost 1.80 or greater.
- C++17 minimum.
- Redis 6 or higher (must support RESP3).
- Optionally also redis-cli and Redis Sentinel.
The following compilers are supported
- Tested with gcc: 10, 11, 12.
- Tested with clang: 11, 13, 14.
- Gcc: 10, 11, 12.
- Clang: 11, 13, 14.
- Visual Studio 17 2022, Visual Studio 16 2019.
## Acknowledgement
Acknowledgement to people that helped shape Aedis in one way or
another.
Acknowledgement to people that helped shape Aedis
* Richard Hodges ([madmongo1](https://github.com/madmongo1)): For very helpful support with Asio, the design of asynchronous programs, etc.
* Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation.
@@ -834,6 +812,38 @@ another.
## Changelog
### v1.4.0
* Removes dependency on Boost.Hana, boost::string_view, Boost.Variant2 and Boost.Spirit.
* Fixes build and setup CI on windows.
### v1.3.0-1
* Upgrades to Boost 1.80.0
* Removes automatic sending of the `HELLO` command. This can't be
implemented properly without bloating the connection class. It is
now the user's responsibility to send HELLO. Requests that contain it have
priority over other requests and will be moved to the front of the
queue, see `aedis::resp3::request::config`
* Automatic name resolving and connecting have been removed from
`aedis::connection::async_run`. Users have to do this step manually
now. The reason for this change is that having them built-in doesn't
offer the flexibility needed by Boost users.
* Removes health checks and idle timeout. This functionality must now
be implemented by users; see the examples. This is
part of making Aedis useful to a larger audience and suitable for
the Boost review process.
* The `aedis::connection` is now a typedef for a connection over
`net::ip::tcp::socket` and `aedis::ssl::connection` over
`net::ssl::stream<net::ip::tcp::socket>`. Users that need to use
another stream type must now specialize `aedis::basic_connection`.
* Adds a low level example of async code.
### v1.2.0
* `aedis::adapt` supports now tuples created with `std::tie`.
@@ -887,7 +897,7 @@ another.
* Renames `operation::receive_push` to `aedis::operation::receive`.
### v1.1.0...1
### v1.1.0-1
* Removes `coalesce_requests` from the `aedis::connection::config`, it
became a request property now, see `aedis::resp3::request::config::coalesce`.
@@ -926,7 +936,7 @@ another.
is possible in simple reconnection strategies but bloats the class
in more complex scenarios, for example, with sentinel,
authentication and TLS. This is trivial to implement in a separate
coroutine. As a result the enum `event` and `async_receive_event`
coroutine. As a result the `enum event` and `async_receive_event`
have been removed from the class too.
* Fixes a bug in `connection::async_receive_push` that prevented
@@ -1013,7 +1023,7 @@ another.
* Fixes the build with clang compilers and makes some improvements in
the documentation.
### v0.2.0...1
### v0.2.0-1
* Fixes a bug that happens on very high load. (v0.2.1)
* Major rewrite of the high-level API. There is no more need to use the low-level API anymore.
@@ -1021,7 +1031,7 @@ another.
* Support for reconnection: Pending requests are not canceled when a connection is lost and are re-sent when a new one is established.
* The library is not sending HELLO-3 on user behalf anymore. This is important to support AUTH properly.
### v0.1.0...2
### v0.1.0-2
* Adds reconnect coroutine in the `echo_server` example. (v0.1.2)
* Corrects `client::async_wait_for_data` with `make_parallel_group` to launch operation. (v0.1.2)

View File

@@ -14,8 +14,7 @@ using net::ip::tcp;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
net::awaitable<void>
example(boost::asio::ip::tcp::endpoint ep, std::string msg, int n)
auto example(boost::asio::ip::tcp::endpoint ep, std::string msg, int n) -> net::awaitable<void>
{
try {
auto ex = co_await net::this_coro::executor;

View File

@@ -33,7 +33,7 @@ awaitable_type echo(tcp_socket socket)
std::size_t n = co_await socket.async_read_some(net::buffer(data), use_awaitable);
co_await async_write(socket, net::buffer(data, n), use_awaitable);
}
} catch (std::exception const& e) {
} catch (std::exception const&) {
//std::printf("echo Exception: %s\n", e.what());
}
}


@@ -1250,7 +1250,7 @@ HTML_FILE_EXTENSION = .html
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER = doc/htmlheader.html
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
@@ -1260,7 +1260,7 @@ HTML_HEADER = doc/htmlheader.html
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER = doc/htmlfooter.html
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
@@ -1595,7 +1595,7 @@ DISABLE_INDEX = YES
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
GENERATE_TREEVIEW = YES
# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
# FULL_SIDEBAR option determines if the side bar is limited to only the treeview


@@ -3,7 +3,7 @@
<!-- Navigation index tabs for HTML output -->
<navindex>
<tab type="mainpage" visible="yes" title="Contents"/>
<tab type="pages" visible="no" title="" intro=""/>
<tab type="pages" visible="yes" title="" intro=""/>
<tab type="modules" visible="no" title="Reference" intro=""/>
<tab type="namespaces" visible="no" title="">
<tab type="namespacelist" visible="yes" title="" intro=""/>

File diff suppressed because it is too large.


@@ -1,19 +0,0 @@
<!-- HTML footer for doxygen 1.8.14-->
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">
Aedis 1.0.0 - Reference Guide generated on $datetime using Doxygen $doxygenversion &#160;&#160;
<img class="footer" src="rootlogo_s.gif" alt="root"/></li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer">
Author: Marcelo Zimbres Silva.
</address>
<!--END !GENERATE_TREEVIEW-->
</body>
</html>


@@ -1,34 +0,0 @@
<!-- HTML header for doxygen 1.8.14-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$search
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table bgcolor="#346295" cellspacing="0" cellpadding="6">
<tbody>
<tr>
<td valign="middle" style="color: #FFFFFF" nowrap="nowrap"><font size="6">$projectname $projectnumber</font> &#160; <br> $projectbrief </td>
<td style="width:100%"> $searchbox </td>
</tr>
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->


@@ -4,103 +4,70 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include "unistd.h"
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <iostream>
namespace net = boost::asio;
#if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
#include <unistd.h>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using connection = aedis::connection<tcp_socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using signal_set = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using aedis::adapt;
// Chat over redis pubsub. To test, run this program from different
// terminals and type messages to stdin. Use
//
// $ redis-cli monitor
//
// to monitor the message traffic.
// Chat over Redis pubsub. To test, run this program from multiple
// terminals and type messages to stdin.
// Receives messages from other users.
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
// Receives Redis pushes.
auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::vector<node<std::string>> resp;;) {
for (std::vector<resp3::node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
std::cout << resp.at(1).value << " " << resp.at(2).value << " " << resp.at(3).value << std::endl;
resp.clear();
}
}
// Subscribes to the channels when a new connection is established.
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "chat-channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (;;) {
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
// Publishes messages to other users.
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> conn)
// Publishes stdin messages to a Redis channel.
auto publisher(std::shared_ptr<stream_descriptor> in, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(in, net::dynamic_buffer(msg, 1024), "\n");
request req;
auto n = co_await net::async_read_until(*in, net::dynamic_buffer(msg, 1024), "\n");
resp3::request req;
req.push("PUBLISH", "chat-channel", msg);
co_await conn->async_exec(req);
msg.erase(0, n);
}
}
auto main() -> int
// Called from the main function (see main.cpp)
auto async_main() -> net::awaitable<void>
{
try {
net::io_context ioc{1};
stream_descriptor in{ioc, ::dup(STDIN_FILENO)};
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto stream = std::make_shared<stream_descriptor>(ex, ::dup(STDIN_FILENO));
signal_set sig{ex, SIGINT, SIGTERM};
auto conn = std::make_shared<connection>(ioc);
co_spawn(ioc, publisher(in, conn), net::detached);
co_spawn(ioc, push_receiver(conn), net::detached);
co_spawn(ioc, reconnect(conn), net::detached);
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "chat-channel");
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || publisher(stream, conn) || receiver(conn) ||
healthy_checker(conn) || sig.async_wait()) && conn->async_exec(req));
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto async_main() -> net::awaitable<void>
{
std::cout << "Requires support for posix streams." << std::endl;
co_return;
}
#endif // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,8 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis.hpp>
#include <aedis/src.hpp>


@@ -0,0 +1,93 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include "common.hpp"
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <iostream>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using aedis::resp3::request;
using aedis::adapt;
using aedis::operation;
namespace
{
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
}
auto healthy_checker(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
try {
request req;
req.push("PING");
timer_type timer{co_await net::this_coro::executor};
for (boost::system::error_code ec;;) {
timer.expires_after(std::chrono::seconds{1});
co_await (conn->async_exec(req, adapt()) || timer.async_wait(redir(ec)));
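// If the PING reply wins the race the timer wait is cancelled and ec becomes
// operation_aborted; an empty ec therefore means the reply did not arrive in
// time and returning here ends the health check.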
if (!ec) {
co_return;
}
// Waits some time before trying the next ping.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
} catch (...) {
}
}
auto
connect(
std::shared_ptr<connection> conn,
std::string const& host,
std::string const& port) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
timer_type timer{ex};
boost::system::error_code ec;
timer.expires_after(std::chrono::seconds{5});
auto const addrs = co_await (resv.async_resolve(host, port) || timer.async_wait(redir(ec)));
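// If the resolve completes first the timer wait is cancelled and ec becomes
// operation_aborted, so an empty ec here means the timer expired first.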
if (!ec)
throw std::runtime_error("Resolve timeout");
timer.expires_after(std::chrono::seconds{5});
co_await (net::async_connect(conn->next_layer(), std::get<0>(addrs)) || timer.async_wait(redir(ec)));
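// Same pattern as above: an empty ec means the connect attempt timed out.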
if (!ec)
throw std::runtime_error("Connect timeout");
}
auto run(net::awaitable<void> op) -> int
{
try {
net::io_context ioc;
net::co_spawn(ioc, std::move(op), [](std::exception_ptr p) {
if (p)
std::rethrow_exception(p);
});
ioc.run();
return 0;
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
return 1;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,34 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_EXAMPLES_COMMON_HPP
#define AEDIS_EXAMPLES_COMMON_HPP
#include <boost/asio.hpp>
#include <aedis.hpp>
#include <memory>
#include <iostream>
#include <vector>
#include <map>
#include <set>
#include <string>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
using connection = boost::asio::use_awaitable_t<>::as_default_on_t<aedis::connection>;
auto
connect(
std::shared_ptr<connection> conn,
std::string const& host,
std::string const& port) -> boost::asio::awaitable<void>;
auto healthy_checker(std::shared_ptr<connection> conn) -> boost::asio::awaitable<void>;
auto run(boost::asio::awaitable<void> op) -> int;
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)
#endif // AEDIS_EXAMPLES_COMMON_HPP

examples/common/main.cpp Normal file

@@ -0,0 +1,30 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include "common.hpp"
extern boost::asio::awaitable<void> async_main();
auto main() -> int
{
return run(async_main());
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <iostream>
auto main() -> int
{
std::cout << "Requires coroutine support." << std::endl;
return 0;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,37 +4,39 @@
* accompanying file LICENSE.txt)
*/
#include <map>
#include <vector>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
#include <map>
#include <vector>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using connection = aedis::connection<tcp_socket>;
// To avoid verbosity.
auto redir(boost::system::error_code& ec)
void print(std::map<std::string, std::string> const& cont)
{
return net::redirect_error(net::use_awaitable, ec);
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
// Sends some containers.
net::awaitable<void> send(endpoint ep)
void print(std::vector<int> const& cont)
{
auto ex = co_await net::this_coro::executor;
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
// Stores the content of some STL containers in Redis.
auto store() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// Resolves and connects (from examples/common.hpp to avoid verbosity)
co_await connect(conn, "127.0.0.1", "6379");
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
@@ -42,40 +44,46 @@ net::awaitable<void> send(endpoint ep)
std::map<std::string, std::string> map
{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
resp3::request req;
req.push("HELLO", 3);
req.push_range("RPUSH", "rpush-key", vec);
req.push_range("HSET", "hset-key", map);
req.push("QUIT");
connection conn{ex};
co_await (conn.async_run(ep) || conn.async_exec(req));
co_await (conn->async_run() || conn->async_exec(req));
}
// Retrieves a Redis hash as an std::map.
net::awaitable<std::map<std::string, std::string>> retrieve_hashes(endpoint ep)
auto hgetall() -> net::awaitable<std::map<std::string, std::string>>
{
connection conn{co_await net::this_coro::executor};
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
request req;
req.get_config().cancel_on_connection_lost = true;
// From examples/common.hpp to avoid verbosity
co_await connect(conn, "127.0.0.1", "6379");
// A request contains multiple commands.
resp3::request req;
req.push("HELLO", 3);
req.push("HGETALL", "hset-key");
req.push("QUIT");
std::map<std::string, std::string> ret;
auto resp = std::tie(ret, std::ignore);
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
// Responses as tuple elements.
std::tuple<aedis::ignore, std::map<std::string, std::string>, aedis::ignore> resp;
co_return std::move(ret);
// Executes the request and reads the response.
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
co_return std::get<1>(resp);
}
// Retrieves as a data structure.
net::awaitable<void> transaction(endpoint ep)
// Retrieves in a transaction.
auto transaction() -> net::awaitable<void>
{
connection conn{co_await net::this_coro::executor};
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
request req;
req.get_config().cancel_on_connection_lost = true;
// Resolves and connects (from examples/common.hpp to avoid verbosity)
co_await connect(conn, "127.0.0.1", "6379");
resp3::request req;
req.push("HELLO", 3);
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
@@ -83,6 +91,7 @@ net::awaitable<void> transaction(endpoint ep)
req.push("QUIT");
std::tuple<
aedis::ignore, // hello
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
@@ -90,36 +99,19 @@ net::awaitable<void> transaction(endpoint ep)
aedis::ignore // quit
> resp;
co_await (conn.async_run(ep) || conn.async_exec(req, adapt(resp)));
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
print(std::get<0>(std::get<3>(resp)).value());
print(std::get<1>(std::get<3>(resp)).value());
print(std::get<0>(std::get<4>(resp)).value());
print(std::get<1>(std::get<4>(resp)).value());
}
// Called from the main function (see main.cpp)
net::awaitable<void> async_main()
{
try {
endpoint ep{"127.0.0.1", "6379"};
co_await send(ep);
co_await transaction(ep);
auto const hashes = co_await retrieve_hashes(ep);
print(hashes);
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
co_await store();
co_await transaction();
auto const map = co_await hgetall();
print(map);
}
auto main() -> int
{
try {
net::io_context ioc;
net::co_spawn(ioc, async_main(), net::detached);
ioc.run();
} catch (...) {
std::cerr << "Error." << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,84 +4,59 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using signal_set = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
using connection = aedis::connection<tcp_socket>;
awaitable_type echo_server_session(tcp_socket socket, std::shared_ptr<connection> db)
auto echo_server_session(tcp_socket socket, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
request req;
std::tuple<std::string> response;
resp3::request req;
std::string resp;
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(response));
co_await net::async_write(socket, net::buffer(std::get<0>(response)));
std::get<0>(response).clear();
auto tmp = std::tie(resp);
co_await conn->async_exec(req, adapt(tmp));
co_await net::async_write(socket, net::buffer(resp));
resp.clear();
req.clear();
buffer.erase(0, n);
}
}
awaitable_type listener(std::shared_ptr<connection> db)
// Listens for tcp connections.
auto listener(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), db), net::detached);
net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), net::detached);
}
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
// Called from the main function (see main.cpp)
auto async_main() -> net::awaitable<void>
{
net::steady_timer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (boost::system::error_code ec1;;) {
co_await conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1));
std::clog << "async_run: " << ec1.message() << std::endl;
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait(net::use_awaitable);
}
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set sig{ex, SIGINT, SIGTERM};
resp3::request req;
req.push("HELLO", 3);
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || listener(conn) || healthy_checker(conn) ||
sig.async_wait()) && conn->async_exec(req));
}
auto main() -> int
{
try {
net::io_context ioc{1};
auto db = std::make_shared<connection>(ioc);
co_spawn(ioc, reconnect(db), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto) {
ioc.stop();
});
co_spawn(ioc, listener(db), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,40 +4,32 @@
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common/common.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::connection<>;
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto main() -> int
// Called from the main function (see main.cpp)
auto async_main() -> net::awaitable<void>
{
try {
boost::asio::io_context ioc;
connection conn{ioc};
resp3::request req;
req.push("HELLO", 3);
req.push("PING", "Hello world");
req.push("QUIT");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
std::tuple<std::string, aedis::ignore> resp;
conn.async_exec(req, adapt(resp), logger);
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
ioc.run();
std::cout << std::get<0>(resp) << std::endl;
} catch (...) {
std::cerr << "Error" << std::endl;
}
std::cout << "PING: " << std::get<1>(resp) << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -7,6 +7,7 @@
#include <tuple>
#include <string>
#include <thread>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis.hpp>
@@ -14,18 +15,17 @@
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
using connection = aedis::connection;
template <class Adapter>
auto exec(connection& conn, request const& req, Adapter adapter, boost::system::error_code& ec)
auto exec(std::shared_ptr<connection> conn, resp3::request const& req, Adapter adapter)
{
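// Dispatches to the connection's executor, since the connection is not
// thread-safe, and blocks this thread on a future until the response arrives.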
net::dispatch(
conn.get_executor(),
net::deferred([&]() { return conn.async_exec(req, adapter, net::deferred); }))
(net::redirect_error(net::use_future, ec)).get();
conn->get_executor(),
net::deferred([&]() { return conn->async_exec(req, adapter, net::deferred); }))
(net::use_future).get();
}
auto logger = [](auto const& ec)
@@ -36,24 +36,24 @@ int main()
try {
net::io_context ioc{1};
connection conn{ioc};
std::thread t{[&]() {
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
auto conn = std::make_shared<connection>(ioc);
net::ip::tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
net::connect(conn->next_layer(), res);
std::thread t{[conn, &ioc]() {
conn->async_run(logger);
ioc.run();
}};
request req;
req.get_config().cancel_on_connection_lost = true;
resp3::request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
boost::system::error_code ec;
std::tuple<std::string, aedis::ignore> resp;
exec(conn, req, adapt(resp), ec);
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
exec(conn, req, adapt(resp));
std::cout
<< "Exec: " << ec.message() << "\n"
<< "Response: " << std::get<0>(resp) << std::endl;
std::cout << "Response: " << std::get<1>(resp) << std::endl;
t.join();
} catch (std::exception const& e) {


@@ -6,21 +6,22 @@
#include <tuple>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <boost/asio/ssl.hpp>
#include <aedis.hpp>
#include <aedis/ssl/connection.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::ssl::connection<net::ssl::stream<net::ip::tcp::socket>>;
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
using connection = net::use_awaitable_t<>::as_default_on_t<aedis::ssl::connection>;
auto verify_certificate(bool, net::ssl::verify_context&) -> bool
{
@@ -28,30 +29,30 @@ auto verify_certificate(bool, net::ssl::verify_context&) -> bool
return true;
}
auto main() -> int
net::awaitable<void> async_main()
{
try {
net::io_context ioc;
resp3::request req;
req.push("HELLO", 3, "AUTH", "aedis", "aedis");
req.push("PING");
req.push("QUIT");
net::ssl::context ctx{net::ssl::context::sslv23};
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
connection conn{ioc, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
// Resolve
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
auto const endpoints = co_await resv.async_resolve("db.occase.de", "6380");
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("PING");
req.push("QUIT");
net::ssl::context ctx{net::ssl::context::sslv23};
connection conn{ex, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
std::tuple<std::string, aedis::ignore> resp;
conn.async_exec(req, adapt(resp), logger);
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
co_await net::async_connect(conn.lowest_layer(), endpoints);
co_await conn.next_layer().async_handshake(net::ssl::stream_base::client);
co_await (conn.async_run() || conn.async_exec(req, adapt(resp)));
ioc.run();
std::cout << "Response: " << std::get<0>(resp) << std::endl;
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
std::cout << "Response: " << std::get<1>(resp) << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,48 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <aedis.hpp>
#include <string>
#include <iostream>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using aedis::adapter::adapt2;
using net::ip::tcp;
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
auto const addrs = co_await resv.async_resolve("127.0.0.1", "6379");
tcp_socket socket{ex};
co_await net::async_connect(socket, addrs);
// Creates the request and writes to the socket.
resp3::request req;
req.push("HELLO", 3);
req.push("PING", "Hello world");
req.push("QUIT");
co_await resp3::async_write(socket, req);
// Responses
std::string buffer, resp;
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
co_await resp3::async_read(socket, dbuffer);
co_await resp3::async_read(socket, dbuffer, adapt2(resp));
co_await resp3::async_read(socket, dbuffer);
std::cout << "Ping: " << resp << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -14,24 +14,21 @@
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::request;
using aedis::adapter::adapt2;
using net::ip::tcp;
int main()
{
try {
net::io_context ioc;
tcp::resolver resv{ioc};
net::ip::tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp::socket socket{ioc};
net::ip::tcp::socket socket{ioc};
net::connect(socket, res);
// Creates the request and writes to the socket.
request req;
resp3::request req;
req.push("HELLO", 3);
req.push("PING");
req.push("PING", "Hello world");
req.push("QUIT");
resp3::write(socket, req);


@@ -1,65 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <map>
#include <set>
#include <vector>
#include <string>
#include <iostream>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/node.hpp>
// Some functions to make the examples less repetitive.
namespace net = boost::asio;
using aedis::resp3::node;
void print_aggr(std::vector<aedis::resp3::node<std::string>>& v)
{
if (std::empty(v))
return;
auto const m = aedis::resp3::element_multiplicity(v.front().data_type);
for (auto i = 0lu; i < m * v.front().aggregate_size; ++i)
std::cout << v[i + 1].value << " ";
std::cout << "\n";
v.clear();
}
template <class T>
void print(std::vector<T> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
template <class T>
void print(std::set<T> const& cont)
{
for (auto const& e: cont) std::cout << e << "\n";
}
template <class T, class U>
void print(std::map<T, U> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
void print(std::string const& e)
{
std::cout << e << std::endl;
}
void print_push(std::vector<aedis::resp3::node<std::string>>& resp)
{
std::cout
<< "Push type: " << resp.at(1).value << "\n"
<< "Channel: " << resp.at(2).value << "\n"
<< "Message: " << resp.at(3).value << "\n"
<< std::endl;
}


@@ -0,0 +1,70 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using endpoints = net::ip::tcp::resolver::results_type;
using aedis::adapt;
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
struct address {
std::string host;
std::string port;
};
// For more info see
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
auto resolve_master_address(std::vector<address> const& endpoints) -> net::awaitable<address>
{
resp3::request req;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec;
co_await connect(conn, ep.host, ep.port);
co_await (conn->async_run() && conn->async_exec(req, adapt(addr), redir(ec)));
conn->reset_stream();
if (std::get<0>(addr))
co_return address{std::get<0>(addr).value().at(0), std::get<0>(addr).value().at(1)};
}
co_return address{};
}
auto async_main() -> net::awaitable<void>
{
// A list of sentinel addresses from which only one is responsive
// to simulate sentinels that are down.
std::vector<address> const endpoints
{ {"foo", "26379"}
, {"bar", "26379"}
, {"127.0.0.1", "26379"}
};
auto const ep = co_await resolve_master_address(endpoints);
std::clog
<< "Host: " << ep.host << "\n"
<< "Port: " << ep.port << "\n"
<< std::flush;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,33 +4,51 @@
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#define BOOST_JSON_NO_LIB
#define BOOST_CONTAINER_NO_LIB
#include <boost/json.hpp>
#include <aedis.hpp>
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>
#include <iterator>
#include <string>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <aedis.hpp>
#include "print.hpp"
#include "common/common.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include <boost/json/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using connection = aedis::connection<>;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using namespace boost::json;
using aedis::adapt;
struct user {
std::string name;
std::string age;
std::string country;
friend auto operator<(user const& a, user const& b)
{
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
friend auto operator<<(std::ostream& os, user const& u) -> std::ostream&
{
os << "Name: " << u.name << "\n"
<< "Age: " << u.age << "\n"
<< "Country: " << u.country;
return os;
}
};
// Boost.Json serialization.
void tag_invoke(value_from_tag, value& jv, user const& u)
{
jv =
@@ -41,77 +59,53 @@ void tag_invoke(value_from_tag, value& jv, user const& u)
}
template<class T>
void extract(object const& obj, T& t, boost::string_view key)
void extract(object const& obj, T& t, std::string_view key)
{
t = value_to<T>(obj.at(key));
t = value_to<T>(obj.at(key));
}
auto tag_invoke(value_to_tag<user>, value const& jv)
{
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
}
// Serializes
// Aedis serialization
void to_bulk(std::pmr::string& to, user const& u)
{
aedis::resp3::to_bulk(to, serialize(value_from(u)));
}
// Deserializes
void from_bulk(user& u, boost::string_view sv, boost::system::error_code&)
void from_bulk(user& u, std::string_view sv, boost::system::error_code&)
{
value jv = parse(sv);
u = value_to<user>(jv);
}
auto operator<<(std::ostream& os, user const& u) -> std::ostream&
net::awaitable<void> async_main()
{
os << "Name: " << u.name << "\n"
<< "Age: " << u.age << "\n"
<< "Country: " << u.country;
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
return os;
resp3::request req;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
for (auto const& e: std::get<2>(resp))
std::cout << e << "\n";
}
auto operator<(user const& a, user const& b)
{
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
auto const logger = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
auto main() -> int
{
try {
net::io_context ioc;
connection conn{ioc};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
endpoint ep{"127.0.0.1", "6379"};
conn.async_exec(req, adapt(resp),logger);
conn.async_run(ep, {}, logger);
ioc.run();
// Print
print(std::get<2>(resp));
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -4,29 +4,19 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using signal_set = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using steady_timer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
/* This example will subscribe and read pushes indefinitely.
*
@@ -45,59 +35,36 @@ using connection = aedis::connection<tcp_socket>;
*/
// Receives pushes.
net::awaitable<void> push_receiver(std::shared_ptr<connection> conn)
auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::vector<node<std::string>> resp;;) {
for (std::vector<resp3::node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
std::cout << resp.at(1).value << " " << resp.at(2).value << " " << resp.at(3).value << std::endl;
resp.clear();
}
}
// See
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
auto async_main() -> net::awaitable<void>
{
request req;
req.get_config().cancel_if_not_connected = false;
req.get_config().cancel_on_connection_lost = true;
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set sig{ex, SIGINT, SIGTERM};
steady_timer timer{ex};
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
// The loop will reconnect when the connection is lost. To exit type Ctrl-C twice.
for (;;) {
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || healthy_checker(conn) || sig.async_wait() ||
receiver(conn)) && conn->async_exec(req));
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
auto main() -> int
{
try {
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, push_receiver(conn), net::detached);
net::co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,139 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
auto is_valid(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.host) && !std::empty(ep.port);
}
// Connects to a Redis instance over sentinel and performs failover in
// case of disconnection, see
// https://redis.io/docs/reference/sentinel-clients. This example
// assumes a sentinel and a redis server running on localhost.
net::awaitable<void> receive_pushes(std::shared_ptr<connection> conn)
{
for (std::vector<node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
print_push(resp);
resp.clear();
}
}
net::awaitable<endpoint> resolve()
{
// A list of sentinel addresses from which only one is responsive
// to simulate sentinels that are down.
std::vector<endpoint> const endpoints
{ {"foo", "26379"}
, {"bar", "26379"}
, {"127.0.0.1", "26379"}
};
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
connection conn{co_await net::this_coro::executor};
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec1, ec2;
co_await (
conn.async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn.async_exec(req, adapt(addr), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << std::endl;
conn.reset_stream();
if (std::get<0>(addr))
break;
}
endpoint ep;
if (std::get<0>(addr)) {
ep.host = std::get<0>(addr).value().at(0);
ep.port = std::get<0>(addr).value().at(1);
}
co_return ep;
}
net::awaitable<void> reconnect(std::shared_ptr<connection> conn)
{
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
auto ex = co_await net::this_coro::executor;
stimer timer{ex};
for (;;) {
auto ep = co_await net::co_spawn(ex, resolve(), net::use_awaitable);
if (!is_valid(ep)) {
std::clog << "Can't resolve master name" << std::endl;
co_return;
}
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec2))
);
std::clog << "async_run: " << ec1.message() << "\n"
<< "async_exec: " << ec2.message() << "\n"
<< "Starting the failover." << std::endl;
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
int main()
{
try {
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, receive_pushes(conn), net::detached);
net::co_spawn(ioc, reconnect(conn), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
int main() {std::cout << "Requires coroutine support." << std::endl; return 0;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -9,10 +9,10 @@
#include <tuple>
#include <limits>
#include <string_view>
#include <variant>
#include <boost/mp11.hpp>
#include <boost/variant2.hpp>
#include <boost/utility/string_view.hpp>
#include <boost/system.hpp>
#include <aedis/resp3/node.hpp>
@@ -44,7 +44,7 @@ public:
void
operator()(
std::size_t, resp3::node<boost::string_view> const&, boost::system::error_code&) { }
std::size_t, resp3::node<std::string_view> const&, boost::system::error_code&) { }
[[nodiscard]]
auto get_supported_response_size() const noexcept
@@ -63,7 +63,7 @@ class static_adapter {
private:
static constexpr auto size = std::tuple_size<Tuple>::value;
using adapter_tuple = boost::mp11::mp_transform<adapter::adapter_t, Tuple>;
using variant_type = boost::mp11::mp_rename<adapter_tuple, boost::variant2::variant>;
using variant_type = boost::mp11::mp_rename<adapter_tuple, std::variant>;
using adapters_array_type = std::array<variant_type, size>;
adapters_array_type adapters_;
@@ -87,10 +87,10 @@ public:
void
operator()(
std::size_t i,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
using boost::variant2::visit;
using std::visit;
// I am unsure whether this should be an error or an assertion.
BOOST_ASSERT(i < adapters_.size());
visit([&](auto& arg){arg(nd, ec);}, adapters_.at(i));
@@ -122,7 +122,7 @@ public:
void
operator()(
std::size_t,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
adapter_(nd, ec);
@@ -164,7 +164,7 @@ class wrapper {
public:
explicit wrapper(Adapter adapter) : adapter_{adapter} {}
void operator()(resp3::node<boost::string_view> const& node, boost::system::error_code& ec)
void operator()(resp3::node<std::string_view> const& node, boost::system::error_code& ec)
{ return adapter_(0, node, ec); }
[[nodiscard]]
@@ -210,7 +210,7 @@ inline auto adapt(std::size_t max_read_size = (std::numeric_limits<std::size_t>:
* 2. std::vector<node<String>>
*
* The types T1, T2, etc can be any STL container, any integer type
* and \c std::string
* and `std::string`.
*
* @param t Tuple containing the responses.
* @param max_read_size Specifies the maximum size of the read


@@ -18,11 +18,10 @@
#include <deque>
#include <vector>
#include <array>
#include <string_view>
#include <charconv>
#include <boost/assert.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/home/x3.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
@@ -32,54 +31,35 @@
namespace aedis::adapter::detail {
inline
auto parse_double(
char const* data,
std::size_t size,
boost::system::error_code& ec) -> double
{
static constexpr boost::spirit::x3::real_parser<double> p{};
double ret = 0;
if (!parse(data, data + size, p, ret))
ec = error::not_a_double;
return ret;
}
// Serialization.
template <class T>
auto from_bulk(
T& i,
boost::string_view sv,
boost::system::error_code& ec) -> typename std::enable_if<std::is_integral<T>::value, void>::type
auto from_bulk(T& i, std::string_view sv, boost::system::error_code& ec) -> typename std::enable_if<std::is_integral<T>::value, void>::type
{
i = resp3::detail::parse_uint(sv.data(), sv.size(), ec);
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
if (res.ec != std::errc())
ec = error::not_a_number;
}
inline
void from_bulk(
bool& t,
boost::string_view sv,
boost::system::error_code&)
void from_bulk(bool& t, std::string_view sv, boost::system::error_code&)
{
t = *sv.data() == 't';
}
inline
void from_bulk(
double& d,
boost::string_view sv,
boost::system::error_code& ec)
void from_bulk(double& d, std::string_view sv, boost::system::error_code& ec)
{
d = parse_double(sv.data(), sv.size(), ec);
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), d);
if (res.ec != std::errc())
ec = error::not_a_double;
}
template <class CharT, class Traits, class Allocator>
void
from_bulk(
std::basic_string<CharT, Traits, Allocator>& s,
boost::string_view sv,
std::string_view sv,
boost::system::error_code&)
{
s.append(sv.data(), sv.size());
@@ -105,7 +85,7 @@ private:
public:
explicit general_aggregate(Result* c = nullptr): result_(c) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code&)
void operator()(resp3::node<std::string_view> const& n, boost::system::error_code&)
{
result_->push_back({n.data_type, n.aggregate_size, n.depth, std::string{std::cbegin(n.value), std::cend(n.value)}});
}
@@ -119,7 +99,7 @@ private:
public:
explicit general_simple(Node* t = nullptr) : result_(t) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code& ec)
void operator()(resp3::node<std::string_view> const& n, boost::system::error_code& ec)
{
result_->data_type = n.data_type;
result_->aggregate_size = n.aggregate_size;
@@ -137,7 +117,7 @@ public:
void
operator()(
Result& result,
resp3::node<boost::string_view> const& n,
resp3::node<std::string_view> const& n,
boost::system::error_code& ec)
{
set_on_resp3_error(n.data_type, ec);
@@ -165,7 +145,7 @@ public:
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
@@ -204,7 +184,7 @@ public:
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
@@ -246,7 +226,7 @@ public:
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
@@ -274,7 +254,7 @@ public:
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
@@ -313,7 +293,7 @@ struct list_impl {
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
@@ -388,7 +368,7 @@ public:
void
operator()(
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
BOOST_ASSERT(result_);
@@ -407,7 +387,7 @@ public:
void
operator()(
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
if (nd.data_type == resp3::type::null)


@@ -9,9 +9,10 @@
#include <vector>
#include <tuple>
#include <string_view>
#include <variant>
#include <boost/mp11.hpp>
#include <boost/variant2.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
@@ -74,7 +75,7 @@ struct assigner {
template <class T1, class T2>
static void assign(T1& dest, T2& from)
{
dest[N] = internal_adapt(std::get<N>(from));
dest[N].template emplace<N>(internal_adapt(std::get<N>(from)));
assigner<N - 1>::assign(dest, from);
}
};
@@ -84,7 +85,7 @@ struct assigner<0> {
template <class T1, class T2>
static void assign(T1& dest, T2& from)
{
dest[0] = internal_adapt(std::get<0>(from));
dest[0].template emplace<0>(internal_adapt(std::get<0>(from)));
}
};
@@ -96,7 +97,7 @@ private:
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>,
std::variant>,
std::tuple_size<Tuple>::value>;
std::size_t i_ = 0;
@@ -109,7 +110,7 @@ public:
detail::assigner<std::tuple_size<Tuple>::value - 1>::assign(adapters_, *r);
}
void count(resp3::node<boost::string_view> const& nd)
void count(resp3::node<std::string_view> const& nd)
{
if (nd.depth == 1) {
if (is_aggregate(nd.data_type))
@@ -126,10 +127,10 @@ public:
void
operator()(
resp3::node<boost::string_view> const& nd,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
using boost::variant2::visit;
using std::visit;
if (nd.depth == 0) {
auto const real_aggr_size = nd.aggregate_size * element_multiplicity(nd.data_type);


@@ -18,56 +18,49 @@ namespace aedis {
/** @brief A connection to the Redis server.
* @ingroup high-level-api
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
* For more details, please see the documentation of each individual
* function.
*
* @tparam AsyncReadWriteStream A stream that supports reading and
* writing.
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection :
template <class AsyncReadWriteStream>
class basic_connection :
private detail::connection_base<
typename AsyncReadWriteStream::executor_type,
connection<AsyncReadWriteStream>> {
basic_connection<AsyncReadWriteStream>> {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
using base_type = detail::connection_base<executor_type, connection<AsyncReadWriteStream>>;
/** \brief Connection configuration parameters.
*/
struct timeouts {
/// Timeout of the resolve operation.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the resp3-handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval with which PING commands are sent to Redis.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The socket type when rebound to the specified executor.
using other = basic_connection<typename next_layer_type::template rebind_executor<Executor1>::other>;
};
/// Constructor
using base_type = detail::connection_base<executor_type, basic_connection<AsyncReadWriteStream>>;
/// Constructs from an executor.
explicit
connection(
basic_connection(
executor_type ex,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex}
{}
/// Constructs from a context.
explicit
connection(
basic_connection(
boost::asio::io_context& ioc,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: connection(ioc.get_executor(), resource)
: basic_connection(ioc.get_executor(), resource)
{ }
/// Returns the associated executor.
@@ -89,46 +82,13 @@ public:
/// Returns a const reference to the next layer.
auto next_layer() const noexcept -> auto const& { return stream_; }
/** @brief Establishes a connection with the Redis server asynchronously.
/** @brief Starts read and write operations
*
* This function performs the following steps
* This function starts read and write operations with the Redis
* server. More specifically, it will trigger the write of all
* requests, i.e. calls to `async_exec`, that happened prior to this
* call.
*
* @li Resolves the Redis host as of `async_resolve` with the
* timeout passed in the base class `connection::timeouts::resolve_timeout`.
*
* @li Connects to one of the endpoints returned by the resolve
* operation with the timeout passed in the base class
* `connection::timeouts::connect_timeout`.
*
* @li Performs a RESP3 handshake by sending a
* [HELLO](https://redis.io/commands/hello/) command with protocol
* version 3 and the credentials contained in the
* `aedis::endpoint` object. The timeout used is the one specified
* in `connection::timeouts::resp3_handshake_timeout`.
*
* @li Erases any password that may be contained in
* `endpoint::password`.
*
* @li Checks whether the server role corresponds to the one
* specified in the `endpoint`. If `endpoint::role` is left empty,
* no check is performed. If the role is different than the
* expected `async_run` will complete with
* `error::unexpected_server_role`.
*
* @li Starts healthy checks with a timeout twice the value of
* `connection::timeouts::ping_interval`. If no data is received during that
* time interval `connection::async_run` completes with
* `error::idle_timeout`.
*
* @li Starts the healthy check operation that sends the
* [PING](https://redis.io/commands/ping/) to Redis with a
* frequency equal to `connection::timeouts::ping_interval`.
*
* @li Starts reading from the socket and executes all requests
* that have been started prior to this function call.
*
* @param ep Redis endpoint.
* @param ts Timeouts used by the operations.
* @param token Completion token.
*
* The completion token must have the following signature
@@ -137,27 +97,23 @@ public:
* void f(boost::system::error_code);
* @endcode
*
* This function will complete when the connection is lost as
* follows. If the error is boost::asio::error::eof this function
* will complete without error.
* This function will complete when the connection is lost. If the
* error is boost::asio::error::eof this function will complete
* without error.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
async_run(
endpoint ep,
timeouts ts = timeouts{},
CompletionToken token = CompletionToken{})
auto async_run(CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, ts, std::move(token));
return base_type::async_run(std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
*
* This function will send a request to the Redis server and
* complete when the response arrives. If the request contains
* only commands that don't expect a response, the completion
* occurs after it has been written to the underlying stream.
* Multiple concurrent calls to this function will be
* This function sends a request to the Redis server and
* completes after the response has been processed. If the request
* contains only commands that don't expect a response, the
* completion occurs after it has been written to the underlying
* stream. Multiple concurrent calls to this function will be
* automatically queued by the implementation.
*
* @param req Request object.
@@ -189,7 +145,7 @@ public:
*
* Users that expect server pushes should call this function in a
* loop. If a push arrives and there is no reader, the connection
* will hang and eventually timeout.
* will hang.
*
* @param adapter The response adapter.
* @param token The Asio completion token.
@@ -222,12 +178,7 @@ public:
* @li operation::run: Cancels the `async_run` operation. Notice
* that the preferred way to close a connection is to send a
* [QUIT](https://redis.io/commands/quit/) command to the server.
* An unresponsive Redis server will also cause the idle-checks to
* timeout and lead to `connection::async_run` completing with
* `error::idle_timeout`. Calling `cancel(operation::run)`
* directly should be seen as the last option.
* @li operation::receive: Cancels any ongoing callto
* `async_receive`.
* @li operation::receive: Cancels any ongoing calls to `async_receive`.
*
* @param op The operation to be cancelled.
* @returns The number of operations that have been canceled.
@@ -236,33 +187,14 @@ public:
{ return base_type::cancel(op); }
private:
using this_type = connection<next_layer_type>;
using this_type = basic_connection<next_layer_type>;
template <class, class> friend class detail::connection_base;
template <class, class> friend struct detail::exec_read_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::receive_op;
template <class> friend struct detail::check_idle_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class, class> friend struct detail::connect_with_timeout_op;
template <class, class> friend struct detail::run_op;
template <class> friend struct detail::ping_op;
template <class Timer, class CompletionToken>
auto
async_connect(
boost::asio::ip::tcp::resolver::results_type const& endpoints,
timeouts ts,
Timer& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_with_timeout_op<this_type, Timer>{this, &endpoints, ts, &timer},
token, stream_);
}
template <class> friend struct detail::run_op;
void close() { stream_.close(); }
auto is_open() const noexcept { return stream_.is_open(); }
@@ -271,6 +203,11 @@ private:
AsyncReadWriteStream stream_;
};
/** \brief A connection that uses a boost::asio::ip::tcp::socket.
* \ingroup high-level-api
*/
using connection = basic_connection<boost::asio::ip::tcp::socket>;
} // aedis
#endif // AEDIS_CONNECTION_HPP
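// Editorial sketch (not part of the diff): minimal use of the new API, in
// which resolve, connect and the HELLO command are now the caller's
// responsibility. Assumes a Redis server on 127.0.0.1:6379 and the
// aedis.hpp convenience header; exact includes may differ at this revision.
#include <boost/asio.hpp>
#include <aedis.hpp>

namespace net = boost::asio;

int main()
{
   net::io_context ioc;
   aedis::connection conn{ioc};

   // Resolve and connect are no longer built into async_run.
   net::ip::tcp::resolver resv{ioc};
   net::connect(conn.next_layer(), resv.resolve("127.0.0.1", "6379"));

   // HELLO is no longer sent automatically; requests containing it are
   // moved to the front of the queue (hello_with_priority).
   aedis::resp3::request req;
   req.push("HELLO", "3");
   req.push("PING");
   req.push("QUIT");

   conn.async_exec(req, aedis::adapt(), net::detached);
   conn.async_run(net::detached);

   ioc.run();
}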

View File

@@ -19,11 +19,11 @@
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <aedis/adapt.hpp>
#include <aedis/operation.hpp>
#include <aedis/endpoint.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
@@ -46,22 +46,18 @@ public:
explicit
connection_base(executor_type ex, std::pmr::memory_resource* resource)
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
: writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, guarded_op_{ex}
, read_buffer_{resource}
, write_buffer_{resource}
, reqs_{resource}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
{
req_.get_config().cancel_if_not_connected = true;
req_.get_config().cancel_on_connection_lost = true;
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
auto get_executor() {return resv_.get_executor();}
auto get_executor() {return writer_timer_.get_executor();}
auto cancel(operation op) -> std::size_t
{
@@ -72,20 +68,16 @@ public:
}
case operation::run:
{
resv_.cancel();
derived().close();
read_timer_.cancel();
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
cancel_on_conn_lost();
return 1U;
}
case operation::receive:
{
push_channel_.cancel();
guarded_op_.cancel();
return 1U;
}
default: BOOST_ASSERT(false); return 0;
@@ -112,16 +104,19 @@ public:
return ret;
}
// Removes requests according to their cancel_on_connection_lost and retry_on_connection_lost configuration.
auto cancel_on_conn_lost() -> std::size_t
{
// Must return false if the request should be removed.
auto cond = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
if (ptr->get_request().get_config().cancel_on_connection_lost)
return false;
return !(!ptr->get_request().get_config().retry && ptr->is_written());
if (ptr->is_written()) {
return ptr->get_request().get_config().retry_on_connection_lost;
} else {
return !ptr->get_request().get_config().cancel_on_connection_lost;
}
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);
@@ -136,6 +131,7 @@ public:
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return ptr->reset_status();
});
return ret;
}
@@ -152,7 +148,7 @@ public:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<Derived, Adapter>{&derived(), &req, adapter}, token, resv_);
>(detail::exec_op<Derived, Adapter>{&derived(), &req, adapter}, token, writer_timer_);
}
template <
@@ -163,30 +159,25 @@ public:
CompletionToken token = CompletionToken{})
{
auto f = detail::make_adapter_wrapper(adapter);
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::receive_op<Derived, decltype(f)>{&derived(), f}, token, resv_);
return guarded_op_.async_wait(
resp3::async_read(derived().next_layer(), make_dynamic_buffer(adapter.get_max_read_size(0)), f, boost::asio::deferred),
std::move(token));
}
template <class Timeouts, class CompletionToken>
auto
async_run(endpoint ep, Timeouts ts, CompletionToken token)
template <class CompletionToken>
auto async_run(CompletionToken token)
{
ep_ = std::move(ep);
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<Derived, Timeouts>{&derived(), ts}, token, resv_);
>(detail::run_op<Derived>{&derived()}, token, writer_timer_);
}
private:
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
using push_channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
auto derived() -> Derived& { return static_cast<Derived&>(*this); }
@@ -287,16 +278,11 @@ private:
using reqs_type = std::pmr::deque<std::shared_ptr<req_info>>;
template <class, class> friend struct detail::receive_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class> friend struct detail::ping_op;
template <class, class> friend struct detail::run_op;
template <class> friend struct detail::run_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::exec_read_op;
template <class> friend struct detail::resolve_with_timeout_op;
template <class> friend struct detail::check_idle_op;
template <class, class> friend struct detail::start_op;
template <class> friend struct detail::send_receive_op;
void cancel_push_requests()
@@ -315,6 +301,15 @@ private:
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
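// Editorial note: when the new request carries a HELLO with priority,
// the partition_point below (applied to the reverse range) finds the
// boundary between requests that are already staged or written and
// those that are not; the rotate then moves the just-added request to
// the front of the unwritten block so HELLO is sent before anything else.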
if (info->get_request().has_hello_priority()) {
auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
return !e->is_written() && !e->is_staged();
});
std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
}
if (derived().is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
@@ -322,26 +317,13 @@ private:
auto make_dynamic_buffer(std::size_t max_read_size = 512)
{ return boost::asio::dynamic_buffer(read_buffer_, max_read_size); }
template <class CompletionToken>
auto
async_resolve_with_timeout(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_with_timeout_op<this_type>{this, d},
token, resv_);
}
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<Derived>{&derived()}, token, resv_.get_executor());
>(detail::reader_op<Derived>{&derived()}, token, writer_timer_);
}
template <class CompletionToken>
@@ -350,42 +332,7 @@ private:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<Derived>{&derived()}, token, resv_.get_executor());
}
template <
class Timeouts,
class CompletionToken>
auto async_start(Timeouts ts, CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::start_op<this_type, Timeouts>{this, ts}, token, resv_);
}
template <class CompletionToken>
auto
async_ping(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_op<Derived>{&derived(), d}, token, resv_);
}
template <class CompletionToken>
auto
async_check_idle(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<Derived>{&derived(), d}, token, check_idle_timer_);
>(detail::writer_op<Derived>{&derived()}, token, writer_timer_);
}
template <class Adapter, class CompletionToken>
@@ -394,7 +341,7 @@ private:
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<Derived, Adapter>{&derived(), adapter, cmds}, token, resv_);
>(detail::exec_read_op<Derived, Adapter>{&derived(), adapter, cmds}, token, writer_timer_);
}
void stage_request(req_info& ri)
@@ -422,57 +369,17 @@ private:
}
}
void prepare_hello(endpoint const& ep)
{
req_.clear();
if (requires_auth(ep)) {
req_.push("HELLO", "3", "AUTH", ep.username, ep.password);
} else {
req_.push("HELLO", "3");
}
}
auto expect_role(std::string const& expected) -> bool
{
if (std::empty(expected))
return true;
resp3::node<std::string> role_node;
role_node.data_type = resp3::type::blob_string;
role_node.aggregate_size = 1;
role_node.depth = 1;
role_node.value = "role";
auto iter = std::find(std::cbegin(response_), std::cend(response_), role_node);
if (iter == std::end(response_))
return false;
++iter;
BOOST_ASSERT(iter != std::cend(response_));
return iter->value == expected;
}
// IO objects
resolver_type resv_;
timer_type ping_timer_;
timer_type check_idle_timer_;
// Notice we use a timer to simulate a condition variable. It is
// more suitable than a channel here because the notify operation
// does not suspend.
timer_type writer_timer_;
timer_type read_timer_;
push_channel_type push_channel_;
detail::guarded_operation<executor_type> guarded_op_;
std::string read_buffer_;
std::string write_buffer_;
std::pmr::string read_buffer_;
std::pmr::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
// Last time we received data.
time_point_type last_data_;
resp3::request req_;
std::vector<resp3::node<std::string>> response_;
endpoint ep_;
// The result of async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
};
} // aedis

View File

@@ -9,6 +9,7 @@
#include <array>
#include <algorithm>
#include <string_view>
#include <boost/assert.hpp>
#include <boost/system.hpp>
@@ -18,9 +19,8 @@
#include <aedis/adapt.hpp>
#include <aedis/error.hpp>
#include <aedis/detail/net.hpp>
#include <aedis/detail/guarded_operation.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/exec.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
@@ -30,94 +30,6 @@
namespace aedis::detail {
template <class Conn, class Timer>
struct connect_with_timeout_op {
Conn* conn = nullptr;
boost::asio::ip::tcp::resolver::results_type const* endpoints = nullptr;
typename Conn::timeouts ts;
Timer* timer = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield detail::async_connect(conn->next_layer(), *timer, *endpoints, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
template <class Conn>
struct resolve_with_timeout_op {
Conn* conn = nullptr;
std::chrono::steady_clock::duration resolve_timeout{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::resolver::results_type const& res = {})
{
reenter (coro)
{
conn->ping_timer_.expires_after(resolve_timeout);
yield
aedis::detail::async_resolve(
conn->resv_, conn->ping_timer_,
conn->ep_.host, conn->ep_.port, std::move(self));
AEDIS_CHECK_OP0();
conn->endpoints_ = res;
self.complete({});
}
}
};
template <class Conn, class Adapter>
struct receive_op {
Conn* conn = nullptr;
Adapter adapter;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield conn->push_channel_.async_receive(std::move(self));
AEDIS_CHECK_OP1();
yield
resp3::async_read(
conn->next_layer(),
conn->make_dynamic_buffer(adapter.get_max_read_size(0)),
adapter, std::move(self));
// cancel(receive) is needed to cancel the channel, otherwise
// the read operation will block forever, see
// test_push_adapter.
AEDIS_CHECK_OP1(conn->cancel(operation::run); conn->cancel(operation::receive));
read_size = n;
yield conn->push_channel_.async_send({}, 0, std::move(self));
AEDIS_CHECK_OP1();
self.complete({}, read_size);
return;
}
}
};
template <class Conn, class Adapter>
struct exec_read_op {
Conn* conn;
@@ -150,15 +62,14 @@ struct exec_read_op {
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
AEDIS_CHECK_OP1(conn->cancel(operation::run));
AEDIS_CHECK_OP1(conn->cancel(operation::run););
}
// If the next response is a push we have to hand it over to
// the receive_op, wait for it to be done and continue.
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) {
yield
async_send_receive(conn->push_channel_, std::move(self));
AEDIS_CHECK_OP1(conn->cancel(operation::run));
yield conn->guarded_op_.async_run(std::move(self));
AEDIS_CHECK_OP1(conn->cancel(operation::run););
continue;
}
//-----------------------------------
@@ -167,12 +78,12 @@ struct exec_read_op {
resp3::async_read(
conn->next_layer(),
conn->make_dynamic_buffer(adapter.get_max_read_size(index)),
[i = index, adpt = adapter] (resp3::node<boost::string_view> const& nd, boost::system::error_code& ec) mutable { adpt(i, nd, ec); },
[i = index, adpt = adapter] (resp3::node<std::string_view> const& nd, boost::system::error_code& ec) mutable { adpt(i, nd, ec); },
std::move(self));
++index;
AEDIS_CHECK_OP1(conn->cancel(operation::run));
AEDIS_CHECK_OP1(conn->cancel(operation::run););
read_size += n;
@@ -209,14 +120,11 @@ struct exec_op {
{
// Check whether the user wants to wait for the connection to
// be established.
// TODO: is_open below reflects only whether a TCP connection
// has been established. We need a variable that informs
// whether HELLO was successful and we are connected with
// Redis.
if (req->get_config().cancel_if_not_connected && !conn->is_open())
if (req->get_config().cancel_if_not_connected && !conn->is_open()) {
return self.complete(error::not_connected, 0);
}
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), *req, conn->resv_.get_executor());
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), *req, conn->get_executor());
conn->add_request_info(info);
EXEC_OP_WAIT:
@@ -224,6 +132,8 @@ EXEC_OP_WAIT:
BOOST_ASSERT(ec == boost::asio::error::operation_aborted);
if (info->get_action() == Conn::req_info::action::stop) {
// Don't have to call remove_request as it has already
// been removed by cancel(exec).
return self.complete(ec, 0);
}
@@ -240,15 +150,18 @@ EXEC_OP_WAIT:
BOOST_ASSERT(conn->is_open());
if (req->size() == 0)
if (req->size() == 0) {
// Don't have to call remove_request as it has already
// been removed.
return self.complete({}, 0);
}
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->get_number_of_commands(), std::move(self));
AEDIS_CHECK_OP1();
AEDIS_CHECK_OP1(;);
read_size = n;
@@ -270,94 +183,25 @@ EXEC_OP_WAIT:
};
template <class Conn>
struct ping_op {
Conn* conn{};
std::chrono::steady_clock::duration ping_interval{};
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro) for (;;)
{
conn->ping_timer_.expires_after(ping_interval);
yield conn->ping_timer_.async_wait(std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
// Checking for is_open is necessary because the timer can
// complete with success although cancel has been called.
self.complete({});
return;
}
conn->req_.clear();
conn->req_.push("PING");
yield conn->async_exec(conn->req_, adapt(), std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
// Checking for is_open is necessary to avoid
// looping back on the timer although cancel has been
// called.
return self.complete({});
}
}
}
};
template <class Conn>
struct check_idle_op {
Conn* conn{};
std::chrono::steady_clock::duration ping_interval{};
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro) for (;;)
{
conn->check_idle_timer_.expires_after(2 * ping_interval);
yield conn->check_idle_timer_.async_wait(std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
// Checking for is_open is necessary because the timer can
// complete with success although cancel has been called.
return self.complete({});
}
auto const now = std::chrono::steady_clock::now();
if (conn->last_data_ + (2 * ping_interval) < now) {
conn->cancel(operation::run);
self.complete(error::idle_timeout);
return;
}
conn->last_data_ = now;
}
}
};
template <class Conn, class Timeouts>
struct start_op {
Conn* conn;
Timeouts ts;
struct run_op {
Conn* conn = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 4> order = {}
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec0 = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, boost::system::error_code ec3 = {})
, boost::system::error_code ec1 = {})
{
reenter (coro)
{
conn->write_buffer_.clear();
conn->cmds_ = 0;
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(token);},
[this](auto token) { return conn->writer(token);},
[this](auto token) { return conn->async_check_idle(ts.ping_interval, token);},
[this](auto token) { return conn->async_ping(ts.ping_interval, token);}
[this](auto token) { return conn->writer(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
@@ -370,83 +214,12 @@ struct start_op {
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
case 2: self.complete(ec2); break;
case 3: self.complete(ec3); break;
default: BOOST_ASSERT(false);
}
}
}
};
inline
auto check_resp3_handshake_failed(std::vector<resp3::node<std::string>> const& resp) -> bool
{
return std::size(resp) == 1 &&
(resp.front().data_type == resp3::type::simple_error ||
resp.front().data_type == resp3::type::blob_error ||
resp.front().data_type == resp3::type::null);
}
template <class Conn, class Timeouts>
struct run_op {
Conn* conn = nullptr;
Timeouts ts;
boost::asio::coroutine coro{};
template <class Self>
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro)
{
yield conn->async_resolve_with_timeout(ts.resolve_timeout, std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
yield conn->derived().async_connect(conn->endpoints_, ts, conn->ping_timer_, std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->prepare_hello(conn->ep_);
conn->ping_timer_.expires_after(ts.resp3_handshake_timeout);
conn->response_.clear();
yield
resp3::detail::async_exec(
conn->next_layer(),
conn->ping_timer_,
conn->req_,
adapter::adapt2(conn->response_),
conn->make_dynamic_buffer(),
std::move(self)
);
AEDIS_CHECK_OP0(conn->cancel(operation::run));
if (check_resp3_handshake_failed(conn->response_)) {
conn->cancel(operation::run);
self.complete(error::resp3_handshake_error);
return;
}
conn->ep_.password.clear();
if (!conn->expect_role(conn->ep_.role)) {
conn->cancel(operation::run);
self.complete(error::unexpected_server_role);
return;
}
conn->write_buffer_.clear();
conn->cmds_ = 0;
yield conn->async_start(ts, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
template <class Conn>
struct writer_op {
Conn* conn;
@@ -465,7 +238,7 @@ struct writer_op {
conn->coalesce_requests();
yield
boost::asio::async_write(conn->next_layer(), boost::asio::buffer(conn->write_buffer_), std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run));
AEDIS_CHECK_OP0(conn->cancel(operation::run););
conn->on_write();
@@ -515,9 +288,7 @@ struct reader_op {
return self.complete({}); // EOFINAE: EOF is not an error.
}
AEDIS_CHECK_OP0(conn->cancel(operation::run));
conn->last_data_ = std::chrono::steady_clock::now();
AEDIS_CHECK_OP0(conn->cancel(operation::run););
// We handle unsolicited events in the following way
//
@@ -540,25 +311,20 @@ struct reader_op {
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push
|| conn->reqs_.empty()
|| (!conn->reqs_.empty() && conn->reqs_.front()->get_number_of_commands() == 0)) {
yield async_send_receive(conn->push_channel_, std::move(self));
if (!conn->is_open() || ec || is_cancelled(self)) {
conn->cancel(operation::run);
self.complete(boost::asio::error::basic_errors::operation_aborted);
return;
}
yield conn->guarded_op_.async_run(std::move(self));
} else {
BOOST_ASSERT(conn->cmds_ != 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front()->get_number_of_commands() != 0);
conn->reqs_.front()->proceed();
yield conn->read_timer_.async_wait(std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
// Added this cancel here to make sure any outstanding
// ping is cancelled.
conn->cancel(operation::run);
self.complete(boost::asio::error::basic_errors::operation_aborted);
return;
}
ec = {};
}
if (!conn->is_open() || ec || is_cancelled(self)) {
conn->cancel(operation::run);
self.complete(boost::asio::error::basic_errors::operation_aborted);
return;
}
}
}

View File

@@ -0,0 +1,108 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_DETAIL_GUARDED_OPERATION_HPP
#define AEDIS_DETAIL_GUARDED_OPERATION_HPP
#include <boost/asio/experimental/channel.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail {
template <class Executor>
struct send_receive_op {
using channel_type = boost::asio::experimental::channel<Executor, void(boost::system::error_code, std::size_t)>;
channel_type* channel;
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro)
{
yield channel->async_send(boost::system::error_code{}, 0, std::move(self));
AEDIS_CHECK_OP0(;);
yield channel->async_send(boost::system::error_code{}, 0, std::move(self));
AEDIS_CHECK_OP0(;);
self.complete({});
}
}
};
template <class Executor, class Op>
struct wait_op {
using channel_type = boost::asio::experimental::channel<Executor, void(boost::system::error_code, std::size_t)>;
channel_type* channel;
Op op;
std::size_t res = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield channel->async_receive(std::move(self));
AEDIS_CHECK_OP1(;);
yield std::move(op)(std::move(self));
AEDIS_CHECK_OP1(channel->cancel(););
res = n;
yield channel->async_receive(std::move(self));
AEDIS_CHECK_OP1(;);
self.complete({}, res);
return;
}
}
};
template <class Executor = boost::asio::any_io_executor>
class guarded_operation {
public:
using executor_type = Executor;
guarded_operation(executor_type ex) : channel_{ex} {}
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(send_receive_op<executor_type>{&channel_}, token, channel_);
}
template <class Op, class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_wait(Op&& op, CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(wait_op<executor_type, Op>{&channel_, std::move(op)}, token, channel_);
}
void cancel() {channel_.cancel();}
private:
using channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
template <class> friend struct send_receive_op;
template <class, class> friend struct wait_op;
channel_type channel_;
};
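// Editorial sketch (not part of the diff, names are illustrative): how
// async_run and async_wait pair up. async_wait wraps an operation between
// two channel receives, while async_run performs the two matching sends,
// so the wrapped operation runs only while a peer is inside async_run.
// This is how the reader loop hands a server push over to
// connection::async_receive:
//
//    guarded_operation<> guard{ex};
//
//    // Consumer side (e.g. connection::async_receive):
//    guard.async_wait(
//       resp3::async_read(stream, dyn_buffer, adapter, boost::asio::deferred),
//       consumer_token);
//
//    // Producer side (e.g. the reader loop, upon finding a push frame):
//    guard.async_run(producer_token);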
} // aedis::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_DETAIL_GUARDED_OPERATION_HPP

View File

@@ -1,205 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_NET_HPP
#define AEDIS_NET_HPP
#include <array>
#include <boost/system.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/connect.hpp>
#include <boost/assert.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail {
template <class Executor>
using conn_timer_t = boost::asio::basic_waitable_timer<std::chrono::steady_clock, boost::asio::wait_traits<std::chrono::steady_clock>, Executor>;
template <
class Stream,
class EndpointSequence
>
struct connect_op {
Stream* socket;
conn_timer_t<typename Stream::executor_type>* timer;
EndpointSequence* endpoints;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, typename Stream::protocol_type::endpoint const& ep = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
auto f = [](boost::system::error_code const&, auto const&) { return true; };
return boost::asio::async_connect(*socket, *endpoints, f, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, {});
return;
}
switch (order[0]) {
case 0: self.complete(ec1, ep); return;
case 1:
{
if (ec2) {
self.complete(ec2, {});
} else {
self.complete(error::connect_timeout, ep);
}
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <class Resolver, class Timer>
struct resolve_op {
Resolver* resv;
Timer* timer;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::asio::ip::tcp::resolver::results_type res = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resv->async_resolve(host.data(), port.data(), token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, {});
return;
}
switch (order[0]) {
case 0: self.complete(ec1, res); return;
case 1:
{
if (ec2) {
self.complete(ec2, {});
} else {
self.complete(error::resolve_timeout, {});
}
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <class Channel>
struct send_receive_op {
Channel* channel;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro)
{
yield
channel->async_send(boost::system::error_code{}, 0, std::move(self));
AEDIS_CHECK_OP1();
yield
channel->async_receive(std::move(self));
AEDIS_CHECK_OP1();
self.complete({}, 0);
}
}
};
template <
class Stream,
class EndpointSequence,
class CompletionToken
>
auto async_connect(
Stream& socket,
conn_timer_t<typename Stream::executor_type>& timer,
EndpointSequence ep,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, typename Stream::protocol_type::endpoint const&)
>(connect_op<Stream, EndpointSequence>
{&socket, &timer, &ep}, token, socket, timer);
}
template <
class Resolver,
class Timer,
class CompletionToken =
boost::asio::default_completion_token_t<typename Resolver::executor_type>
>
auto async_resolve(
Resolver& resv,
Timer& timer,
boost::string_view host,
boost::string_view port,
CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, boost::asio::ip::tcp::resolver::results_type)
>(resolve_op<Resolver, Timer>{&resv, &timer, host, port}, token, resv, timer);
}
template <
class Channel,
class CompletionToken =
boost::asio::default_completion_token_t<typename Channel::executor_type>
>
auto async_send_receive(Channel& channel, CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(send_receive_op<Channel>{&channel}, token, channel);
}
} // aedis::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_NET_HPP

View File

@@ -1,38 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ENDPOINT_HPP
#define AEDIS_ENDPOINT_HPP
#include <string>
namespace aedis {
/** \brief A Redis endpoint.
* \ingroup high-level-api
*/
struct endpoint {
/// Redis server address.
std::string host;
/// Redis server port.
std::string port;
/// Expected role if any.
std::string role{};
/// Username if authentication is required.
std::string username{};
/// Password if authentication is required.
std::string password{};
};
auto requires_auth(endpoint const& ep) noexcept -> bool;
} // aedis
#endif // AEDIS_ENDPOINT_HPP

View File

@@ -16,20 +16,8 @@ namespace aedis {
*/
enum class error
{
/// Resolve timeout.
resolve_timeout = 1,
/// Connect timeout.
connect_timeout,
/// Idle timeout.
idle_timeout,
/// Exec timeout.
exec_timeout,
/// Invalid RESP3 type.
invalid_data_type,
invalid_data_type = 1,
/// Can't parse the string as a number.
not_a_number,
@@ -73,17 +61,8 @@ enum class error
/// Got RESP3 null.
resp3_null,
/// Unexpected server role.
unexpected_server_role,
/// SSL handshake timeout.
ssl_handshake_timeout,
/// There is no established connection.
not_connected,
/// RESP3 handshake error (HELLO command).
resp3_handshake_error,
};
/** \internal

View File

@@ -1,18 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/endpoint.hpp>
#include <string>
namespace aedis {
auto requires_auth(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.username) && !std::empty(ep.password);
}
} // aedis

View File

@@ -21,10 +21,6 @@ struct error_category_impl : boost::system::error_category {
auto message(int ev) const -> std::string override
{
switch(static_cast<error>(ev)) {
case error::resolve_timeout: return "Resolve operation timeout.";
case error::connect_timeout: return "Connect operation timeout.";
case error::idle_timeout: return "Idle timeout.";
case error::exec_timeout: return "Exec timeout.";
case error::invalid_data_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number.";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
@@ -40,10 +36,7 @@ struct error_category_impl : boost::system::error_category {
case error::incompatible_size: return "Aggregate container has incompatible size.";
case error::not_a_double: return "Not a double.";
case error::resp3_null: return "Got RESP3 null.";
case error::unexpected_server_role: return "Unexpected server role.";
case error::ssl_handshake_timeout: return "SSL handshake timeout.";
case error::not_connected: return "Not connected.";
case error::resp3_handshake_error: return "RESP3 handshake error (HELLO command).";
default: BOOST_ASSERT(false); return "Aedis error.";
}
}

View File

@@ -1,173 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_EXEC_HPP
#define AEDIS_RESP3_EXEC_HPP
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/request.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::resp3::detail {
template <
class AsyncStream,
class Adapter,
class DynamicBuffer
>
struct exec_op {
AsyncStream* socket = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
std::size_t n_cmds = 0;
std::size_t size = 0;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro) for (;;)
{
if (req) {
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
AEDIS_CHECK_OP1();
if (n_cmds == 0) {
return self.complete({}, n);
}
req = nullptr;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
AEDIS_CHECK_OP1();
size += n;
if (--n_cmds == 0) {
return self.complete(ec, size);
}
}
}
};
template <
class AsyncStream,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &req, adapter, dbuf, req.size()}, token, socket);
}
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer
>
struct exec_with_timeout_op {
AsyncStream* socket = nullptr;
Timer* timer = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return detail::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted, 0);
return;
}
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1:
{
if (ec2) {
self.complete(ec2, 0);
} else {
self.complete(aedis::error::exec_timeout, 0);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
Timer& timer,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_with_timeout_op<AsyncStream, Timer, Adapter, DynamicBuffer>
{&socket, &timer, &req, adapter, dbuf}, token, socket, timer);
}
} // aedis::resp3::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_RESP3_EXEC_HPP

View File

@@ -1,25 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/home/x3.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/type.hpp>
namespace aedis::resp3::detail {
auto parse_uint(char const* data, std::size_t size, boost::system::error_code& ec) -> std::size_t
{
static constexpr boost::spirit::x3::uint_parser<std::size_t, 10> p{};
std::size_t ret = 0;
if (!parse(data, data + size, p, ret))
ec = error::not_a_number;
return ret;
}
} // aedis::resp3::detail

View File

@@ -10,21 +10,31 @@
#include <array>
#include <limits>
#include <system_error>
#include <charconv>
#include <string_view>
#include <cstdint>
#include <boost/assert.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/node.hpp>
namespace aedis::resp3::detail {
auto parse_uint(char const* data, std::size_t size, boost::system::error_code& ec) -> std::size_t;
using int_type = std::uint64_t;
inline
void to_int(int_type& i, std::string_view sv, boost::system::error_code& ec)
{
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
if (res.ec != std::errc())
ec = error::not_a_number;
}
template <class ResponseAdapter>
class parser {
private:
using node_type = node<boost::string_view>;
using node_type = node<std::string_view>;
static constexpr std::size_t max_embedded_depth = 5;
ResponseAdapter adapter_;
@@ -40,7 +50,7 @@ private:
std::array<std::size_t, max_embedded_depth + 1> sizes_ = {{1}};
// Contains the length expected in the next bulk read.
std::size_t bulk_length_ = (std::numeric_limits<std::size_t>::max)();
int_type bulk_length_ = (std::numeric_limits<int_type>::max)();
// The type of the next bulk. Contains type::invalid if no bulk is
// expected.
@@ -83,7 +93,7 @@ public:
switch (t) {
case type::streamed_string_part:
{
bulk_length_ = parse_uint(data + 1, n - 2, ec);
to_int(bulk_length_, std::string_view{data + 1, n - 3}, ec);
if (ec)
return 0;
@@ -106,7 +116,7 @@ public:
// 0.
sizes_[++depth_] = (std::numeric_limits<std::size_t>::max)();
} else {
bulk_length_ = parse_uint(data + 1, n - 2, ec);
to_int(bulk_length_, std::string_view{data + 1, n - 3}, ec);
if (ec)
return 0;
@@ -169,7 +179,8 @@ public:
case type::attribute:
case type::map:
{
auto const l = parse_uint(data + 1, n - 2, ec);
int_type l = -1;
to_int(l, std::string_view{data + 1, n - 3}, ec);
if (ec)
return 0;

View File

@@ -7,12 +7,12 @@
#ifndef AEDIS_RESP3_READ_OPS_HPP
#define AEDIS_RESP3_READ_OPS_HPP
#include <string_view>
#include <boost/assert.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <boost/asio/yield.hpp>
@@ -28,14 +28,14 @@ auto is_cancelled(T const& self)
#define AEDIS_CHECK_OP0(X)\
if (ec || aedis::detail::is_cancelled(self)) {\
X;\
X\
self.complete(!!ec ? ec : boost::asio::error::operation_aborted);\
return;\
}
#define AEDIS_CHECK_OP1(X)\
if (ec || aedis::detail::is_cancelled(self)) {\
X;\
X\
self.complete(!!ec ? ec : boost::asio::error::operation_aborted, {});\
return;\
}
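// Editorial note: the trailing semicolon is now supplied at the call
// site, so e.g. AEDIS_CHECK_OP0(conn->cancel(operation::run);) expands
// roughly to
//
//    if (ec || aedis::detail::is_cancelled(self)) {
//       conn->cancel(operation::run);
//       self.complete(!!ec ? ec : boost::asio::error::operation_aborted);
//       return;
//    }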
@@ -43,7 +43,7 @@ auto is_cancelled(T const& self)
namespace aedis::resp3::detail {
struct ignore_response {
void operator()(node<boost::string_view> nd, boost::system::error_code& ec)
void operator()(node<std::string_view> nd, boost::system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = error::resp3_simple_error; return;
@@ -82,7 +82,7 @@ public:
if (parser_.bulk() == type::invalid) {
yield
boost::asio::async_read_until(stream_, buf_, "\r\n", std::move(self));
AEDIS_CHECK_OP1();
AEDIS_CHECK_OP1(;);
} else {
// On a bulk read we can't read until delimiter since the
// payload may contain the delimiter itself so we have to
@@ -101,7 +101,7 @@ public:
buf_.data(buffer_size_, parser_.bulk_length() + 2 - buffer_size_),
boost::asio::transfer_all(),
std::move(self));
AEDIS_CHECK_OP1();
AEDIS_CHECK_OP1(;);
}
n = parser_.bulk_length() + 2;

View File

@@ -4,11 +4,12 @@
* accompanying file LICENSE.txt)
*/
#include <string_view>
#include <aedis/resp3/request.hpp>
namespace aedis::resp3::detail {
auto has_push_response(boost::string_view cmd) -> bool
auto has_push_response(std::string_view cmd) -> bool
{
if (cmd == "SUBSCRIBE") return true;
if (cmd == "PSUBSCRIBE") return true;

View File

@@ -9,9 +9,6 @@
#include <aedis/resp3/type.hpp>
#include <string>
#include <vector>
namespace aedis::resp3 {
/** \brief A node in the response tree.
@@ -38,27 +35,6 @@ struct node {
String value{};
};
/** @brief Converts the node to a string.
* @relates node
*
* @param in The node object.
*/
template <class String>
auto to_string(node<String> const& in)
{
std::string out;
out += std::to_string(in.depth);
out += '\t';
out += to_string(in.data_type);
out += '\t';
out += std::to_string(in.aggregate_size);
out += '\t';
if (!is_aggregate(in.data_type))
out.append(in.value.data(), in.value.size());
return out;
}
/** @brief Compares a node for equality.
* @relates node
*
@@ -74,21 +50,6 @@ auto operator==(node<String> const& a, node<String> const& b)
&& a.value == b.value;
};
/** @brief Writes the node string to the stream.
* @relates node
*
* @param os Output stream.
* @param node Node object.
*
* \remark Binary data is not converted to text.
*/
template <class String>
auto operator<<(std::ostream& os, node<String> const& node) -> std::ostream&
{
os << to_string(node);
return os;
}
} // aedis::resp3
#endif // AEDIS_RESP3_NODE_HPP

View File

@@ -10,10 +10,6 @@
#include <string>
#include <tuple>
#include <memory_resource>
#include <boost/hana.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/type.hpp>
// NOTE: Consider detecting tuples in the type in the parameter pack
@@ -46,7 +42,7 @@ constexpr char const* separator = "\r\n";
* See more in @ref serialization.
*/
template <class Request>
void to_bulk(Request& to, boost::string_view data)
void to_bulk(Request& to, std::string_view data)
{
auto const str = std::to_string(data.size());
@@ -61,12 +57,12 @@ template <class Request, class T, typename = typename std::enable_if<std::is_int
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, boost::string_view{s});
to_bulk(to, std::string_view{s});
}
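// Editorial sketch: user-defined types can be serialized by providing a
// to_bulk overload reachable via ADL (the type and namespace below are
// hypothetical):
//
//    namespace myapp {
//       struct id { int value; };
//
//       template <class Request>
//       void to_bulk(Request& to, id const& obj)
//          { aedis::resp3::to_bulk(to, std::to_string(obj.value)); }
//    }
//
//    // Later: req.push("SET", "key", myapp::id{42});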
namespace detail {
auto has_push_response(boost::string_view cmd) -> bool;
auto has_push_response(std::string_view cmd) -> bool;
template <class T>
struct add_bulk_impl {
@@ -78,6 +74,21 @@ struct add_bulk_impl {
}
};
template <class ...Ts>
struct add_bulk_impl<std::tuple<Ts...>> {
template <class Request>
static void add(Request& to, std::tuple<Ts...> const& t)
{
auto f = [&](auto const&... vs)
{
using namespace aedis::resp3;
(to_bulk(to, vs), ...);
};
std::apply(f, t);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
@@ -89,23 +100,6 @@ struct add_bulk_impl<std::pair<U, V>> {
}
};
template <class ...Ts>
struct add_bulk_impl<boost::hana::tuple<Ts...>> {
template <class Request>
static void add(Request& to, boost::hana::tuple<Ts...> const& from)
{
using boost::hana::for_each;
// Fold expressions are C++17, so we use hana.
//(detail::add_bulk(*request_, args), ...);
for_each(from, [&](auto const& e) {
using namespace aedis::resp3;
to_bulk(to, e);
});
}
};
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
@@ -136,7 +130,7 @@ struct bulk_counter<std::pair<T, U>> {
};
template <class Request>
void add_blob(Request& to, boost::string_view blob)
void add_blob(Request& to, std::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
@@ -169,37 +163,43 @@ void add_separator(Request& to)
*
* \li Non-string types will be converted to string by using \c
* to_bulk, which must be made available over ADL.
* \li Uses std::string as internal storage.
* \li Uses a std::pmr::string for internal storage.
*/
class request {
public:
/// Request configuration options.
struct config {
/** \brief If set to true, requests started with
* `aedis::connection::async_exec` will fail if the connection is
* lost while the request is pending. The default
* behaviour is not to close requests.
/** \brief Setting it to true will cause
* `aedis::connection::async_exec` to complete with error if the
* connection is lost. Affects only requests that haven't been
* sent yet.
*/
bool cancel_on_connection_lost = false;
bool cancel_on_connection_lost = true;
/** \brief If true this request will be coalesced with other requests,
* see https://redis.io/topics/pipelining. If false, this
* request will be sent individually.
/** \brief If true the request will be coalesced with other
* requests, see https://redis.io/topics/pipelining. Otherwise
* the request is sent individually.
*/
bool coalesce = true;
/** \brief If set to true, requests started with
* `aedis::connection::async_exec` will fail if the call happens
* before the connection with Redis was stablished.
/** \brief If true, the request will complete with error if the
* call happens before the connection with Redis was established.
*/
bool cancel_if_not_connected = false;
/** \brief If true, the implementation will resend this
* request if it remained unresponded when
* `aedis::connection::async_run` completed. Has effect only if
* cancel_on_connection_lost is true.
/** \brief If true, `aedis::connection::async_exec` will not
* cancel this request if the connection is lost. Affects only
* requests that have been written to the socket but remained
* unanswered when `aedis::connection::async_run` completed.
*/
bool retry = true;
bool retry_on_connection_lost = false;
/** \brief If this request has a HELLO command and this flag is
* true, the `aedis::connection` will move it to the front of
* the queue of awaiting requests. This makes it possible to
* send HELLO and authenticate before other commands are sent.
*/
bool hello_with_priority = true;
};
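// Editorial sketch: a long-lived request (e.g. a subscriber) that should
// survive reconnection could be configured as
//
//    aedis::resp3::request req;
//    req.get_config().cancel_on_connection_lost = false;
//    req.get_config().retry_on_connection_lost = true;
//    req.push("SUBSCRIBE", "channel");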
/** \brief Constructor
@@ -208,17 +208,19 @@ public:
* \param resource Memory resource.
*/
explicit
request(config cfg = config{false, true, false, true},
request(config cfg = config{true, true, false, false, true},
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: payload_(resource), cfg_{cfg}
{}
: cfg_{cfg}, payload_(resource) {}
/// Returns the number of commands contained in this request.
[[nodiscard]] auto size() const noexcept -> std::size_t { return commands_;};
[[nodiscard]] auto size() const noexcept -> std::size_t
{ return commands_;};
// Returns the request payload.
[[nodiscard]] auto payload() const noexcept -> auto const& { return payload_;}
[[nodiscard]] auto payload() const noexcept -> auto const&
{ return payload_;}
[[nodiscard]] auto has_hello_priority() const noexcept -> auto const&
{ return has_hello_priority_;}
/// Clears the request preserving allocated memory.
void clear()
@@ -227,6 +229,16 @@ public:
commands_ = 0;
}
/// Calls std::pmr::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
/** @brief Appends a new command to the end of the request.
*
* For example
@@ -243,19 +255,16 @@ public:
* \param args Command arguments.
*/
template <class... Ts>
void push(boost::string_view cmd, Ts const&... args)
void push(std::string_view cmd, Ts const&... args)
{
using boost::hana::for_each;
using boost::hana::make_tuple;
using resp3::type;
auto constexpr pack_size = sizeof...(Ts);
detail::add_header(payload_, type::array, 1 + pack_size);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, make_tuple(args...));
detail::add_bulk(payload_, std::tie(std::forward<Ts const&>(args)...));
if (!detail::has_push_response(cmd))
++commands_;
check_cmd(cmd);
}
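// Editorial sketch of push and push_range usage (the container below is
// illustrative):
//
//    std::map<std::string, std::string> map
//       {{"key1", "value1"}, {"key2", "value2"}};
//
//    aedis::resp3::request req;
//    req.push("HELLO", "3");
//    req.push("SET", "key", "some-value", "EX", "2");
//    req.push_range("HSET", "hash-key", std::cbegin(map), std::cend(map));
//    req.push("QUIT");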
/** @brief Appends a new command to the end of the request.
@@ -280,7 +289,7 @@ public:
* \param end Iterator to the end of the range.
*/
template <class Key, class ForwardIterator>
void push_range(boost::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end,
void push_range(std::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
@@ -298,8 +307,7 @@ public:
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
@@ -320,7 +328,7 @@ public:
* \param end Iterator to the end of the range.
*/
template <class ForwardIterator>
void push_range(boost::string_view cmd, ForwardIterator begin, ForwardIterator end,
void push_range(std::string_view cmd, ForwardIterator begin, ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
@@ -337,8 +345,7 @@ public:
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
@@ -350,7 +357,7 @@ public:
* \param range Range to send, e.g. a \c std::map.
*/
template <class Key, class Range>
void push_range(boost::string_view cmd, Key const& key, Range const& range,
void push_range(std::string_view cmd, Key const& key, Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
@@ -366,7 +373,7 @@ public:
* \param range Range to send, e.g. a \c std::map.
*/
template <class Range>
void push_range(boost::string_view cmd, Range const& range,
void push_range(std::string_view cmd, Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
@@ -374,20 +381,20 @@ public:
push_range(cmd, begin(range), end(range));
}
/// Calls std::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
private:
void check_cmd(std::string_view cmd)
{
if (!detail::has_push_response(cmd))
++commands_;
if (cmd == "HELLO")
has_hello_priority_ = cfg_.hello_with_priority;
}
config cfg_;
std::pmr::string payload_;
std::size_t commands_ = 0;
config cfg_;
bool has_hello_priority_ = false;
};
} // aedis::resp3

View File

@@ -5,7 +5,5 @@
*/
#include <aedis/impl/error.ipp>
#include <aedis/impl/endpoint.ipp>
#include <aedis/resp3/impl/request.ipp>
#include <aedis/resp3/impl/type.ipp>
#include <aedis/resp3/detail/impl/parser.ipp>

View File

@@ -12,12 +12,11 @@
#include <boost/asio/io_context.hpp>
#include <aedis/detail/connection_base.hpp>
#include <aedis/ssl/detail/connection_ops.hpp>
namespace aedis::ssl {
template <class>
class connection;
class basic_connection;
/** \brief An SSL connection to the Redis server.
* \ingroup high-level-api
@@ -31,55 +30,44 @@ class connection;
*
*/
template <class AsyncReadWriteStream>
class connection<boost::asio::ssl::stream<AsyncReadWriteStream>> :
class basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>> :
private aedis::detail::connection_base<
typename boost::asio::ssl::stream<AsyncReadWriteStream>::executor_type,
connection<boost::asio::ssl::stream<AsyncReadWriteStream>>> {
basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>>> {
public:
/// Type of the next layer
using next_layer_type = boost::asio::ssl::stream<AsyncReadWriteStream>;
/// Executor type.
using executor_type = typename next_layer_type::executor_type;
using base_type = aedis::detail::connection_base<executor_type, connection<boost::asio::ssl::stream<AsyncReadWriteStream>>>;
/** \brief Connection configuration parameters.
*/
struct timeouts {
/// Timeout of the resolve operation.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the ssl handshake operation.
std::chrono::steady_clock::duration handshake_timeout = std::chrono::seconds{10};
/// Timeout of the resp3 handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval of ping operations.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The socket type when rebound to the specified executor.
using other = basic_connection<boost::asio::ssl::stream<typename AsyncReadWriteStream::template rebind_executor<Executor1>::other>>;
};
using base_type = aedis::detail::connection_base<executor_type, basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>>>;
/// Constructor
explicit
connection(
basic_connection(
executor_type ex,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex, ctx}
{
}
{ }
/// Constructor
explicit
connection(
basic_connection(
boost::asio::io_context& ioc,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: connection(ioc.get_executor(), ctx, resource)
: basic_connection(ioc.get_executor(), ctx, resource)
{ }
/// Returns the associated executor.
@@ -102,13 +90,9 @@ public:
* See aedis::connection::async_run for more information.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
async_run(
endpoint ep,
timeouts ts = timeouts{},
CompletionToken token = CompletionToken{})
auto async_run(CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, ts, std::move(token));
return base_type::async_run(std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
@@ -147,40 +131,29 @@ public:
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
private:
using this_type = connection<next_layer_type>;
using this_type = basic_connection<next_layer_type>;
template <class, class> friend class aedis::detail::connection_base;
template <class, class> friend struct aedis::detail::exec_op;
template <class, class> friend struct detail::ssl_connect_with_timeout_op;
template <class, class> friend struct aedis::detail::run_op;
template <class> friend struct aedis::detail::writer_op;
template <class> friend struct aedis::detail::check_idle_op;
template <class> friend struct aedis::detail::reader_op;
template <class, class> friend struct aedis::detail::exec_read_op;
template <class> friend struct aedis::detail::ping_op;
template <class> friend struct aedis::detail::run_op;
template <class> friend struct aedis::detail::writer_op;
template <class> friend struct aedis::detail::reader_op;
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
auto is_open() const noexcept { return stream_.next_layer().is_open(); }
void close() { stream_.next_layer().close(); }
template <class Timer, class CompletionToken>
auto
async_connect(
boost::asio::ip::tcp::resolver::results_type const& endpoints,
timeouts ts,
Timer& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ssl_connect_with_timeout_op<this_type, Timer>{this, &endpoints, ts, &timer}, token, stream_);
}
next_layer_type stream_;
};
/** \brief A connection that uses a boost::asio::ssl::stream<boost::asio::ip::tcp::socket>.
* \ingroup high-level-api
*/
using connection = basic_connection<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>>;
} // aedis::ssl
#endif // AEDIS_SSL_CONNECTION_HPP
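Putting the new interface together, a sketch of how aedis::ssl::connection is now used, mirroring the TLS test further down in this diff (the db.occase.de endpoint, the AUTH credentials, and the accept-all verify callback are copied from that test and are illustrative only):

   namespace net = boost::asio;
   using connection = aedis::ssl::connection;

   net::io_context ioc;
   net::ssl::context ctx{net::ssl::context::sslv23};
   connection conn{ioc, ctx};
   conn.next_layer().set_verify_mode(net::ssl::verify_peer);
   conn.next_layer().set_verify_callback(
      [](bool, net::ssl::verify_context&) { return true; }); // test-only: accept any certificate

   // Resolve, connect and TLS handshake are now performed by the caller.
   net::ip::tcp::resolver resv{ioc};
   net::connect(conn.lowest_layer(), resv.resolve("db.occase.de", "6380"));
   conn.next_layer().handshake(net::ssl::stream_base::client);

   aedis::resp3::request req;
   req.push("HELLO", 3, "AUTH", "aedis", "aedis");
   req.push("PING", "Kabuf");
   req.push("QUIT");

   conn.async_exec(req, aedis::adapt(), [](auto ec, auto) { /* check ec */ });
   conn.async_run([](auto ec) { /* check ec */ });
   ioc.run();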

View File

@@ -1,113 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SSL_CONNECTION_OPS_HPP
#define AEDIS_SSL_CONNECTION_OPS_HPP
#include <array>
#include <boost/assert.hpp>
#include <boost/system.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::ssl::detail
{
template <class Stream>
struct handshake_op {
Stream* stream;
aedis::detail::conn_timer_t<typename Stream::executor_type>* timer;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
return stream->async_handshake(boost::asio::ssl::stream_base::client, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec1); return;
case 1:
{
BOOST_ASSERT_MSG(!ec2, "handshake_op: Incompatible state.");
self.complete(error::ssl_handshake_timeout);
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <
class Stream,
class CompletionToken
>
auto async_handshake(
Stream& stream,
aedis::detail::conn_timer_t<typename Stream::executor_type>& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(handshake_op<Stream>{&stream, &timer}, token, stream, timer);
}
template <class Conn, class Timer>
struct ssl_connect_with_timeout_op {
Conn* conn = nullptr;
boost::asio::ip::tcp::resolver::results_type const* endpoints = nullptr;
typename Conn::timeouts ts;
Timer* timer = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield
aedis::detail::async_connect(
conn->lowest_layer(), *timer, *endpoints, std::move(self));
AEDIS_CHECK_OP0();
timer->expires_after(ts.handshake_timeout);
yield
async_handshake(conn->next_layer(), *timer, std::move(self));
AEDIS_CHECK_OP0();
self.complete({});
}
}
};
} // aedis::ssl::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_SSL_CONNECTION_OPS_HPP

tests/common.hpp Normal file
View File

@@ -0,0 +1,23 @@
#pragma once
#include <boost/asio.hpp>
#include <chrono>
namespace net = boost::asio;
using endpoints = net::ip::tcp::resolver::results_type;
auto
resolve(
std::string const& host = "127.0.0.1",
std::string const& port = "6379") -> endpoints
{
net::io_context ioc;
net::ip::tcp::resolver resv{ioc};
return resv.resolve(host, port);
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
inline
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
#endif // BOOST_ASIO_HAS_CO_AWAIT
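A usage sketch of these helpers, assuming aedis.hpp is included and a Redis server is listening on the default 127.0.0.1:6379, as the tests below do (the function name example_session is illustrative):

   #ifdef BOOST_ASIO_HAS_CO_AWAIT
   #include <boost/asio/experimental/awaitable_operators.hpp>
   using namespace net::experimental::awaitable_operators;

   auto example_session() -> net::awaitable<void>
   {
      aedis::connection conn{co_await net::this_coro::executor};
      net::connect(conn.next_layer(), resolve()); // blocking connect to 127.0.0.1:6379

      aedis::resp3::request req;
      req.push("HELLO", 3);
      req.push("QUIT");

      boost::system::error_code ec1, ec2;
      co_await (
         conn.async_exec(req, aedis::adapt(), redir(ec1)) && // redir: capture the error instead of throwing
         conn.async_run(redir(ec2))
      );
   }
   #endif // BOOST_ASIO_HAS_CO_AWAIT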

View File

@@ -1,164 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
bool is_host_not_found(error_code ec)
{
if (ec == net::error::netdb_errors::host_not_found) return true;
if (ec == net::error::netdb_errors::host_not_found_try_again) return true;
return false;
}
error_code test_async_run(endpoint ep, connection::timeouts cfg = {})
{
net::io_context ioc;
connection db{ioc};
error_code ret;
db.async_run(ep, cfg, [&](auto ec) { ret = ec; });
ioc.run();
return ret;
}
BOOST_AUTO_TEST_CASE(resolve_bad_host)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "Atibaia";
ep.port = "6379";
connection::timeouts cfg;
cfg.resolve_timeout = std::chrono::seconds{100};
auto const ec = test_async_run(ep, cfg);
BOOST_TEST(is_host_not_found(ec));
}
BOOST_AUTO_TEST_CASE(resolve_with_timeout)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "Atibaia";
ep.port = "6379";
connection::timeouts cfg;
// Low-enough to cause a timeout always.
cfg.resolve_timeout = std::chrono::milliseconds{1};
auto const ec = test_async_run(ep, cfg);
BOOST_CHECK_EQUAL(ec, aedis::error::resolve_timeout);
}
BOOST_AUTO_TEST_CASE(connect_bad_port)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "1";
connection::timeouts cfg;
cfg.connect_timeout = std::chrono::seconds{100};
auto const ec = test_async_run(ep, cfg);
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::connection_refused);
}
BOOST_AUTO_TEST_CASE(connect_with_timeout)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "example.com";
ep.port = "1";
connection::timeouts cfg;
cfg.connect_timeout = std::chrono::milliseconds{1};
auto const ec = test_async_run(ep, cfg);
BOOST_CHECK_EQUAL(ec, aedis::error::connect_timeout);
}
BOOST_AUTO_TEST_CASE(bad_hello_response)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
// Succeeds with the tcp connection but fails the hello.
endpoint ep;
ep.host = "google.com";
ep.port = "80";
auto const ec = test_async_run(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::invalid_data_type);
}
BOOST_AUTO_TEST_CASE(plain_conn_on_tls_endpoint)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "google.com";
ep.port = "443";
auto const ec = test_async_run(ep);
BOOST_TEST(!!ec);
}
auto auth_fail_error(boost::system::error_code ec)
{
return ec == aedis::error::resp3_handshake_error ||
ec == aedis::error::exec_timeout;
}
BOOST_AUTO_TEST_CASE(auth_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
// Should cause an error in the authentication as our redis server
// has no authentication configured.
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "6379";
ep.username = "caboclo-do-mato";
ep.password = "jabuticaba";
auto const ec = test_async_run(ep);
BOOST_TEST(auth_fail_error(ec));
}
auto wrong_role_error(boost::system::error_code ec)
{
return ec == aedis::error::unexpected_server_role ||
ec == aedis::error::exec_timeout;
}
BOOST_AUTO_TEST_CASE(wrong_role)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
// Should cause an error in the authentication as our redis server
// has no authentication configured.
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "6379";
ep.role = "errado";
auto const ec = test_async_run(ep);
BOOST_TEST(wrong_role_error(ec));
}

View File

@@ -8,26 +8,20 @@
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
#include "../examples/common/common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
namespace resp3 = aedis::resp3;
using error_code = boost::system::error_code;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
net::awaitable<void> push_consumer(std::shared_ptr<connection> conn, int expected)
auto push_consumer(std::shared_ptr<connection> conn, int expected) -> net::awaitable<void>
{
int c = 0;
for (;;) {
@@ -36,29 +30,31 @@ net::awaitable<void> push_consumer(std::shared_ptr<connection> conn, int expecte
break;
}
request req;
resp3::request req;
req.push("HELLO", 3);
req.push("QUIT");
co_await conn->async_exec(req, adapt(), net::use_awaitable);
co_await conn->async_exec(req, adapt());
}
auto echo_session(std::shared_ptr<connection> conn, std::string id, int n) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
request req;
std::tuple<std::string> resp;
resp3::request req;
std::tuple<aedis::ignore, std::string> resp;
for (auto i = 0; i < n; ++i) {
auto const msg = id + "/" + std::to_string(i);
//std::cout << msg << std::endl;
req.push("HELLO", 3);
req.push("PING", msg);
req.push("SUBSCRIBE", "channel");
boost::system::error_code ec;
co_await conn->async_exec(req, adapt(resp), net::redirect_error(net::use_awaitable, ec));
BOOST_TEST(!ec);
BOOST_CHECK_EQUAL(msg, std::get<0>(resp));
co_await conn->async_exec(req, adapt(resp), redir(ec));
BOOST_CHECK_EQUAL(ec, boost::system::error_code{});
BOOST_CHECK_EQUAL(msg, std::get<1>(resp));
req.clear();
std::get<0>(resp).clear();
std::get<1>(resp).clear();
}
}
@@ -67,8 +63,8 @@ auto async_echo_stress() -> net::awaitable<void>
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
int const sessions = 1000;
int const msgs = 100;
int const sessions = 500;
int const msgs = 1000;
int total = sessions * msgs;
net::co_spawn(ex, push_consumer(conn, total), net::detached);
@@ -76,15 +72,13 @@ auto async_echo_stress() -> net::awaitable<void>
for (int i = 0; i < sessions; ++i)
net::co_spawn(ex, echo_session(conn, std::to_string(i), msgs), net::detached);
endpoint ep{"127.0.0.1", "6379"};
co_await conn->async_run(ep, {}, net::use_awaitable);
co_await connect(conn, "127.0.0.1", "6379");
co_await conn->async_run();
}
BOOST_AUTO_TEST_CASE(echo_stress)
{
net::io_context ioc;
net::co_spawn(ioc.get_executor(), async_echo_stress(), net::detached);
ioc.run();
run(async_echo_stress());
}
#else

View File

@@ -14,32 +14,93 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
// TODO: Test whether HELLO won't be inserted past commands that have
// already been written.
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
namespace resp3 = aedis::resp3;
using error_code = boost::system::error_code;
using connection = aedis::connection;
using aedis::adapt;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
#endif
BOOST_AUTO_TEST_CASE(hello_priority)
{
resp3::request req1;
req1.get_config().coalesce = false;
req1.push("PING", "req1");
resp3::request req2;
req2.get_config().coalesce = false;
req2.get_config().hello_with_priority = false;
req2.push("HELLO", 3);
req2.push("PING", "req2");
req2.push("QUIT");
resp3::request req3;
req3.get_config().coalesce = false;
req3.get_config().hello_with_priority = true;
req3.push("HELLO", 3);
req3.push("PING", "req3");
net::io_context ioc;
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
bool seen1 = false;
bool seen2 = false;
bool seen3 = false;
conn.async_exec(req1, adapt(), [&](auto ec, auto){
std::cout << "bbb" << std::endl;
BOOST_TEST(!ec);
BOOST_TEST(!seen2);
BOOST_TEST(seen3);
seen1 = true;
});
conn.async_exec(req2, adapt(), [&](auto ec, auto){
std::cout << "ccc" << std::endl;
BOOST_TEST(!ec);
BOOST_TEST(seen1);
BOOST_TEST(seen3);
seen2 = true;
});
conn.async_exec(req3, adapt(), [&](auto ec, auto){
std::cout << "ddd" << std::endl;
BOOST_TEST(!ec);
BOOST_TEST(!seen1);
BOOST_TEST(!seen2);
seen3 = true;
});
conn.async_run([](auto ec){
BOOST_TEST(!ec);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(wrong_response_data_type)
{
request req;
resp3::request req;
req.push("HELLO", 3);
req.push("QUIT");
// Wrong data type.
std::tuple<int> resp;
std::tuple<aedis::ignore, int> resp;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(resp), [](auto ec, auto){
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req, adapt(resp), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_a_number);
});
db->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
});
@@ -48,43 +109,135 @@ BOOST_AUTO_TEST_CASE(wrong_response_data_type)
BOOST_AUTO_TEST_CASE(cancel_request_if_not_connected)
{
request req;
resp3::request req;
req.get_config().cancel_if_not_connected = true;
req.push("HELLO", 3);
req.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req, adapt(), [](auto ec, auto){
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, aedis::error::not_connected);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(request_retry)
// TODO: This test is broken.
BOOST_AUTO_TEST_CASE(request_retry_false)
{
request req1;
req1.get_config().cancel_on_connection_lost = true;
req1.push("CLIENT", "PAUSE", 7000);
resp3::request req0;
req0.get_config().coalesce = false;
req0.get_config().cancel_on_connection_lost = true;
req0.push("HELLO", 3);
request req2;
resp3::request req1;
req1.get_config().coalesce = true;
req1.get_config().cancel_on_connection_lost = true;
req1.push("BLPOP", "any", 0);
resp3::request req2;
req2.get_config().coalesce = true;
req2.get_config().cancel_on_connection_lost = false;
req2.get_config().retry = false;
req2.get_config().retry_on_connection_lost = false;
req2.push("PING");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
connection conn{ioc};
db->async_exec(req1, adapt(), [](auto ec, auto){
net::steady_timer st{ioc};
st.expires_after(std::chrono::seconds{1});
st.async_wait([&](auto){
// Cancels the request before receiving the response. This
// should cause the second request to complete with error
// although it has cancel_on_connection_lost = false.
conn.cancel(aedis::operation::run);
});
auto const endpoints = resolve();
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req0, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req2, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
db->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
conn.async_exec(req2, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(request_retry_true)
{
resp3::request req0;
req0.get_config().coalesce = false;
req0.get_config().cancel_on_connection_lost = true;
req0.push("HELLO", 3);
resp3::request req1;
req1.get_config().coalesce = true;
req1.get_config().cancel_on_connection_lost = true;
req1.push("BLPOP", "any", 0);
resp3::request req2;
req2.get_config().coalesce = true;
req2.get_config().cancel_on_connection_lost = false;
req2.get_config().retry_on_connection_lost = true;
req2.push("PING");
resp3::request req3;
req3.get_config().coalesce = true;
req3.get_config().cancel_on_connection_lost = true;
req3.get_config().retry_on_connection_lost = false;
req3.push("QUIT");
net::io_context ioc;
connection conn{ioc};
net::steady_timer st{ioc};
st.expires_after(std::chrono::seconds{1});
st.async_wait([&](auto){
// Cancels the request before receiving the response. This
// should cause the second request to complete with error
// although it has cancel_on_connection_lost = false.
conn.cancel(aedis::operation::run);
});
auto const endpoints = resolve();
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req0, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
conn.async_exec(req2, adapt(), [&](auto ec, auto){
BOOST_TEST(!ec);
conn.async_exec(req3, adapt(), [&](auto ec, auto){
BOOST_TEST(!ec);
});
});
conn.async_run([&](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
conn.reset_stream();
net::connect(conn.next_layer(), endpoints);
conn.async_run([&](auto ec){
std::cout << ec.message() << std::endl;
BOOST_TEST(!ec);
});
});
ioc.run();

View File

@@ -8,47 +8,47 @@
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/awaitable_operators.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
#include "../examples/common/common.hpp"
// NOTE1: Sends hello separately. I have observed that if hello and
// blpop are sent together, Redis will send the response of hello
// right away, not waiting for blpop. That is why we have to send it
// separately here.
namespace net = boost::asio;
using aedis::resp3::request;
namespace resp3 = aedis::resp3;
using error_code = boost::system::error_code;
using namespace net::experimental::awaitable_operators;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace net::experimental::awaitable_operators;
auto async_run(std::shared_ptr<connection> conn) -> net::awaitable<void>
auto async_ignore_explicit_cancel_of_req_written() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
connection::timeouts tms;
tms.ping_interval = std::chrono::seconds{10};
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec;
co_await conn->async_run(ep, tms, net::redirect_error(net::use_awaitable, ec));
BOOST_TEST(!ec);
}
auto conn = std::make_shared<connection>(ex);
co_await connect(conn, "127.0.0.1", "6379");
auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
conn->async_run([conn](auto ec) {
BOOST_TEST(!ec);
});
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
boost::system::error_code ec1;
// See NOTE1.
resp3::request req0;
req0.get_config().coalesce = false;
req0.push("HELLO", 3);
std::ignore = co_await conn->async_exec(req0, adapt(), net::use_awaitable);
request req1;
resp3::request req1;
req1.get_config().coalesce = false;
req1.push("BLPOP", "any", 3);
@@ -57,22 +57,23 @@ auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
BOOST_TEST(!ec);
});
request req2;
resp3::request req2;
req2.get_config().coalesce = false;
req2.push("PING", "second");
// Should be canceled.
conn->async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
conn->async_exec(req2, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::operation_aborted);
});
// Will complete while BLPOP is pending.
boost::system::error_code ec1;
co_await st.async_wait(net::redirect_error(net::use_awaitable, ec1));
conn->cancel(operation::exec);
BOOST_TEST(!ec1);
request req3;
resp3::request req3;
req3.push("QUIT");
// Test whether the connection remains usable after a call to
@@ -82,61 +83,100 @@ auto async_cancel_exec(std::shared_ptr<connection> conn) -> net::awaitable<void>
BOOST_TEST(!ec1);
}
BOOST_AUTO_TEST_CASE(cancel_exec_with_timer)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), async_run(conn), net::detached);
net::co_spawn(ioc.get_executor(), async_cancel_exec(conn), net::detached);
ioc.run();
}
auto async_ignore_cancel_of_written_req(std::shared_ptr<connection> conn) -> net::awaitable<void>
auto ignore_implicit_cancel_of_req_written() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
co_await connect(conn, "127.0.0.1", "6379");
// Calls async_run separately from the group of ops below to avoid
// having it canceled when the timer fires.
conn->async_run([conn](auto ec) {
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::operation_aborted);
});
// See NOTE1.
resp3::request req0;
req0.get_config().coalesce = false;
req0.push("HELLO", 3);
std::ignore = co_await conn->async_exec(req0, adapt(), net::use_awaitable);
// Will be cancelled after it has been written but before the
// response arrives.
resp3::request req1;
req1.get_config().coalesce = false;
req1.push("BLPOP", "any", 3);
// Will be cancelled before it is written.
resp3::request req2;
req2.get_config().coalesce = false;
//req2.get_config().cancel_on_connection_lost = true;
req2.push("PING");
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
net::steady_timer st2{ex};
st2.expires_after(std::chrono::seconds{3});
boost::system::error_code ec1, ec2, ec3;
request req1; // Will be cancelled after it has been written.
req1.get_config().coalesce = false;
req1.push("BLPOP", "any", 3);
request req2; // Will be cancelled.
req2.push("PING");
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) ||
conn->async_exec(req2, adapt(), net::redirect_error(net::use_awaitable, ec2)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec3))
conn->async_exec(req1, adapt(), redir(ec1)) ||
conn->async_exec(req2, adapt(), redir(ec2)) ||
st.async_wait(redir(ec3))
);
BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, boost::asio::error::basic_errors::operation_aborted);
BOOST_CHECK_EQUAL(ec2, net::error::basic_errors::operation_aborted);
BOOST_TEST(!ec3);
request req3;
req3.push("PING");
req3.push("QUIT");
co_await conn->async_exec(req3, adapt(), net::redirect_error(net::use_awaitable, ec1));
BOOST_TEST(!ec1);
conn->cancel(operation::run);
}
BOOST_AUTO_TEST_CASE(ignore_cancel_of_written_req)
auto cancel_of_req_written_on_run_canceled() -> net::awaitable<void>
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc.get_executor(), async_run(conn), net::detached);
net::co_spawn(ioc.get_executor(), async_ignore_cancel_of_written_req(conn), net::detached);
ioc.run();
resp3::request req0;
req0.get_config().coalesce = false;
req0.push("HELLO", 3);
resp3::request req1;
req1.get_config().cancel_on_connection_lost = true;
req1.get_config().retry_on_connection_lost = false;
req1.push("BLPOP", "any", 0);
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
co_await connect(conn, "127.0.0.1", "6379");
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
boost::system::error_code ec0, ec1, ec2, ec3;
co_await (
conn->async_exec(req0, adapt(), redir(ec0)) &&
(conn->async_exec(req1, adapt(), redir(ec1)) ||
conn->async_run(redir(ec2)) ||
st.async_wait(redir(ec3)))
);
BOOST_TEST(!ec0);
BOOST_CHECK_EQUAL(ec1, net::error::basic_errors::operation_aborted);
BOOST_CHECK_EQUAL(ec2, net::error::basic_errors::operation_aborted);
BOOST_TEST(!ec3);
}
BOOST_AUTO_TEST_CASE(test_ignore_explicit_cancel_of_req_written)
{
run(async_ignore_explicit_cancel_of_req_written());
}
BOOST_AUTO_TEST_CASE(test_ignore_implicit_cancel_of_req_written)
{
run(ignore_implicit_cancel_of_req_written());
}
BOOST_AUTO_TEST_CASE(test_cancel_of_req_written_on_run_canceled)
{
run(cancel_of_req_written_on_run_canceled());
}
#else
int main(){}
#endif
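Condensed, the cancellation pattern exercised by the three tests above looks like this inside one of those coroutines (conn is the shared_ptr connection and req1/req2 the requests set up there; a sketch, not additional test code):

   net::steady_timer st{co_await net::this_coro::executor};
   st.expires_after(std::chrono::seconds{1});

   boost::system::error_code ec1, ec2, ec3;
   co_await (
      conn->async_exec(req1, adapt(), redir(ec1)) || // already written: its cancellation is ignored
      conn->async_exec(req2, adapt(), redir(ec2)) || // not yet written: completes with operation_aborted
      st.async_wait(redir(ec3))                      // fires first and cancels the group
   );

   // Explicit variant: cancel only the pending async_exec operations.
   // conn->cancel(aedis::operation::exec);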

View File

@@ -6,6 +6,7 @@
#include <iostream>
#include <boost/asio.hpp>
#ifdef BOOST_ASIO_HAS_CO_AWAIT
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
@@ -14,94 +15,83 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using aedis::operation;
using connection = aedis::connection<>;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
BOOST_AUTO_TEST_CASE(push_filtered_out)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
std::tuple<std::string, std::string> resp;
conn->async_exec(req, adapt(resp), [](auto ec, auto){
std::tuple<aedis::ignore, std::string, std::string> resp;
conn.async_exec(req, adapt(resp), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_receive(adapt(), [](auto ec, auto){
conn.async_receive(adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
conn.async_run([](auto ec){
BOOST_TEST(!ec);
});
ioc.run();
BOOST_CHECK_EQUAL(std::get<0>(resp), "PONG");
BOOST_CHECK_EQUAL(std::get<1>(resp), "OK");
BOOST_CHECK_EQUAL(std::get<1>(resp), "PONG");
BOOST_CHECK_EQUAL(std::get<2>(resp), "OK");
}
// Checks whether we get idle timeout when no push reader is set.
void test_missing_push_reader1(bool coalesce)
void receive_wrong_syntax(request const& req)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req{{false, coalesce}};
req.get_config().cancel_on_connection_lost = true;
req.push("SUBSCRIBE", "channel");
conn->async_exec(req, adapt(), [](auto ec, auto){
conn.async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::asio::error::basic_errors::operation_aborted);
});
ioc.run();
}
void test_missing_push_reader2(request const& req)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto){
conn.async_receive(adapt(), [&](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
conn.cancel(aedis::operation::run);
});
ioc.run();
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> push_consumer1(std::shared_ptr<connection> conn, bool& push_received)
net::awaitable<void> push_consumer1(connection& conn, bool& push_received)
{
{
auto [ec, ev] = co_await conn->async_receive(adapt(), as_tuple(net::use_awaitable));
auto [ec, ev] = co_await conn.async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await conn->async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
auto [ec, ev] = co_await conn.async_receive(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, net::experimental::channel_errc::channel_cancelled);
}
push_received = true;
@@ -110,7 +100,7 @@ net::awaitable<void> push_consumer1(std::shared_ptr<connection> conn, bool& push
struct adapter_error {
void
operator()(
std::size_t, aedis::resp3::node<boost::string_view> const&, boost::system::error_code& ec)
std::size_t, aedis::resp3::node<std::string_view> const&, boost::system::error_code& ec)
{
ec = aedis::error::incompatible_size;
}
@@ -127,22 +117,25 @@ struct adapter_error {
BOOST_AUTO_TEST_CASE(test_push_adapter)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("SUBSCRIBE", "channel");
req.push("PING");
conn->async_receive(adapter_error{}, [](auto ec, auto) {
conn.async_receive(adapter_error{}, [](auto ec, auto) {
BOOST_CHECK_EQUAL(ec, aedis::error::incompatible_size);
});
conn->async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::error::channel_errors::channel_cancelled);
conn.async_exec(req, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::experimental::error::channel_errors::channel_cancelled);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec){
conn.async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
@@ -155,19 +148,23 @@ BOOST_AUTO_TEST_CASE(test_push_adapter)
void test_push_is_received1(bool coalesce)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req{{false, coalesce}};
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
conn->async_exec(req, adapt(), [](auto ec, auto){
conn.async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
conn.async_run([&](auto ec){
BOOST_TEST(!ec);
conn->cancel(operation::receive);
conn.cancel(operation::receive);
});
bool push_received = false;
@@ -184,6 +181,7 @@ void test_push_is_received1(bool coalesce)
void test_push_is_received2(bool coalesce)
{
request req1{{false, coalesce}};
req1.push("HELLO", 3);
req1.push("PING", "Message1");
request req2{{false, coalesce}};
@@ -195,21 +193,23 @@ void test_push_is_received2(bool coalesce)
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req3, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req3, adapt(), handler);
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec) {
conn.async_run([&](auto ec) {
BOOST_TEST(!ec);
conn->cancel(operation::receive);
conn.cancel(operation::receive);
});
bool push_received = false;
@@ -223,10 +223,10 @@ void test_push_is_received2(bool coalesce)
BOOST_TEST(push_received);
}
net::awaitable<void> push_consumer3(std::shared_ptr<connection> conn)
net::awaitable<void> push_consumer3(connection& conn)
{
for (;;)
co_await conn->async_receive(adapt(), net::use_awaitable);
co_await conn.async_receive(adapt(), net::use_awaitable);
}
// Test many subscribe requests.
@@ -250,24 +250,26 @@ void test_push_many_subscribes(bool coalesce)
};
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req0, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req1, adapt(), handler);
conn->async_exec(req2, adapt(), handler);
conn->async_exec(req3, adapt(), handler);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec) {
conn.async_exec(req0, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req1, adapt(), handler);
conn.async_exec(req2, adapt(), handler);
conn.async_exec(req3, adapt(), handler);
conn.async_run([&](auto ec) {
BOOST_TEST(!ec);
conn->cancel(operation::receive);
conn.cancel(operation::receive);
});
net::co_spawn(ioc.get_executor(), push_consumer3(conn), net::detached);
@@ -293,38 +295,33 @@ BOOST_AUTO_TEST_CASE(many_subscribers)
}
#endif
BOOST_AUTO_TEST_CASE(missing_reader1_coalesce)
{
test_missing_push_reader1(true);
}
BOOST_AUTO_TEST_CASE(missing_reader1_no_coalesce)
{
test_missing_push_reader1(false);
}
BOOST_AUTO_TEST_CASE(missing_reader2a)
BOOST_AUTO_TEST_CASE(receive_wrong_syntax1)
{
request req1{{false}};
req1.push("HELLO", 3);
req1.push("PING", "Message");
req1.push("SUBSCRIBE"); // Wrong command synthax.
req1.get_config().coalesce = true;
test_missing_push_reader2(req1);
receive_wrong_syntax(req1);
req1.get_config().coalesce = false;
test_missing_push_reader2(req1);
receive_wrong_syntax(req1);
}
BOOST_AUTO_TEST_CASE(missing_reader2b)
BOOST_AUTO_TEST_CASE(receive_wrong_syntax2)
{
request req2{{false}};
req2.push("HELLO", 3);
req2.push("SUBSCRIBE"); // Wrong command syntax.
req2.get_config().coalesce = true;
test_missing_push_reader2(req2);
receive_wrong_syntax(req2);
req2.get_config().coalesce = false;
test_missing_push_reader2(req2);
receive_wrong_syntax(req2);
}
#else
int main() {}
#endif

View File

@@ -13,13 +13,13 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::adapt;
using aedis::endpoint;
using aedis::resp3::request;
using connection = aedis::connection<>;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using operation = aedis::operation;
@@ -27,34 +27,40 @@ using operation = aedis::operation;
BOOST_AUTO_TEST_CASE(test_quit_no_coalesce)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
request req1{{false, false}};
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req1;
req1.get_config().cancel_on_connection_lost = false;
req1.get_config().coalesce = false;
req1.push("PING");
request req2{{false, false}};
request req2;
req2.get_config().cancel_on_connection_lost = false;
req2.get_config().coalesce = false;
req2.push("QUIT");
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
conn->async_exec(req2, adapt(), [](auto ec, auto) {
conn.async_exec(req2, adapt(), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
conn->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
endpoint ep{"127.0.0.1", "6379"};
conn->async_run(ep, {}, [conn](auto ec){
conn.async_run([&](auto ec){
BOOST_TEST(!ec);
conn->cancel(operation::exec);
conn.cancel(operation::exec);
});
ioc.run();
@@ -63,15 +69,20 @@ BOOST_AUTO_TEST_CASE(test_quit_no_coalesce)
void test_quit2(bool coalesce)
{
request req{{false, coalesce}};
req.push("HELLO", 3);
req.push("QUIT");
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
conn->async_exec(req, adapt(), [](auto ec, auto) {
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
conn.async_exec(req, adapt(), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn->async_run({"127.0.0.1", "6379"}, {}, [](auto ec) {
conn.async_run([](auto ec) {
BOOST_TEST(!ec);
});

View File

@@ -13,20 +13,22 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::adapt;
using aedis::endpoint;
using aedis::resp3::request;
using connection = aedis::connection<>;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using operation = aedis::operation;
BOOST_AUTO_TEST_CASE(test_quit_coalesce)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
request req1{{false, true}};
req1.push("PING");
@@ -34,23 +36,22 @@ BOOST_AUTO_TEST_CASE(test_quit_coalesce)
request req2{{false, true}};
req2.push("QUIT");
db->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req2, adapt(), [](auto ec, auto){
conn.async_exec(req2, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
db->async_exec(req1, adapt(), [](auto ec, auto){
conn.async_exec(req1, adapt(), [](auto ec, auto){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
});
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [db](auto ec){
conn.async_run([&](auto ec){
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
db->cancel(operation::exec);
conn.cancel(operation::exec);
});
ioc.run();

View File

@@ -13,35 +13,40 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
#include "../examples/common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using error_code = boost::system::error_code;
#include <boost/asio/experimental/awaitable_operators.hpp>
using namespace boost::asio::experimental::awaitable_operators;
net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
net::awaitable<void> test_reconnect_impl()
{
request req;
auto ex = co_await net::this_coro::executor;
resp3::request req;
req.push("QUIT");
auto const endpoints = resolve();
connection conn{ex};
int i = 0;
endpoint ep{"127.0.0.1", "6379"};
for (; i < 5; ++i) {
boost::system::error_code ec1, ec2;
net::connect(conn.next_layer(), endpoints);
co_await (
db->async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
db->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
conn.async_exec(req, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn.async_run(net::redirect_error(net::use_awaitable, ec2))
);
BOOST_TEST(!ec1);
BOOST_TEST(!ec2);
db->reset_stream();
conn.reset_stream();
}
BOOST_CHECK_EQUAL(i, 5);
@@ -51,51 +56,62 @@ net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
// Test whether the client works after a reconnect.
BOOST_AUTO_TEST_CASE(test_reconnect)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, test_reconnect_impl(db), net::detached);
net::co_spawn(ioc, test_reconnect_impl(), net::detached);
ioc.run();
}
auto async_test_reconnect_timeout() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec1, ec2;
auto ex = co_await net::this_coro::executor;
request req1;
net::steady_timer st{ex};
auto conn = std::make_shared<connection>(ex);
boost::system::error_code ec1, ec2, ec3;
resp3::request req1;
req1.get_config().cancel_if_not_connected = false;
req1.get_config().cancel_on_connection_lost = true;
req1.push("CLIENT", "PAUSE", 7000);
req1.get_config().retry_on_connection_lost = false;
req1.push("HELLO", 3);
req1.push("BLPOP", "any", 0);
co_await connect(conn, "127.0.0.1", "6379");
st.expires_after(std::chrono::seconds{1});
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
conn->async_exec(req1, adapt(), redir(ec1)) ||
conn->async_run(redir(ec2)) ||
st.async_wait(redir(ec3))
);
BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, aedis::error::idle_timeout);
//BOOST_TEST(!ec1);
BOOST_CHECK_EQUAL(ec2, boost::system::errc::errc_t::operation_canceled);
//BOOST_TEST(!ec3);
request req2;
resp3::request req2;
req2.get_config().cancel_if_not_connected = false;
req2.get_config().cancel_on_connection_lost = true;
req2.get_config().retry_on_connection_lost= false;
req2.push("HELLO", 3);
req2.push("QUIT");
co_await connect(conn, "127.0.0.1", "6379");
st.expires_after(std::chrono::seconds{1});
co_await (
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) &&
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec2))
conn->async_exec(req1, adapt(), net::redirect_error(net::use_awaitable, ec1)) ||
conn->async_run(net::redirect_error(net::use_awaitable, ec2)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec3))
);
std::cout << "ccc" << std::endl;
BOOST_CHECK_EQUAL(ec1, boost::system::errc::errc_t::operation_canceled);
BOOST_CHECK_EQUAL(ec2, aedis::error::exec_timeout);
BOOST_CHECK_EQUAL(ec2, boost::asio::error::basic_errors::operation_aborted);
}
BOOST_AUTO_TEST_CASE(test_reconnect_and_idle)
{
net::io_context ioc;
net::co_spawn(ioc, async_test_reconnect_timeout(), net::detached);
ioc.run();
run(async_test_reconnect_timeout());
}
#else
int main(){}

View File

@@ -15,14 +15,14 @@
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::operation;
using aedis::adapt;
using connection = aedis::connection<>;
using endpoint = aedis::endpoint;
using connection = aedis::connection;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
@@ -32,16 +32,15 @@ using namespace net::experimental::awaitable_operators;
auto async_cancel_run_with_timer() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto const endpoints = resolve();
connection conn{ex};
net::connect(conn.next_layer(), endpoints);
net::steady_timer st{ex};
st.expires_after(std::chrono::seconds{1});
endpoint ep{"127.0.0.1", "6379"};
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) ||
st.async_wait(net::redirect_error(net::use_awaitable, ec2))
);
co_await (conn.async_run(redir(ec1)) || st.async_wait(redir(ec2)));
BOOST_CHECK_EQUAL(ec1, boost::asio::error::basic_errors::operation_aborted);
BOOST_TEST(!ec2);
@@ -54,29 +53,20 @@ BOOST_AUTO_TEST_CASE(cancel_run_with_timer)
ioc.run();
}
net::awaitable<void>
async_check_cancellation_not_missed(
std::shared_ptr<connection> conn,
int n,
std::chrono::milliseconds ms)
auto
async_check_cancellation_not_missed(int n, std::chrono::milliseconds ms) -> net::awaitable<void>
{
net::steady_timer timer{co_await net::this_coro::executor};
auto ex = co_await net::this_coro::executor;
auto const endpoints = resolve();
connection conn{ex};
connection::timeouts tms;
tms.resolve_timeout = std::chrono::seconds{10};
tms.connect_timeout = std::chrono::seconds{10};
tms.resp3_handshake_timeout = std::chrono::seconds{2};
tms.ping_interval = std::chrono::seconds{1};
endpoint ep{"127.0.0.1", "6379"};
net::steady_timer timer{ex};
for (auto i = 0; i < n; ++i) {
timer.expires_after(ms);
net::connect(conn.next_layer(), endpoints);
boost::system::error_code ec1, ec2;
co_await (
conn->async_run(ep, {}, net::redirect_error(net::use_awaitable, ec1)) ||
timer.async_wait(net::redirect_error(net::use_awaitable, ec2))
);
co_await (conn.async_run(redir(ec1)) || timer.async_wait(redir(ec2)));
BOOST_CHECK_EQUAL(ec1, boost::asio::error::basic_errors::operation_aborted);
std::cout << "Counter: " << i << std::endl;
}
@@ -86,104 +76,123 @@ async_check_cancellation_not_missed(
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_0)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 10, std::chrono::milliseconds{0}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(10, std::chrono::milliseconds{0}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_2)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{2}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{2}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_8)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{8}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{8}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_16)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{16}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{16}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_32)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{32}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{32}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_64)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{64}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{64}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_128)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{128}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{128}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_256)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{256}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{256}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_512)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{512}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{512}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(check_implicit_cancel_not_missed_1024)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
net::co_spawn(ioc, async_check_cancellation_not_missed(conn, 20, std::chrono::milliseconds{1024}), net::detached);
net::co_spawn(ioc, async_check_cancellation_not_missed(20, std::chrono::milliseconds{1024}), net::detached);
ioc.run();
}
BOOST_AUTO_TEST_CASE(reset_before_run_completes)
{
net::io_context ioc;
auto conn = std::make_shared<connection>(ioc);
auto const endpoints = resolve();
connection conn{ioc};
net::connect(conn.next_layer(), endpoints);
// Sends a ping just as a means of waiting until we are connected.
request req;
req.push("HELLO", 3);
req.push("PING");
conn->async_exec(req, adapt(), [conn](auto ec, auto){
conn.async_exec(req, adapt(), [&](auto ec, auto){
BOOST_TEST(!ec);
conn->reset_stream();
conn.reset_stream();
});
conn->async_run({"127.0.0.1", "6379"}, {}, [conn](auto ec){
conn.async_run([&](auto ec){
BOOST_CHECK_EQUAL(ec, net::error::operation_aborted);
});
ioc.run();
}
using slave_operation = aedis::detail::guarded_operation<>;
auto master(std::shared_ptr<slave_operation> op) -> net::awaitable<void>
{
co_await op->async_run(net::use_awaitable);
}
auto slave(std::shared_ptr<slave_operation> op) -> net::awaitable<void>
{
net::steady_timer timer{co_await net::this_coro::executor};
timer.expires_after(std::chrono::seconds{1});
co_await op->async_wait(timer.async_wait(net::deferred), net::use_awaitable);
std::cout << "Kabuf" << std::endl;
}
BOOST_AUTO_TEST_CASE(slave_op)
{
net::io_context ioc;
auto op = std::make_shared<slave_operation>(ioc.get_executor());
net::co_spawn(ioc, master(op), net::detached);
net::co_spawn(ioc, slave(op), net::detached);
ioc.run();
}
#else
int main(){}
#endif

View File

@@ -14,12 +14,18 @@
#include <aedis.hpp>
#include <aedis/ssl/connection.hpp>
#include <aedis/src.hpp>
#include "common.hpp"
namespace net = boost::asio;
using aedis::adapt;
using connection = aedis::ssl::connection<net::ssl::stream<net::ip::tcp::socket>>;
using endpoint = aedis::endpoint;
using aedis::resp3::request;
using connection = aedis::ssl::connection;
struct endpoint {
std::string host;
std::string port;
};
bool verify_certificate(bool, net::ssl::verify_context&)
{
@@ -27,54 +33,40 @@ bool verify_certificate(bool, net::ssl::verify_context&)
return true;
}
boost::system::error_code hello_fail(endpoint ep)
BOOST_AUTO_TEST_CASE(ping)
{
net::io_context ioc;
std::string const in = "Kabuf";
request req;
req.get_config().cancel_on_connection_lost = true;
req.push("HELLO", 3, "AUTH", "aedis", "aedis");
req.push("PING", in);
req.push("QUIT");
std::string out;
auto resp = std::tie(std::ignore, out, std::ignore);
auto const endpoints = resolve("db.occase.de", "6380");
net::io_context ioc;
net::ssl::context ctx{net::ssl::context::sslv23};
auto conn = std::make_shared<connection>(ioc.get_executor(), ctx);
conn->next_layer().set_verify_mode(net::ssl::verify_peer);
conn->next_layer().set_verify_callback(verify_certificate);
boost::system::error_code ret;
conn->async_run(ep, {}, [&](auto ec) {
ret = ec;
connection conn{ioc, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
net::connect(conn.lowest_layer(), endpoints);
conn.next_layer().handshake(net::ssl::stream_base::client);
conn.async_exec(req, adapt(resp), [](auto ec, auto) {
BOOST_TEST(!ec);
});
conn.async_run([](auto ec) {
BOOST_TEST(!ec);
});
ioc.run();
return ret;
}
BOOST_AUTO_TEST_CASE(test_tls_handshake_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "google.com";
ep.port = "80";
auto const ec = hello_fail(ep);
BOOST_TEST(!!ec);
std::cout << "-----> " << ec.message() << std::endl;
}
BOOST_AUTO_TEST_CASE(test_tls_handshake_fail2)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "127.0.0.1";
ep.port = "6379";
auto const ec = hello_fail(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::ssl_handshake_timeout);
}
BOOST_AUTO_TEST_CASE(test_hello_fail)
{
std::cout << boost::unit_test::framework::current_test_case().p_name << std::endl;
endpoint ep;
ep.host = "google.com";
ep.port = "443";
auto const ec = hello_fail(ep);
BOOST_CHECK_EQUAL(ec, aedis::error::invalid_data_type);
BOOST_CHECK_EQUAL(in, out);
}

View File

@@ -24,7 +24,6 @@
// TODO: Test with empty strings.
namespace std
{
auto operator==(aedis::ignore, aedis::ignore) noexcept {return true;}
@@ -119,8 +118,6 @@ void test_async(net::any_io_executor ex, expect<Result> e)
std::optional<int> op_int_ok = 11;
std::optional<bool> op_bool_ok = true;
std::string const streamed_string_wire = "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
std::string const streamed_string_wire_error = "$?\r\n;b\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
// TODO: Test a streamed string that is not finished with a string of
// size 0 but where another command comes in.
std::vector<node_type> streamed_string_e1
@@ -132,40 +129,68 @@ std::vector<node_type> streamed_string_e1
std::vector<node_type> streamed_string_e2 { {resp3::type::streamed_string_part, 1UL, 1UL, {}} };
#define S01 "#11\r\n"
#define S02 "#f\r\n"
#define S03 "#t\r\n"
#define S04 "$?\r\n;0\r\n"
#define S05 "%11\r\n"
#define S06 "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n"
#define S07 "$?\r\n;b\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n"
#define S08 "*1\r\n:11\r\n"
#define S09 ":-3\r\n"
#define S10 ":11\r\n"
#define S11 ":3\r\n"
#define S12 "_\r\n"
#define S13 ">4\r\n+pubsub\r\n+message\r\n+some-channel\r\n+some message\r\n"
#define S14 ">0\r\n"
#define S15 "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n"
#define S16 "%4\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n"
#define S17 "*1\r\n" S16
#define S18 "|1\r\n+key-popularity\r\n%2\r\n$1\r\na\r\n,0.1923\r\n$1\r\nb\r\n,0.0012\r\n"
#define S19 "|0\r\n"
#define S20 "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n"
#define S21 "*1\r\n*1\r\n$2\r\nab\r\n"
#define S22 "*1\r\n*1\r\n*1\r\n*1\r\n*1\r\n*1\r\na\r\n"
#define S23 "*0\r\n"
#define S24 "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n"
#define S25 "~6\r\n+orange\r\n+apple\r\n+one\r\n+two\r\n+three\r\n+orange\r\n"
#define S26 "*1\r\n" S25
#define S27 "~0\r\n"
#define S28 "-Error\r\n"
#define S29 "-\r\n"
#define NUMBER_TEST_CONDITIONS(test) \
test(ex, make_expected("#11\r\n", std::optional<bool>{}, "bool.error", aedis::error::unexpected_bool_value)); \
test(ex, make_expected("#f\r\n", bool{false}, "bool.bool (true)")); \
test(ex, make_expected("#f\r\n", node_type{resp3::type::boolean, 1UL, 0UL, {"f"}}, "bool.node (false)")); \
test(ex, make_expected("#t\r\n", bool{true}, "bool.bool (true)")); \
test(ex, make_expected("#t\r\n", node_type{resp3::type::boolean, 1UL, 0UL, {"t"}}, "bool.node (true)")); \
test(ex, make_expected("#t\r\n", op_bool_ok, "optional.int")); \
test(ex, make_expected("#t\r\n", std::map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected("#t\r\n", std::set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected("#t\r\n", std::unordered_map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected("#t\r\n", std::unordered_set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected("$?\r\n;0\r\n", streamed_string_e2, "streamed_string.node.empty")); \
test(ex, make_expected("%11\r\n", std::optional<int>{}, "number.optional.int.error", aedis::error::expects_resp3_simple_type));; \
test(ex, make_expected("*1\r\n:11\r\n", std::tuple<int>{11}, "number.tuple.int")); \
test(ex, make_expected(":-3\r\n", node_type{resp3::type::number, 1UL, 0UL, {"-3"}}, "number.node (negative)")); \
test(ex, make_expected(":11\r\n", int{11}, "number.int")); \
test(ex, make_expected(":11\r\n", op_int_ok, "number.optional.int")); \
test(ex, make_expected(":11\r\n", std::list<std::string>{}, "number.optional.int", aedis::error::expects_resp3_aggregate)); \
test(ex, make_expected(":11\r\n", std::map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(":11\r\n", std::set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(":11\r\n", std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(":11\r\n", std::unordered_set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(":3\r\n", node_type{resp3::type::number, 1UL, 0UL, {"3"}}, "number.node (positive)")); \
test(ex, make_expected("_\r\n", int{0}, "number.int.error.null", aedis::error::resp3_null)); \
test(ex, make_expected(streamed_string_wire, std::string{"Hello word"}, "streamed_string.string")); \
test(ex, make_expected(streamed_string_wire, int{}, "streamed_string.string", aedis::error::not_a_number)); \
test(ex, make_expected(streamed_string_wire, streamed_string_e1, "streamed_string.node")); \
test(ex, make_expected(streamed_string_wire_error, std::string{}, "streamed_string.error", aedis::error::not_a_number)); \
test(ex, make_expected(S01, std::optional<bool>{}, "bool.error", aedis::error::unexpected_bool_value)); \
test(ex, make_expected(S02, bool{false}, "bool.bool (false)")); \
test(ex, make_expected(S02, node_type{resp3::type::boolean, 1UL, 0UL, {"f"}}, "bool.node (false)")); \
test(ex, make_expected(S03, bool{true}, "bool.bool (true)")); \
test(ex, make_expected(S03, node_type{resp3::type::boolean, 1UL, 0UL, {"t"}}, "bool.node (true)")); \
test(ex, make_expected(S03, op_bool_ok, "optional.int")); \
test(ex, make_expected(S03, std::map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S03, std::set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S03, std::unordered_map<int, int>{}, "bool.error", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S03, std::unordered_set<int>{}, "bool.error", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S04, streamed_string_e2, "streamed_string.node.empty")); \
test(ex, make_expected(S05, std::optional<int>{}, "number.optional.int.error", aedis::error::expects_resp3_simple_type)); \
test(ex, make_expected(S06, int{}, "streamed_string.string", aedis::error::not_a_number)); \
test(ex, make_expected(S06, std::string{"Hello word"}, "streamed_string.string")); \
test(ex, make_expected(S06, streamed_string_e1, "streamed_string.node")); \
test(ex, make_expected(S07, std::string{}, "streamed_string.error", aedis::error::not_a_number)); \
test(ex, make_expected(S08, std::tuple<int>{11}, "number.tuple.int")); \
test(ex, make_expected(S09, node_type{resp3::type::number, 1UL, 0UL, {"-3"}}, "number.node (negative)")); \
test(ex, make_expected(S10, int{11}, "number.int")); \
test(ex, make_expected(S10, op_int_ok, "number.optional.int")); \
test(ex, make_expected(S10, std::list<std::string>{}, "number.optional.int", aedis::error::expects_resp3_aggregate)); \
test(ex, make_expected(S10, std::map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S10, std::set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S10, std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::error::expects_resp3_map)); \
test(ex, make_expected(S10, std::unordered_set<std::string>{}, "number.optional.int", aedis::error::expects_resp3_set)); \
test(ex, make_expected(S11, node_type{resp3::type::number, 1UL, 0UL, {"3"}}, "number.node (positive)")); \
test(ex, make_expected(S12, int{0}, "number.int.error.null", aedis::error::resp3_null)); \
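
Every entry of NUMBER_TEST_CONDITIONS (the macro continues beyond this hunk) follows the same shape: the wire bytes to parse, the value the adapter is expected to produce, a label for the test output and, optionally, the error the parse should fail with. The expect<> and make_expected helpers are defined outside this hunk; a rough sketch of the shape they appear to have (names, members and the implicit aedis::error to error_code conversion are assumptions):

#include <string>
#include <utility>
#include <boost/system/error_code.hpp>

// Assumed test fixture: wire bytes in, expected value out, a label for the
// log and, optionally, the error the parse is expected to fail with.
template <class Result>
struct expect {
   std::string wire;
   Result expected;
   std::string name;
   boost::system::error_code ec{}; // default: success
};

template <class Result>
auto make_expected(std::string wire, Result expected, std::string name,
                   boost::system::error_code ec = {})
{
   return expect<Result>{std::move(wire), std::move(expected), std::move(name), ec};
}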
BOOST_AUTO_TEST_CASE(test_push)
{
net::io_context ioc;
std::string const wire = ">4\r\n+pubsub\r\n+message\r\n+some-channel\r\n+some message\r\n";
std::vector<node_type> e1a
{ {resp3::type::push, 4UL, 0UL, {}}
@@ -177,8 +202,8 @@ BOOST_AUTO_TEST_CASE(test_push)
std::vector<node_type> e1b { {resp3::type::push, 0UL, 0UL, {}} };
auto const in01 = expect<std::vector<node_type>>{wire, e1a, "push.node"};
auto const in02 = expect<std::vector<node_type>>{">0\r\n", e1b, "push.node.empty"};
auto const in01 = expect<std::vector<node_type>>{S13, e1a, "push.node"};
auto const in02 = expect<std::vector<node_type>>{S14, e1b, "push.node.empty"};
auto ex = ioc.get_executor();
@@ -202,9 +227,6 @@ BOOST_AUTO_TEST_CASE(test_map)
using op_vec_type = std::optional<std::vector<std::string>>;
using tuple_type = std::tuple<std::string, std::string, std::string, std::string, std::string, std::string, std::string, std::string>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "%4\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n";
std::vector<node_type> expected_1a
{ {resp3::type::map, 4UL, 0UL, {}}
, {resp3::type::blob_string, 1UL, 1UL, {"key1"}}
@@ -263,20 +285,20 @@ BOOST_AUTO_TEST_CASE(test_map)
, std::string{"key3"}, std::string{"value3"}
};
auto const in00 = expect<std::vector<node_type>>{wire, expected_1a, "map.node"};
auto const in00 = expect<std::vector<node_type>>{S16, expected_1a, "map.node"};
auto const in01 = expect<map_type>{"%0\r\n", map_type{}, "map.map.empty"};
auto const in02 = expect<map_type>{wire, expected_1b, "map.map"};
auto const in03 = expect<mmap_type>{wire, e1k, "map.multimap"};
auto const in04 = expect<umap_type>{wire, e1g, "map.unordered_map"};
auto const in05 = expect<mumap_type>{wire, e1l, "map.unordered_multimap"};
auto const in06 = expect<vec_type>{wire, expected_1c, "map.vector"};
auto const in07 = expect<op_map_type>{wire, expected_1d, "map.optional.map"};
auto const in08 = expect<op_vec_type>{wire, expected_1e, "map.optional.vector"};
auto const in09 = expect<std::tuple<op_map_type>>{"*1\r\n" + wire, std::tuple<op_map_type>{expected_1d}, "map.transaction.optional.map"};
auto const in02 = expect<map_type>{S16, expected_1b, "map.map"};
auto const in03 = expect<mmap_type>{S16, e1k, "map.multimap"};
auto const in04 = expect<umap_type>{S16, e1g, "map.unordered_map"};
auto const in05 = expect<mumap_type>{S16, e1l, "map.unordered_multimap"};
auto const in06 = expect<vec_type>{S16, expected_1c, "map.vector"};
auto const in07 = expect<op_map_type>{S16, expected_1d, "map.optional.map"};
auto const in08 = expect<op_vec_type>{S16, expected_1e, "map.optional.vector"};
auto const in09 = expect<std::tuple<op_map_type>>{S17, std::tuple<op_map_type>{expected_1d}, "map.transaction.optional.map"};
auto const in10 = expect<int>{"%11\r\n", int{}, "map.invalid.int", aedis::error::expects_resp3_simple_type};
auto const in11 = expect<tuple_type>{wire, e1f, "map.tuple"};
auto const in12 = expect<map_type>{wire2, map_type{}, "map.error", aedis::error::expects_resp3_map};
auto const in13 = expect<map_type>{"_\r\n", map_type{}, "map.null", aedis::error::resp3_null};
auto const in11 = expect<tuple_type>{S16, e1f, "map.tuple"};
auto const in12 = expect<map_type>{S15, map_type{}, "map.error", aedis::error::expects_resp3_map};
auto const in13 = expect<map_type>{S12, map_type{}, "map.null", aedis::error::resp3_null};
auto ex = ioc.get_executor();
@@ -314,8 +336,6 @@ BOOST_AUTO_TEST_CASE(test_map)
void test_attribute(net::io_context& ioc)
{
char const* wire = "|1\r\n+key-popularity\r\n%2\r\n$1\r\na\r\n,0.1923\r\n$1\r\nb\r\n,0.0012\r\n";
std::vector<node_type> e1a
{ {resp3::type::attribute, 1UL, 0UL, {}}
, {resp3::type::simple_string, 1UL, 1UL, "key-popularity"}
@@ -328,8 +348,8 @@ void test_attribute(net::io_context& ioc)
std::vector<node_type> e1b;
auto const in01 = expect<std::vector<node_type>>{wire, e1a, "attribute.node"};
auto const in02 = expect<std::vector<node_type>>{"|0\r\n", e1b, "attribute.node.empty"};
auto const in01 = expect<std::vector<node_type>>{S18, e1a, "attribute.node"};
auto const in02 = expect<std::vector<node_type>>{S19, e1b, "attribute.node.empty"};
auto ex = ioc.get_executor();
@@ -347,9 +367,6 @@ BOOST_AUTO_TEST_CASE(test_array)
using array_type2 = std::array<int, 1>;
net::io_context ioc;
char const* wire = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
char const* wire_nested = "*1\r\n*1\r\n$2\r\nab\r\n";
char const* wire_nested2 = "*1\r\n*1\r\n*1\r\n*1\r\n*1\r\n*1\r\na\r\n";
std::vector<node_type> e1a
{ {resp3::type::array, 3UL, 0UL, {}}
@@ -366,22 +383,22 @@ BOOST_AUTO_TEST_CASE(test_array)
std::list<int> const e1g{11, 22, 3};
std::deque<int> const e1h{11, 22, 3};
auto const in01 = expect<std::vector<node_type>>{wire, e1a, "array.node"};
auto const in02 = expect<std::vector<int>>{wire, e1b, "array.int"};
auto const in03 = expect<std::vector<node_type>>{"*0\r\n", e1e, "array.node.empty"};
auto const in04 = expect<std::vector<std::string>>{"*0\r\n", e1d, "array.string.empty"};
auto const in05 = expect<std::vector<std::string>>{wire, e1c, "array.string"};
auto const in06 = expect<array_type>{wire, e1f, "array.array"};
auto const in07 = expect<std::list<int>>{wire, e1g, "array.list"};
auto const in08 = expect<std::deque<int>>{wire, e1h, "array.deque"};
auto const in09 = expect<std::vector<int>>{"_\r\n", std::vector<int>{}, "array.vector", aedis::error::resp3_null};
auto const in10 = expect<std::list<int>>{"_\r\n", std::list<int>{}, "array.list", aedis::error::resp3_null};
auto const in11 = expect<array_type>{"_\r\n", array_type{}, "array.null", aedis::error::resp3_null};
auto const in12 = expect<tuple_type>{wire, tuple_type{}, "array.list", aedis::error::incompatible_size};
auto const in13 = expect<array_type2>{wire_nested, array_type2{}, "array.nested", aedis::error::nested_aggregate_not_supported};
auto const in14 = expect<array_type2>{wire, array_type2{}, "array.null", aedis::error::incompatible_size};
auto const in15 = expect<array_type2>{":3\r\n", array_type2{}, "array.array", aedis::error::expects_resp3_aggregate};
auto const in16 = expect<vec_node_type>{wire_nested2, vec_node_type{}, "array.depth.exceeds", aedis::error::exceeeds_max_nested_depth};
auto const in01 = expect<std::vector<node_type>>{S20, e1a, "array.node"};
auto const in02 = expect<std::vector<int>>{S20, e1b, "array.int"};
auto const in03 = expect<std::vector<node_type>>{S23, e1e, "array.node.empty"};
auto const in04 = expect<std::vector<std::string>>{S23, e1d, "array.string.empty"};
auto const in05 = expect<std::vector<std::string>>{S20, e1c, "array.string"};
auto const in06 = expect<array_type>{S20, e1f, "array.array"};
auto const in07 = expect<std::list<int>>{S20, e1g, "array.list"};
auto const in08 = expect<std::deque<int>>{S20, e1h, "array.deque"};
auto const in09 = expect<std::vector<int>>{S12, std::vector<int>{}, "array.vector", aedis::error::resp3_null};
auto const in10 = expect<std::list<int>>{S12, std::list<int>{}, "array.list", aedis::error::resp3_null};
auto const in11 = expect<array_type>{S12, array_type{}, "array.null", aedis::error::resp3_null};
auto const in12 = expect<tuple_type>{S20, tuple_type{}, "array.list", aedis::error::incompatible_size};
auto const in13 = expect<array_type2>{S21, array_type2{}, "array.nested", aedis::error::nested_aggregate_not_supported};
auto const in14 = expect<array_type2>{S20, array_type2{}, "array.null", aedis::error::incompatible_size};
auto const in15 = expect<array_type2>{S11, array_type2{}, "array.array", aedis::error::expects_resp3_aggregate};
auto const in16 = expect<vec_node_type>{S22, vec_node_type{}, "array.depth.exceeds", aedis::error::exceeeds_max_nested_depth};
auto ex = ioc.get_executor();
@@ -432,9 +449,6 @@ BOOST_AUTO_TEST_CASE(test_set)
using vec_type = std::vector<std::string>;
using op_vec_type = std::optional<std::vector<std::string>>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "~6\r\n+orange\r\n+apple\r\n+one\r\n+two\r\n+three\r\n+orange\r\n";
std::vector<node_type> const expected1a
{ {resp3::type::set, 6UL, 0UL, {}}
, {resp3::type::simple_string, 1UL, 1UL, {"orange"}}
@@ -452,16 +466,16 @@ BOOST_AUTO_TEST_CASE(test_set)
op_vec_type expected_1e;
expected_1e = e1d;
auto const in00 = expect<std::vector<node_type>>{wire, expected1a, "set.node"};
auto const in01 = expect<std::vector<node_type>>{"~0\r\n", std::vector<node_type>{ {resp3::type::set, 0UL, 0UL, {}} }, "set.node (empty)"};
auto const in02 = expect<set_type>{wire, set_type{"apple", "one", "orange", "three", "two"}, "set.set"};
auto const in03 = expect<mset_type>{wire, e1f, "set.multiset"};
auto const in04 = expect<vec_type>{wire, e1d, "set.vector "};
auto const in05 = expect<op_vec_type>{wire, expected_1e, "set.vector "};
auto const in06 = expect<uset_type>{wire, e1c, "set.unordered_set"};
auto const in07 = expect<muset_type>{wire, e1g, "set.unordered_multiset"};
auto const in08 = expect<std::tuple<uset_type>>{"*1\r\n" + wire, std::tuple<uset_type>{e1c}, "set.tuple"};
auto const in09 = expect<set_type>{wire2, set_type{}, "set.error", aedis::error::expects_resp3_set};
auto const in00 = expect<std::vector<node_type>>{S25, expected1a, "set.node"};
auto const in01 = expect<std::vector<node_type>>{S27, std::vector<node_type>{ {resp3::type::set, 0UL, 0UL, {}} }, "set.node (empty)"};
auto const in02 = expect<set_type>{S25, set_type{"apple", "one", "orange", "three", "two"}, "set.set"};
auto const in03 = expect<mset_type>{S25, e1f, "set.multiset"};
auto const in04 = expect<vec_type>{S25, e1d, "set.vector "};
auto const in05 = expect<op_vec_type>{S25, expected_1e, "set.vector "};
auto const in06 = expect<uset_type>{S25, e1c, "set.unordered_set"};
auto const in07 = expect<muset_type>{S25, e1g, "set.unordered_multiset"};
auto const in08 = expect<std::tuple<uset_type>>{S26, std::tuple<uset_type>{e1c}, "set.tuple"};
auto const in09 = expect<set_type>{S24, set_type{}, "set.error", aedis::error::expects_resp3_set};
auto ex = ioc.get_executor();
@@ -492,9 +506,9 @@ BOOST_AUTO_TEST_CASE(test_set)
BOOST_AUTO_TEST_CASE(test_simple_error)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"-Error\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::error::resp3_simple_error};
auto const in02 = expect<node_type>{"-\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::error::resp3_simple_error};
auto const in03 = expect<aedis::ignore>{"-Error\r\n", aedis::ignore{}, "simple_error.not.ignore.error", aedis::error::resp3_simple_error};
auto const in01 = expect<node_type>{S28, node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::error::resp3_simple_error};
auto const in02 = expect<node_type>{S29, node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::error::resp3_simple_error};
auto const in03 = expect<aedis::ignore>{S28, aedis::ignore{}, "simple_error.not.ignore.error", aedis::error::resp3_simple_error};
auto ex = ioc.get_executor();
@@ -696,15 +710,15 @@ BOOST_AUTO_TEST_CASE(test_null)
using op_type_08 = std::optional<std::set<std::string>>;
using op_type_09 = std::optional<std::unordered_set<std::string>>;
auto const in01 = expect<op_type_01>{"_\r\n", op_type_01{}, "null.optional.bool"};
auto const in02 = expect<op_type_02>{"_\r\n", op_type_02{}, "null.optional.int"};
auto const in03 = expect<op_type_03>{"_\r\n", op_type_03{}, "null.optional.string"};
auto const in04 = expect<op_type_04>{"_\r\n", op_type_04{}, "null.optional.vector"};
auto const in05 = expect<op_type_05>{"_\r\n", op_type_05{}, "null.optional.list"};
auto const in06 = expect<op_type_06>{"_\r\n", op_type_06{}, "null.optional.map"};
auto const in07 = expect<op_type_07>{"_\r\n", op_type_07{}, "null.optional.unordered_map"};
auto const in08 = expect<op_type_08>{"_\r\n", op_type_08{}, "null.optional.set"};
auto const in09 = expect<op_type_09>{"_\r\n", op_type_09{}, "null.optional.unordered_set"};
auto const in01 = expect<op_type_01>{S12, op_type_01{}, "null.optional.bool"};
auto const in02 = expect<op_type_02>{S12, op_type_02{}, "null.optional.int"};
auto const in03 = expect<op_type_03>{S12, op_type_03{}, "null.optional.string"};
auto const in04 = expect<op_type_04>{S12, op_type_04{}, "null.optional.vector"};
auto const in05 = expect<op_type_05>{S12, op_type_05{}, "null.optional.list"};
auto const in06 = expect<op_type_06>{S12, op_type_06{}, "null.optional.map"};
auto const in07 = expect<op_type_07>{S12, op_type_07{}, "null.optional.unordered_map"};
auto const in08 = expect<op_type_08>{S12, op_type_08{}, "null.optional.set"};
auto const in09 = expect<op_type_09>{S12, op_type_09{}, "null.optional.unordered_set"};
auto ex = ioc.get_executor();
@@ -803,11 +817,6 @@ void check_error(char const* name, aedis::error ev)
BOOST_AUTO_TEST_CASE(error)
{
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::connect_timeout);
check_error("aedis", aedis::error::idle_timeout);
check_error("aedis", aedis::error::exec_timeout);
check_error("aedis", aedis::error::invalid_data_type);
check_error("aedis", aedis::error::not_a_number);
check_error("aedis", aedis::error::exceeeds_max_nested_depth);
@@ -823,9 +832,7 @@ BOOST_AUTO_TEST_CASE(error)
check_error("aedis", aedis::error::incompatible_size);
check_error("aedis", aedis::error::not_a_double);
check_error("aedis", aedis::error::resp3_null);
check_error("aedis", aedis::error::unexpected_server_role);
check_error("aedis", aedis::error::not_connected);
check_error("aedis", aedis::error::resp3_handshake_error);
}
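
check_error presumably asserts that each aedis::error value maps to an error_code in the category named "aedis" with a non-empty message; a minimal sketch of such a helper (an assumption, the real definition is outside this hunk):

#include <string>
#include <boost/system/error_code.hpp>
#include <boost/test/unit_test.hpp>

// Assumed helper: each aedis::error value converts to an error_code whose
// category carries the expected name and whose message is non-empty.
void check_error(char const* category, boost::system::error_code ec)
{
   BOOST_CHECK_EQUAL(std::string{ec.category().name()}, category);
   BOOST_TEST(!ec.message().empty());
}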
std::string get_type_as_str(aedis::resp3::type t)
@@ -885,10 +892,22 @@ BOOST_AUTO_TEST_CASE(type_convert)
BOOST_AUTO_TEST_CASE(adapter)
{
using aedis::adapt;
using resp3::type;
boost::system::error_code ec;
std::string a;
int b;
auto resp = std::tie(a, b, std::ignore);
std::string s;
auto resp = std::tie(s, std::ignore);
auto f = adapt(resp);
(void)f;
f(0, resp3::node<std::string_view>{type::simple_string, 1, 0, "Hello"}, ec);
f(1, resp3::node<std::string_view>{type::number, 1, 0, "42"}, ec);
BOOST_CHECK_EQUAL(a, "Hello");
BOOST_TEST(!ec);
BOOST_CHECK_EQUAL(b, 42);
BOOST_TEST(!ec);
}
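
The adapter returned by adapt() is invoked once per response element with the element's index, a resp3::node view of the wire data and an error_code it may set; the test above drives it by hand. In normal use it is handed to the connection instead. A rough usage sketch, assuming the connection/async_exec API used elsewhere in this repository (the command names and coroutine wiring here are illustrative, not taken from this diff):

#include <aedis.hpp>
#include <aedis/resp3/request.hpp>
#include <boost/asio.hpp>
#include <string>
#include <tuple>

namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;

// Assumed usage: adapt() binds a tuple of C++ variables to the replies of the
// commands pushed into the request, one tuple element per command.
auto ping_and_incr(aedis::connection& conn) -> net::awaitable<void>
{
   request req;
   req.push("PING", "Hello");
   req.push("INCR", "counter");

   std::tuple<std::string, int> resp;
   co_await conn.async_exec(req, adapt(resp), net::use_awaitable);
   // std::get<0>(resp) == "Hello"; std::get<1>(resp) holds the incremented value.
}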


@@ -10,7 +10,7 @@
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/src.hpp>
using aedis::resp3::request;