mirror of https://github.com/boostorg/redis.git synced 2026-01-24 06:22:07 +00:00

Compare commits


140 Commits

Author SHA1 Message Date
Marcelo Zimbres
11807c82b7 Improves documentations of the connection class. 2022-08-21 12:50:29 +02:00
Marcelo Zimbres
24a215d78b First steps with cmake support. 2022-08-20 22:12:33 +02:00
Marcelo Zimbres
b7abe20703 CI fix. 2022-08-20 12:18:59 +02:00
Marcelo Zimbres
225095944c Commit of the following:
- Adds sync class to offer a thread-safe and synchronous API.
- Fixes documentation of adapt functions.
- Removes compose.hpp header.
- Adds test to aedis::error and resp3::type.
- Simplifies some code.
2022-08-20 11:56:31 +02:00
Marcelo Zimbres
a31d797e43 Moves sync functions from experimental to connection and improves code coverage. 2022-08-18 22:17:33 +02:00
Marcelo Zimbres
cca8d5d6dc Improvements in the examples, docs, sync functions and coverage. 2022-08-17 22:30:59 +02:00
Marcelo Zimbres
6c5bee6920 Fixes bug in the context of reconnecting and events. 2022-08-16 22:18:27 +02:00
Marcelo Zimbres
c4714d0037 Splits async_receive_event in two functions. 2022-08-15 22:45:55 +02:00
Marcelo Zimbres
38bf2395af Fix coverage and ports tests to boost.test. 2022-08-14 21:46:56 +02:00
Marcelo Zimbres
7511d6b4d8 Progress porting to boost.test. 2022-08-13 22:52:42 +02:00
Marcelo Zimbres
ddc2815fe5 Progress with coverage report. 2022-08-13 17:00:18 +02:00
Marcelo Zimbres
de6f5de655 Adds coverage file. 2022-08-12 22:43:37 +02:00
Marcelo Zimbres
8d454ada0e Simplifies the cancellation of some connection async_ functions. 2022-08-12 21:53:04 +02:00
Marcelo Zimbres
ebac88f2ca Improvements in the CI script. 2022-08-07 18:44:28 +02:00
Marcelo Zimbres
d26ecb65ca Improvements in the docs. 2022-08-07 11:32:50 +02:00
Marcelo Zimbres
c57f97b8c1 Improvements in the examples. 2022-08-06 23:12:32 +02:00
Marcelo Zimbres
37ab1e7387 Support for reconnection. 2022-08-06 18:11:12 +02:00
Marcelo Zimbres
54d448cad4 Progresses with connection events. 2022-08-06 13:06:05 +02:00
Marcelo Zimbres
97428dedb3 Progresses with reconnection. 2022-08-04 23:58:04 +02:00
Marcelo Zimbres
83802f217a Prepares for events. 2022-08-03 21:40:43 +02:00
Marcelo Zimbres
08140f9186 Fixes async_exec function. 2022-08-02 21:52:34 +02:00
Marcelo Zimbres
3ddb017edb Adds automatic AUTH and HELLO. 2022-08-01 22:46:34 +02:00
Marcelo Zimbres
20328cd423 Don't cancel the push channel when async_run exits. 2022-08-01 21:50:38 +02:00
Marcelo Zimbres
6577ddbaab First steps. 2022-07-31 22:10:49 +02:00
Marcelo Zimbres
217d2bd87b Progresses with the synchronous exec functions. 2022-07-31 11:50:23 +02:00
Marcelo Zimbres
f96dd22153 Improves executor usage in sync wrapper. 2022-07-30 23:35:28 +02:00
Marcelo Zimbres
f1fd0cfa8c Removes warnings on g++. 2022-07-30 23:18:24 +02:00
Marcelo Zimbres
8728914109 Adds changelog, fixes CI file, improvements in the docs. 2022-07-30 09:31:11 +02:00
Marcelo Zimbres
e0041ac7ae Progresses with async_failover function. 2022-07-28 22:00:42 +02:00
Marcelo Zimbres
317a185eb0 Adds in the sync_wrapper class. 2022-07-27 22:18:00 +02:00
Marcelo Zimbres
aa81200a8f Adds assert to check the response tuple is compatible with the request size. 2022-07-26 22:10:31 +02:00
Marcelo Zimbres
55fc0e861c Fixes bug on reconnection. 2022-07-26 21:47:14 +02:00
Marcelo Zimbres
04271855b0 Adds example on how to use Aedis synchronously. 2022-07-25 22:53:03 +02:00
Marcelo Zimbres
700e0c823e First steps with the CI file. 2022-07-24 21:21:02 +02:00
Marcelo Zimbres
63c6465a4a Improvements in the docs and subscriber example with reconnection. 2022-07-24 16:33:13 +02:00
Marcelo Zimbres
c86422cf50 Moves files to include directory. 2022-07-24 00:03:19 +02:00
Marcelo Zimbres
0168ed5faf Fixes build for clang++-14,13,11. 2022-07-23 14:55:01 +02:00
Marcelo Zimbres
7bffa252f4 Improvements in the documentation. 2022-07-21 22:05:26 +02:00
Marcelo Zimbres
0bb65599c4 Simplifies the char_room example. 2022-07-21 21:35:52 +02:00
Marcelo Zimbres
edd538944f Uses the correct executor in the exec timer. 2022-07-19 22:01:09 +02:00
Marcelo Zimbres
42880e788b Simplifies aedis header. 2022-07-17 18:42:24 +02:00
Marcelo Zimbres
bcc3917174 Test improvements. 2022-07-17 10:47:12 +02:00
Marcelo Zimbres
b08dd63192 Updates benchmark doc. 2022-07-16 21:25:57 +02:00
Marcelo Zimbres
76b6106caa Fixes executor usage in connection class. 2022-07-16 21:21:13 +02:00
Marcelo Zimbres
ab68e8a31d Updates to a more recent Tokio version and uses single thread. 2022-07-16 20:00:36 +02:00
Marcelo Zimbres
2673557ce5 More corrections. 2022-07-16 14:30:16 +02:00
Marcelo Zimbres
2a302dcb65 Corrections to the benchmark document. 2022-07-16 14:25:38 +02:00
Marcelo Zimbres
ffc4230368 Fixes documentation. 2022-07-16 13:50:21 +02:00
Marcelo Zimbres
59b5d35672 Small corrections. 2022-07-16 12:33:02 +02:00
Marcelo Zimbres
835a1decf4 Progresses with benchmarks. 2022-07-16 11:03:48 +02:00
Marcelo Zimbres
3fb018ccc6 Some changes in the benchmarks. 2022-07-15 23:15:08 +02:00
Marcelo Zimbres
1fe4a87287 Adds go-redis 2022-07-14 22:13:58 +02:00
Marcelo Zimbres
70cdff41e0 Fixes some bugs. 2022-07-14 21:44:50 +02:00
Marcelo Zimbres
2edd9f3d87 Some improvements in the benchmarks. 2022-07-11 00:00:09 +02:00
Marcelo Zimbres
fa4181b197 New version. 2022-07-10 20:53:44 +02:00
Marcelo Zimbres
9e2cd8855e Small documentation improvements. 2022-07-10 19:19:42 +02:00
Marcelo Zimbres
bef70870cd Test improvements. 2022-07-10 10:05:21 +02:00
Marcelo Zimbres
9885439845 Adds Petr Dannhofer to the acknowledgements section. 2022-07-09 22:55:58 +02:00
Marcelo Zimbres
b5a9162efb Uses the associated allocator to allocate memory. 2022-07-09 22:51:34 +02:00
Marcelo Zimbres
6ca0bcc945 Removes automatic sending of hello command. 2022-07-09 22:11:22 +02:00
Marcelo Zimbres
efd0a0379a Reenables some tests. 2022-07-09 17:02:34 +02:00
Marcelo Zimbres
97153abc3c Add own-ping cancellation when async_run exits. 2022-07-09 16:31:27 +02:00
Marcelo Zimbres
f4710941d3 Fixes error handling. 2022-07-09 12:50:44 +02:00
Marcelo Zimbres
f8ff3034f4 Adds AUTH to the example. 2022-07-09 10:22:12 +02:00
Marcelo Zimbres
561eb5dccb Progresses with the support for failover. 2022-07-09 09:32:33 +02:00
Marcelo Zimbres
95d609b75c Improvements in the docs. 2022-07-03 09:21:35 +02:00
Marcelo Zimbres
d5f9e702d7 Fixes missing return statement. 2022-07-03 08:16:06 +02:00
Marcelo Zimbres
5add83b73c Fix compilation on clang++-14. 2022-07-02 23:40:53 +02:00
Marcelo Zimbres
200974d9be Adds a TCP echo server from libuv. 2022-07-02 23:01:12 +02:00
Marcelo Zimbres
649c84d7d0 Code simplifications. 2022-07-02 18:11:42 +02:00
Marcelo Zimbres
240cce4b09 Fixes the tests. 2022-07-02 13:49:03 +02:00
Marcelo Zimbres
b140216f0d Fixes the connection ops for the subscriber. 2022-07-02 11:30:39 +02:00
Marcelo Zimbres
4f0d9de393 Removes command enum. 2022-06-27 22:48:47 +02:00
Marcelo Zimbres
888bb476d7 Loads missing files. 2022-06-27 21:28:33 +02:00
Marcelo Zimbres
eae37ace0b Fixes some problems with clang. 2022-06-26 23:03:36 +02:00
Marcelo Zimbres
0c3ed1afee Improves error handling. 2022-06-26 22:48:14 +02:00
Marcelo Zimbres
0f5e8e3d1f Renames from_bulk to to_bulk. 2022-06-26 21:08:05 +02:00
Marcelo Zimbres
963b228e02 Adds serialization example. 2022-06-26 18:24:36 +02:00
Marcelo Zimbres
bddf47d626 Simplifies the code. 2022-06-25 13:34:31 +02:00
Marcelo Zimbres
b3b8dfc243 Fixes the rust echo server. 2022-06-23 22:27:14 +02:00
Marcelo Zimbres
e013d846b2 Adds files to Makefile.am. 2022-06-20 21:51:06 +02:00
Marcelo Zimbres
d2ba54a7a6 Add go echo server. 2022-06-19 21:15:01 +02:00
Marcelo Zimbres
250e24d5fb Adds echo server test. 2022-06-19 20:50:00 +02:00
Marcelo Zimbres
9dcccca11e Move files around and adds rust program. 2022-06-19 14:47:56 +02:00
Marcelo Zimbres
8af1c9f19c Adds nodejs echo_server benchmark program. 2022-06-19 14:12:10 +02:00
Marcelo Zimbres
b058cc0c02 Adds echo_server_direct tool for benchmark purposes. 2022-06-19 09:17:28 +02:00
Marcelo Zimbres
df3f2b8ca5 More improvements in the docs. 2022-06-18 22:07:59 +02:00
Marcelo Zimbres
8e4928347c Improvements in the documentation. 2022-06-18 13:03:21 +02:00
Marcelo Zimbres
33461d54c8 Adds reconnect test. 2022-06-18 09:13:44 +02:00
Marcelo Zimbres
5328cdff9a Adds coalesce option. 2022-06-17 22:51:51 +02:00
Marcelo Zimbres
452589d4e7 Test improvements. 2022-06-16 15:00:35 +02:00
Marcelo Zimbres
4036df9255 Moves write operation in exec_op to its own op. 2022-06-16 11:42:16 +02:00
Marcelo Zimbres
9f2df4d052 Fix in the operations. 2022-06-14 22:43:35 +02:00
Marcelo Zimbres
1571afbd88 Simplifies read operation. 2022-06-12 21:39:18 +02:00
Marcelo Zimbres
b43e6dfb68 Simplifies the code. 2022-06-12 15:12:34 +02:00
Marcelo Zimbres
ce9cb04168 Refactoring. 2022-06-12 14:44:20 +02:00
Marcelo Zimbres
77fe3a0f5f Fixes and improves some tests. 2022-06-11 10:59:59 +02:00
Marcelo Zimbres
40dfacb0b7 Adds exec overload. 2022-06-06 22:00:43 +02:00
Marcelo Zimbres
6d859c57f8 Unifies all error codes into one. 2022-06-06 16:46:38 +02:00
Marcelo Zimbres
9e43541a5e Remove Command template parameter from request. 2022-06-06 15:22:31 +02:00
Marcelo Zimbres
97cb5b5b25 Improvements in the project structure. 2022-06-06 10:51:47 +02:00
Marcelo Zimbres
a40c9fe35f Factors async_write_with_timeout out of connection class. 2022-06-06 08:50:44 +02:00
Marcelo Zimbres
a411cc50fc Simplifies and enhances code modularity. 2022-06-05 23:01:19 +02:00
Marcelo Zimbres
5893f0913e Adds a pool of timers. 2022-06-05 10:29:04 +02:00
Marcelo Zimbres
dea7712a29 Improvements in the code. 2022-06-05 09:31:19 +02:00
Marcelo Zimbres
56479b88eb Using generic::adapter for async_read_push. 2022-06-04 22:50:48 +02:00
Marcelo Zimbres
dfeb3bbfcf Improvements in the examples. 2022-06-04 19:03:22 +02:00
Marcelo Zimbres
7464851e9e Improvements in the examples. 2022-06-04 14:41:18 +02:00
Marcelo Zimbres
226c2b228c Factors out code related with async_connect. 2022-06-04 12:39:44 +02:00
Marcelo Zimbres
fee892b6ad Updates the examples. 2022-05-29 14:06:16 +02:00
Marcelo Zimbres
74e0a6ca23 Adds support for tuple in the high level api. 2022-05-29 10:54:23 +02:00
Marcelo Zimbres
ebef2f9e23 Pass the adapter directly to async_exec. 2022-05-28 21:19:00 +02:00
Marcelo Zimbres
485bdc316b Refactors operations that consume pushes. 2022-05-28 10:54:02 +02:00
Marcelo Zimbres
36fb83e1d6 Simplifications in the read timeouts. 2022-05-28 10:08:31 +02:00
Marcelo Zimbres
3753c27dcf Changes how the reader_op works. 2022-05-27 22:32:49 +02:00
Marcelo Zimbres
091cad6ee7 Improves pipelining. 2022-05-26 21:34:29 +02:00
Marcelo Zimbres
1e98c04603 Simplifications in the examples. 2022-05-25 23:02:48 +02:00
Marcelo Zimbres
4858c078f9 Improvements with timeouts and simplifications. 2022-05-25 21:43:37 +02:00
Marcelo Zimbres
3dff0b78de Implements automatic hello. 2022-05-24 22:36:16 +02:00
Marcelo Zimbres
7300f1498b Fixes echo_server example. 2022-05-23 22:44:25 +02:00
Marcelo Zimbres
f6fc45d8ba Small improvements. 2022-05-22 22:21:58 +02:00
Marcelo Zimbres
5eb88b5042 Improvements in the examples. 2022-05-22 20:04:35 +02:00
Marcelo Zimbres
f7d2f3ab28 General improvements. 2022-05-22 17:50:22 +02:00
Marcelo Zimbres
f62ad6a8bf Ports high-level tests to new api. 2022-05-22 15:14:20 +02:00
Marcelo Zimbres
1efcf7b7d8 Fixes chat_room. 2022-05-22 08:27:22 +02:00
Marcelo Zimbres
29166a2cf0 Progresses with porting to channels. 2022-05-21 22:14:46 +02:00
Marcelo Zimbres
215fd7ea73 Renames serializer to request. 2022-05-18 22:22:17 +02:00
Marcelo Zimbres
9b8ca4dbc8 Simplifications. 2022-05-16 23:28:49 +02:00
Marcelo Zimbres
4075dc380d Commit of the following:
- Simplifications.
- Refactors read operations for usability.
- Better naming.
- Simplification of write operations.
- Adds a push communication channel.
2022-05-15 20:48:23 +02:00
Marcelo Zimbres
161cd848f8 Removes function. 2022-05-15 09:01:08 +02:00
Marcelo Zimbres
7c7eed4a53 Refactors the serializer class. 2022-05-14 23:16:16 +02:00
Marcelo Zimbres
e70b00e976 Renames async_receive to async_read_one. 2022-05-14 16:53:56 +02:00
Marcelo Zimbres
52d7b95cf8 Fixes one test. 2022-05-14 16:22:31 +02:00
Marcelo Zimbres
641032fa9a Fixes one more test. 2022-05-09 22:45:14 +02:00
Marcelo Zimbres
2a2a13c4dc Adopts Asio channels to deliver read events instead of callbacks. 2022-05-08 23:03:06 +02:00
Marcelo Zimbres
76741d8466 Simplifies the read operations. 2022-05-08 10:54:59 +02:00
Marcelo Zimbres
0f79214d37 Removes the on_push callback. 2022-05-08 08:58:57 +02:00
Marcelo Zimbres
de476169ae Removes the on_write callback from the receiver. 2022-05-08 08:32:19 +02:00
Marcelo Zimbres
d1bf3a91be Changes:
* Program to benchmark the high level client.
* First steps with sentinel support in the high level client.
2022-05-04 22:54:21 +02:00
Marcelo Zimbres
4be6e6cc1e Passing host and port in the config parameter. 2022-05-02 23:13:15 +02:00
91 changed files with 6099 additions and 5416 deletions

.codecov.yml Normal file

@@ -0,0 +1,13 @@
codecov:
max_report_age: off
require_ci_to_pass: yes
notify:
after_n_builds: 1
wait_for_ci: yes
ignore:
- "benchmarks/cpp/asio/*"
- "examples/*"
- "tests/*"
- "/usr/*"
- "**/boost/*"

.github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,48 @@
name: CI
on: [push, pull_request]
jobs:
posix:
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- { toolset: gcc, compiler: g++-10, install: g++-10, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++20' }
runs-on: ${{ matrix.os }}
env:
CXX: ${{ matrix.compiler }}
CXXFLAGS: -std=${{matrix.cxxstd}} -Wall -Wextra
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Autotools
run: sudo apt install automake
- name: Install compiler
run: sudo apt-get install -y ${{ matrix.install }}
- name: Install Redis
run: sudo apt-get install -y redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
platform_version: 22.04
- name: Configure
run: |
autoreconf -i
./configure --with-boost=${{ steps.install-boost.outputs.BOOST_ROOT }}
- name: Build
run: make
- name: Check
run: make check VERBOSE=1

.github/workflows/coverage.yml vendored Normal file

@@ -0,0 +1,50 @@
name: Coverage
on:
push:
branches:
- master
jobs:
posix:
defaults:
run:
shell: bash
runs-on: ubuntu-22.04
env:
CXX: g++-11
CXXFLAGS: -std=c++20 -Wall -Wextra --coverage -fkeep-inline-functions -fkeep-static-functions -O0
LDFLAGS: --coverage
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Autotools
run: sudo apt install automake
- name: Install compiler
run: sudo apt-get install -y g++-11
- name: Install Redis
run: sudo apt-get install -y redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
platform_version: 22.04
- name: Configure
run: |
autoreconf -i
./configure --with-boost=${{ steps.install-boost.outputs.BOOST_ROOT }}
- name: Build
run: make
- name: Check
run: make check VERBOSE=1
# - name: Generate coverage report
# run: |
# lcov --base-directory . --directory tests/ --output-file aedis.info --capture
# lcov --remove aedis.info '/usr/*' "${{ steps.install-boost.outputs.BOOST_ROOT }}/include/boost/*" --output-file aedis.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
gcov: true
working_directory: ${{ env.GITHUB_WORKSPACE }}

CHANGELOG.md Normal file

@@ -0,0 +1,102 @@
# Changelog
## v1.0.0
* Adds experimental cmake support for windows users.
* Adds new class `aedis::sync` that wraps an `aedis::connection` in
a thread-safe and synchronous API. All free functions from the
`sync.hpp` are now member functions of `aedis::sync`.
* Splits `aedis::connection::async_receive_event` into two functions, one
to receive events and another for server-side pushes, see
`aedis::connection::async_receive_push`.
* Removes collision between `aedis::adapter::adapt` and
`aedis::adapt`.
* Adds a `connection::operation` enum to replace the `cancel_*` member
functions with a single cancel function that takes the operations to be
cancelled as an argument (see the sketch after this list).
* Bugfix: reconnecting from a state where the `connection` object had
unsent commands could cause `async_exec` to never complete under
certain conditions.
* Bugfix: the documentation of the `adapt()` functions was missing from
Doxygen.
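
A minimal, hypothetical sketch of the receive-push split and the new
cancel function described above; the `operation::exec` enumerator, the
completion signature and all names below are assumptions made for
illustration, not a copy of the v1.0.0 headers:

```cpp
// Hypothetical usage sketch, not the verbatim v1.0.0 API.
void sketch(aedis::connection& conn,
            std::vector<aedis::resp3::node<std::string>>& resp)
{
   // Server-side pushes now have their own entry point.
   conn.async_receive_push(aedis::adapt(resp),
      [](boost::system::error_code ec, std::size_t) { /* handle the push or ec */ });

   // A single cancel() taking an operation enum replaces the old cancel_* members.
   conn.cancel(aedis::connection::operation::exec);
}
```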
## v0.3.0
* Adds `experimental::exec` and `receive_event` functions to offer a
thread-safe and synchronous way of executing requests across
threads. See `intro_sync.cpp` and `subscriber_sync.cpp` for
examples.
* `connection::async_read_push` was renamed to `async_receive_event`.
* `connection::async_receive_event` is now being used to communicate
internal events to the user, such as resolve, connect, push etc. For
examples see subscriber.cpp and `connection::event`.
* The `aedis` directory has been moved to `include` to look more
similar to Boost libraries. Users should now replace `-I/aedis-path`
with `-I/aedis-path/include` in the compiler flags.
* The `AUTH` and `HELLO` commands are now sent automatically. This change was
necessary to implement reconnection. The username and password
used in `AUTH` should be provided by the user on
`connection::config`.
* Adds support for reconnection. See `connection::enable_reconnect`.
* Fixes a bug in the `connection::async_run(host, port)` overload
that was causing crashes on reconnection.
* Fixes the executor usage in the connection class. Before these
changes it was imposing `any_io_executor` on users.
* `connection::async_receive_event` is no longer cancelled when
`connection::async_run` exits. This change makes user code simpler.
* The `connection::async_exec` overload taking host and port has been
removed. Use the other `connection::async_run` overload.
* The host and port parameters from `connection::async_run` have been
moved to `connection::config` to better support authentication and
failover.
* Many simplifications in the `chat_room` example.
* Fixes the build with clang compilers and makes some improvements in
the documentation.
## v0.2.1
* Fixes a bug that happens on very high load.
## v0.2.0
* Major rewrite of the high-level API. There is no need to use the low-level API anymore.
* No more callbacks: Sending requests follows the Asio asynchronous model.
* Support for reconnection: Pending requests are not canceled when a connection is lost and are re-sent when a new one is established.
* The library no longer sends HELLO-3 on the user's behalf. This is important to support AUTH properly.
## v0.1.2
* Adds reconnect coroutine in the `echo_server` example.
* Corrects `client::async_wait_for_data` with `make_parallel_group` to launch the operation.
* Improvements in the documentation.
* Avoids dynamic memory allocation in the client class after reconnection.
## v0.1.1
* Improves the documentation and adds some features to the high-level client.
## v0.1.0
* Improvements in the design and documentation.
## v0.0.1
* First release to collect design feedback.

CMakeLists.txt Normal file

@@ -0,0 +1,50 @@
# At the moment the official build system is still Autotools; this
# file is meant to support Aedis on Windows.
cmake_minimum_required(VERSION 3.14)
project(
Aedis
VERSION 1.0.0
DESCRIPTION "An async redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
)
add_library(aedis INTERFACE)
target_include_directories(aedis INTERFACE include)
find_package(Boost 1.79 REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
enable_testing()
include_directories(include)
add_executable(chat_room examples/chat_room.cpp)
add_executable(containers examples/containers.cpp)
add_executable(echo_server examples/echo_server.cpp)
add_executable(intro examples/intro.cpp)
add_executable(intro_sync examples/intro_sync.cpp)
add_executable(serialization examples/serialization.cpp)
add_executable(subscriber examples/subscriber.cpp)
add_executable(subscriber_sync examples/subscriber_sync.cpp)
add_executable(test_low_level tests/low_level.cpp)
add_executable(test_connection tests/connection.cpp)
add_executable(low_level_sync tests/low_level_sync.cpp)
add_test(containers containers)
add_test(intro intro)
add_test(intro_sync intro_sync)
add_test(serialization serialization)
add_test(test_low_level test_low_level)
add_test(test_connection test_connection)
add_test(low_level_sync low_level_sync)
include(GNUInstallDirs)
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/boost
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
FILES_MATCHING
PATTERN "*.hpp"
PATTERN "*.ipp"
)


@@ -1 +0,0 @@
See https://mzimbres.github.io/aedis/#using-aedis


@@ -6,30 +6,30 @@ DISTCHECK_CONFIGURE_FLAGS = CPPFLAGS="$(BOOST_CPPFLAGS) $(CPPFLAGS)" LDFLAGS="$(
AM_CPPFLAGS =
AM_CPPFLAGS += $(BOOST_CPPFLAGS)
#AM_CPPFLAGS += -I$(top_srcdir)/include
AM_CPPFLAGS += -I$(top_srcdir)/include
AM_LDFLAGS =
AM_LDFLAGS += -pthread
SUBDIRS = include
check_PROGRAMS =
check_PROGRAMS += low_level_sync
check_PROGRAMS += intro
check_PROGRAMS += intro_sync
check_PROGRAMS += serialization_sync
check_PROGRAMS += intro_high_level
check_PROGRAMS += aggregates_high_level
check_PROGRAMS += containers
check_PROGRAMS += serialization
check_PROGRAMS += test_low_level
check_PROGRAMS += test_high_level
if HAVE_CXX20
check_PROGRAMS += transaction
check_PROGRAMS += custom_adapter
endif
check_PROGRAMS += test_connection
EXTRA_PROGRAMS =
EXTRA_PROGRAMS += subscriber_high_level
EXTRA_PROGRAMS += commands
if HAVE_CXX20
if HAVE_COROUTINES
EXTRA_PROGRAMS += subscriber
EXTRA_PROGRAMS += subscriber_sync
EXTRA_PROGRAMS += echo_server
EXTRA_PROGRAMS += echo_server_direct
EXTRA_PROGRAMS += chat_room
EXTRA_PROGRAMS += echo_server_client
endif
CLEANFILES =
@@ -38,55 +38,23 @@ CLEANFILES += $(EXTRA_PROGRAMS)
.PHONY: all
all: $(check_PROGRAMS) $(EXTRA_PROGRAMS)
intro_high_level_SOURCES = $(top_srcdir)/examples/intro_high_level.cpp
aggregates_high_level_SOURCES = $(top_srcdir)/examples/aggregates_high_level.cpp
intro_sync_SOURCES = $(top_srcdir)/examples/intro_sync.cpp
serialization_sync_SOURCES = $(top_srcdir)/examples/serialization_sync.cpp
commands_SOURCES = $(top_srcdir)/tools/commands.cpp
subscriber_high_level_SOURCES = $(top_srcdir)/examples/subscriber_high_level.cpp
low_level_sync_SOURCES = $(top_srcdir)/tests/low_level_sync.cpp
test_low_level_SOURCES = $(top_srcdir)/tests/low_level.cpp
test_high_level_SOURCES = $(top_srcdir)/tests/high_level.cpp
if HAVE_CXX20
transaction_SOURCES = $(top_srcdir)/examples/transaction.cpp
intro_SOURCES = $(top_srcdir)/examples/intro.cpp
intro_sync_SOURCES = $(top_srcdir)/examples/intro_sync.cpp
containers_SOURCES = $(top_srcdir)/examples/containers.cpp
serialization_SOURCES = $(top_srcdir)/examples/serialization.cpp
test_connection_SOURCES = $(top_srcdir)/tests/connection.cpp
subscriber_sync_SOURCES = $(top_srcdir)/examples/subscriber_sync.cpp
if HAVE_COROUTINES
subscriber_SOURCES = $(top_srcdir)/examples/subscriber.cpp
custom_adapter_SOURCES = $(top_srcdir)/examples/custom_adapter.cpp
echo_server_SOURCES = $(top_srcdir)/examples/echo_server.cpp
chat_room_SOURCES = $(top_srcdir)/examples/chat_room.cpp
echo_server_SOURCES = $(top_srcdir)/examples/echo_server.cpp
echo_server_direct_SOURCES = $(top_srcdir)/benchmarks/cpp/asio/echo_server_direct.cpp
echo_server_client_SOURCES = $(top_srcdir)/benchmarks/cpp/asio/echo_server_client.cpp
endif
nobase_include_HEADERS =\
$(top_srcdir)/aedis/src.hpp\
$(top_srcdir)/aedis/redis/command.hpp\
$(top_srcdir)/aedis/generic/client.hpp\
$(top_srcdir)/aedis/generic/serializer.hpp\
$(top_srcdir)/aedis/generic/error.hpp\
$(top_srcdir)/aedis/generic/impl/error.ipp\
$(top_srcdir)/aedis/generic/detail/client_ops.hpp\
$(top_srcdir)/aedis/sentinel/command.hpp\
$(top_srcdir)/aedis/aedis.hpp\
$(top_srcdir)/aedis/adapter/detail/adapters.hpp\
$(top_srcdir)/aedis/adapter/error.hpp\
$(top_srcdir)/aedis/adapter/impl/error.ipp\
$(top_srcdir)/aedis/adapter/adapt.hpp\
$(top_srcdir)/aedis/adapter/detail/response_traits.hpp\
$(top_srcdir)/aedis/resp3/node.hpp\
$(top_srcdir)/aedis/resp3/compose.hpp\
$(top_srcdir)/aedis/resp3/detail/read_ops.hpp\
$(top_srcdir)/aedis/resp3/detail/parser.hpp\
$(top_srcdir)/aedis/resp3/error.hpp\
$(top_srcdir)/aedis/resp3/impl/error.ipp\
$(top_srcdir)/aedis/resp3/type.hpp\
$(top_srcdir)/aedis/resp3/read.hpp\
$(top_srcdir)/aedis/redis/impl/command.ipp\
$(top_srcdir)/aedis/sentinel/impl/command.ipp\
$(top_srcdir)/aedis/resp3/detail/impl/parser.ipp\
$(top_srcdir)/aedis/resp3/impl/type.ipp
nobase_noinst_HEADERS =\
$(top_srcdir)/examples/user_session.hpp\
$(top_srcdir)/examples/print.hpp\
$(top_srcdir)/examples/mystruct.hpp\
$(top_srcdir)/tests/check.hpp
nobase_noinst_HEADERS = $(top_srcdir)/examples/print.hpp
TESTS = $(check_PROGRAMS)
@@ -96,9 +64,33 @@ EXTRA_DIST += $(top_srcdir)/doc/DoxygenLayout.xml
EXTRA_DIST += $(top_srcdir)/doc/aedis.css
EXTRA_DIST += $(top_srcdir)/doc/htmlfooter.html
EXTRA_DIST += $(top_srcdir)/doc/htmlheader.html
EXTRA_DIST += $(top_srcdir)/benchmarks/benchmarks.md
EXTRA_DIST += $(top_srcdir)/benchmarks/benchmarks.tex
EXTRA_DIST += $(top_srcdir)/benchmarks/c/libuv/echo_server_direct.c
EXTRA_DIST += $(top_srcdir)/benchmarks/c/libuv/README.md
EXTRA_DIST += $(top_srcdir)/benchmarks/go/echo_server_direct.go
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct/echo_server_direct.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis/echo_server_over_redis.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/Cargo.toml
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/src/main.rs
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/Cargo.toml
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/src/main.rs
EXTRA_DIST += $(top_srcdir)/CMakeLists.txt
.PHONY: doc
doc:
rm -rf ../aedis-gh-pages/*
doxygen doc/Doxyfile
.PHONY: coverage
coverage:
lcov --base-directory . --directory tests/ --output-file aedis.info --capture
lcov --remove aedis.info '/usr/*' '/opt/boost_1_79_0/include/boost/*' --output-file aedis.info
genhtml --output-directory html aedis.info
.PHONY: bench
bench:
pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex


@@ -1 +1,20 @@
See https://mzimbres.github.io/aedis/
Branch | GH Actions | codecov.io |
:-------------: | ---------- | ---------- |
[`master`](https://github.com/mzimbres/aedis/tree/master) | [![CI](https://github.com/mzimbres/aedis/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/mzimbres/aedis/actions/workflows/ci.yml) | [![codecov](https://codecov.io/gh/mzimbres/aedis/branch/master/graph/badge.svg)](https://codecov.io/gh/mzimbres/aedis/branch/master)
## Aedis
An async redis client designed for performance and scalability
### License
Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
### More information
* See the official github-pages for documentation: https://mzimbres.github.io/aedis
### Installation
See https://mzimbres.github.io/aedis/#using-aedis


@@ -1,66 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_ERROR_HPP
#define AEDIS_ADAPTER_ERROR_HPP
#include <system_error>
namespace aedis {
namespace adapter {
/** \brief Adapter errors.
* \ingroup any
*/
enum class error
{
/// Expects a simple RESP3 type but got an aggregate.
expects_simple_type = 1,
/// Expects aggregate type.
expects_aggregate_type,
/// Expects a map but got other aggregate.
expects_map_type,
/// Expects a set aggregate but got something else.
expects_set_type,
/// Nested response not supported.
nested_aggregate_unsupported,
/// Got RESP3 simple error.
simple_error,
/// Got RESP3 blob_error.
blob_error,
/// Aggregate container has incompatible size.
incompatible_size,
/// Not a double
not_a_double,
/// Got RESP3 null type.
null
};
/** \brief Creates an error_code object from an error.
* \ingroup any
*/
boost::system::error_code make_error_code(error e);
} // adapter
} // aedis
namespace std {
template<>
struct is_error_code_enum<::aedis::adapter::error> : std::true_type {};
} // std
#endif // AEDIS_ADAPTER_ERROR_HPP


@@ -1,847 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_HPP
#define AEDIS_HPP
#include <aedis/resp3/read.hpp>
#include <aedis/adapter/adapt.hpp>
#include <aedis/adapter/error.hpp>
#include <aedis/redis/command.hpp>
#include <aedis/sentinel/command.hpp>
#include <aedis/generic/error.hpp>
#include <aedis/generic/client.hpp>
#include <aedis/generic/serializer.hpp>
/** \mainpage Documentation
\tableofcontents
\section Overview
Aedis is a [Redis](https://redis.io/) client library built on top
of [Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html)
that provides simple and efficient communication with a Redis
server. Some of its distinctive features are
@li Support for the latest version of the Redis communication protocol [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
@li First class support for STL containers and C++ built-in types.
@li Serialization and deserialization of your own data types that avoid unnecessary copies.
@li Support for Redis [sentinel](https://redis.io/docs/manual/sentinel).
@li Sync and async API.
In addition to that, Aedis provides a high-level client that offers the following functionality
@li Management of message queues.
@li Simplified handling of server pushes.
@li Zero asymptotic allocations by means of memory reuse.
@li Health checks.
If you are interested in a detailed comparison of Redis clients
and the design rationale behind Aedis jump to \ref why-aedis. Now
let us have a look at the low-level API.
\section low-level-api Low-level API
The low-level API is very useful for tasks that can be performed
over short-lived connections. For example, assume we want to perform
the following steps
@li Set the value of a Redis key.
@li Set the expiration of that key to two seconds.
@li Get and return its old value.
@li Quit
The coroutine-based asynchronous implementation of the steps above looks like
@code
net::awaitable<std::string> set(net::ip::tcp::endpoint ep)
{
// To make code less verbose
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
tcp_socket socket{co_await net::this_coro::executor};
co_await socket.async_connect(ep);
std::string buffer, response;
auto sr = make_serializer(buffer);
sr.push(command::hello, 3);
sr.push(command::set, "key", "Value", "EX", "2", "get");
sr.push(command::quit);
co_await net::async_write(socket, net::buffer(buffer));
buffer.clear();
auto dbuffer = net::dynamic_buffer(buffer);
co_await resp3::async_read(socket, dbuffer); // Hello ignored.
co_await resp3::async_read(socket, dbuffer, adapt(response)); // Set
co_await resp3::async_read(socket, dbuffer); // Quit ignored.
co_return response;
}
@endcode
The simplicity of the code above makes it self-explanatory
@li Connect to the Redis server.
@li Declare a \c std::string to hold the request and add some commands in it with a serializer.
@li Write the payload to the socket and read the responses in the same order they were sent.
@li Return the response to the user.
The @c hello command above is always required and must be sent
first, as it informs the server that we want to communicate over RESP3.
\subsection requests Requests
As stated above, requests are created by defining a storage object
and a serializer that knows how to convert user data into valid
RESP3 wire-format. They are composed of one or more
commands (in Redis documentation they are called [pipelines](https://redis.io/topics/pipelining)),
which means users can add
as many commands to the request as they like, a feature that aids
performance.
The individual commands in a request assume many
different forms
@li With and without keys.
@li Variable length arguments.
@li Ranges.
@li etc.
To account for all these variations, the \c serializer class
offers some member functions, each of them with a couple of
overloads, for example
@code
// Some data to send to Redis.
std::string value = "some value";
std::list<std::string> list {"channel1", "channel2", "channel3"};
std::map<std::string, mystruct> map
{ {"key1", "value1"}
, {"key2", "value2"}
, {"key3", "value3"}};
// Command with no arguments
sr.push(command::quit);
// Command with variable length arguments.
sr.push(command::set, "key", value, "EX", "2");
// Sends a container, no key.
sr.push_range(command::subscribe, list);
// Same as above but an iterator range.
sr.push_range2(command::subscribe, std::cbegin(list), std::cend(list));
// Sends a container, with key.
sr.push_range(command::hset, "key", map);
// Same as above but as iterator range.
sr.push_range2(command::hset, "key", std::cbegin(map), std::cend(map));
@endcode
Once all commands have been added to the request, we can write it
as usual by writing the payload to the socket
@code
co_await net::async_write(socket, buffer(request));
@endcode
\subsubsection requests-serialization Serialization
The \c push and \c push_range functions above work with integers
(e.g. \c int) and \c std::string out of the box. To send your own
data type, define a \c to_bulk function like this
@code
// Example struct.
struct mystruct {
// ...
};
void to_bulk(std::string& to, mystruct const& obj)
{
// Convert obj to a string and call to_bulk (see also add_header
// and add_separator)
auto dummy = "Dummy serialization string.";
aedis::resp3::to_bulk(to, dummy);
}
std::map<std::string, mystruct> map
{ {"key1", {...}}
, {"key2", {...}}
, {"key3", {...}}};
db.send_range(command::hset, "key", map);
@endcode
It is quite common to store JSON strings in Redis, for example.
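For instance, here is a minimal sketch of a \c to_bulk overload that
stores a small struct as a JSON string; the field names and the
hand-written JSON below are illustrative assumptions only
@code
struct mystruct {
   std::string name;
   int age;
};

void to_bulk(std::string& to, mystruct const& obj)
{
   // Illustrative only: any textual format works, JSON is just a common choice.
   auto const payload =
      "{\"name\": \"" + obj.name + "\", \"age\": " + std::to_string(obj.age) + "}";
   aedis::resp3::to_bulk(to, payload);
}
@endcode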
\subsection low-level-responses Responses
To read responses effectively, users must know their RESP3 type;
this can be found in the Redis documentation of each command
(https://redis.io/commands). For example
Command | RESP3 type | Documentation
---------|-------------------------------------|--------------
lpush | Number | https://redis.io/commands/lpush
lrange | Array | https://redis.io/commands/lrange
set | Simple-string, null or blob-string | https://redis.io/commands/set
get | Blob-string | https://redis.io/commands/get
smembers | Set | https://redis.io/commands/smembers
hgetall | Map | https://redis.io/commands/hgetall
Once the RESP3 type of a given response is known we can choose a
proper C++ data structure to receive it in. Fortunately, this is a
simple task for most types. The table below summarises the options
RESP3 type | C++ | Category
---------------|--------------------------------------------------------------|------------------
Simple-string | \c std::string | Simple
Simple-error | \c std::string | Simple
Blob-string | \c std::string, \c std::vector | Simple
Blob-error | \c std::string, \c std::vector | Simple
Number | `long long`, `int`, `std::size_t`, \c std::string | Simple
Double | `double`, \c std::string | Simple
Null | `boost::optional<T>` | Simple
Array | \c std::vector, \c std::list, \c std::array, \c std::deque | Aggregate
Map | \c std::vector, \c std::map, \c std::unordered_map | Aggregate
Set | \c std::vector, \c std::set, \c std::unordered_set | Aggregate
Push | \c std::vector, \c std::map, \c std::unordered_map | Aggregate
Responses that contain nested aggregates or heterogeneous data
types will be given special treatment later. As of this writing,
not all RESP3 types are used by the Redis server, which means in
practice users will be concerned with a reduced subset of the
RESP3 specification. Now let us see some examples
@code
auto dbuffer = dynamic_buffer(buffer);
// To ignore the response.
co_await resp3::async_read(socket, dbuffer, adapt());
// Read in a std::string e.g. get.
std::string str;
co_await resp3::async_read(socket, dbuffer, adapt(str));
// Read in a long long e.g. rpush.
long long number;
co_await resp3::async_read(socket, dbuffer, adapt(number));
// Read in a std::set e.g. smembers.
std::set<T, U> set;
co_await resp3::async_read(socket, dbuffer, adapt(set));
// Read in a std::map e.g. hgetall.
std::map<T, U> map;
co_await resp3::async_read(socket, dbuffer, adapt(map));
// Read in a std::unordered_map e.g. hgetall.
std::unordered_map<T, U> umap;
co_await resp3::async_read(socket, dbuffer, adapt(umap));
// Read in a std::vector e.g. lrange.
std::vector<T> vec;
co_await resp3::async_read(socket, dbuffer, adapt(vec));
@endcode
In other words, it is straightforward: just pass the result of \c
adapt to the read function and make sure the response data type is
compatible with the data structure you are calling @c adapt(...)
with. All standard C++ containers are supported by Aedis.
\subsubsection Optional
It is not uncommon for apps to access keys that do not exist or
that have already expired in the Redis server. To deal with these
cases, Aedis provides support for \c boost::optional. To use it,
wrap your type in \c boost::optional like this
@code
boost::optional<std::unordered_map<T, U>> umap;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(umap));
@endcode
Everything else stays the same; before accessing the data, users will
have to check or assert that the optional contains a value.
\subsubsection heterogeneous_aggregates Heterogeneous aggregates
There are cases where Redis returns aggregates that
contain heterogeneous data, for example, an array that contains
integers, strings, nested sets, etc. Aedis supports reading such
aggregates into a \c std::tuple efficiently as long as they
don't contain third-order nested aggregates, e.g. an array that
contains an array of arrays. For example, to read the response to
a \c hello command we can use the following response type.
@code
using hello_type = std::tuple<
std::string, std::string,
std::string, std::string,
std::string, int,
std::string, int,
std::string, std::string,
std::string, std::string,
std::string, std::vector<std::string>>;
@endcode
Transactions are another example where this feature is useful, for
example, the response to the transaction below
@code
db.send(command::multi);
db.send(command::get, "key1");
db.send(command::lrange, "key2", 0, -1);
db.send(command::hgetall, "key3");
db.send(command::exec);
@endcode
can be read in the following way
@code
std::tuple<
boost::optional<std::string>, // Response to get
boost::optional<std::vector<std::string>>, // Response to lrange
boost::optional<std::map<std::string, std::string>> // Response to hgetall
> trans;
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore the multi status.
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore get's QUEUED status.
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore lrange's QUEUED status.
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore hgetall's QUEUED status.
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(trans));
@endcode
Note that above we are not ignoring the responses to the commands
themselves but only the acknowledgements that they have been
successfully queued. Only after @c exec is received does Redis
execute them in sequence and send all their responses together in an array.
\subsubsection Serialization
As mentioned in \ref requests-serialization, it is common for
users to serialize data before sending it to Redis, e.g. JSON
strings, for example
@code
sr.push(command::set, "key", "{"Server": "Redis"}"); // Unquoted for readability.
sr.push(command::get, "key")
@endcode
For performance and convenience reasons, we may want to avoid
receiving the response to the \c get command above as a string
just to convert it later into, e.g., a deserialized JSON object. To
support this, Aedis calls a user-defined \c from_string function while
parsing the response. In simple terms, define your type
@code
struct mystruct {
// struct fields.
};
@endcode
and deserialize it from a string in a function \c from_string with
the following signature
@code
void from_string(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
{
// Deserializes p into obj.
}
@endcode
After that, you can start receiving data efficiently in the desired
types, e.g. \c mystruct, \c std::map<std::string, mystruct>, etc.
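For example, assuming the \c mystruct and \c from_string shown above,
a hash of serialized values can be read directly into a map (a sketch;
\c std::string keys need no \c from_string of their own)
@code
std::map<std::string, mystruct> map;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(map));
@endcode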
\subsubsection gen-case The general case
As already mentioned, there are cases where responses to Redis
commands won't fit in the model presented above; some examples are
@li Commands (like \c set) whose responses don't have a fixed
RESP3 type. Expecting an \c int and receiving a blob-string
will result in an error.
@li RESP3 aggregates that contain nested aggregates can't be read in STL containers.
@li Transactions with a dynamic number of commands can't be read in a \c std::tuple.
To deal with these cases Aedis provides the \c resp3::node
type, which is the most general form of an element in a response,
be it a simple RESP3 type or an aggregate. It is defined like this
@code
template <class String>
struct node {
// The RESP3 type of the data in this node.
type data_type;
// The number of elements of an aggregate (or 1 for simple data).
std::size_t aggregate_size;
// The depth of this node in the response tree.
std::size_t depth;
// The actual data. For aggregate types this is always empty.
String value;
};
@endcode
Any response to a Redis command can be received in a \c
std::vector<node<std::string>>. The vector can be seen as a
pre-order view of the response tree
(https://en.wikipedia.org/wiki/Tree_traversal#Pre-order,_NLR).
Using it is no different than using other types
@code
// Receives any RESP3 simple data type.
node<std::string> resp;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
@endcode
For example, suppose we want to retrieve a hash data structure
from Redis with \c hgetall; some of the options are
@li \c std::vector<node<std::string>>: Works always.
@li \c std::vector<std::string>: Efficient and flat, all elements as string.
@li \c std::map<std::string, std::string>: Efficient if you need the data as a \c std::map
@li \c std::map<U, V>: Efficient if you are storing serialized data. Avoids temporaries and requires \c from_string for \c U and \c V.
In addition to the above, users can also use the unordered versions of the containers. The same reasoning also applies to sets, e.g. \c smembers.
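As a sketch, the first two options above look like this (variable
names are illustrative)
@code
// The most general form: a pre-order view of the response tree.
std::vector<node<std::string>> general;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(general));

// Flat and efficient: field names and values interleaved as strings.
std::vector<std::string> flat;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(flat));
@endcode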
\subsubsection low-level-adapters Adapters
Users that are not satisfied with any of the options above can
write their own adapters very easily. For example, the adapter below
can be used to print incoming data to the screen.
@code
auto adapter = [](resp3::node<boost::string_view> const& nd, boost::system::error_code&)
{
std::cout << nd << std::endl;
};
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapter);
@endcode
See more in the \ref examples section.
\section high-level-api High-level API
As stated earlier, the low-level API is very useful for tasks that
can be performed over short-lived connections. Sometimes, however,
the need for long-lived connections becomes compelling
@li \b Server \b pushes: Short-lived connections can't deal with server pushes, which means no [client side caching](https://redis.io/topics/client-side-caching), [notifications](https://redis.io/topics/notifications) and [pubsub](https://redis.io/topics/pubsub).
@li \b Performance: Repeatedly opening and closing connections impacts performance severely.
@li \b Pipeline: Code such as shown in \ref low-level-api doesn't support pipelines well since it can only send a fixed number of commands at a time. It misses important optimization opportunities (https://redis.io/topics/pipelining).
A serious implementation that supports the points listed above is
far from trivial and involves many complex asynchronous operations
@li \c async_resolve: Resolve a hostname.
@li \c async_connect: Connect to Redis.
@li \c async_read: Performed in a loop as long as the connection lives.
@li \c async_write: Performed every time a new message is added.
@li \c async_wait: To time out all operations above if the server becomes unresponsive.
Notice that many of the operations above will run concurrently with each other and, in addition to that
@li \c async_write operations require management of the message queue to prevent concurrent writes.
@li Health checks must be sent periodically by the client to detect a dead or unresponsive server.
@li Recovery after a disconnection to avoid losing enqueued commands.
Expecting users to implement these points themselves is
unrealistic and could result in code that performs poorly and
can't handle errors properly. To avoid all of that, Aedis
provides its own implementation. The general form of a program
that uses the high-level API looks like this
@code
int main()
{
net::io_context ioc;
client_type db(ioc.get_executor());
auto recv = std::make_shared<receiver>(db);
db.set_receiver(recv);
db.async_run("127.0.0.1", "6379", [](auto ec){ ... });
ioc.run();
}
@endcode
Users are concerned only with the implementation of the
receiver. For example
@code
// Callbacks.
struct receiver {
void on_resp3(command cmd, node<string_view> const& nd, error_code& ec) { ... }
void on_read(command cmd, std::size_t) { ... }
void on_push(std::size_t n) { }
void on_write(std::size_t n) { ... }
};
@endcode
The functions in the receiver above are callbacks that will be
called when events arrive
@li \c on_resp3: Called when a new chunk of resp3 data is parsed.
@li \c on_read: Called after the response to a command has been successfully read.
@li \c on_push: Called when a server push is received.
@li \c on_write: Called after a request has been successfully written to the stream.
The callbacks above are never called on errors; instead, the \c
async_run function returns. Reconnection is also supported, for
example
@code
net::awaitable<void> run(std::shared_ptr<client_type> db)
{
auto ex = co_await net::this_coro::executor;
boost::asio::steady_timer timer{ex};
for (error_code ec;;) {
co_await db->async_run("127.0.0.1", "6379", redirect_error(use_awaitable, ec));
// Log the error.
std::clog << ec.message() << std::endl;
// Wait two seconds and try again.
timer.expires_after(std::chrono::seconds{2});
co_await timer.async_wait(redirect_error(use_awaitable, ec));
}
}
@endcode
When reconnecting, the client will recover requests that haven't
been sent to Redis yet.
\subsection high-level-sending-cmds Sending commands
The db object from the example above can be passed around to other
objects so that commands can be sent from everywhere in the app.
Sending commands is also similar to what has been discussed before
@code
void foo(client<net::ip::tcp::socket>& db)
{
db.send(command::ping, "O rato roeu a roupa do rei de Roma");
db.send(command::incr, "counter");
db.send(command::set, "key", "Três pratos de trigo para três tigres");
db.send(command::get, "key");
...
}
@endcode
The \c send functions in this case will add commands to the output
queue and send them only if there is no pending response. This is
so because RESP3 is a request/response protocol, which means
clients must wait for responses before sending
the next request.
\section examples Examples
To better illustrate what has been said above, users should have a look at some simple examples.
\b Low \b level \b API (sync)
@li intro_sync.cpp: Synchronous API usage example.
@li serialization_sync.cpp: Shows how to serialize your own types.
\b Low \b level \b API (async-coroutine)
@li subscriber.cpp: Shows how channel subscription works at the low level.
@li transaction.cpp: Shows how to read the response to transactions.
@li custom_adapter.cpp: Shows how to write a response adapter that prints to the screen, see \ref low-level-adapters.
\b High \b level \b API (async only)
@li intro_high_level.cpp: High-level API usage example.
@li aggregates_high_level.cpp: Shows how to receive RESP3 aggregate data types in a general way or in STL containers.
@li subscriber_high_level.cpp: Shows how channel [subscription](https://redis.io/topics/pubsub) works at a high-level.
\b Asynchronous \b Servers (high-level API)
@li echo_server.cpp: Shows the basic principles behind asynchronous communication with a database in an asynchronous server.
@li chat_room.cpp: Shows how to build a scalable chat room.
\section using-aedis Using Aedis
To install and use Aedis you will need
- Boost 1.78 or greater.
- Unix Shell and Make.
- C++14. Some examples require C++20 with coroutine support.
- Redis server.
Some examples will also require interaction with
- redis-cli: Used in one example.
- Redis Sentinel Server: used in some examples.
Aedis has been tested with the following compilers
- gcc: 7.5.0, 8.4.0, 9.3.0, 10.3.0.
- clang: 11.0.0, 10.0.0, 9.0.1, 8.0.1, 7.0.1.
\subsection Installation
The first thing to do is to download and unpack Aedis
```
# Download the latest release on github
$ wget https://github.com/mzimbres/aedis/releases
# Uncompress the tarball and cd into the dir
$ tar -xzvf aedis-version.tar.gz
```
If you can't use \c configure and \c make (e.g. Windows users)
you can already add the directory where you unpacked Aedis to the
include directories in your project, otherwise run
```
# See configure --help for all options.
$ ./configure --prefix=/opt/aedis-version --with-boost=/opt/boost_1_78_0
# Install Aedis in the path specified in --prefix
$ sudo make install
```
and include the following header
```cpp
#include <aedis/src.hpp>
```
in exactly one source file in your applications. At this point you
can start using Aedis. To build the examples and run the tests run
```
# Build aedis examples.
$ make examples
# Test aedis in your machine.
$ make check
```
\subsection Developers
To generate the build system run
```
$ autoreconf -i
```
After that you will have a configure script
that you can run as explained above, for example, to use a
compiler other than the system compiler, run
```
$ CC=/opt/gcc-10.2.0/bin/gcc-10.2.0 CXX=/opt/gcc-10.2.0/bin/g++-10.2.0 CXXFLAGS="-g -Wall -Werror" ./configure ...
$ make distcheck
```
\section why-aedis Why Aedis
At the time of this writing there are seventeen Redis clients
listed in the [official](https://redis.io/docs/clients/#cpp) list.
With so many clients available it is not unlikely that users are
asking themselves why yet another one. In this section I will try
to compare Aedis with the most popular clients and explain why we need
Aedis. Notice however that this is ongoing work, as comparing
clients objectively is difficult and time-consuming.
The most popular client at the time of this writing, ranked by
GitHub stars, is
@li https://github.com/sewenew/redis-plus-plus
Before we start it is worth mentioning some of the things it does
not support
@li RESP3. Without RESP3 it is impossible to support some important
Redis features like client-side caching, among other things.
@li The Asio asynchronous model.
@li Serialization of user data types that avoids temporaries.
@li Error handling with error-code and exception overloads.
@li Health checks.
@li Fine control over memory allocation by means of allocators.
The remaining points will be addressed individually.
@subsection redis-plus-plus
Let us first have a look at what sending a command, a pipeline, and a
transaction looks like
@code
auto redis = Redis("tcp://127.0.0.1:6379");
// Send commands
redis.set("key", "val");
auto val = redis.get("key"); // val is of type OptionalString.
if (val)
std::cout << *val << std::endl;
// Sending pipelines
auto pipe = redis.pipeline();
auto pipe_replies = pipe.set("key", "value")
.get("key")
.rename("key", "new-key")
.rpush("list", {"a", "b", "c"})
.lrange("list", 0, -1)
.exec();
// Parse reply with reply type and index.
auto set_cmd_result = pipe_replies.get<bool>(0);
// ...
// Sending a transaction
auto tx = redis.transaction();
auto tx_replies = tx.incr("num0")
.incr("num1")
.mget({"num0", "num1"})
.exec();
auto incr_result0 = tx_replies.get<long long>(0);
// ...
@endcode
Some of the problems with this API are
@li Heterogeneous treatment of commands, pipelines and transaction.
@li Having to manually finish the pipeline with \c .exec() is a major source of headache. This is not required by the protocol itself but results from the abstraction used.
@li Any API that sends individual commands has a very restricted scope of usability and should be avoided in anything that needs minimal performance guarantees.
@li The API imposes exceptions on users; no error-code overload is provided.
@li No control over dynamic allocations.
@li No way to reuse the buffer for new calls to e.g. \c redis.get in order to avoid further dynamic memory allocations.
@li Error handling of the resolve and connect operations is not clear.
According to the documentation, pipelines in redis-plus-plus have
the following characteristics
> NOTE: By default, creating a Pipeline object is NOT cheap, since
> it creates a new connection.
This is clearly a downside of the API as pipelines should be the
default way of communicating and not an exception; paying such a
high price for each pipeline imposes a severe cost in performance.
Transactions also suffer from the very same problem
> NOTE: Creating a Transaction object is NOT cheap, since it
> creates a new connection.
In Aedis there is no difference between sending one command, a
pipeline or a transaction because creating the request is decoupled
from the IO objects, for example
@code
std::string request;
auto sr = make_serializer(request);
sr.push(command::hello, 3);
sr.push(command::multi);
sr.push(command::ping, "Some message.");
sr.push(command::set, "low-level-key", "some content", "EX", "2");
sr.push(command::exec);
sr.push(command::ping, "Another message.");
net::write(socket, net::buffer(request));
@endcode
The request created above will be sent to Redis in a single
pipeline and imposes no restriction on what it contains e.g. the
number of commands, transactions etc. The problems mentioned above
simply do not exist in Aedis. The way responses are read is
also more flexible
@code
std::string buffer;
auto dbuffer = net::dynamic_buffer(buffer);
std::tuple<std::string, boost::optional<std::string>> response;
resp3::read(socket, dbuffer); // hello
resp3::read(socket, dbuffer); // multi
resp3::read(socket, dbuffer); // ping
resp3::read(socket, dbuffer); // set
resp3::read(socket, dbuffer, adapt(response));
resp3::read(socket, dbuffer); // ping
@endcode
@li The response objects are passed by the caller to the read
functions, so the caller has fine control over memory allocations and
object lifetime.
@li The user can either use error-code or exceptions.
@li Each response can be read individually in the response object
avoiding temporaries.
@li It is possible to ignore responses.
This was the blocking API; now let us compare the async interface
> redis-plus-plus also supports async interface, however, async
> support for Transaction and Subscriber is still on the way.
>
> The async interface depends on third-party event library, and so
> far, only libuv is supported.
Async code in redis-plus-plus looks like the following
@code
auto async_redis = AsyncRedis(opts, pool_opts);
Future<string> ping_res = async_redis.ping();
cout << ping_res.get() << endl;
@endcode
As the reader can see, the async interface is based on futures,
which are also known to perform poorly. The biggest
problem with this async design, however, is that it makes it
impossible to write asynchronous programs correctly, since it
starts an async operation on every command sent instead of
enqueueing a message and triggering a write. It is also not clear
how pipelines are realised with this design (if at all).
In Aedis the send function looks like this
@code
template <class... Ts>
void client::send(Command cmd, Ts const&... args);
@endcode
and the response is delivered through a callback.
\section Acknowledgement
Some people who were helpful in the development of Aedis
@li Richard Hodges ([madmongo1](https://github.com/madmongo1)): For answering pretty much every question I had about Asio and the design of asynchronous programs.
@li Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation (among other things).
\section Reference
See \subpage any.
*/
/** \defgroup any Reference
*
* This page contains the documentation of all user facing code.
*/
#endif // AEDIS_HPP


@@ -1,701 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_GENERIC_CLIENT_HPP
#define AEDIS_GENERIC_CLIENT_HPP
#include <vector>
#include <limits>
#include <functional>
#include <iterator>
#include <algorithm>
#include <utility>
#include <chrono>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/node.hpp>
#include <aedis/redis/command.hpp>
#include <aedis/generic/detail/client_ops.hpp>
namespace aedis {
namespace generic {
/** \brief A high level Redis client.
* \ingroup any
*
* This class keeps a connection open to the Redis server where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*/
template <class AsyncReadWriteStream, class Command>
class client {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Callback type of read operations.
using read_handler_type = std::function<void(Command cmd, std::size_t)>;
/// Callback type of write operations.
using write_handler_type = std::function<void(std::size_t)>;
/// Callback type of push operations.
using push_handler_type = std::function<void(std::size_t)>;
/// Callback type of resp3 operations.
using resp3_handler_type = std::function<void(Command, resp3::node<boost::string_view> const&, boost::system::error_code&)>;
using default_completion_token_type = boost::asio::default_completion_token_t<executor_type>;
/** @brief Configuration parameters.
*/
struct config {
/// Timeout of the \c async_resolve operation.
std::chrono::seconds resolve_timeout = std::chrono::seconds{5};
/// Timeout of the \c async_connect operation.
std::chrono::seconds connect_timeout = std::chrono::seconds{5};
/// Timeout of the \c async_read operation.
std::chrono::seconds read_timeout = std::chrono::seconds{5};
/// Timeout of the \c async_write operation.
std::chrono::seconds write_timeout = std::chrono::seconds{5};
/// Time after which a connection is considered idle if no data is received.
std::chrono::seconds idle_timeout = std::chrono::seconds{10};
/// The maximum size allowed in a read operation.
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)();
};
/** \brief Constructor.
*
* \param ex The executor.
* \param cfg Configuration parameters.
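*
* Example (a sketch; the stream and command types below are only one
* possible choice):
*
* @code
* boost::asio::io_context ioc;
* using client_type = client<boost::asio::ip::tcp::socket, redis::command>;
* client_type::config cfg;
* cfg.idle_timeout = std::chrono::seconds{30};
* client_type db{ioc.get_executor(), cfg};
* @endcode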
*/
client(boost::asio::any_io_executor ex, config cfg = config{})
: resv_{ex}
, read_timer_{ex}
, write_timer_{ex}
, wait_write_timer_{ex}
, check_idle_timer_{ex}
, cfg_{cfg}
, on_read_{[](Command, std::size_t){}}
, on_write_{[](std::size_t){}}
, on_push_{[](std::size_t){}}
, on_resp3_{[](Command, resp3::node<boost::string_view> const&, boost::system::error_code&) {}}
, sr_{requests_}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
, type_{resp3::type::invalid}
, cmd_info_{std::make_pair<Command>(Command::invalid, 0)}
{
// Enforce a minimum idle timeout on the stored configuration
// (cfg_ has already been copied from the cfg parameter above).
if (cfg_.idle_timeout < std::chrono::seconds{2})
cfg_.idle_timeout = std::chrono::seconds{2};
}
/// Returns the executor.
auto get_executor() {return read_timer_.get_executor();}
/** @brief Adds a command to the output command queue.
*
* Adds a command to the end of the next request and signals the
* writer operation that there is a new message waiting to be sent.
* Otherwise the function is equivalent to serializer::push. @sa
* serializer.
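*
* For example (a sketch; \c db denotes a client instance and
* \c redis::command is assumed as the \c Command template argument):
*
* @code
* db.send(redis::command::set, "key", "some value", "EX", "2");
* db.send(redis::command::get, "key");
* @endcode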
*/
template <class... Ts>
void send(Command cmd, Ts const&... args)
{
auto const can_write = prepare_next();
auto const before = requests_.size();
sr_.push(cmd, args...);
auto const d = requests_.size() - before;
BOOST_ASSERT(d != 0);
info_.back().size += d;
if (!has_push_response(cmd)) {
commands_.push_back(std::make_pair(cmd, d));
++info_.back().cmds;
}
if (can_write)
wait_write_timer_.cancel_one();
}
/** @brief Adds a command to the output command queue.
*
* Adds a command to the end of the next request and signals the
* writer operation that there is a new message waiting to be sent.
* Otherwise the function is equivalent to
* serializer::push_range2.
* @sa serializer.
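*
* For example (a sketch mirroring the serializer::push_range2
* documentation; \c db denotes a client instance):
*
* @code
* std::map<std::string, std::string> map
*    { {"key1", "value1"}
*    , {"key2", "value2"} };
*
* db.send_range2(redis::command::hset, "key", std::cbegin(map), std::cend(map));
* @endcode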
*/
template <class Key, class ForwardIterator>
void send_range2(Command cmd, Key const& key, ForwardIterator begin, ForwardIterator end)
{
if (begin == end)
return;
auto const can_write = prepare_next();
auto const before = requests_.size();
sr_.push_range2(cmd, key, begin, end);
auto const d = requests_.size() - before;
BOOST_ASSERT(d != 0);
info_.back().size += d;
if (!has_push_response(cmd)) {
commands_.push_back(std::make_pair(cmd, d));
++info_.back().cmds;
}
if (can_write)
wait_write_timer_.cancel_one();
}
/** @brief Adds a command to the output command queue.
*
* Adds a command to the end of the next request and signals the
* writer operation that there is a new message waiting to be sent.
* Otherwise the function is equivalent to
* serializer::push_range2.
* @sa serializer.
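*
* For example (a sketch; \c db denotes a client instance):
*
* @code
* std::vector<std::string> channels {"channel1", "channel2"};
* db.send_range2(redis::command::subscribe, std::cbegin(channels), std::cend(channels));
* @endcode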
*/
template <class ForwardIterator>
void send_range2(Command cmd, ForwardIterator begin, ForwardIterator end)
{
if (begin == end)
return;
auto const can_write = prepare_next();
auto const before = requests_.size();
sr_.push_range2(cmd, begin, end);
auto const d = requests_.size() - before;
BOOST_ASSERT(d != 0);
info_.back().size += d;
if (!has_push_response(cmd)) {
commands_.push_back(std::make_pair(cmd, d));
++info_.back().cmds;
}
if (can_write)
wait_write_timer_.cancel_one();
}
/** @brief Adds a command to the output command queue.
*
* Adds a command to the end of the next request and signals the
* writer operation that there is a new message waiting to be sent.
* Otherwise the function is equivalent to
* serializer::push_range.
* @sa serializer.
*/
template <class Key, class Range>
void send_range(Command cmd, Key const& key, Range const& range)
{
using std::begin;
using std::end;
send_range2(cmd, key, begin(range), end(range));
}
/** @brief Adds a command to the output command queue.
*
* Adds a command to the end of the next request and signals the
* writer operation that there is a new message waiting to be sent.
* Otherwise the function is equivalent to
* serializer::push_range.
* @sa serializer.
*/
template <class Range>
void send_range(Command cmd, Range const& range)
{
using std::begin;
using std::end;
send_range2(cmd, begin(range), end(range));
}
/** @brief Starts communication with the Redis server asynchronously.
*
* This function performs the following steps
*
* @li Resolves the Redis host as of \c async_resolve with the
* timeout passed in client::config::resolve_timeout.
*
* @li Connects to one of the endpoints returned by the resolve
* operation with the timeout passed in client::config::connect_timeout.
*
* @li Starts the \c async_read operation that keeps reading incoming
* responses. Each individual read uses the timeout passed on
* client::config::read_timeout. After each successful read it
* will call the read or push callback.
*
* @li Starts the \c async_write operation that waits for new commands
* to be sent to Redis. Each individual write uses the timeout
* passed on client::config::write_timeout. After a successful
* write it will call the write callback.
*
* @li Starts the check idle operation with the timeout specified
* in client::config::idle_timeout. If no data is received during
* that time interval \c async_run completes with
* generic::error::idle_timeout.
*
* @li Starts the health-check operation that sends
* redis::command::ping to Redis with a frequency equal to
* client::config::idle_timeout / 2.
*
* In addition to the callbacks mentioned above, the read
* operations will call the resp3 callback as soon as new chunks of
* data become available to the user.
*
* It is safe to call \c async_run after it has returned. In this
* case, any outstanding commands will be sent after the
* connection is re-established. If a disconnect occurs while the
* response to a request has not been received, the client doesn't
* try to resend it to avoid resubmission.
*
* Example:
*
* @code
* awaitable<void> run_with_reconnect(std::shared_ptr<client_type> db)
* {
* auto ex = co_await this_coro::executor;
* asio::steady_timer timer{ex};
*
* for (error_code ec;;) {
* co_await db->async_run("127.0.0.1", "6379", redirect_error(use_awaitable, ec));
* timer.expires_after(std::chrono::seconds{2});
* co_await timer.async_wait(redirect_error(use_awaitable, ec));
* }
* }
* @endcode
*
* \param host Ip address or name of the Redis server.
* \param port Port where the Redis server is listening.
* \param token The completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code);
* @endcode
*
* \return This function returns only when there is an error.
*/
template <class CompletionToken = default_completion_token_type>
auto
async_run(
boost::string_view host = "127.0.0.1",
boost::string_view port = "6379",
CompletionToken token = CompletionToken{})
{
host_ = host;
port_ = port;
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<client>{this}, token, read_timer_, write_timer_, wait_write_timer_);
}
/// Set the read handler.
void set_read_handler(read_handler_type rh)
{ on_read_ = std::move(rh); }
/// Set the write handler.
void set_write_handler(write_handler_type wh)
{ on_write_ = std::move(wh); }
/// Set the push handler.
void set_push_handler(push_handler_type ph)
{ on_push_ = std::move(ph); }
/// Set the resp3 handler.
void set_resp3_handler(resp3_handler_type rh)
{ on_resp3_ = std::move(rh); }
/** @brief Convenience callback setter.
*
* Expects a class with the following member functions
*
* @code
* struct receiver {
* void on_resp3(Command cmd, resp3::node<boost::string_view> const& nd, boost::system::error_code& ec);
* void on_read(Command cmd, std::size_t);
* void on_write(std::size_t n);
* void on_push(std::size_t n);
* };
* @endcode
*/
template <class Receiver>
void set_receiver(std::shared_ptr<Receiver> recv)
{
on_resp3_ = [recv](Command cmd, resp3::node<boost::string_view> const& nd, boost::system::error_code& ec){recv->on_resp3(cmd, nd, ec);};
on_read_ = [recv](Command cmd, std::size_t n){recv->on_read(cmd, n);};
on_write_ = [recv](std::size_t n){recv->on_write(n);};
on_push_ = [recv](std::size_t n){recv->on_push(n);};
}
private:
using command_info_type = std::pair<Command, std::size_t>;
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
template <class T, class V> friend struct detail::reader_op;
template <class T, class V> friend struct detail::ping_after_op;
template <class T> friend struct detail::read_op;
template <class T> friend struct detail::read_until_op;
template <class T> friend struct detail::writer_op;
template <class T> friend struct detail::write_op;
template <class T> friend struct detail::run_op;
template <class T> friend struct detail::connect_op;
template <class T> friend struct detail::resolve_op;
template <class T> friend struct detail::check_idle_op;
template <class T> friend struct detail::init_op;
template <class T> friend struct detail::read_write_check_op;
template <class T> friend struct detail::wait_for_data_op;
void on_resolve()
{
// If we are coming from a connection that was lost we have to
// reset the socket to a fresh state.
socket_ =
std::make_shared<AsyncReadWriteStream>(read_timer_.get_executor());
}
void on_connect()
{
// When we are reconnecting we can't simply call send(hello)
// as that would add the command to the end of the queue; we need
// it to be the first element.
if (info_.empty()) {
// Either we are connecting for the first time or there are
// no commands that were left unresponded from the last
// connection. We can send hello as usual.
BOOST_ASSERT(requests_.empty());
BOOST_ASSERT(commands_.empty());
send(Command::hello, 3);
return;
}
if (info_.front().sent) {
// There is one request that was left unanswered when we
// lost the connection, for example. Since we erase requests right
// after writing them to the socket (to avoid resubmission), it
// is lost and we have to remove it.
// Noop if info_.front().size is already zero, which happens
// when the request was successfully written to the socket.
// In the future we may want to avoid erasing but resend (at
// the risk of resubmission).
requests_.erase(0, info_.front().size);
// Erases the commands that were lost as well.
commands_.erase(
std::begin(commands_),
std::begin(commands_) + info_.front().cmds);
info_.front().cmds = 0;
// Do not erase the info_ front as we will use it below.
// info_.erase(std::begin(info_));
}
// Code below will add a hello to the front of the request and
// update info_ and commands_ accordingly.
auto const old_size = requests_.size();
sr_.push(Command::hello, 3);
auto const hello_size = requests_.size() - old_size;
// Now we have to rotate the hello to the front of the request
// (Remember it must always be the first command).
std::rotate(
std::begin(requests_),
std::begin(requests_) + old_size,
std::end(requests_));
// Updates info_.
info_.front().size += hello_size;
info_.front().cmds += 1;
// Updates commands_
commands_.push_back(std::make_pair(Command::hello, hello_size));
std::rotate(
std::begin(commands_),
std::prev(std::end(commands_)),
std::end(commands_));
}
// Prepares the back of the queue to receive further commands. If
// true is returned the request in the front of the queue can be
// sent to the server.
bool prepare_next()
{
if (info_.empty()) {
info_.push_back({});
return true;
}
if (info_.front().sent) {
// There is a pending response, we can't modify the front of
// the vector.
BOOST_ASSERT(info_.front().cmds != 0);
if (info_.size() == 1)
info_.push_back({});
return false;
}
// When cmds = 0 there are only commands with push response on
// the request and we are not waiting for any response.
return info_.front().cmds == 0;
}
// Returns true when the next request can be written.
bool on_cmd(command_info_type)
{
BOOST_ASSERT(!info_.empty());
BOOST_ASSERT(!commands_.empty());
commands_.erase(std::begin(commands_));
if (--info_.front().cmds != 0)
return false;
info_.erase(std::begin(info_));
return !info_.empty();
}
// Resolves the address passed in async_run and stores the results
// in endpoints_.
template <class CompletionToken = default_completion_token_type>
auto
async_resolve(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_op<client>{this}, token, resv_.get_executor());
}
// Connects the socket to one of the endpoints in endpoints_ and
// stores the successful endpoint in endpoint_.
template <class CompletionToken = default_completion_token_type>
auto
async_connect(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_op<client>{this}, token, write_timer_.get_executor());
}
template <class CompletionToken = default_completion_token_type>
auto
async_read_until(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::read_until_op<client>{this}, token, read_timer_.get_executor());
}
// Reads a complete resp3 response from the socket using the
// timeout config::read_timeout. On a successful read calls
// on_read_ or on_push_ depending on whether the response is a push
// or a response to a command.
template <class CompletionToken = default_completion_token_type>
auto
async_read(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::read_op<client>{this}, token, read_timer_.get_executor());
}
// Loops on async_read described above.
template <class CompletionToken = default_completion_token_type>
auto
reader(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<client, Command>{this}, token, read_timer_.get_executor());
}
// Write with a timeout.
template <class CompletionToken = default_completion_token_type>
auto
async_write(
CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::write_op<client>{this}, token, write_timer_);
}
template <class CompletionToken = default_completion_token_type>
auto
writer(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<client>{this}, token, wait_write_timer_);
}
template <class CompletionToken = default_completion_token_type>
auto
async_init(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::init_op<client>{this}, token, write_timer_, resv_);
}
template <class CompletionToken = default_completion_token_type>
auto
async_read_write_check(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::read_write_check_op<client>{this}, token, read_timer_, write_timer_, wait_write_timer_, check_idle_timer_);
}
template <class CompletionToken = default_completion_token_type>
auto
async_ping_after(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_after_op<client, Command>{this}, token, read_timer_);
}
template <class CompletionToken = default_completion_token_type>
auto
async_wait_for_data(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::wait_for_data_op<client>{this}, token, read_timer_);
}
template <class CompletionToken = default_completion_token_type>
auto
async_check_idle(CompletionToken&& token = default_completion_token_type{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<client>{this}, token, check_idle_timer_);
}
void on_reader_exit()
{
socket_->close();
wait_write_timer_.expires_at(std::chrono::steady_clock::now());
}
// Stores information about a request.
struct info {
// Set to true before calling async_write.
bool sent = false;
// Request size in bytes. After a successful write it is set to
// zero.
std::size_t size = 0;
// The number of commands it contains. Commands with push
// responses are not counted.
std::size_t cmds = 0;
};
// Used to resolve the host on async_resolve.
boost::asio::ip::tcp::resolver resv_;
// The tcp socket.
std::shared_ptr<AsyncReadWriteStream> socket_;
// Timer used with async_read.
boost::asio::steady_timer read_timer_;
// Timer used with async_write.
boost::asio::steady_timer write_timer_;
// Timer that is canceled when a new message is added to the output
// queue.
boost::asio::steady_timer wait_write_timer_;
// Check idle timer.
boost::asio::steady_timer check_idle_timer_;
// Configuration parameters.
config cfg_;
// Called when a complete message is read.
read_handler_type on_read_;
// Called when a request has been written to the socket.
write_handler_type on_write_;
// Called when a complete push message is received.
push_handler_type on_push_;
// Called by the parser after each new chunk of resp3 data is
// processed.
resp3_handler_type on_resp3_;
// Buffer used by the read operations.
std::string read_buffer_;
// Requests payload and its serializer.
std::string requests_;
serializer<std::string> sr_;
// The commands contained in the requests.
std::vector<command_info_type> commands_;
// Info about the requests.
std::vector<info> info_;
// Last time we received data.
time_point_type last_data_;
// Used by the read_op.
resp3::type type_;
// Used by the read_op.
command_info_type cmd_info_;
// See async_connect.
boost::asio::ip::tcp::endpoint endpoint_;
// See async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
// Host and port passed to async_run.
boost::string_view host_;
boost::string_view port_;
};
} // generic
} // aedis
#endif // AEDIS_GENERIC_CLIENT_HPP


@@ -1,554 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_GENERIC_CLIENT_OPS_HPP
#define AEDIS_GENERIC_CLIENT_OPS_HPP
#include <array>
#include <boost/system.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/connect.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/generic/error.hpp>
namespace aedis {
namespace generic {
namespace detail {
#include <boost/asio/yield.hpp>
template <class Client, class Command>
struct ping_after_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void
operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro)
{
BOOST_ASSERT((cli->cfg_.idle_timeout / 2) != std::chrono::seconds{0});
cli->read_timer_.expires_after(cli->cfg_.idle_timeout / 2);
yield cli->read_timer_.async_wait(std::move(self));
if (ec) {
self.complete(ec);
return;
}
// The timer fired, send the ping.
cli->send(Command::ping);
self.complete({});
}
}
};
template <class Client>
struct read_until_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
// Waits for incoming data.
yield
boost::asio::async_read_until(
*cli->socket_,
boost::asio::dynamic_buffer(cli->read_buffer_, cli->cfg_.max_read_size),
"\r\n",
std::move(self));
// Cancels the async_ping_after.
cli->read_timer_.cancel();
self.complete(ec);
}
}
};
template <class Client>
struct wait_for_data_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return cli->async_read_until(token);},
[this](auto token) { return cli->async_ping_after(token);}
).async_wait(
boost::asio::experimental::wait_for_all(),
std::move(self));
// The order of completion is not important.
self.complete(ec1);
}
}
};
template <class Client>
struct check_idle_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro) for(;;)
{
cli->check_idle_timer_.expires_after(cli->cfg_.idle_timeout);
yield cli->check_idle_timer_.async_wait(std::move(self));
if (ec) {
self.complete(ec);
return;
}
auto const now = std::chrono::steady_clock::now();
if (cli->last_data_ + cli->cfg_.idle_timeout < now) {
cli->on_reader_exit();
self.complete(error::idle_timeout);
return;
}
cli->last_data_ = now;
}
}
};
template <class Client>
struct resolve_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::resolver::results_type res = {})
{
reenter (coro)
{
yield
cli->resv_.async_resolve(cli->host_.data(), cli->port_.data(), std::move(self));
if (ec) {
self.complete(ec);
return;
}
cli->endpoints_ = res;
self.complete({});
}
}
};
template <class Client>
struct connect_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& ep = {})
{
reenter (coro)
{
yield
boost::asio::async_connect(
*cli->socket_,
cli->endpoints_,
std::move(self));
if (ec) {
self.complete(ec);
return;
}
cli->endpoint_ = ep;
self.complete({});
}
}
};
template <class Client>
struct init_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
// Tries to resolve with a timeout. We can use the writer
// timer here as there is no ongoing write operation.
cli->write_timer_.expires_after(cli->cfg_.resolve_timeout);
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return cli->async_resolve(token);},
[this](auto token) { return cli->write_timer_.async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1);
return;
}
cli->on_resolve();
} break;
case 1:
{
if (!ec2) {
self.complete(generic::error::resolve_timeout);
return;
}
} break;
default: BOOST_ASSERT(false);
}
// Tries a connection with a timeout. We can use the writer
// timer here as there is no ongoing write operation.
cli->write_timer_.expires_after(cli->cfg_.connect_timeout);
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return cli->async_connect(token);},
[this](auto token) { return cli->write_timer_.async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1);
return;
}
cli->on_connect();
} break;
case 1:
{
if (!ec2) {
self.complete(generic::error::connect_timeout);
return;
}
} break;
default: BOOST_ASSERT(false);
}
self.complete({});
}
}
};
template <class Client>
struct read_write_check_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 3> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, boost::system::error_code ec3 = {})
{
reenter (coro)
{
// Starts the reader and writer ops.
cli->wait_write_timer_.expires_at(std::chrono::steady_clock::time_point::max());
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return cli->writer(token);},
[this](auto token) { return cli->reader(token);},
[this](auto token) { return cli->async_check_idle(token);}
).async_wait(
boost::asio::experimental::wait_for_one_error(),
std::move(self));
switch (order[0]) {
case 0:
{
BOOST_ASSERT(ec1);
self.complete(ec1);
} break;
case 1:
{
BOOST_ASSERT(ec2);
self.complete(ec2);
} break;
case 2:
{
BOOST_ASSERT(ec3);
self.complete(ec3);
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Client>
struct run_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro)
{
yield cli->async_init(std::move(self));
if (ec) {
self.complete(ec);
return;
}
yield cli->async_read_write_check(std::move(self));
if (ec) {
self.complete(ec);
return;
}
BOOST_ASSERT(false);
}
}
};
template <class Client>
struct write_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
BOOST_ASSERT(!cli->info_.empty());
BOOST_ASSERT(cli->info_.front().size != 0);
BOOST_ASSERT(!cli->requests_.empty());
cli->write_timer_.expires_after(cli->cfg_.write_timeout);
cli->info_.front().sent = true;
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return boost::asio::async_write(*cli->socket_, boost::asio::buffer(cli->requests_.data(), cli->info_.front().size), token);},
[this](auto token) { return cli->write_timer_.async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1);
return;
}
} break;
case 1:
{
if (!ec2) {
self.complete(generic::error::write_timeout);
return;
}
} break;
default: BOOST_ASSERT(false);
}
BOOST_ASSERT(!cli->info_.empty());
BOOST_ASSERT(cli->info_.front().size != 0);
BOOST_ASSERT(!cli->requests_.empty());
BOOST_ASSERT(n == cli->info_.front().size);
cli->requests_.erase(0, n);
cli->info_.front().size = 0;
if (cli->info_.front().cmds == 0)
cli->info_.erase(std::begin(cli->info_));
cli->on_write_(n);
self.complete({});
}
}
};
template <class Client>
struct writer_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()(Self& self , boost::system::error_code ec = {})
{
reenter (coro) for (;;)
{
yield cli->async_write(std::move(self));
if (ec) {
cli->socket_->close();
self.complete(ec);
return;
}
yield cli->wait_write_timer_.async_wait(std::move(self));
if (!cli->socket_->is_open()) {
self.complete(error::write_stop_requested);
return;
}
}
}
};
template <class Client>
struct read_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
cli->read_timer_.expires_after(cli->cfg_.read_timeout);
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resp3::async_read(*cli->socket_, boost::asio::dynamic_buffer(cli->read_buffer_, cli->cfg_.max_read_size), [cli_ = cli](resp3::node<boost::string_view> const& nd, boost::system::error_code& ec) mutable {cli_->on_resp3_(cli_->cmd_info_.first, nd, ec);}, token);},
[this](auto token) { return cli->read_timer_.async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1);
return;
}
} break;
case 1:
{
if (!ec2) {
self.complete(generic::error::read_timeout);
return;
}
} break;
default: BOOST_ASSERT(false);
}
if (cli->type_ == resp3::type::push) {
cli->on_push_(n);
} else {
if (cli->on_cmd(cli->cmd_info_))
cli->wait_write_timer_.cancel_one();
cli->on_read_(cli->cmd_info_.first, n);
}
self.complete({});
}
}
};
template <class Client, class Command>
struct reader_op {
Client* cli;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
if (cli->read_buffer_.empty()) {
yield cli->async_wait_for_data(std::move(self));
if (ec) {
cli->on_reader_exit();
self.complete(ec);
return;
}
}
BOOST_ASSERT(!cli->read_buffer_.empty());
cli->type_ = resp3::to_type(cli->read_buffer_.front());
cli->cmd_info_ = std::make_pair(Command::invalid, 0);
if (cli->type_ != resp3::type::push) {
BOOST_ASSERT(!cli->commands_.empty());
cli->cmd_info_ = cli->commands_.front();
}
cli->last_data_ = std::chrono::steady_clock::now();
yield cli->async_read(std::move(self));
if (ec) {
cli->on_reader_exit();
self.complete(ec);
return;
}
}
}
};
#include <boost/asio/unyield.hpp>
} // detail
} // generic
} // aedis
#endif // AEDIS_GENERIC_CLIENT_OPS_HPP


@@ -1,54 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_GENERIC_ERROR_HPP
#define AEDIS_GENERIC_ERROR_HPP
#include <boost/system/error_code.hpp>
namespace aedis {
namespace generic {
/** \brief Generic errors.
* \ingroup any
*/
enum class error
{
/// Represents the timeout of the resolve operation.
resolve_timeout = 1,
/// Represents the timeout of the connect operation.
connect_timeout,
/// Represents the timeout of the read operation.
read_timeout,
/// Represents the timeout of the write operation.
write_timeout,
/// Idle timeout.
idle_timeout,
/// Write stop requested.
write_stop_requested,
};
/** \brief Creates an error_code object from an error.
* \ingroup any
*/
boost::system::error_code make_error_code(error e);
} // generic
} // aedis
namespace std {
template<>
struct is_error_code_enum<::aedis::generic::error> : std::true_type {};
} // std
#endif // AEDIS_GENERIC_ERROR_HPP


@@ -1,48 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/generic/error.hpp>
namespace aedis {
namespace generic {
namespace detail {
struct error_category_impl : boost::system::error_category {
char const* name() const noexcept override
{
return "aedis.generic";
}
std::string message(int ev) const override
{
switch(static_cast<error>(ev)) {
case error::resolve_timeout: return "Resolve operation timeout.";
case error::connect_timeout: return "Connect operation timeout.";
case error::read_timeout: return "Read operation timeout.";
case error::write_timeout: return "Write operation timeout.";
case error::idle_timeout: return "Idle timeout.";
case error::write_stop_requested: return "Write stop requested.";
default: BOOST_ASSERT(false);
}
}
};
boost::system::error_category const& category()
{
static error_category_impl instance;
return instance;
}
} // detail
boost::system::error_code make_error_code(error e)
{
return boost::system::error_code{static_cast<int>(e), detail::category()};
}
} // generic
} // aedis


@@ -1,203 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_GENERIC_SERIALIZER_HPP
#define AEDIS_GENERIC_SERIALIZER_HPP
#include <boost/hana.hpp>
#include <aedis/resp3/compose.hpp>
// NOTE: Consider detecting tuples in the type in the parameter pack
// to calculate the header size correctly.
//
// NOTE: For some commands like hset it would be a good idea to assert
// the value type is a pair.
namespace aedis {
namespace generic {
/** @brief Creates Redis requests from user data.
* \ingroup any
*
* A request is composed of one or more redis commands and is
* referred to in the redis documentation as a pipeline, see
* https://redis.io/topics/pipelining.
*
* Example
*
* @code
* std::string request;
* auto sr = make_serializer(request);
* sr.push(command::hello, 3);
* sr.push(command::flushall);
* sr.push(command::ping);
* sr.push(command::incr, "key");
* sr.push(command::quit);
* co_await async_write(socket, buffer(request));
* @endcode
*
* \tparam Storage The storage type, e.g. \c std::string.
*
* \remarks Non-string types will be converted to string by using \c
* to_bulk, which must be made available over ADL.
*/
template <class Storage>
class serializer {
private:
Storage* request_;
public:
/** \brief Constructor
*
* \param storage The underlying storage object i.e. where the
* request is to be stored.
*/
serializer(Storage& storage) : request_(&storage) {}
/** @brief Appends a new command to the end of the request.
*
* For example
*
* \code
* std::string request;
* auto sr = make_serializer(request);
* sr.push(command::set, "key", "some string", "EX", "2");
* \endcode
*
* will add the \c set command with value "some string" and an
* expiration of 2 seconds.
*
* \param cmd The command, e.g. a Redis or Sentinel command.
* \param args Command arguments.
*/
template <class Command, class... Ts>
void push(Command cmd, Ts const&... args)
{
using boost::hana::for_each;
using boost::hana::make_tuple;
using resp3::type;
auto constexpr pack_size = sizeof...(Ts);
resp3::add_header(*request_, type::array, 1 + pack_size);
resp3::add_bulk(*request_, to_string(cmd));
resp3::add_bulk(*request_, make_tuple(args...));
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a key and have a
* dynamic range of arguments. For example
*
* @code
* std::map<std::string, std::string> map
* { {"key1", "value1"}
* , {"key2", "value2"}
* , {"key3", "value3"}
* };
*
* std::string request;
* auto sr = make_serializer(request);
* sr.push_range2(command::hset, "key", std::cbegin(map), std::cend(map));
* @endcode
*
* \param cmd The command e.g. Redis or Sentinel command.
* \param key The command key.
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class Command, class Key, class ForwardIterator>
void push_range2(Command cmd, Key const& key, ForwardIterator begin, ForwardIterator end)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = resp3::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
resp3::add_header(*request_, type::array, 2 + size * distance);
resp3::add_bulk(*request_, to_string(cmd));
resp3::add_bulk(*request_, key);
for (; begin != end; ++begin)
resp3::add_bulk(*request_, *begin);
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a dynamic number
* of arguments and don't have a key. For example
*
* \code
* std::set<std::string> channels
*    { "channel1", "channel2", "channel3" };
*
* std::string request;
* auto sr = make_serializer(request);
* sr.push_range2(command::subscribe, std::cbegin(channels), std::cend(channels));
* \endcode
*
* \param cmd The Redis command
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class Command, class ForwardIterator>
void push_range2(Command cmd, ForwardIterator begin, ForwardIterator end)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = resp3::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
resp3::add_header(*request_, type::array, 1 + size * distance);
resp3::add_bulk(*request_, to_string(cmd));
for (; begin != end; ++begin)
resp3::add_bulk(*request_, *begin);
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking iterators (i.e. push_range2).
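*
* For example (a sketch; \c sr denotes a serializer created with
* \c make_serializer as in the examples above):
*
* @code
* std::map<std::string, std::string> map
*    { {"key1", "value1"}
*    , {"key2", "value2"} };
*
* sr.push_range(command::hset, "key", map);
* @endcode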
*/
template <class Command, class Key, class Range>
void push_range(Command cmd, Key const& key, Range const& range)
{
using std::begin;
using std::end;
push_range2(cmd, key, begin(range), end(range));
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking iterators (i.e. push_range2).
*/
template <class Command, class Range>
void push_range(Command cmd, Range const& range)
{
using std::begin;
using std::end;
push_range2(cmd, begin(range), end(range));
}
};
/** \brief Creates a serializer.
* \ingroup any
* \param storage The string.
*/
template <class CharT, class Traits, class Allocator>
auto make_serializer(std::basic_string<CharT, Traits, Allocator>& storage)
{
return serializer<std::basic_string<CharT, Traits, Allocator>>(storage);
}
} // generic
} // aedis
#endif // AEDIS_GENERIC_SERIALIZER_HPP


@@ -1,457 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_REDIS_COMMAND_HPP
#define AEDIS_REDIS_COMMAND_HPP
#include <ostream>
#include <string>
namespace aedis {
namespace redis {
/** \brief Redis commands.
* \ingroup any
*
* The full list of of commands can be found at
* https://redis.io/commands.
*
* \remark This list was created with the help of the \c command
* command.
*/
enum class command {
/// https://redis.io/commands/acl
acl,
/// https://redis.io/commands/append
append,
/// https://redis.io/commands/asking
asking,
/// https://redis.io/commands/auth
auth,
/// https://redis.io/commands/bgrewriteaof
bgrewriteaof,
/// https://redis.io/commands/bgsave
bgsave,
/// https://redis.io/commands/bitcount
bitcount,
/// https://redis.io/commands/bitfield
bitfield,
/// https://redis.io/commands/bitfield_ro
bitfield_ro,
/// https://redis.io/commands/bitop
bitop,
/// https://redis.io/commands/bitpos
bitpos,
/// https://redis.io/commands/blpop
blpop,
/// https://redis.io/commands/brpop
brpop,
/// https://redis.io/commands/brpoplpush
brpoplpush,
/// https://redis.io/commands/bzpopmax
bzpopmax,
/// https://redis.io/commands/bzpopmin
bzpopmin,
/// https://redis.io/commands/client
client,
/// https://redis.io/commands/cluster
cluster,
/// https://redis.io/commands/command
command,
/// https://redis.io/commands/config
config,
/// https://redis.io/commands/dbsize
dbsize,
/// https://redis.io/commands/debug
debug,
/// https://redis.io/commands/decr
decr,
/// https://redis.io/commands/decrby
decrby,
/// https://redis.io/commands/del
del,
/// https://redis.io/commands/discard
discard,
/// https://redis.io/commands/dump
dump,
/// https://redis.io/commands/echo
echo,
/// https://redis.io/commands/eval
eval,
/// https://redis.io/commands/evalsha
evalsha,
/// https://redis.io/commands/exec
exec,
/// https://redis.io/commands/exists
exists,
/// https://redis.io/commands/expire
expire,
/// https://redis.io/commands/expireat
expireat,
/// https://redis.io/commands/flushall
flushall,
/// https://redis.io/commands/flushdb
flushdb,
/// https://redis.io/commands/geoadd
geoadd,
/// https://redis.io/commands/geodist
geodist,
/// https://redis.io/commands/geohash
geohash,
/// https://redis.io/commands/geopos
geopos,
/// https://redis.io/commands/georadius
georadius,
/// https://redis.io/commands/georadius_ro
georadius_ro,
/// https://redis.io/commands/georadiusbymember
georadiusbymember,
/// https://redis.io/commands/georadiusbymember_ro
georadiusbymember_ro,
/// https://redis.io/commands/get
get,
/// https://redis.io/commands/getbit
getbit,
/// https://redis.io/commands/getrange
getrange,
/// https://redis.io/commands/getset
getset,
/// https://redis.io/commands/hdel
hdel,
/// https://redis.io/commands/hello
hello,
/// https://redis.io/commands/hexists
hexists,
/// https://redis.io/commands/hget
hget,
/// https://redis.io/commands/hgetall
hgetall,
/// https://redis.io/commands/hincrby
hincrby,
/// https://redis.io/commands/hincrbyfloat
hincrbyfloat,
/// https://redis.io/commands/hkeys
hkeys,
/// https://redis.io/commands/hlen
hlen,
/// https://redis.io/commands/hmget
hmget,
/// https://redis.io/commands/hmset
hmset,
/// https://redis.io/commands/hscan
hscan,
/// https://redis.io/commands/hset
hset,
/// https://redis.io/commands/hsetnx
hsetnx,
/// https://redis.io/commands/hstrlen
hstrlen,
/// https://redis.io/commands/hvals
hvals,
/// https://redis.io/commands/incr
incr,
/// https://redis.io/commands/incrby
incrby,
/// https://redis.io/commands/incrbyfloat
incrbyfloat,
/// https://redis.io/commands/info
info,
/// https://redis.io/commands/keys
keys,
/// https://redis.io/commands/lastsave
lastsave,
/// https://redis.io/commands/latency
latency,
/// https://redis.io/commands/lindex
lindex,
/// https://redis.io/commands/linsert
linsert,
/// https://redis.io/commands/llen
llen,
/// https://redis.io/commands/lolwut
lolwut,
/// https://redis.io/commands/lpop
lpop,
/// https://redis.io/commands/lpos
lpos,
/// https://redis.io/commands/lpush
lpush,
/// https://redis.io/commands/lpushx
lpushx,
/// https://redis.io/commands/lrange
lrange,
/// https://redis.io/commands/lrem
lrem,
/// https://redis.io/commands/lset
lset,
/// https://redis.io/commands/ltrim
ltrim,
/// https://redis.io/commands/memory
memory,
/// https://redis.io/commands/mget
mget,
/// https://redis.io/commands/migrate
migrate,
/// https://redis.io/commands/module
module,
/// https://redis.io/commands/monitor
monitor,
/// https://redis.io/commands/move
move,
/// https://redis.io/commands/mset
mset,
/// https://redis.io/commands/msetnx
msetnx,
/// https://redis.io/commands/multi
multi,
/// https://redis.io/commands/object
object,
/// https://redis.io/commands/persist
persist,
/// https://redis.io/commands/pexpire
pexpire,
/// https://redis.io/commands/pexpireat
pexpireat,
/// https://redis.io/commands/pfadd
pfadd,
/// https://redis.io/commands/pfcount
pfcount,
/// https://redis.io/commands/pfdebug
pfdebug,
/// https://redis.io/commands/pfmerge
pfmerge,
/// https://redis.io/commands/pfselftest
pfselftest,
/// https://redis.io/commands/ping
ping,
/// https://redis.io/commands/post
post,
/// https://redis.io/commands/psetex
psetex,
/// https://redis.io/commands/psubscribe
psubscribe,
/// https://redis.io/commands/psync
psync,
/// https://redis.io/commands/pttl
pttl,
/// https://redis.io/commands/publish
publish,
/// https://redis.io/commands/pubsub
pubsub,
/// https://redis.io/commands/punsubscribe
punsubscribe,
/// https://redis.io/commands/randomkey
randomkey,
/// https://redis.io/commands/readonly
readonly,
/// https://redis.io/commands/readwrite
readwrite,
/// https://redis.io/commands/rename
rename,
/// https://redis.io/commands/renamenx
renamenx,
/// https://redis.io/commands/replconf
replconf,
/// https://redis.io/commands/replicaof
replicaof,
/// https://redis.io/commands/restore
restore,
/// https://redis.io/commands/role
role,
/// https://redis.io/commands/rpop
rpop,
/// https://redis.io/commands/rpoplpush
rpoplpush,
/// https://redis.io/commands/rpush
rpush,
/// https://redis.io/commands/rpushx
rpushx,
/// https://redis.io/commands/sadd
sadd,
/// https://redis.io/commands/save
save,
/// https://redis.io/commands/scan
scan,
/// https://redis.io/commands/scard
scard,
/// https://redis.io/commands/script
script,
/// https://redis.io/commands/sdiff
sdiff,
/// https://redis.io/commands/sdiffstore
sdiffstore,
/// https://redis.io/commands/select
select,
/// https://redis.io/commands/set
set,
/// https://redis.io/commands/setbit
setbit,
/// https://redis.io/commands/setex
setex,
/// https://redis.io/commands/setnx
setnx,
/// https://redis.io/commands/setrange
setrange,
/// https://redis.io/commands/shutdown
shutdown,
/// https://redis.io/commands/sinter
sinter,
/// https://redis.io/commands/sinterstore
sinterstore,
/// https://redis.io/commands/sismember
sismember,
/// https://redis.io/commands/slaveof
slaveof,
/// https://redis.io/commands/slowlog
slowlog,
/// https://redis.io/commands/smembers
smembers,
/// https://redis.io/commands/smove
smove,
/// https://redis.io/commands/sort
sort,
/// https://redis.io/commands/spop
spop,
/// https://redis.io/commands/srandmember
srandmember,
/// https://redis.io/commands/srem
srem,
/// https://redis.io/commands/sscan
sscan,
/// https://redis.io/commands/stralgo
stralgo,
/// https://redis.io/commands/strlen
strlen,
/// https://redis.io/commands/subscribe
subscribe,
/// https://redis.io/commands/substr
substr,
/// https://redis.io/commands/sunion
sunion,
/// https://redis.io/commands/sunionstore
sunionstore,
/// https://redis.io/commands/swapdb
swapdb,
/// https://redis.io/commands/sync
sync,
/// https://redis.io/commands/time
time,
/// https://redis.io/commands/touch
touch,
/// https://redis.io/commands/ttl
ttl,
/// https://redis.io/commands/type
type,
/// https://redis.io/commands/unlink
unlink,
/// https://redis.io/commands/quit
quit,
/// https://redis.io/commands/unsubscribe
unsubscribe,
/// https://redis.io/commands/unwatch
unwatch,
/// https://redis.io/commands/wait
wait,
/// https://redis.io/commands/watch
watch,
/// https://redis.io/commands/xack
xack,
/// https://redis.io/commands/xadd
xadd,
/// https://redis.io/commands/xclaim
xclaim,
/// https://redis.io/commands/xdel
xdel,
/// https://redis.io/commands/xgroup
xgroup,
/// https://redis.io/commands/xinfo
xinfo,
/// https://redis.io/commands/xlen
xlen,
/// https://redis.io/commands/xpending
xpending,
/// https://redis.io/commands/xrange
xrange,
/// https://redis.io/commands/xread
xread,
/// https://redis.io/commands/xreadgroup
xreadgroup,
/// https://redis.io/commands/xrevrange
xrevrange,
/// https://redis.io/commands/xsetid
xsetid,
/// https://redis.io/commands/xtrim
xtrim,
/// https://redis.io/commands/zadd
zadd,
/// https://redis.io/commands/zcard
zcard,
/// https://redis.io/commands/zcount
zcount,
/// https://redis.io/commands/zincrby
zincrby,
/// https://redis.io/commands/zinterstore
zinterstore,
/// https://redis.io/commands/zlexcount
zlexcount,
/// https://redis.io/commands/zpopmax
zpopmax,
/// https://redis.io/commands/zpopmin
zpopmin,
/// https://redis.io/commands/zrange
zrange,
/// https://redis.io/commands/zrangebylex
zrangebylex,
/// https://redis.io/commands/zrangebyscore
zrangebyscore,
/// https://redis.io/commands/zrank
zrank,
/// https://redis.io/commands/zrem
zrem,
/// https://redis.io/commands/zremrangebylex
zremrangebylex,
/// https://redis.io/commands/zremrangebyrank
zremrangebyrank,
/// https://redis.io/commands/zremrangebyscore
zremrangebyscore,
/// https://redis.io/commands/zrevrange
zrevrange,
/// https://redis.io/commands/zrevrangebylex
zrevrangebylex,
/// https://redis.io/commands/zrevrangebyscore
zrevrangebyscore,
/// https://redis.io/commands/zrevrank
zrevrank,
/// https://redis.io/commands/zscan
zscan,
/// https://redis.io/commands/zscore
zscore,
/// https://redis.io/commands/zunionstore
zunionstore,
/// Invalid command.
invalid
};
/** \brief Converts the command to a string.
* \ingroup any
* \param c The command to convert.
*/
char const* to_string(command c);
/** \brief Writes the command string to the stream.
* \ingroup any
* \param os Output stream.
* \param c Redis command
*/
std::ostream& operator<<(std::ostream& os, command c);
// Checks whether a command has push response.
bool has_push_response(command cmd);
} // redis
} // aedis
#endif // AEDIS_REDIS_COMMAND_HPP


@@ -1,245 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/redis/command.hpp>
namespace aedis {
namespace redis {
char const* to_string(command c)
{
static char const* table[] = {
"ACL",
"APPEND",
"ASKING",
"AUTH",
"BGREWRITEAOF",
"BGSAVE",
"BITCOUNT",
"BITFIELD",
"BITFIELD_RO",
"BITOP",
"BITPOS",
"BLPOP",
"BRPOP",
"BRPOPLPUSH",
"BZPOPMAX",
"BZPOPMIN",
"CLIENT",
"CLUSTER",
"COMMAND",
"CONFIG",
"DBSIZE",
"DEBUG",
"DECR",
"DECRBY",
"DEL",
"DISCARD",
"DUMP",
"ECHO",
"EVAL",
"EVALSHA",
"EXEC",
"EXISTS",
"EXPIRE",
"EXPIREAT",
"FLUSHALL",
"FLUSHDB",
"GEOADD",
"GEODIST",
"GEOHASH",
"GEOPOS",
"GEORADIUS",
"GEORADIUS_RO",
"GEORADIUSBYMEMBER",
"GEORADIUSBYMEMBER_RO",
"GET",
"GETBIT",
"GETRANGE",
"GETSET",
"HDEL",
"HELLO",
"HEXISTS",
"HGET",
"HGETALL",
"HINCRBY",
"HINCRBYFLOAT",
"HKEYS",
"HLEN",
"HMGET",
"HMSET",
"HSCAN",
"HSET",
"HSETNX",
"HSTRLEN",
"HVALS",
"INCR",
"INCRBY",
"INCRBYFLOAT",
"INFO",
"KEYS",
"LASTSAVE",
"LATENCY",
"LINDEX",
"LINSERT",
"LLEN",
"LOLWUT",
"LPOP",
"LPOS",
"LPUSH",
"LPUSHX",
"LRANGE",
"LREM",
"LSET",
"LTRIM",
"MEMORY",
"MGET",
"MIGRATE",
"MODULE",
"MONITOR",
"MOVE",
"MSET",
"MSETNX",
"MULTI",
"OBJECT",
"PERSIST",
"PEXPIRE",
"PEXPIREAT",
"PFADD",
"PFCOUNT",
"PFDEBUG",
"PFMERGE",
"PFSELFTEST",
"PING",
"POST",
"PSETEX",
"PSUBSCRIBE",
"PSYNC",
"PTTL",
"PUBLISH",
"PUBSUB",
"PUNSUBSCRIBE",
"RANDOMKEY",
"READONLY",
"READWRITE",
"RENAME",
"RENAMENX",
"REPLCONF",
"REPLICAOF",
"RESTORE",
"ROLE",
"RPOP",
"RPOPLPUSH",
"RPUSH",
"RPUSHX",
"SADD",
"SAVE",
"SCAN",
"SCARD",
"SCRIPT",
"SDIFF",
"SDIFFSTORE",
"SELECT",
"SET",
"SETBIT",
"SETEX",
"SETNX",
"SETRANGE",
"SHUTDOWN",
"SINTER",
"SINTERSTORE",
"SISMEMBER",
"SLAVEOF",
"SLOWLOG",
"SMEMBERS",
"SMOVE",
"SORT",
"SPOP",
"SRANDMEMBER",
"SREM",
"SSCAN",
"STRALGO",
"STRLEN",
"SUBSCRIBE",
"SUBSTR",
"SUNION",
"SUNIONSTORE",
"SWAPDB",
"SYNC",
"TIME",
"TOUCH",
"TTL",
"TYPE",
"UNLINK",
"QUIT",
"UNSUBSCRIBE",
"UNWATCH",
"WAIT",
"WATCH",
"XACK",
"XADD",
"XCLAIM",
"XDEL",
"XGROUP",
"XINFO",
"XLEN",
"XPENDING",
"XRANGE",
"XREAD",
"XREADGROUP",
"XREVRANGE",
"XSETID",
"XTRIM",
"ZADD",
"ZCARD",
"ZCOUNT",
"ZINCRBY",
"ZINTERSTORE",
"ZLEXCOUNT",
"ZPOPMAX",
"ZPOPMIN",
"ZRANGE",
"ZRANGEBYLEX",
"ZRANGEBYSCORE",
"ZRANK",
"ZREM",
"ZREMRANGEBYLEX",
"ZREMRANGEBYRANK",
"ZREMRANGEBYSCORE",
"ZREVRANGE",
"ZREVRANGEBYLEX",
"ZREVRANGEBYSCORE",
"ZREVRANK",
"ZSCAN",
"ZSCORE",
"ZUNIONSTORE",
"INVALID",
};
return table[static_cast<int>(c)];
}
std::ostream& operator<<(std::ostream& os, command c)
{
os << to_string(c);
return os;
}
bool has_push_response(command cmd)
{
switch (cmd) {
case command::subscribe:
case command::unsubscribe:
case command::psubscribe:
return true;
default:
return false;
}
}
} // redis
} // aedis


@@ -1,159 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_COMPOSE_HPP
#define AEDIS_RESP3_COMPOSE_HPP
#include <string>
#include <tuple>
#include <boost/hana.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/type.hpp>
namespace aedis {
namespace resp3 {
constexpr char separator[] = "\r\n";
/** @brief Adds a bulk to the request.
* @ingroup any
*
* This function is useful in serialization of your own data
* structures in a request. For example
*
* @code
* void to_bulk(std::string& to, mystruct const& obj)
* {
* auto const str = // Convert obj to a string.
* resp3::to_bulk(to, str);
* }
* @endcode
*
* See more in \ref requests-serialization.
*/
template <class Request>
void to_bulk(Request& to, boost::string_view data)
{
auto const str = std::to_string(data.size());
to += to_code(type::blob_string);
to.append(std::cbegin(str), std::cend(str));
to += separator;
to.append(std::cbegin(data), std::cend(data));
to += separator;
}
template <class Request, class T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, boost::string_view{s});
}
namespace detail {
template <class T>
struct add_bulk_impl {
template <class Request>
static void add(Request& to, T const& from)
{
using namespace aedis::resp3;
to_bulk(to, from);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
static void add(Request& to, std::pair<U, V> const& from)
{
using namespace aedis::resp3;
to_bulk(to, from.first);
to_bulk(to, from.second);
}
};
template <class ...Ts>
struct add_bulk_impl<boost::hana::tuple<Ts...>> {
template <class Request>
static void add(Request& to, boost::hana::tuple<Ts...> const& from)
{
using boost::hana::for_each;
// Fold expressions are a C++17 feature, so we use hana here.
//(resp3::add_bulk(*request_, args), ...);
for_each(from, [&](auto const& e) {
using namespace aedis::resp3;
to_bulk(to, e);
});
}
};
} // detail
/** @brief Adds a resp3 header to the request.
* @ingroup any
*
* See mystruct.hpp for an example.
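*
* For example, a sketch of composing a \c PING command by hand, along
* the lines of what serializer::push does internally:
*
* @code
* std::string request;
* add_header(request, type::array, 2);
* add_bulk(request, std::string{"PING"});
* add_bulk(request, std::string{"Hello"});
* @endcode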
*/
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
auto const str = std::to_string(size);
to += to_code(t);
to.append(std::cbegin(str), std::cend(str));
to += separator;
}
/* Adds a resp3 bulk to the request.
*
* This function adds \c data as a bulk string to the request \c to.
*/
template <class Request, class T>
void add_bulk(Request& to, T const& data)
{
detail::add_bulk_impl<T>::add(to, data);
}
template <class>
struct bulk_counter;
template <class>
struct bulk_counter {
static constexpr auto size = 1U;
};
template <class T, class U>
struct bulk_counter<std::pair<T, U>> {
static constexpr auto size = 2U;
};
template <class Request>
void add_blob(Request& to, boost::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
}
/** @brief Adds a separator to the request.
* @ingroup any
*
* See mystruct.hpp for an example.
*/
template <class Request>
void add_separator(Request& to)
{
to += separator;
}
} // resp3
} // aedis
#endif // AEDIS_RESP3_COMPOSE_HPP


@@ -1,54 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_ERROR_HPP
#define AEDIS_RESP3_ERROR_HPP
#include <boost/system/error_code.hpp>
namespace aedis {
namespace resp3 {
/** \brief RESP3 errors.
* \ingroup any
*/
enum class error
{
/// Invalid RESP3 type.
invalid_type = 1,
/// Can't parse the string as a number.
not_a_number,
/// Received less bytes than expected.
unexpected_read_size,
/// The maximum depth of a nested response was exceeded.
exceeeds_max_nested_depth,
/// Unexpected bool value.
unexpected_bool_value,
/// Expected field value is empty.
empty_field
};
/** \brief Creates an error_code object from an error.
* \ingroup any
*/
boost::system::error_code make_error_code(error e);
} // resp3
} // aedis
namespace std {
template<>
struct is_error_code_enum<::aedis::resp3::error> : std::true_type {};
} // std
#endif // AEDIS_RESP3_ERROR_HPP


@@ -1,49 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/assert.hpp>
#include <aedis/resp3/error.hpp>
namespace aedis {
namespace resp3 {
namespace detail {
struct error_category_impl : boost::system::error_category {
char const* name() const noexcept override
{
return "aedis.resp3";
}
std::string message(int ev) const override
{
switch(static_cast<error>(ev)) {
case error::invalid_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number.";
case error::unexpected_read_size: return "Unexpected read size.";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
case error::unexpected_bool_value: return "Unexpected bool value.";
case error::empty_field: return "Expected field value is empty.";
default: BOOST_ASSERT(false);
}
}
};
boost::system::error_category const& category()
{
static error_category_impl instance;
return instance;
}
} // detail
boost::system::error_code make_error_code(error e)
{
return boost::system::error_code{static_cast<int>(e), detail::category()};
}
} // resp3
} // aedis


@@ -1,78 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SENTINEL_COMMAND_HPP
#define AEDIS_SENTINEL_COMMAND_HPP
#include <ostream>
namespace aedis {
namespace sentinel {
/** \brief Sentinel commands.
* \ingroup any
*
* The full list of Sentinel commands can be found at
* https://redis.io/topics/sentinel.
*
* \remark This list was created with the help of the \c command
* command.
*/
enum class command {
/// https://redis.io/commands/acl
acl,
/// https://redis.io/commands/auth
auth,
/// https://redis.io/commands/client
client,
/// https://redis.io/commands/command
command,
/// https://redis.io/commands/hello
hello,
/// https://redis.io/commands/info
info,
/// https://redis.io/commands/ping
ping,
/// https://redis.io/commands/psubscribe
psubscribe,
/// https://redis.io/commands/publish
publish,
/// https://redis.io/commands/punsubscribe
punsubscribe,
/// https://redis.io/commands/role
role,
/// https://redis.io/topics/sentinel
sentinel,
/// https://redis.io/commands/shutdown
shutdown,
/// https://redis.io/commands/subscribe
subscribe,
/// https://redis.io/commands/unsubscribe
unsubscribe,
/// Unknown/invalid command.
invalid,
};
/** \brief Converts the command to a string.
* \ingroup any
* \param c The command to convert.
*/
char const* to_string(command c);
/** \brief Writes the command string to the stream.
* \ingroup any
* \param os Output stream.
* \param c Sentinel command
*/
std::ostream& operator<<(std::ostream& os, command c);
// Checks whether a command has a push response.
bool has_push_response(command cmd);
} // sentinel
} // aedis
#endif // AEDIS_SENTINEL_COMMAND_HPP


@@ -1,55 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/sentinel/command.hpp>
namespace aedis {
namespace sentinel {
char const* to_string(command c)
{
static char const* table[] = {
"ACL",
"AUTH",
"CLIENT",
"COMMAND",
"HELLO",
"INFO",
"PING",
"PSUBSCRIBE",
"PUBLISH",
"PUNSUBSCRIBE",
"ROLE",
"SENTINEL",
"SHUTDOWN",
"SUBSCRIBE",
"UNSUBSCRIBE",
};
return table[static_cast<int>(c)];
}
std::ostream& operator<<(std::ostream& os, command c)
{
os << to_string(c);
return os;
}
bool has_push_response(command cmd)
{
switch (cmd) {
case command::subscribe:
case command::unsubscribe:
case command::psubscribe:
return true;
default:
return false;
}
}
} // sentinel
} // aedis

benchmarks/benchmarks.md

@@ -0,0 +1,87 @@
# TCP echo server performance
This document benchmarks the performance of TCP echo servers I
implemented in different languages using different Redis clients. The
main motivations for choosing an echo server are
* It is simple to implement and does not require expert-level knowledge of most languages.
* It is I/O bound: echo servers have very low CPU consumption in general
and are therefore excellent for measuring how a program handles concurrent requests.
* It simulates a typical backend well as far as concurrency is concerned.
I also imposed some constraints on the implementations:
* They should be simple enough and not require writing too much code.
* They should favor standard idioms and avoid optimizations that require expert-level knowledge.
* They should avoid complex machinery such as connection and thread pools.
## No Redis
First I tested a pure TCP echo server, i.e. one that sends the messages
directly to the client without interacting with Redis. The result can
be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-direct.png)
The tests were performed with 1000 concurrent TCP connections on
localhost, where latency is 0.07ms on average on my machine. On higher
latency networks the difference among the libraries is expected to
decrease.
### Remarks
* I expected Libuv to have similar performance to Asio and Tokio.
* I did expect Node.js to come in a little behind given that it is
JavaScript code. Otherwise I expected it to have performance similar
to Libuv, since that is the framework behind it.
* Go's performance did not surprise me: decent and not that far behind Node.js.
The code used in the benchmarks can be found at
* [Asio](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/mzimbres/aedis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [this](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) Libuv example.
* [Tokio](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
## Echo over Redis
This is similar to the echo server described above but messages are
echoed by Redis and not by the echo-server itself, which acts
as a proxy between the client and the Redis server. The results
can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average; otherwise the same number of TCP connections was used
as in the previous example.
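For orientation, the core of the Aedis implementation is a per-client coroutine that forwards each line received over TCP to Redis as the argument of a PING command and writes the reply back to the client. The sketch below is a trimmed version of the echo_loop coroutine from the Aedis echo server example (examples/echo_server.cpp, linked below and shown later in this changeset); the type aliases follow that example and error handling is omitted.

#include <memory>
#include <string>
#include <tuple>
#include <boost/asio.hpp>
#include <aedis.hpp>

namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using connection = aedis::connection<tcp_socket>;

// Reads a line from the TCP client, sends it to Redis as the argument
// of a PING and writes the reply back to the client.
net::awaitable<void> echo_loop(tcp_socket socket, std::shared_ptr<connection> db)
{
   request req;
   std::tuple<std::string> resp;
   for (std::string buffer;;) {
      auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
      req.push("PING", buffer);                  // Redis echoes the payload back.
      co_await db->async_exec(req, adapt(resp)); // One request, one reply.
      co_await net::async_write(socket, net::buffer(std::get<0>(resp)));
      std::get<0>(resp).clear();
      req.clear();
      buffer.erase(0, n);
   }
}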
### Remarks
As the reader can see, the Libuv and Rust tests are not depicted
in the graph. The reasons are
* [redis-rs](https://github.com/redis-rs/redis-rs): This client
comes in so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant.
I don't know for sure why it is so slow; I suppose it has
something to do with its lack of proper
[pipelining](https://redis.io/docs/manual/pipelining/) support
(see the sketch after this list). In fact, the more TCP connections
I launch, the worse its performance gets.
* Libuv: I left it out because it would require too much work to
write and to make it perform well. More specifically, I would have
to use hiredis and implement pipelining support manually.
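To make the pipelining point concrete, the sketch below batches several commands into a single Aedis request: they are written to Redis in one go and the replies are read back in the order the commands were pushed, so many sessions can share one connection without paying a full round trip per command. It uses the same aliases as the sketch above; the command payloads are purely illustrative.

// Three commands are serialized into one request, written in a single
// batch, and their replies arrive in order, i.e. one round trip covers
// all of them.
net::awaitable<void> pipelined_pings(std::shared_ptr<connection> db)
{
   request req;
   req.push("PING", "msg1");
   req.push("PING", "msg2");
   req.push("PING", "msg3");

   std::tuple<std::string, std::string, std::string> resp;
   co_await db->async_exec(req, adapt(resp));
   // std::get<0>(resp) == "msg1", and so on for the other replies.
}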
The code used in the benchmarks can be found at
* [Aedis](https://github.com/mzimbres/aedis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
## Running the benchmarks
Run one of the echo-server programs in one terminal and the [echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp) in another.
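For example, assuming the binaries are named after their source files (the client takes the number of concurrent sessions and the number of messages per session as command-line arguments; see benchmarks/cpp/asio/echo_server_client.cpp):
$ ./echo_server                         # terminal 1
$ time ./echo_server_client 1000 1000   # terminal 2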

benchmarks/benchmarks.tex

@@ -0,0 +1,73 @@
\documentclass{article}
\usepackage{pgfplots}
\pgfrealjobname{echo}
\pgfplotsset{compat=newest}
\begin{document}
\beginpgfgraphicnamed{echo-f0}
% time ./echo_server_client 1000 5000
\begin{tikzpicture}[scale=1.0]
\begin{axis}[
y dir=reverse,
%xbar stacked,
xbar, xmin=0,
%hide x axis,
bar shift=0pt,
width=15cm, height=6cm, enlarge y limits=0.5,
title={TCP Echo Server Performance},
xlabel={Seconds},
symbolic y coords={Asio,Tokio,Libuv,Nodejs,Go},
ytick=data,
%bar width=1cm,
nodes near coords,
nodes near coords align={horizontal},
]
\addplot coordinates {
(31.1,Asio)
(30.7,Tokio)
(43.6,Libuv)
(74.2,Nodejs)
(81.0,Go)
};
\end{axis}
\end{tikzpicture}
\endpgfgraphicnamed
\beginpgfgraphicnamed{echo-f1}
%debian2[0]$ time ./echo_server_client 1000 1000
%Go (1): 1.000s
%C++ (1): 0.07s
\begin{tikzpicture}[scale=1.0]
\begin{axis}[
y dir=reverse,
%xbar stacked,
xbar, xmin=0,
%hide x axis,
bar shift=0pt,
width=12cm, height=6cm, enlarge y limits=0.5,
title={TCP Echo Server Performance (over Redis)},
xlabel={Seconds},
symbolic y coords={Aedis,Rust-rs,Libuv,Node-redis,Go-redis},
ytick=data,
%bar width=1cm,
nodes near coords,
nodes near coords align={horizontal},
]
\addplot coordinates {
(12.6,Aedis)
(28.8,Node-redis)
(352.4,Go-redis)
};
%\addplot coordinates {
% (30.0,Asio)
% (90.6,Rust-rs)
% (0.0,Libuv)
% (68.9,Nodejs)
% (0.0,Go)
%};
\end{axis}
\end{tikzpicture}
\endpgfgraphicnamed
\end{document}


@@ -0,0 +1,7 @@
This example was taken from
https://github.com/libuv/libuv/tree/v1.x/docs/code/tcp-echo-server
To build it, run for example:
$ gcc echo_server_direct.c -luv -O2 -o echo_server_direct


@@ -0,0 +1,87 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uv.h>
#define DEFAULT_PORT 55555
#define DEFAULT_BACKLOG 1024
uv_loop_t *loop;
struct sockaddr_in addr;
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
void free_write_req(uv_write_t *req) {
write_req_t *wr = (write_req_t*) req;
free(wr->buf.base);
free(wr);
}
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
buf->base = (char*) malloc(suggested_size);
buf->len = suggested_size;
}
void on_close(uv_handle_t* handle) {
free(handle);
}
void echo_write(uv_write_t *req, int status) {
if (status) {
fprintf(stderr, "Write error %s\n", uv_strerror(status));
}
free_write_req(req);
}
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
if (nread > 0) {
write_req_t *req = (write_req_t*) malloc(sizeof(write_req_t));
req->buf = uv_buf_init(buf->base, nread);
uv_write((uv_write_t*) req, client, &req->buf, 1, echo_write);
return;
}
if (nread < 0) {
if (nread != UV_EOF)
fprintf(stderr, "Read error %s\n", uv_err_name(nread));
uv_close((uv_handle_t*) client, on_close);
}
free(buf->base);
}
void on_new_connection(uv_stream_t *server, int status) {
if (status < 0) {
fprintf(stderr, "New connection error %s\n", uv_strerror(status));
// error!
return;
}
uv_tcp_t *client = (uv_tcp_t*) malloc(sizeof(uv_tcp_t));
uv_tcp_init(loop, client);
if (uv_accept(server, (uv_stream_t*) client) == 0) {
uv_read_start((uv_stream_t*) client, alloc_buffer, echo_read);
}
else {
uv_close((uv_handle_t*) client, on_close);
}
}
int main() {
loop = uv_default_loop();
uv_tcp_t server;
uv_tcp_init(loop, &server);
uv_ip4_addr("0.0.0.0", DEFAULT_PORT, &addr);
uv_tcp_bind(&server, (const struct sockaddr*)&addr, 0);
int r = uv_listen((uv_stream_t*) &server, DEFAULT_BACKLOG, on_new_connection);
if (r) {
fprintf(stderr, "Listen error %s\n", uv_strerror(r));
return 1;
}
return uv_run(loop, UV_RUN_DEFAULT);
}


@@ -0,0 +1,64 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio.hpp>
namespace net = boost::asio;
using net::ip::tcp;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
net::awaitable<void>
example(boost::asio::ip::tcp::endpoint ep, std::string msg, int n)
{
try {
auto ex = co_await net::this_coro::executor;
tcp_socket socket{ex};
co_await socket.async_connect(ep);
std::string buffer;
auto dbuffer = net::dynamic_buffer(buffer);
for (int i = 0; i < n; ++i) {
co_await net::async_write(socket, net::buffer(msg));
auto n = co_await net::async_read_until(socket, dbuffer, "\n");
//std::printf("> %s", buffer.data());
dbuffer.consume(n);
}
//std::printf("Ok: %s", msg.data());
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
int main(int argc, char* argv[])
{
try {
int sessions = 1;
int msgs = 1;
if (argc == 3) {
sessions = std::stoi(argv[1]);
msgs = std::stoi(argv[2]);
}
net::io_context ioc;
tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "55555");
auto ep = *std::begin(res);
for (int i = 0; i < sessions; ++i)
net::co_spawn(ioc, example(ep, "Some message\n", msgs), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}


@@ -0,0 +1,58 @@
//
// echo_server.cpp
// ~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2022 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <cstdio>
#include <boost/asio.hpp>
namespace net = boost::asio;
namespace this_coro = net::this_coro;
using net::ip::tcp;
using net::detached;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
constexpr net::use_awaitable_t<executor_type> use_awaitable;
awaitable_type echo(tcp_socket socket)
{
try {
char data[1024];
for (;;) {
std::size_t n = co_await socket.async_read_some(net::buffer(data), use_awaitable);
co_await async_write(socket, net::buffer(data, n), use_awaitable);
}
} catch (std::exception const& e) {
//std::printf("echo Exception: %s\n", e.what());
}
}
awaitable_type listener()
{
auto ex = co_await this_coro::executor;
tcp_acceptor acceptor(ex, {tcp::v4(), 55555});
for (;;) {
tcp_socket socket = co_await acceptor.async_accept(use_awaitable);
co_spawn(ex, echo(std::move(socket)), detached);
}
}
int main()
{
try {
net::io_context io_context{BOOST_ASIO_CONCURRENCY_HINT_UNSAFE_IO};
co_spawn(io_context, listener(), detached);
io_context.run();
} catch (std::exception const& e) {
std::printf("Exception: %s\n", e.what());
}
}


@@ -0,0 +1,41 @@
package main
import (
"bufio"
"fmt"
"io"
"net"
"os"
)
func echo(conn net.Conn) {
r := bufio.NewReader(conn)
for {
line, err := r.ReadBytes(byte('\n'))
switch err {
case nil:
break
case io.EOF:
default:
fmt.Println("ERROR", err)
}
conn.Write(line)
}
}
func main() {
l, err := net.Listen("tcp", "0.0.0.0:55555")
if err != nil {
fmt.Println("ERROR", err)
os.Exit(1)
}
for {
conn, err := l.Accept()
if err != nil {
fmt.Println("ERROR", err)
continue
}
go echo(conn)
}
}


@@ -0,0 +1,54 @@
package main
import (
"context"
"github.com/go-redis/redis/v8"
"bufio"
"fmt"
"io"
"net"
"os"
)
var ctx = context.Background()
var rdb = redis.NewClient(&redis.Options{Addr: "db.occase.de:6379", Password: "", DB: 0,})
func echo(conn net.Conn) {
r := bufio.NewReader(conn)
for {
line, err := r.ReadBytes(byte('\n'))
switch err {
case nil:
break
case io.EOF:
default:
fmt.Println("ERROR", err)
}
err2 := rdb.Ping(ctx).Err()
if err2 != nil {
fmt.Println("ERROR", err2)
panic(err2)
}
conn.Write(line)
}
}
func main() {
l, err := net.Listen("tcp", "0.0.0.0:55555")
if err != nil {
fmt.Println("ERROR", err)
os.Exit(1)
}
for {
conn, err := l.Accept()
if err != nil {
fmt.Println("ERROR", err)
continue
}
go echo(conn)
}
}


@@ -0,0 +1,105 @@
import java.nio.*;
import java.nio.channels.*;
import java.net.*;
import java.util.*;
import java.io.IOException;
public class TcpEchoServer {
public static int DEFAULT_PORT = 55555;
public static void main(String[] args) {
int port;
try {
port = Integer.parseInt(args[0]);
}
catch (Exception ex) {
port = DEFAULT_PORT;
}
//System.out.println("Listening for connections on port " + port);
ServerSocketChannel serverChannel;
Selector selector;
try {
serverChannel = ServerSocketChannel.open( );
ServerSocket ss = serverChannel.socket( );
InetSocketAddress address = new InetSocketAddress(port);
ss.bind(address);
serverChannel.configureBlocking(false);
selector = Selector.open( );
serverChannel.register(selector, SelectionKey.OP_ACCEPT);
}
catch (IOException ex) {
ex.printStackTrace( );
return;
}
while (true) {
try {
selector.select( );
}
catch (IOException ex) {
ex.printStackTrace( );
break;
}
Set readyKeys = selector.selectedKeys( );
Iterator iterator = readyKeys.iterator( );
while (iterator.hasNext( )) {
SelectionKey key = (SelectionKey) iterator.next( );
iterator.remove( );
try {
if (key.isAcceptable( )) {
ServerSocketChannel server = (ServerSocketChannel ) key.channel( );
SocketChannel client = server.accept( );
//System.out.println("Accepted connection from " + client);
client.configureBlocking(false);
SelectionKey clientKey = client.register(
selector, SelectionKey.OP_WRITE | SelectionKey.OP_READ);
ByteBuffer buffer = ByteBuffer.allocate(100);
clientKey.attach(buffer);
//System.out.println(buffer.toString());
}
if (key.isReadable( )) {
SocketChannel client = (SocketChannel) key.channel( );
ByteBuffer output = (ByteBuffer) key.attachment( );
client.read(output);
}
if (key.isWritable( )) {
SocketChannel client = (SocketChannel) key.channel( );
ByteBuffer output = (ByteBuffer) key.attachment( );
output.flip( );
client.write(output);
output.compact( );
}
}
catch (IOException ex) {
key.cancel( );
try {
key.channel().close();
}
catch (IOException cex) {}
}
}
}
}
}


@@ -0,0 +1,7 @@
var net = require('net');
net.createServer(function(socket){
socket.on('data', function(data){
socket.write(data.toString())
});
}).listen(55555);


@@ -0,0 +1,2 @@
{
}


@@ -0,0 +1,13 @@
import { createClient } from 'redis';
import * as net from 'net';
const client = createClient({url: 'redis://db.occase.de:6379' });
client.on('error', (err) => console.log('Redis Client Error', err));
await client.connect();
net.createServer(function(socket){
socket.on('data', async function(data) {
const value = await client.ping(data.toString());
socket.write(data)
});
}).listen(55555);


@@ -0,0 +1,169 @@
{
"name": "echo_server_over_redis",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"redis": "^4.2.0"
}
},
"node_modules/@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
"integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
"dependencies": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/redis": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
"integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
"dependencies": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.2.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
},
"dependencies": {
"@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"requires": {}
},
"@redis/client": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
"integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
"requires": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
}
},
"@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"requires": {}
},
"@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"requires": {}
},
"@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"requires": {}
},
"@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"requires": {}
},
"cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw=="
},
"generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg=="
},
"redis": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
"integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
"requires": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.2.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
}
}


@@ -0,0 +1,6 @@
{
"type": "module",
"dependencies": {
"redis": "^4.2.0"
}
}


@@ -0,0 +1,309 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bytes"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "echo_server_direct"
version = "0.1.0"
dependencies = [
"tokio",
]
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "libc"
version = "0.2.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
[[package]]
name = "lock_api"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "mio"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf"
dependencies = [
"libc",
"log",
"wasi",
"windows-sys",
]
[[package]]
name = "num_cpus"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "once_cell"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "pin-project-lite"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
[[package]]
name = "proc-macro2"
version = "1.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f53dc8cf16a769a6f677e09e7ff2cd4be1ea0f48754aac39520536962011de0d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42"
dependencies = [
"bitflags",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "signal-hook-registry"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
dependencies = [
"libc",
]
[[package]]
name = "smallvec"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
[[package]]
name = "socket2"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "syn"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tokio"
version = "1.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57aec3cfa4c296db7255446efb4928a6be304b431a806216105542a67b6ca82e"
dependencies = [
"autocfg",
"bytes",
"libc",
"memchr",
"mio",
"num_cpus",
"once_cell",
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2",
"tokio-macros",
"winapi",
]
[[package]]
name = "tokio-macros"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "unicode-ident"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"


@@ -0,0 +1,9 @@
[package]
name = "echo_server_direct"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1.0", features = ["full"] }


@@ -0,0 +1,35 @@
use tokio::net::TcpListener;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let listener = TcpListener::bind("127.0.0.1:55555").await?;
loop {
let (mut socket, _) = listener.accept().await?;
tokio::spawn(async move {
let mut buf = [0; 1024];
// In a loop, read data from the socket and write the data back.
loop {
let n = match socket.read(&mut buf).await {
// socket closed
Ok(n) if n == 0 => return,
Ok(n) => n,
Err(e) => {
eprintln!("failed to read from socket; err = {:?}", e);
return;
}
};
// Write the data back
if let Err(e) = socket.write_all(&buf[0..n]).await {
eprintln!("failed to write to socket; err = {:?}", e);
return;
}
}
});
}
}


@@ -0,0 +1,11 @@
[package]
name = "echo_server_over_redis"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1.16.1", features = ["full"] }
redis = { version = "0.21.5", features = ["tokio-comp"] }
futures = "0.3"


@@ -0,0 +1,44 @@
use tokio::net::TcpListener;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use std::sync::{Arc};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let listener = TcpListener::bind("127.0.0.1:55555").await?;
let client = redis::Client::open("redis://db.occase.de/").unwrap();
let con = Arc::new(Mutex::new(client.get_async_connection().await?));
loop {
let conn = Arc::clone(&con);
let (mut socket, _) = listener.accept().await?;
tokio::spawn(async move {
let mut buf = [0; 1024];
loop {
let n = match socket.read(&mut buf).await {
Ok(n) if n == 0 => return,
Ok(n) => n,
Err(e) => {
eprintln!("failed to read from socket; err = {:?}", e);
return;
}
};
let mut local_conn = conn.lock().await;
let result =
redis::cmd("PING")
.arg(&buf[0..n])
.query_async::<redis::aio::Connection, String>(&mut local_conn).await.unwrap();
if let Err(e) = socket.write_all(result.as_bytes()).await {
eprintln!("failed to write to socket; err = {:?}", e);
return;
}
}
});
}
}


@@ -1,9 +1,10 @@
AC_PREREQ([2.69])
AC_INIT([Aedis], [0.1.2], [mzimbres@gmail.com])
AC_INIT([Aedis], [1.0.0], [mzimbres@gmail.com])
AC_CONFIG_MACRO_DIR([m4])
#AC_CONFIG_SRCDIR([src/aedis.cpp])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_SRCDIR(include/aedis.hpp)
AM_INIT_AUTOMAKE([-Wall foreign])
AC_LANG(C++)
# Checks for programs.
AC_PROG_CXX
@@ -18,10 +19,32 @@ AC_CHECK_HEADER_STDBOOL
AC_TYPE_UINT64_T
AC_CHECK_TYPES([ptrdiff_t])
AX_CXX_COMPILE_STDCXX(14, , mandatory)
AX_CXX_COMPILE_STDCXX(20, , optional)
# This check has been stolen from Asio
AC_MSG_CHECKING([whether coroutines are enabled])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM(
[[#if defined(__clang__)]]
[[# if (__cplusplus >= 201703) && (__cpp_coroutines >= 201703)]]
[[# if __has_include(<experimental/coroutine>)]]
[[# define AEDIS_HAS_CO_AWAIT 1]]
[[# endif]]
[[# endif]]
[[#elif defined(__GNUC__)]]
[[# if (__cplusplus >= 201709) && (__cpp_impl_coroutine >= 201902)]]
[[# if __has_include(<coroutine>)]]
[[# define AEDIS_HAS_CO_AWAIT 1]]
[[# endif]]
[[# endif]]
[[#endif]]
[[#ifndef AEDIS_HAS_CO_AWAIT]]
[[# error coroutines not available]]
[[#endif]])],
[AC_MSG_RESULT([yes])
HAVE_COROUTINES=yes;],
[AC_MSG_RESULT([no])
HAVE_COROUTINES=no;])
AM_CONDITIONAL(HAVE_CXX20,[test x$HAVE_CXX20 == x1])
AM_CONDITIONAL(HAVE_COROUTINES,test x$HAVE_COROUTINES = xyes)
AC_CONFIG_FILES([Makefile doc/Doxyfile])
AC_CONFIG_FILES([Makefile include/Makefile doc/Doxyfile])
AC_OUTPUT


@@ -44,7 +44,7 @@ PROJECT_NUMBER = "@PACKAGE_VERSION@"
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "Low level Redis client library"
PROJECT_BRIEF = "High level Redis client"
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -823,7 +823,7 @@ WARN_LOGFILE =
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = aedis examples
INPUT = include benchmarks/benchmarks.md CHANGELOG.md examples
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses


@@ -26,5 +26,5 @@ div.contents {
code
{
background-color:#f0e9ce;
background-color:#fffbeb;
}


@@ -1,132 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <map>
#include <set>
#include <vector>
#include <iostream>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
#include "print.hpp"
namespace net = boost::asio;
using aedis::resp3::node;
using aedis::adapter::adapt;
using aedis::adapter::adapter_t;
using aedis::redis::command;
using aedis::generic::client;
using client_type = client<net::ip::tcp::socket, command>;
// Response types we use in this example.
using T0 = std::vector<node<std::string>>;
using T1 = std::set<std::string>;
using T2 = std::map<std::string, std::string>;
// Some containers we will store in Redis as example.
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
std::set<std::string> set
{"one", "two", "three", "four"};
std::map<std::string, std::string> map
{ {"key1", "value1"}
, {"key2", "value2"}
, {"key3", "value3"}
};
struct receiver {
public:
receiver(client_type& db)
: adapter0_{adapt(resp0_)}
, adapter1_{adapt(resp1_)}
, adapter2_{adapt(resp2_)}
, db_{&db} {}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
switch (cmd) {
case command::lrange: adapter0_(nd, ec); break;
case command::smembers: adapter1_(nd, ec); break;
case command::hgetall: adapter2_(nd, ec); break;
default:;
}
}
void on_read(command cmd, std::size_t n)
{
std::cout << "on_read: " << cmd << ", " << n << "\n";
switch (cmd) {
case command::hello:
{
db_->send_range(command::rpush, "rpush-key", vec);
db_->send_range(command::sadd, "sadd-key", set);
db_->send_range(command::hset, "hset-key", map);
} break;
case command::rpush:
db_->send(command::lrange, "rpush-key", 0, -1);
break;
case command::sadd:
db_->send(command::smembers, "sadd-key");
break;
case command::hset:
db_->send(command::hgetall, "hset-key");
db_->send(command::quit);
break;
case command::lrange:
print_and_clear_aggregate(resp0_);
break;
case command::smembers:
print_and_clear(resp1_);
break;
case command::hgetall:
print_and_clear(resp2_);
break;
default:;
}
}
void on_write(std::size_t n)
{
std::cout << "on_write: " << n << std::endl;
}
void on_push(std::size_t n) { }
private:
T0 resp0_;
T1 resp1_;
T2 resp2_;
adapter_t<T0> adapter0_;
adapter_t<T1> adapter1_;
adapter_t<T2> adapter2_;
client_type* db_;
};
int main()
{
net::io_context ioc;
client_type db{ioc.get_executor()};
auto recv = std::make_shared<receiver>(db);
db.set_receiver(recv);
db.async_run("127.0.0.1", "6379",
[](auto ec){ std::cout << ec.message() << std::endl;});
ioc.run();
}


@@ -4,97 +4,66 @@
* accompanying file LICENSE.txt)
*/
#include <vector>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include "unistd.h"
#include "print.hpp"
#include <boost/asio/signal_set.hpp>
#include <aedis/aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "user_session.hpp"
#if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::adapter::adapt;
using aedis::adapter::adapter_t;
using aedis::redis::command;
using aedis::generic::client;
using aedis::user_session;
using aedis::user_session_base;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using connection = aedis::connection<tcp_socket>;
using client_type = client<net::ip::tcp::socket, command>;
using response_type = std::vector<node<std::string>>;
// Chat over redis pubsub. To test, run this program from different
// terminals and type messages to stdin. Use
//
// $ redis-cli monitor
//
// to monitor the message traffic.
class receiver {
public:
receiver(std::shared_ptr<client_type> db)
: adapter_{adapt(resp_)}
, db_{db}
{}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
adapter_(nd, ec);
}
void on_read(command cmd, std::size_t)
{
switch (cmd) {
case command::hello:
db_->send(command::subscribe, "channel");
break;
case command::incr:
std::cout << "Messages so far: " << resp_.front().value << std::endl;
break;
default:;
}
resp_.clear();
}
void on_write(std::size_t n)
{
std::cout << "Number of bytes written: " << n << std::endl;
}
void on_push(std::size_t)
{
for (auto& session: sessions_)
session->deliver(resp_.at(3).value);
resp_.clear();
}
auto add(std::shared_ptr<user_session_base> session)
{ sessions_.push_back(session); }
private:
response_type resp_;
adapter_t<response_type> adapter_;
std::shared_ptr<client_type> db_;
std::vector<std::shared_ptr<user_session_base>> sessions_;
};
net::awaitable<void>
listener(
std::shared_ptr<net::ip::tcp::acceptor> acc,
std::shared_ptr<client_type> db,
std::shared_ptr<receiver> recv)
// Receives messages from other users.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
{
auto on_user_msg = [db](std::string const& msg)
{
db->send(command::publish, "channel", msg);
db->send(command::incr, "message-counter");
};
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
// Subscribes to the channels when a new connection is established.
net::awaitable<void> event_receiver(std::shared_ptr<connection> db)
{
request req;
req.push("SUBSCRIBE", "chat-channel");
for (;;) {
auto socket = co_await acc->async_accept(net::use_awaitable);
auto session = std::make_shared<user_session>(std::move(socket));
session->start(on_user_msg);
recv->add(session);
auto ev = co_await db->async_receive_event();
if (ev == connection::event::hello)
co_await db->async_exec(req);
}
}
// Publishes messages to other users.
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> db)
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(in, net::dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "chat-channel", msg);
co_await db->async_exec(req);
msg.erase(0, n);
}
}
@@ -102,23 +71,26 @@ int main()
{
try {
net::io_context ioc{1};
stream_descriptor in{ioc, ::dup(STDIN_FILENO)};
auto db = std::make_shared<client_type>(ioc.get_executor());
auto recv = std::make_shared<receiver>(db);
db->set_receiver(recv);
auto db = std::make_shared<connection>(ioc);
db->get_config().enable_events = true;
db->get_config().enable_reconnect = true;
db->async_run("127.0.0.1", "6379",
[](auto ec){ std::cout << ec.message() << std::endl;});
co_spawn(ioc, publisher(in, db), net::detached);
co_spawn(ioc, push_receiver(db), net::detached);
co_spawn(ioc, event_receiver(db), net::detached);
db->async_run(net::detached);
auto endpoint = net::ip::tcp::endpoint{net::ip::tcp::v4(), 55555};
auto acc = std::make_shared<net::ip::tcp::acceptor>(ioc.get_executor(), endpoint);
co_spawn(ioc, listener(acc, db, recv), net::detached);
net::signal_set signals(ioc.get_executor(), SIGINT, SIGTERM);
signals.async_wait([&] (auto, int) { ioc.stop(); });
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
int main() {}
#endif // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)

examples/containers.cpp

@@ -0,0 +1,59 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <map>
#include <vector>
#include <iostream>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using boost::optional;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::connection<>;
int main()
{
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
std::map<std::string, int> map
{{"key1", 10}, {"key2", 20}, {"key3", 30}};
request req;
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
req.push("QUIT");
std::tuple<
aedis::ignore, // rpush
aedis::ignore, // hset
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
std::tuple<optional<std::vector<int>>, optional<std::map<std::string, int>>>, // exec
aedis::ignore // quit
> resp;
net::io_context ioc;
connection db{ioc};
db.async_run(req, adapt(resp), [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
print(std::get<0>(std::get<5>(resp)).value());
print(std::get<1>(std::get<5>(resp)).value());
}


@@ -1,63 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::node;
using aedis::redis::command;
using aedis::generic::make_serializer;
using net::ip::tcp;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
net::awaitable<void> example()
{
auto ex = co_await net::this_coro::executor;
tcp::resolver resv{ex};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp_socket socket{ex};
co_await socket.async_connect(*std::begin(res));
std::string request, buffer;
auto sr = make_serializer(request);
sr.push(command::hello, 3);
sr.push(command::ping, "Some message.");
sr.push(command::quit);
co_await net::async_write(socket, net::buffer(request));
auto adapter = [](node<boost::string_view> const& nd, boost::system::error_code&)
{
std::cout << nd << std::endl;
};
auto dbuffer = net::dynamic_buffer(buffer);
co_await resp3::async_read(socket, dbuffer); // hello
co_await resp3::async_read(socket, dbuffer, adapter);
co_await resp3::async_read(socket, dbuffer); // quit
}
int main()
{
try {
net::io_context ioc;
net::co_spawn(ioc, example(), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}


@@ -4,136 +4,57 @@
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <queue>
#include <vector>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include <boost/asio/signal_set.hpp>
#include <aedis/aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#include "user_session.hpp"
namespace net = boost::asio;
using aedis::resp3::node;
using aedis::adapter::adapt;
using aedis::adapter::adapter_t;
using aedis::redis::command;
using aedis::generic::client;
using aedis::user_session;
using aedis::user_session_base;
using client_type = client<net::ip::tcp::socket, command>;
using response_type = std::vector<node<std::string>>;
using aedis::adapt;
using aedis::resp3::request;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
using connection = aedis::connection<tcp_socket>;
class receiver {
public:
receiver(std::shared_ptr<client_type> db)
: adapter_{adapt(resp_)}
, db_{db}
{}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
adapter_(nd, ec);
}
void on_read(command cmd, std::size_t n)
{
std::cout << "on_read: " << cmd << " " << n << std::endl;
switch (cmd) {
case command::ping:
if (resp_.front().value != "PONG") {
sessions_.front()->deliver(resp_.front().value);
sessions_.pop();
}
break;
case command::incr:
std::cout << "Echos so far: " << resp_.front().value << std::endl;
break;
default: /* Ignore */;
}
resp_.clear();
}
void on_write(std::size_t n)
{
std::cout << "Number of bytes written: " << n << std::endl;
}
void on_push(std::size_t n) { }
void add_user_session(std::shared_ptr<user_session_base> session)
{ sessions_.push(session); }
private:
response_type resp_;
adapter_t<response_type> adapter_;
std::shared_ptr<client_type> db_;
std::queue<std::shared_ptr<user_session_base>> sessions_;
};
net::awaitable<void>
run_with_reconnect(std::shared_ptr<client_type> db)
awaitable_type echo_loop(tcp_socket socket, std::shared_ptr<connection> db)
{
auto ex = co_await net::this_coro::executor;
request req;
std::tuple<std::string> resp;
boost::asio::steady_timer timer{ex};
for (boost::system::error_code ec;;) {
co_await db->async_run("127.0.0.1", "6379",
net::redirect_error(net::use_awaitable, ec));
timer.expires_after(std::chrono::seconds{2});
co_await timer.async_wait(net::redirect_error(net::use_awaitable, ec));
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(resp));
co_await net::async_write(socket, net::buffer(std::get<0>(resp)));
std::get<0>(resp).clear();
req.clear();
buffer.erase(0, n);
}
}
net::awaitable<void>
listener(
std::shared_ptr<net::ip::tcp::acceptor> acc,
std::shared_ptr<client_type> db,
std::shared_ptr<receiver> recv)
awaitable_type listener()
{
for (;;) {
auto socket = co_await acc->async_accept(net::use_awaitable);
auto session = std::make_shared<user_session>(std::move(socket));
auto ex = co_await net::this_coro::executor;
auto db = std::make_shared<connection>(ex);
db->async_run(net::detached);
auto on_user_msg = [db, recv, session](std::string const& msg)
{
if (!msg.empty()) {
db->send(command::ping, msg);
db->send(command::incr, "echo-counter");
recv->add_user_session(session);
}
};
session->start(on_user_msg);
}
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
net::co_spawn(ex, echo_loop(co_await acc.async_accept(), db), net::detached);
}
int main()
{
try {
net::io_context ioc;
auto db = std::make_shared<client_type>(ioc.get_executor());
auto recv = std::make_shared<receiver>(db);
db->set_receiver(recv);
co_spawn(ioc, run_with_reconnect(db), net::detached);
auto endpoint = net::ip::tcp::endpoint{net::ip::tcp::v4(), 55555};
auto acc = std::make_shared<net::ip::tcp::acceptor>(ioc.get_executor(), endpoint);
co_spawn(ioc, listener(acc, db, recv), net::detached);
net::signal_set signals(ioc.get_executor(), SIGINT, SIGTERM);
signals.async_wait([&] (auto, int) { ioc.stop(); });
net::io_context ioc{BOOST_ASIO_CONCURRENCY_HINT_UNSAFE_IO};
co_spawn(ioc, listener(), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;

examples/intro.cpp

@@ -0,0 +1,38 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::connection<>;
int main()
{
net::io_context ioc;
connection db{ioc};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
db.async_run(req, adapt(resp), [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
std::cout << std::get<0>(resp) << std::endl;
}


@@ -1,76 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::node;
using aedis::adapter::adapter_t;
using aedis::adapter::adapt;
using aedis::redis::command;
using aedis::generic::client;
using client_type = client<net::ip::tcp::socket, command>;
using response_type = node<std::string>;
struct receiver {
public:
receiver(client_type& db)
: adapter_{adapt(resp_)}
, db_{&db} {}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
adapter_(nd, ec);
}
void on_read(command cmd, std::size_t)
{
switch (cmd) {
case command::hello:
db_->send(command::ping, "O rato roeu a roupa do rei de Roma");
db_->send(command::incr, "intro-counter");
db_->send(command::set, "intro-key", "Três pratos de trigo para três tigres");
db_->send(command::get, "intro-key");
db_->send(command::quit);
break;
default:
std::cout << resp_.value << std::endl;
}
}
void on_write(std::size_t n)
{
std::cout << "Number of bytes written: " << n << std::endl;
}
void on_push(std::size_t n) { }
private:
response_type resp_;
adapter_t<response_type> adapter_;
client_type* db_;
};
int main()
{
net::io_context ioc;
client_type db(ioc.get_executor());
auto recv = std::make_shared<receiver>(db);
db.set_receiver(recv);
db.async_run("127.0.0.1", "6379",
[](auto ec){ std::cout << ec.message() << std::endl;});
ioc.run();
}


@@ -4,54 +4,43 @@
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <iostream>
#include <thread>
#include <boost/asio/io_context.hpp>
#include <aedis.hpp>
#include <boost/asio/connect.hpp>
#include <aedis/aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::redis::command;
using aedis::generic::make_serializer;
using aedis::adapter::adapt;
using net::dynamic_buffer;
using net::ip::tcp;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::sync<aedis::connection<>>;
int main()
{
try {
net::io_context ioc;
tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp::socket socket{ioc};
net::connect(socket, res);
net::io_context ioc{1};
auto work = net::make_work_guard(ioc);
std::thread t1{[&]() { ioc.run(); }};
// Creates the request and writes to the socket.
std::string buffer;
auto sr = make_serializer(buffer);
sr.push(command::hello, 3);
sr.push(command::ping);
sr.push(command::quit);
net::write(socket, net::buffer(buffer));
buffer.clear();
connection conn{work.get_executor()};
std::thread t2{[&]() { boost::system::error_code ec; conn.run(ec); }};
// Responses
std::string resp;
request req;
req.push("PING");
req.push("QUIT");
// Reads the responses to all commands in the request.
auto dbuffer = dynamic_buffer(buffer);
resp3::read(socket, dbuffer);
resp3::read(socket, dbuffer, adapt(resp));
resp3::read(socket, dbuffer);
std::tuple<std::string, aedis::ignore> resp;
conn.exec(req, adapt(resp));
std::cout << "Response: " << std::get<0>(resp) << std::endl;
std::cout << "Ping: " << resp << std::endl;
work.reset();
t1.join();
t2.join();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
exit(EXIT_FAILURE);
}
}


@@ -1,53 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iterator>
#include <cstdint>
#include <iostream>
#include <algorithm>
#include <aedis/aedis.hpp>
// Arbitrary struct to de/serialize.
struct mystruct {
std::int32_t x;
std::string y;
};
// Serializes mystruct
void to_bulk(std::string& to, mystruct const& obj)
{
using aedis::resp3::type;
using aedis::resp3::add_header;
using aedis::resp3::add_separator;
auto const size = sizeof obj.x + obj.y.size();
add_header(to, type::blob_string, size);
auto const* p = reinterpret_cast<char const*>(&obj.x);
std::copy(p, p + sizeof obj.x, std::back_inserter(to));
std::copy(std::cbegin(obj.y), std::cend(obj.y), std::back_inserter(to));
add_separator(to);
}
// Deserialize the struct.
void from_string(mystruct& obj, boost::string_view sv, boost::system::error_code& ec)
{
char* p = reinterpret_cast<char*>(&obj.x);
std::copy(std::cbegin(sv), std::cbegin(sv) + sizeof obj.x, p);
std::copy(std::cbegin(sv) + sizeof obj.x, std::cend(sv), std::back_inserter(obj.y));
}
std::ostream& operator<<(std::ostream& os, mystruct const& obj)
{
os << "x: " << obj.x << ", y: " << obj.y;
return os;
}
bool operator<(mystruct const& a, mystruct const& b)
{
return std::tie(a.x, a.y) < std::tie(b.x, b.y);
}


@@ -17,12 +17,8 @@
namespace net = boost::asio;
using aedis::resp3::node;
using aedis::adapter::adapt;
using aedis::adapter::adapter_t;
using aedis::redis::command;
using aedis::generic::client;
void print_and_clear_aggregate(std::vector<aedis::resp3::node<std::string>>& v)
void print_aggr(std::vector<aedis::resp3::node<std::string>>& v)
{
auto const m = aedis::resp3::element_multiplicity(v.front().data_type);
for (auto i = 0lu; i < m * v.front().aggregate_size; ++i)
@@ -31,17 +27,36 @@ void print_and_clear_aggregate(std::vector<aedis::resp3::node<std::string>>& v)
v.clear();
}
void print_and_clear(std::set<std::string>& cont)
template <class T>
void print(std::vector<T> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
cont.clear();
}
void print_and_clear(std::map<std::string, std::string>& cont)
template <class T>
void print(std::set<T> const& cont)
{
for (auto const& e: cont) std::cout << e << "\n";
}
template <class T, class U>
void print(std::map<T, U> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
cont.clear();
}
void print(std::string const& e)
{
std::cout << e << std::endl;
}
void print_push(std::vector<aedis::resp3::node<std::string>>& resp)
{
std::cout
<< "Push type: " << resp.at(1).value << "\n"
<< "Channel: " << resp.at(2).value << "\n"
<< "Message: " << resp.at(3).value << "\n"
<< std::endl;
}

examples/serialization.cpp Normal file

@@ -0,0 +1,109 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>
#include <iterator>
#include <string>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using namespace boost::json;
struct user {
std::string name;
std::string age;
std::string country;
};
void tag_invoke(value_from_tag, value& jv, user const& u)
{
jv =
{ {"name", u.name}
, {"age", u.age}
, {"country", u.country}
};
}
template<class T>
void extract(object const& obj, T& t, boost::string_view key)
{
t = value_to<T>(obj.at(key));
}
user tag_invoke(value_to_tag<user>, value const& jv)
{
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
}
// Serializes
void to_bulk(std::string& to, user const& u)
{
aedis::resp3::to_bulk(to, serialize(value_from(u)));
}
// Deserializes
void from_bulk(user& u, boost::string_view sv, boost::system::error_code&)
{
value jv = parse(sv);
u = value_to<user>(jv);
}
std::ostream& operator<<(std::ostream& os, user const& u)
{
os << "Name: " << u.name << "\n"
<< "Age: " << u.age << "\n"
<< "Country: " << u.country;
return os;
}
bool operator<(user const& a, user const& b)
{
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
int main()
{
net::io_context ioc;
connection db{ioc};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
request req;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
db.async_run(req, adapt(resp), [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
// Print
print(std::get<2>(resp));
}


@@ -1,68 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iterator>
#include <cstdint>
#include <iostream>
#include <algorithm>
#include <boost/asio/connect.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
#include "mystruct.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::type;
using aedis::redis::command;
using aedis::generic::make_serializer;
using aedis::adapter::adapt;
using net::ip::tcp;
int main()
{
try {
net::io_context ioc;
tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp::socket socket{ioc};
net::connect(socket, res);
// This struct will be serialized and stored on Redis.
mystruct in{42, "Some string"};
// Creates and sends a request to redis.
std::string request;
auto sr = make_serializer(request);
sr.push(command::hello, 3);
sr.push(command::set, "key", in);
sr.push(command::get, "key");
sr.push(command::quit);
net::write(socket, net::buffer(request));
// Object to store the response.
mystruct out;
// Reads the responses to all commands in the request.
std::string buffer;
auto dbuf = net::dynamic_buffer(buffer);
resp3::read(socket, dbuf); // hello
resp3::read(socket, dbuf); // set
resp3::read(socket, dbuf, adapt(out)); // get
resp3::read(socket, dbuf); // quit
// Should be equal to what has been sent above.
std::cout << out << std::endl;
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
exit(EXIT_FAILURE);
}
}


@@ -4,62 +4,81 @@
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include "print.hpp"
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <aedis/aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::redis::command;
using aedis::adapter::adapt;
using aedis::generic::make_serializer;
using net::ip::tcp;
using net::write;
using net::buffer;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using connection = aedis::connection<tcp_socket>;
net::awaitable<void> example()
/* This example will subscribe and read pushes indefinitely.
*
* To test it, send messages with redis-cli
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel some-message
* (integer) 3
* 127.0.0.1:6379>
*
* To test reconnection, try for example closing all clients currently
* connected to the Redis instance
*
* $ redis-cli
* > CLIENT kill TYPE pubsub
*/
// Receives pushes.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
{
auto ex = co_await net::this_coro::executor;
tcp::resolver resv{ex};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp_socket socket{ex};
co_await socket.async_connect(*std::begin(res));
std::string request;
auto sr = make_serializer(request);
sr.push(command::hello, 3);
sr.push(command::subscribe, "channel1", "channel2");
co_await net::async_write(socket, buffer(request));
// Ignores the response to hello.
std::string buffer;
co_await resp3::async_read(socket, net::dynamic_buffer(buffer));
for (std::vector<node<std::string>> resp;;) {
co_await resp3::async_read(socket, net::dynamic_buffer(buffer), adapt(resp));
for (auto const& e: resp)
std::cout << e << std::endl;
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
// Receives events.
net::awaitable<void> event_receiver(std::shared_ptr<connection> db)
{
request req;
req.push("SUBSCRIBE", "channel");
for (;;) {
auto ev = co_await db->async_receive_event();
if (ev == connection::event::hello)
co_await db->async_exec(req);
}
}
int main()
{
try {
net::io_context ioc;
net::co_spawn(ioc, example(), net::detached);
auto db = std::make_shared<connection>(ioc);
db->get_config().enable_events = true;
db->get_config().enable_reconnect = true;
net::co_spawn(ioc, push_receiver(db), net::detached);
net::co_spawn(ioc, event_receiver(db), net::detached);
db->async_run(net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
std::cerr << "Error: " << e.what() << std::endl;
}
}


@@ -1,93 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::node;
using aedis::sentinel::command;
using aedis::generic::client;
using aedis::adapter::adapt;
using client_type = client<net::ip::tcp::socket, command>;
using response_type = std::vector<node<std::string>>;
using adapter_type = aedis::adapter::adapter_t<response_type>;
/* In this example we send a subscription to a channel and start
* reading server side messages indefinitely.
*
* After starting the example you can test it by sending messages with
* redis-cli like this
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel1 some-message
* (integer) 3
* 127.0.0.1:6379>
*
* The messages will then appear on the terminal you are running the
* example.
*/
class receiver {
public:
receiver(client_type& db)
: adapter_{adapt(resp_)}
, db_{&db} {}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
adapter_(nd, ec);
}
void on_read(command cmd, std::size_t)
{
switch (cmd) {
case command::hello:
db_->send(command::subscribe, "channel1", "channel2");
break;
default:;
}
resp_.clear();
}
void on_write(std::size_t n)
{
std::cout << "Number of bytes written: " << n << std::endl;
}
void on_push(std::size_t)
{
std::cout
<< "Event: " << resp_.at(1).value << "\n"
<< "Channel: " << resp_.at(2).value << "\n"
<< "Message: " << resp_.at(3).value << "\n"
<< std::endl;
resp_.clear();
}
private:
response_type resp_;
adapter_type adapter_;
client_type* db_;
};
int main()
{
net::io_context ioc;
client_type db{ioc.get_executor()};
auto recv = std::make_shared<receiver>(db);
db.set_receiver(recv);
db.async_run("127.0.0.1", "6379",
[](auto ec){ std::cout << ec.message() << std::endl;});
ioc.run();
}


@@ -0,0 +1,67 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <thread>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::node;
using aedis::resp3::request;
using connection = aedis::sync<aedis::connection<>>;
using event = connection::event;
// See subscriber.cpp for more info about how to run this example.
// Subscribe again every time there is a disconnection.
void event_receiver(connection& conn)
{
request req;
req.push("SUBSCRIBE", "channel");
for (;;) {
auto ev = conn.receive_event();
if (ev == connection::event::hello)
conn.exec(req);
}
}
int main()
{
try {
net::io_context ioc{1};
auto work = net::make_work_guard(ioc);
connection::config cfg;
cfg.enable_events = true;
cfg.enable_reconnect = true;
connection conn{work.get_executor(), cfg};
std::thread t1{[&]() { ioc.run(); }};
std::thread t2{[&]() { boost::system::error_code ec; conn.run(ec); }};
std::thread t3{[&]() { event_receiver(conn); }};
for (std::vector<node<std::string>> resp;;) {
conn.receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
t1.join();
t2.join();
t3.join();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}


@@ -1,73 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::redis::command;
using aedis::adapter::adapt;
using aedis::generic::make_serializer;
using net::ip::tcp;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
net::awaitable<void> example()
{
auto ex = co_await net::this_coro::executor;
tcp::resolver resv{ex};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp_socket socket{ex};
co_await socket.async_connect(*std::begin(res));
std::string request;
auto sr = make_serializer(request);
sr.push(command::hello, 3);
sr.push(command::multi);
sr.push(command::ping, "Some message.");
sr.push(command::set, "low-level-key", "some content", "EX", "2");
sr.push(command::exec);
sr.push(command::quit);
co_await net::async_write(socket, net::buffer(request));
std::tuple<std::string, boost::optional<std::string>> response;
std::string buffer;
auto dbuffer = net::dynamic_buffer(buffer);
co_await resp3::async_read(socket, dbuffer); // hello
co_await resp3::async_read(socket, dbuffer); // multi
co_await resp3::async_read(socket, dbuffer); // ping
co_await resp3::async_read(socket, dbuffer); // set
co_await resp3::async_read(socket, dbuffer, adapt(response));
co_await resp3::async_read(socket, dbuffer); // quit
std::cout
<< "Ping: " << std::get<0>(response) << "\n"
<< "Get (has_value): " << std::get<1>(response).has_value()
<< std::endl;
if (std::get<1>(response).has_value())
std::cout << "Get (value): " << std::get<1>(response).value() << std::endl;
}
int main()
{
try {
net::io_context ioc;
net::co_spawn(ioc, example(), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}


@@ -1,98 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_USER_SESSION_HPP
#define AEDIS_USER_SESSION_HPP
#include <functional>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/redirect_error.hpp>
// An example user session.
namespace aedis
{
// Base class for user sessions.
struct user_session_base {
virtual ~user_session_base() {}
virtual void deliver(std::string const& msg) = 0;
};
class user_session:
public user_session_base,
public std::enable_shared_from_this<user_session> {
public:
user_session(boost::asio::ip::tcp::socket socket)
: socket_(std::move(socket))
, timer_(socket_.get_executor())
{ timer_.expires_at(std::chrono::steady_clock::time_point::max()); }
void start(std::function<void(std::string const&)> on_msg)
{
co_spawn(socket_.get_executor(),
[self = shared_from_this(), on_msg]{ return self->reader(on_msg); },
boost::asio::detached);
co_spawn(socket_.get_executor(),
[self = shared_from_this()]{ return self->writer(); },
boost::asio::detached);
}
void deliver(std::string const& msg)
{
write_msgs_.push_back(msg);
timer_.cancel_one();
}
private:
boost::asio::awaitable<void>
reader(std::function<void(std::string const&)> on_msg)
{
try {
for (std::string msg;;) {
auto const n = co_await boost::asio::async_read_until(socket_, boost::asio::dynamic_buffer(msg, 1024), "\n", boost::asio::use_awaitable);
on_msg(msg);
msg.erase(0, n);
}
} catch (std::exception&) {
stop();
}
}
boost::asio::awaitable<void> writer()
{
try {
while (socket_.is_open()) {
if (write_msgs_.empty()) {
boost::system::error_code ec;
co_await timer_.async_wait(boost::asio::redirect_error(boost::asio::use_awaitable, ec));
} else {
co_await boost::asio::async_write(socket_, boost::asio::buffer(write_msgs_.front()), boost::asio::use_awaitable);
write_msgs_.pop_front();
}
}
} catch (std::exception&) {
stop();
}
}
void stop()
{
socket_.close();
timer_.cancel();
}
boost::asio::ip::tcp::socket socket_;
boost::asio::steady_timer timer_;
std::deque<std::string> write_msgs_;
};
} // aedis
#endif // AEDIS_USER_SESSION_HPP

include/Makefile.am Normal file

@@ -0,0 +1,25 @@
nobase_include_HEADERS =\
$(top_srcdir)/include/aedis/src.hpp\
$(top_srcdir)/include/aedis/error.hpp\
$(top_srcdir)/include/aedis/impl/error.ipp\
$(top_srcdir)/include/aedis/detail/net.hpp\
$(top_srcdir)/include/aedis/connection.hpp\
$(top_srcdir)/include/aedis/adapt.hpp\
$(top_srcdir)/include/aedis/sync.hpp\
$(top_srcdir)/include/aedis/detail/connection_ops.hpp\
$(top_srcdir)/include/aedis.hpp\
$(top_srcdir)/include/aedis/adapter/detail/adapters.hpp\
$(top_srcdir)/include/aedis/adapter/adapt.hpp\
$(top_srcdir)/include/aedis/adapter/detail/response_traits.hpp\
$(top_srcdir)/include/aedis/resp3/node.hpp\
$(top_srcdir)/include/aedis/resp3/detail/read_ops.hpp\
$(top_srcdir)/include/aedis/resp3/detail/parser.hpp\
$(top_srcdir)/include/aedis/resp3/detail/exec.hpp\
$(top_srcdir)/include/aedis/resp3/type.hpp\
$(top_srcdir)/include/aedis/resp3/read.hpp\
$(top_srcdir)/include/aedis/resp3/write.hpp\
$(top_srcdir)/include/aedis/resp3/request.hpp\
$(top_srcdir)/include/aedis/resp3/impl/request.ipp\
$(top_srcdir)/include/aedis/resp3/detail/impl/parser.ipp\
$(top_srcdir)/include/aedis/resp3/impl/type.ipp

include/aedis.hpp Normal file

@@ -0,0 +1,636 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_HPP
#define AEDIS_HPP
#include <aedis/error.hpp>
#include <aedis/adapt.hpp>
#include <aedis/connection.hpp>
#include <aedis/sync.hpp>
#include <aedis/resp3/request.hpp>
/** \mainpage Documentation
\tableofcontents
Useful links: \subpage any, [Changelog](CHANGELOG.md) and [Benchmarks](benchmarks/benchmarks.md).
\section Overview
Aedis is a high-level [Redis](https://redis.io/) client library
built on top of
[Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html).
Some of its distinctive features are
\li Support for the latest version of the Redis communication protocol [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
\li First class support for STL containers and C++ built-in types.
\li Serialization and deserialization of your own data types.
\li Health checks, back pressure and low latency.
\li Hides most of the low-level asynchronous operations away from the user.
Let us have a look at some code snippets
@subsection Async
The code below sends a ping command to Redis and quits (see intro.cpp)
@code
int main()
{
net::io_context ioc;
connection db{ioc};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
db.async_run(req, adapt(resp), net::detached);
ioc.run();
std::cout << std::get<0>(resp) << std::endl;
}
@endcode
The connection class maintains a healthy connection with Redis
over which users can execute their commands without any need for
queuing. For example, to execute more than one request
@code
int main()
{
...
net::io_context ioc;
connection db{ioc};
db.async_exec(req1, adapt(resp1), handler1);
db.async_exec(req2, adapt(resp2), handler2);
db.async_exec(req3, adapt(resp3), handler3);
db.async_run(net::detached);
ioc.run();
...
}
@endcode
The `connection::async_exec` functions above can be called from different
places in the code without knowing about each other; see for
example echo_server.cpp. Server-side pushes are supported on the
same connection where commands are executed. A typical subscriber
looks like this (see subscriber.cpp)
@code
net::awaitable<void> reader(std::shared_ptr<connection> db)
{
for (std::vector<node_type> resp;;) {
co_await db->async_receive_event(adapt(resp));
// Use resp and clear it.
resp.clear();
}
}
@endcode
@subsection Sync
The `connection` class offers only an asynchronous API.
Synchronous communication with Redis is provided by the `aedis::sync`
wrapper class (see intro_sync.cpp).
@code
int main()
{
net::io_context ioc{1};
auto work = net::make_work_guard(ioc);
std::thread t1{[&]() { ioc.run(); }};
sync<connection> conn{work.get_executor()};
std::thread t2{[&]() { boost::system::error_code ec; conn.run(ec); }};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
conn.exec(req, adapt(resp));
std::cout << "Response: " << std::get<0>(resp) << std::endl;
work.reset();
t1.join();
t2.join();
}
@endcode
\subsection using-aedis Installation
To install and use Aedis you will need
- Boost 1.78 or greater.
- C++17. Some examples require C++20 with coroutine support.
- Redis 6 or higher. Optionally also redis-cli and Redis Sentinel.
For a simple installation run
```
$ git clone --branch v1.0.0 https://github.com/mzimbres/aedis.git
$ cd aedis
# Option 1: Direct compilation.
$ g++ -std=c++17 -pthread examples/intro.cpp -I./include -I/path/boost_1_79_0/include/
# Option 2: Use cmake.
$ BOOST_ROOT=/opt/boost_1_79_0/ cmake -DCMAKE_CXX_FLAGS=-std=c++20 .
```
@note CMake support is still experimental.
For a full installation on the system run
```
# Download and unpack the latest release
$ wget https://github.com/mzimbres/aedis/releases/download/v1.0.0/aedis-1.0.0.tar.gz
$ tar -xzvf aedis-1.0.0.tar.gz
# Configure, build and install
$ CXXFLAGS="-std=c++17" ./configure --prefix=/opt/aedis-1.0.0 --with-boost=/opt/boost_1_78_0
$ sudo make install
```
To build examples and tests
```
$ make
```
@subsubsection using_aedis Using Aedis
When writing your own applications include the following header
```cpp
#include <aedis/src.hpp>
```
in no more than one source file in your applications.
@subsubsection sup-comp Supported compilers
Aedis has been tested with the following compilers
- Tested with gcc: 12, 11.
- Tested with clang: 14, 13, 11.
\subsubsection Developers
To generate the build system clone the repository and run
```
# git clone https://github.com/mzimbres/aedis.git
$ autoreconf -i
```
After that you get a configure script that can be run as explained
above. For example, to build with a compiler other than the system
compiler and with coverage support, run
```
$ CXX=clang++-14 \
CXXFLAGS="-g -std=c++20 -Wall -Wextra --coverage -fkeep-inline-functions -fkeep-static-functions" \
LDFLAGS="--coverage" \
./configure --with-boost=/opt/boost_1_79_0
```
To generate release tarballs run
```
$ make distcheck
```
\section requests Requests
Redis requests are composed of one or more Redis commands (in
the Redis documentation they are called
[pipelines](https://redis.io/topics/pipelining)). For example
@code
request req;
// Command with variable length of arguments.
req.push("SET", "key", "some value", value, "EX", "2");
// Pushes a list.
std::list<std::string> list
{"channel1", "channel2", "channel3"};
req.push_range("SUBSCRIBE", list);
// Same as above but as an iterator range.
req.push_range2("SUBSCRIBE", std::cbegin(list), std::cend(list));
// Pushes a map.
std::map<std::string, mystruct> map
{ {"key1", "value1"}
, {"key2", "value2"}
, {"key3", "value3"}};
req.push_range("HSET", "key", map);
@endcode
Sending a request to Redis is then performed with the following function
@code
co_await db->async_exec(req, adapt(resp));
@endcode
\subsection requests-serialization Serialization
The \c push and \c push_range functions above work with integers
(e.g. \c int) and \c std::string out of the box. To send your own
data type, define a \c to_bulk function like this
@code
struct mystruct {
// Example struct.
};
void to_bulk(std::string& to, mystruct const& obj)
{
std::string dummy = "Dummy serialization string.";
aedis::resp3::to_bulk(to, dummy);
}
@endcode
Once \c to_bulk is defined and accessible over ADL \c mystruct can
be passed to the \c request
@code
request req;
std::map<std::string, mystruct> map {...};
req.push_range("HSET", "key", map);
@endcode
The example serialization.cpp shows how to store JSON strings in Redis.
\section low-level-responses Responses
To read responses effectively, users must know their RESP3 type;
it can be found in the Redis documentation of each command
(https://redis.io/commands). For example
Command | RESP3 type | Documentation
---------|-------------------------------------|--------------
lpush | Number | https://redis.io/commands/lpush
lrange | Array | https://redis.io/commands/lrange
set | Simple-string, null or blob-string | https://redis.io/commands/set
get | Blob-string | https://redis.io/commands/get
smembers | Set | https://redis.io/commands/smembers
hgetall | Map | https://redis.io/commands/hgetall
Once the RESP3 type of a given response is known we can choose a
proper C++ data structure to receive it in. Fortunately, this is a
simple task for most types. The table below summarises the options
RESP3 type | Possible C++ type | Type
---------------|--------------------------------------------------------------|------------------
Simple-string | \c std::string | Simple
Simple-error | \c std::string | Simple
Blob-string | \c std::string, \c std::vector | Simple
Blob-error | \c std::string, \c std::vector | Simple
Number | `long long`, `int`, `std::size_t`, \c std::string | Simple
Double | `double`, \c std::string | Simple
Null | `boost::optional<T>` | Simple
Array | \c std::vector, \c std::list, \c std::array, \c std::deque | Aggregate
Map | \c std::vector, \c std::map, \c std::unordered_map | Aggregate
Set | \c std::vector, \c std::set, \c std::unordered_set | Aggregate
Push | \c std::vector, \c std::map, \c std::unordered_map | Aggregate
For example
@code
request req;
req.push("HELLO", 3);
req.push_range("RPUSH", "key1", vec);
req.push_range("HSET", "key2", map);
req.push("LRANGE", "key3", 0, -1);
req.push("HGETALL", "key4");
req.push("QUIT");
std::tuple<
aedis::ignore, // hello
int, // rpush
int, // hset
std::vector<T>, // lrange
std::map<U, V>, // hgetall
std::string // quit
> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
The tag @c aedis::ignore can be used to ignore individual
elements in the responses. If the intention is to ignore the
response to all commands in the request use @c adapt()
@code
co_await db->async_exec(req, adapt());
@endcode
Responses that contain nested aggregates or heterogeneous data
types will be given special treatment later in \ref gen-case. As
of this writing, not all RESP3 types are used by the Redis server,
which means in practice users will be concerned with a reduced
subset of the RESP3 specification.
\subsection Optional
It is not uncommon for apps to access keys that do not exist or
that have already expired in the Redis server. To deal with these
cases Aedis provides support for \c boost::optional. To use it,
wrap your type in a \c boost::optional like this
@code
boost::optional<std::unordered_map<T, U>> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
Everything else stays the same.
\subsection transactions Transactions
To read the response to a transaction we have to observe that Redis
queues the commands as they arrive and sends their responses back to
the user as an array, in the response to the @c exec command.
For example, to read the response to this request
@code
db.send("MULTI");
db.send("GET", "key1");
db.send("LRANGE", "key2", 0, -1);
db.send("HGETALL", "key3");
db.send("EXEC");
@endcode
use the following response type
@code
using aedis::ignore;
using boost::optional;
using exec_resp_type =
std::tuple<
optional<std::string>, // get
optional<std::vector<std::string>>, // lrange
optional<std::map<std::string, std::string>> // hgetall
>;
std::tuple<
ignore, // multi
ignore, // get
ignore, // lrange
ignore, // hgetall
exec_resp_type, // exec
> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
Note that above we are not ignoring the responses to the commands
themselves, only the acknowledgements that they have been queued. For a
complete example see containers.cpp.
\subsection Deserialization
As mentioned in \ref requests-serialization, it is common to
serialize data before sending it to Redis, e.g. to JSON strings.
For performance and convenience reasons, we may also want to
deserialize it directly into its final data structure. Aedis
supports this use case by calling a user-provided \c from_bulk
function while parsing the response. For example
@code
void from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
{
// Deserializes p into obj.
}
@endcode
After that, you can start receiving data efficiently in the desired
types, e.g. \c mystruct, \c std::map<std::string, mystruct>, etc., as sketched below.
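For instance, a minimal sketch (not taken from the examples; it
assumes \c from_bulk is defined for \c mystruct as above and that the
hash stored under the illustrative key "mystruct-key" holds
serialized values) could look like
@code
request req;
req.push("HGETALL", "mystruct-key");
// Each field value is deserialized with from_bulk while the
// response is parsed.
std::map<std::string, mystruct> resp;
co_await db->async_exec(req, adapt(resp));
@endcode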
\subsection gen-case The general case
There are cases where responses to Redis
commands won't fit in the model presented above; some examples are
@li Commands (like \c set) whose responses don't have a fixed
RESP3 type. Expecting an \c int and receiving a blob-string
will result in an error.
@li RESP3 aggregates that contain nested aggregates can't be read in STL containers.
@li Transactions with a dynamic number of commands can't be read in a \c std::tuple.
To deal with these cases Aedis provides the \c resp3::node
type, which is the most general form of an element in a response,
be it a simple RESP3 type or an aggregate. It is defined like this
@code
template <class String>
struct node {
// The RESP3 type of the data in this node.
type data_type;
// The number of elements of an aggregate (or 1 for simple data).
std::size_t aggregate_size;
// The depth of this node in the response tree.
std::size_t depth;
// The actual data. For aggregate types this is always empty.
String value;
};
@endcode
Any response to a Redis command can be received in a \c
std::vector<node<std::string>>. The vector can be seen as a
pre-order view of the response tree
(https://en.wikipedia.org/wiki/Tree_traversal#Pre-order,_NLR).
Using it is no different from using other types
@code
// Receives any RESP3 simple data type.
node<std::string> resp;
co_await db->async_exec(req, adapt(resp));
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
For example, suppose we want to retrieve a hash data structure
from Redis with `HGETALL`; some of the options are
@li \c std::vector<node<std::string>>: Always works.
@li \c std::vector<std::string>: Efficient and flat, all elements as strings.
@li \c std::map<std::string, std::string>: Efficient if you need the data as a \c std::map.
@li \c std::map<U, V>: Efficient if you are storing serialized data. Avoids temporaries and requires \c from_bulk for \c U and \c V.
In addition to the above, users can also use unordered versions of the containers. The same reasoning applies to sets, e.g. `SMEMBERS`; a short sketch of the first option follows below.
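The sketch below is an illustration only; it reuses the \c req and \c
db objects from the previous snippets and assumes the root aggregate
of the `HGETALL` response sits at depth 0, so that for a non-nested
map the field names and values appear as alternating nodes at depth 1
@code
std::vector<node<std::string>> resp;
co_await db->async_exec(req, adapt(resp));
// Print field names and values of a flat map response.
for (auto const& nd: resp)
   if (nd.depth == 1)
      std::cout << nd.value << "\n";
@endcode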
\section examples Examples
The examples listed below cover most use cases presented in the documentation above.
@li intro.cpp: Basic steps with Aedis.
@li intro_sync.cpp: Synchronous version of intro.cpp.
@li containers.cpp: Shows how to send and receive stl containers.
@li serialization.cpp: Shows how to serialize your own types.
@li subscriber.cpp: Shows how to use pubsub.
@li subscriber_sync.cpp: Synchronous version of subscriber.cpp.
@li echo_server.cpp: A simple TCP echo server that uses coroutines.
@li chat_room.cpp: A simple chat room that uses coroutines.
\section why-aedis Why Aedis
At the time of this writing there are seventeen Redis clients
listed in the [official](https://redis.io/docs/clients/#cpp) list.
With so many clients available it is not unlikely that users are
asking themselves why yet another one. In this section I will try
to compare Aedis with the most popular clients and explain why we
need Aedis. Notice however that this is ongoing work, as comparing
clients objectively is difficult and time consuming.
The most popular client at the time of this writing, ranked by
GitHub stars, is
@li https://github.com/sewenew/redis-plus-plus
Before we start it is worth mentioning some of the things it does
not support
@li RESP3. Without RESP3 it is impossible to support some important Redis features like client-side caching, among other things.
@li Coroutines.
@li Reading responses directly into user data structures, avoiding temporaries.
@li Error handling with error-code and exception overloads.
@li Health checks.
The remaining points will be addressed individually.
@subsection redis-plus-plus
Let us first have a look at what sending a command, a pipeline and a
transaction looks like
@code
auto redis = Redis("tcp://127.0.0.1:6379");
// Send commands
redis.set("key", "val");
auto val = redis.get("key"); // val is of type OptionalString.
if (val)
std::cout << *val << std::endl;
// Sending pipelines
auto pipe = redis.pipeline();
auto pipe_replies = pipe.set("key", "value")
.get("key")
.rename("key", "new-key")
.rpush("list", {"a", "b", "c"})
.lrange("list", 0, -1)
.exec();
// Parse reply with reply type and index.
auto set_cmd_result = pipe_replies.get<bool>(0);
// ...
// Sending a transaction
auto tx = redis.transaction();
auto tx_replies = tx.incr("num0")
.incr("num1")
.mget({"num0", "num1"})
.exec();
auto incr_result0 = tx_replies.get<long long>(0);
// ...
@endcode
Some of the problems with this API are
@li Heterogeneous treatment of commands, pipelines and transactions. This makes auto-pipelining impossible.
@li Any API that sends individual commands has a very restricted scope of usability and should be avoided for performance reasons.
@li The API imposes exceptions on users; no error-code overload is provided.
@li No way to reuse the buffer for new calls to e.g. \c redis.get in order to avoid further dynamic memory allocations.
@li Error handling of the resolve and connect steps is not clear.
According to the documentation, pipelines in redis-plus-plus have
the following characteristics
> NOTE: By default, creating a Pipeline object is NOT cheap, since
> it creates a new connection.
This is clearly a downside of the API, as pipelines should be the
default way of communicating and not an exception; paying such a
high price for each pipeline imposes a severe performance cost.
Transactions also suffer from the very same problem.
> NOTE: Creating a Transaction object is NOT cheap, since it
> creates a new connection.
In Aedis there is no difference between sending one command, a
pipeline or a transaction because requests are decoupled
from the IO objects.
> redis-plus-plus also supports async interface, however, async
> support for Transaction and Subscriber is still on the way.
>
> The async interface depends on third-party event library, and so
> far, only libuv is supported.
Async code in redis-plus-plus looks like the following
@code
auto async_redis = AsyncRedis(opts, pool_opts);
Future<string> ping_res = async_redis.ping();
cout << ping_res.get() << endl;
@endcode
As the reader can see, the async interface is based on futures,
which are also known to have poor performance. The biggest
problem with this async design, however, is that it makes it
impossible to write asynchronous programs correctly, since it
starts an async operation on every command sent instead of
enqueueing a message and triggering a write when it can be sent.
It is also not clear how pipelines are realised with this design
(if at all).
\section Acknowledgement
Some people that were helpful in the development of Aedis
@li Richard Hodges ([madmongo1](https://github.com/madmongo1)): For helping me with Asio and the design of asynchronous programs in general.
@li Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation (among other things).
@li Petr Dannhofer ([Eddie-cz](https://github.com/Eddie-cz)): For helping me understand how the `AUTH` and `HELLO` command can influence each other.
*/
/** \defgroup any Reference
*
* This page contains the documentation of all user facing code.
*/
// Add Sentinel support as described in
//
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
//
// Avoid conflicts between
//
// - aedis::adapt
// - aedis::resp3::adapt.
#endif // AEDIS_HPP

include/aedis/adapt.hpp Normal file

@@ -0,0 +1,159 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPT_HPP
#define AEDIS_ADAPT_HPP
#include <tuple>
#include <boost/mp11.hpp>
#include <boost/variant2.hpp>
#include <boost/utility/string_view.hpp>
#include <boost/system.hpp>
#include <aedis/resp3/node.hpp>
#include <aedis/adapter/adapt.hpp>
#include <aedis/adapter/detail/response_traits.hpp>
namespace aedis {
/** @brief Tag used to ignore responses.
* @ingroup any
*
* For example
*
* @code
* std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
* @endcode
*
* will cause only the second tuple element to be parsed; the others
* will be ignored.
*/
using ignore = adapter::detail::ignore;
namespace detail {
struct ignore_adapter {
void
operator()(
std::size_t,
resp3::node<boost::string_view> const&,
boost::system::error_code&)
{
}
auto supported_response_size() const noexcept { return std::size_t(-1);}
};
template <class Tuple>
class static_adapter {
private:
static constexpr auto size = std::tuple_size<Tuple>::value;
using adapter_tuple = boost::mp11::mp_transform<adapter::adapter_t, Tuple>;
using variant_type = boost::mp11::mp_rename<adapter_tuple, boost::variant2::variant>;
using adapters_array_type = std::array<variant_type, size>;
adapters_array_type adapters_;
public:
static_adapter(Tuple& r)
{
adapter::detail::assigner<size - 1>::assign(adapters_, r);
}
auto supported_response_size() const noexcept { return size;}
void
operator()(
std::size_t i,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
using boost::variant2::visit;
BOOST_ASSERT(i < adapters_.size());
visit([&](auto& arg){arg(nd, ec);}, adapters_.at(i));
}
};
template <class Vector>
class vector_adapter {
private:
using adapter_type = typename adapter::detail::response_traits<Vector>::adapter_type;
adapter_type adapter_;
public:
vector_adapter(Vector& v) : adapter_{adapter::adapt2(v)} { }
auto supported_response_size() const noexcept { return std::size_t(-1);}
void
operator()(
std::size_t,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
adapter_(nd, ec);
}
};
template <class>
struct response_traits;
template <>
struct response_traits<void> {
using response_type = void;
using adapter_type = detail::ignore_adapter;
static auto adapt() noexcept
{ return detail::ignore_adapter{}; }
};
template <class String, class Allocator>
struct response_traits<std::vector<resp3::node<String>, Allocator>> {
using response_type = std::vector<resp3::node<String>, Allocator>;
using adapter_type = vector_adapter<response_type>;
static auto adapt(response_type& v) noexcept
{ return adapter_type{v}; }
};
template <class ...Ts>
struct response_traits<std::tuple<Ts...>> {
using response_type = std::tuple<Ts...>;
using adapter_type = static_adapter<response_type>;
static auto adapt(response_type& r) noexcept
{ return adapter_type{r}; }
};
} // detail
/** @brief Creates an adapter that ignores responses.
* @ingroup any
*
* This function can be used to create adapters that ignore
* responses.
*/
auto adapt() noexcept
{
return detail::response_traits<void>::adapt();
}
/** @brief Adapts a type to be used as a response.
* @ingroup any
*
* The type T can be any STL container, any integer type and
* \c std::string.
*/
template<class T>
auto adapt(T& t) noexcept
{
return detail::response_traits<T>::adapt(t);
}
} // aedis
#endif // AEDIS_ADAPT_HPP


@@ -15,7 +15,8 @@ namespace adapter {
template <class T>
using adapter_t = typename detail::adapter_t<T>;
/** \brief Creates a dummy response adapter.
/** \internal
\brief Creates a dummy response adapter.
\ingroup any
The adapter returned by this function ignores responses. It is
@@ -38,10 +39,11 @@ using adapter_t = typename detail::adapter_t<T>;
@endcode
*/
inline
auto adapt() noexcept
auto adapt2() noexcept
{ return detail::response_traits<void>::adapt(); }
/** \brief Adapts user data to read operations.
/** \internal
* \brief Adapts user data to read operations.
* \ingroup any
*
* STL containers, \c std::tuple and built-in types are supported and
@@ -73,7 +75,7 @@ auto adapt() noexcept
* @endcode
*/
template<class T>
auto adapt(T& t) noexcept
auto adapt2(T& t) noexcept
{ return detail::response_traits<T>::adapt(t); }
} // adapter


@@ -24,11 +24,11 @@
#include <boost/spirit/home/x3.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/generic/serializer.hpp>
#include <aedis/resp3/node.hpp>
#include <aedis/adapter/error.hpp>
namespace aedis {
namespace adapter {
@@ -52,7 +52,7 @@ parse_double(
template <class T>
typename std::enable_if<std::is_integral<T>::value, void>::type
from_string(
from_bulk(
T& i,
boost::string_view sv,
boost::system::error_code& ec)
@@ -60,15 +60,15 @@ from_string(
i = resp3::detail::parse_uint(sv.data(), sv.size(), ec);
}
void from_string(
void from_bulk(
bool& t,
boost::string_view sv,
boost::system::error_code& ec)
boost::system::error_code&)
{
t = *sv.data() == 't';
}
void from_string(
void from_bulk(
double& d,
boost::string_view sv,
boost::system::error_code& ec)
@@ -78,7 +78,7 @@ void from_string(
template <class CharT, class Traits, class Allocator>
void
from_string(
from_bulk(
std::basic_string<CharT, Traits, Allocator>& s,
boost::string_view sv,
boost::system::error_code&)
@@ -91,9 +91,9 @@ from_string(
void set_on_resp3_error(resp3::type t, boost::system::error_code& ec)
{
switch (t) {
case resp3::type::simple_error: ec = adapter::error::simple_error; return;
case resp3::type::blob_error: ec = adapter::error::blob_error; return;
case resp3::type::null: ec = adapter::error::null; return;
case resp3::type::simple_error: ec = error::simple_error; return;
case resp3::type::blob_error: ec = error::blob_error; return;
case resp3::type::null: ec = error::null; return;
default: return;
}
}
@@ -105,9 +105,10 @@ private:
public:
general_aggregate(Result* c = nullptr): result_(c) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code&)
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code& ec)
{
result_->push_back({n.data_type, n.aggregate_size, n.depth, std::string{std::cbegin(n.value), std::cend(n.value)}});
set_on_resp3_error(n.data_type, ec);
}
};
@@ -119,12 +120,13 @@ private:
public:
general_simple(Node* t = nullptr) : result_(t) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code&)
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code& ec)
{
result_->data_type = n.data_type;
result_->aggregate_size = n.aggregate_size;
result_->depth = n.depth;
result_->value.assign(n.value.data(), n.value.size());
result_->data_type = n.data_type;
result_->aggregate_size = n.aggregate_size;
result_->depth = n.depth;
result_->value.assign(n.value.data(), n.value.size());
set_on_resp3_error(n.data_type, ec);
}
};
@@ -144,11 +146,11 @@ public:
return;
if (is_aggregate(n.data_type)) {
ec = adapter::error::expects_simple_type;
ec = error::expects_resp3_simple_type;
return;
}
from_string(result, n.value, ec);
from_bulk(result, n.value, ec);
}
};
@@ -173,19 +175,19 @@ public:
if (is_aggregate(nd.data_type)) {
if (nd.data_type != resp3::type::set)
ec = error::expects_set_type;
ec = error::expects_resp3_set;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = adapter::error::expects_set_type;
ec = error::expects_resp3_set;
return;
}
typename Result::key_type obj;
from_string(obj, nd.value, ec);
from_bulk(obj, nd.value, ec);
hint_ = result.insert(hint_, std::move(obj));
}
};
@@ -212,24 +214,24 @@ public:
if (is_aggregate(nd.data_type)) {
if (element_multiplicity(nd.data_type) != 2)
ec = error::expects_map_type;
ec = error::expects_resp3_map;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = adapter::error::expects_map_type;
ec = error::expects_resp3_map;
return;
}
if (on_key_) {
typename Result::key_type obj;
from_string(obj, nd.value, ec);
from_bulk(obj, nd.value, ec);
current_ = result.insert(current_, {std::move(obj), {}});
} else {
typename Result::mapped_type obj;
from_string(obj, nd.value, ec);
from_bulk(obj, nd.value, ec);
current_->second = std::move(obj);
}
@@ -257,7 +259,7 @@ public:
result.reserve(result.size() + m * nd.aggregate_size);
} else {
result.push_back({});
from_string(result.back(), nd.value, ec);
from_bulk(result.back(), nd.value, ec);
}
}
};
@@ -282,7 +284,7 @@ public:
if (is_aggregate(nd.data_type)) {
if (i_ != -1) {
ec = adapter::error::nested_aggregate_unsupported;
ec = error::nested_aggregate_unsupported;
return;
}
@@ -292,12 +294,12 @@ public:
}
} else {
if (i_ == -1) {
ec = adapter::error::expects_aggregate_type;
ec = error::expects_resp3_aggregate;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
from_string(result.at(i_), nd.value, ec);
from_bulk(result.at(i_), nd.value, ec);
}
++i_;
@@ -322,12 +324,12 @@ struct list_impl {
if (!is_aggregate(nd.data_type)) {
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = adapter::error::expects_aggregate_type;
ec = error::expects_resp3_aggregate;
return;
}
result.push_back({});
from_string(result.back(), nd.value, ec);
from_bulk(result.back(), nd.value, ec);
}
}
};


@@ -13,23 +13,24 @@
#include <boost/mp11.hpp>
#include <boost/variant2.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/adapter/detail/adapters.hpp>
#include <aedis/adapter/error.hpp>
namespace aedis {
namespace adapter {
namespace detail {
struct ignore {};
/* Traits class for response objects.
*
* Provides traits for all supported response types, i.e. all STL
* containers and C++ built-in types.
*/
template <class ResponseType>
struct response_traits
{
struct response_traits {
using adapter_type = adapter::detail::wrapper<ResponseType>;
static auto adapt(ResponseType& r) noexcept { return adapter_type{&r}; }
};
@@ -37,32 +38,34 @@ struct response_traits
template <class T>
using adapter_t = typename response_traits<T>::adapter_type;
template <>
struct response_traits<ignore> {
using response_type = ignore;
using adapter_type = resp3::detail::ignore_response;
static auto adapt(response_type&) noexcept { return adapter_type{}; }
};
template <class T>
struct response_traits<resp3::node<T>>
{
struct response_traits<resp3::node<T>> {
using response_type = resp3::node<T>;
using adapter_type = adapter::detail::general_simple<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <class String, class Allocator>
struct response_traits<std::vector<resp3::node<String>, Allocator>>
{
struct response_traits<std::vector<resp3::node<String>, Allocator>> {
using response_type = std::vector<resp3::node<String>, Allocator>;
using adapter_type = adapter::detail::general_aggregate<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <>
struct response_traits<void>
{
struct response_traits<void> {
using response_type = void;
using adapter_type = resp3::detail::ignore_response;
static auto adapt() noexcept { return adapter_type{}; }
};
namespace detail {
// Duplicated here to avoid circular include dependency.
template<class T>
auto internal_adapt(T& t) noexcept
@@ -92,11 +95,10 @@ class static_aggregate_adapter {
private:
using adapters_array_type =
std::array<
boost::mp11::mp_unique<
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>>,
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>,
std::tuple_size<Tuple>::value>;
std::size_t i_ = 0;
@@ -144,13 +146,11 @@ public:
}
};
} // detail
template <class... Ts>
struct response_traits<std::tuple<Ts...>>
{
using response_type = std::tuple<Ts...>;
using adapter_type = detail::static_aggregate_adapter<response_type>;
using adapter_type = static_aggregate_adapter<response_type>;
static auto adapt(response_type& r) noexcept { return adapter_type{&r}; }
};


@@ -0,0 +1,616 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_HPP
#define AEDIS_CONNECTION_HPP
#include <vector>
#include <queue>
#include <limits>
#include <chrono>
#include <memory>
#include <type_traits>
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <aedis/adapt.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
namespace aedis {
/** @brief A high level connection to Redis.
* @ingroup any
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @remarks This class exposes only asynchronous member functions;
* synchronous communication with the Redis server is provided by
* the sync class.
*
* @tparam AsyncReadWriteStream A stream that supports
* `async_read_some` and `async_write_some`.
*
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
/** @brief Connection configuration parameters.
*/
struct config {
/// Redis server address.
std::string host = "127.0.0.1";
/// Redis server port.
std::string port = "6379";
/// Username if authentication is required.
std::string username;
/// Password if authentication is required.
std::string password;
/// Timeout of the resolve operation.
std::chrono::milliseconds resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::milliseconds connect_timeout = std::chrono::seconds{10};
/// Time interval of ping operations.
std::chrono::milliseconds ping_interval = std::chrono::seconds{1};
/// Time waited before trying a reconnection (see config::enable_reconnect).
std::chrono::milliseconds reconnect_interval = std::chrono::seconds{1};
/// The maximum size of read operations.
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)();
/// Whether to coalesce requests (see [pipelines](https://redis.io/topics/pipelining)).
bool coalesce_requests = true;
/// Enable internal events, see connection::async_receive_event.
bool enable_events = false;
/// Enable automatic reconnection (see also config::reconnect_interval).
bool enable_reconnect = false;
};
/// Events that are communicated by `connection::async_receive_event`.
enum class event {
/// Resolve operation was successful.
resolve,
/// Connect operation was successful.
connect,
/// Success sending AUTH and HELLO.
hello,
/// Used internally.
invalid
};
/** @brief Async operations exposed by this class.
*
* The operations listed below can be cancelled with the `cancel`
* member function.
*/
enum class operation {
/// `connection::async_exec` operations.
exec,
/// `connection::async_run` operations.
run,
/// `connection::async_receive_event` operations.
receive_event,
/// `connection::async_receive_push` operations.
receive_push,
};
/** \brief Constructor
*
* \param ex The executor.
* \param cfg Configuration parameters.
*/
connection(executor_type ex, config cfg = config{})
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, event_channel_{ex}
, cfg_{cfg}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
{
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
/** \brief Constructor
*
* \param ioc The io_context.
* \param cfg Configuration parameters.
*/
connection(boost::asio::io_context& ioc, config cfg = config{})
: connection(ioc.get_executor(), cfg)
{ }
/// Returns the executor.
auto get_executor() {return resv_.get_executor();}
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with `async_exec`.
*
* @li operation::run: Cancels `async_run`. Notice that the
* preferred way to close a connection is to ensure
* `config::enable_reconnect` is set to `false` and send `QUIT`
* to the server. An unresponsive Redis server will also cause
* the idle-checks to kick in and lead to
* `connection::async_run` completing with
* `error::idle_timeout`. Calling `cancel(operation::run)`
* directly should be seen as the last option.
*
* @li operation::receive_event: Cancels `connection::async_receive_event`.
*
* @param op The operation to be cancelled.
* @returns The number of operations that have been canceled.
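*
* A minimal usage sketch (assuming `connection` is an alias such as
* `aedis::connection<>` and `conn` is an instance of it):
*
* @code
* // Returns the number of cancelled async_exec operations.
* auto const n = conn.cancel(connection::operation::exec);
* @endcode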
*/
std::size_t cancel(operation op)
{
switch (op) {
case operation::exec:
{
for (auto& e: reqs_) {
e->stop = true;
e->timer.cancel_one();
}
auto const ret = reqs_.size();
reqs_ = {};
return ret;
}
case operation::run:
{
if (socket_)
socket_->close();
read_timer_.cancel();
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !ptr->req->close_on_run_completion;
});
// Cancel own pings if there are any waiting.
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop = true;
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
return 1U;
}
case operation::receive_event:
{
event_channel_.cancel();
return 1U;
}
case operation::receive_push:
{
push_channel_.cancel();
return 1U;
}
}
return 0;
}
/// Get the config object.
config& get_config() noexcept { return cfg_;}
/// Gets the config object.
config const& get_config() const noexcept { return cfg_;}
/** @name Asynchronous functions
*
* Each of these operations is individually cancellable.
**/
/// @{
/** @brief Starts communication with the Redis server asynchronously.
*
* This function performs the following steps
*
* @li Resolves the Redis host as of `async_resolve` with the
* timeout passed in `config::resolve_timeout`.
*
* @li Connects to one of the endpoints returned by the resolve
* operation with the timeout passed in `config::connect_timeout`.
*
* @li Starts health checks with a timeout of twice
* the value of `config::ping_interval`. If no data is
* received during that time interval `connection::async_run` completes with
* `error::idle_timeout`.
*
* @li Starts the health-check operation that sends `PING`s to
* Redis with a frequency equal to `config::ping_interval`.
*
* @li Starts reading from the socket and executes all requests
* that have been started prior to this function call.
*
* @remark When a timeout occurs and config::enable_reconnect is
* set, this function will automatically try a reconnection
* without returning control to the user.
*
* For an example see echo_server.cpp.
*
* \param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code);
* @endcode
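*
* A minimal usage sketch (assumes a @c boost::asio::io_context named
* @c ioc):
*
* @code
* aedis::connection<> conn{ioc};
* conn.async_run([](boost::system::error_code ec) {
*    // Called when the run operation finally completes.
* });
* ioc.run();
* @endcode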
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<connection>{this}, token, resv_);
}
/** @brief Connects and executes a request asynchronously.
*
* Combines the other `async_run` overload with `async_exec` in a
* single function. This function is useful for users that want to
* send a single request to the server and then close the connection.
*
* \param req Request object.
* \param adapter Response adapter.
* \param token Asio completion token.
*
* For an example see intro.cpp. The completion token must have
* the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in bytes.
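*
* A minimal sketch (the @c conn object is illustrative and the
* response is discarded):
*
* @code
* resp3::request req;
* req.push("QUIT");
*
* conn.async_run(req, adapt(), [](auto ec, std::size_t n) {
*    // n is the number of bytes read from the socket.
* });
* @endcode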
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::runexec_op<connection, Adapter>
{this, &req, adapter}, token, resv_);
}
/** @brief Executes a command on the Redis server asynchronously.
*
* There is no need to synchronize multiple calls to this
* function as it keeps an internal queue.
*
* \param req Request object.
* \param adapter Response adapter.
* \param token Asio completion token.
*
* For an example see echo_server.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in
* bytes.
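*
* A minimal sketch (assumes @c conn is already being run with
* `async_run` somewhere else):
*
* @code
* resp3::request req;
* req.push("PING");
*
* conn.async_exec(req, adapt(), [](auto ec, std::size_t n) {
*    // Response ignored here; n is its size in bytes.
* });
* @endcode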
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
BOOST_ASSERT_MSG(req.size() <= adapter.supported_response_size(), "Request and adapter have incompatible sizes.");
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<connection, Adapter>{this, &req, adapter}, token, resv_);
}
/** @brief Receives server-side pushes asynchronously.
*
* Users that expect server pushes must call this function in a
* loop. If an unsolicited push arrives and there is no reader
* waiting for it, the connection will hang and eventually time out.
*
* \param adapter The response adapter.
* \param token The Asio completion token.
*
* For an example see subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the push in
* bytes.
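*
* A rough callback-based sketch of such a loop (the function name is
* illustrative):
*
* @code
* void receive_pushes(aedis::connection<>& conn)
* {
*    conn.async_receive_push(adapt(), [&conn](auto ec, std::size_t) {
*       if (!ec)
*          receive_pushes(conn); // Wait for the next push.
*    });
* }
* @endcode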
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
auto f =
[adapter]
(resp3::node<boost::string_view> const& node, boost::system::error_code& ec) mutable
{
adapter(std::size_t(-1), node, ec);
};
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::receive_push_op<connection, decltype(f)>{this, f}, token, resv_);
}
/** @brief Receives internal events.
*
* See the \c event enum for the list of events.
*
* \param token The Asio completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code, event);
* @endcode
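*
* A minimal sketch (assumes @c config::enable_events is set so that
* events are delivered):
*
* @code
* conn.async_receive_event([](auto ec, auto ev) {
*    // ev is a connection::event, e.g. event::connect or event::hello.
* });
* @endcode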
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_event(CompletionToken token = CompletionToken{})
{
return event_channel_.async_receive(token);
}
/// @}
private:
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
using push_channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
using event_channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, event)>;
struct req_info {
req_info(executor_type ex) : timer{ex} {}
timer_type timer;
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
bool written = false;
};
using reqs_type = std::deque<std::shared_ptr<req_info>>;
template <class T, class U> friend struct detail::receive_push_op;
template <class T> friend struct detail::reader_op;
template <class T> friend struct detail::writer_op;
template <class T> friend struct detail::ping_op;
template <class T> friend struct detail::run_op;
template <class T> friend struct detail::run_one_op;
template <class T, class U> friend struct detail::exec_op;
template <class T, class U> friend struct detail::exec_read_op;
template <class T, class U> friend struct detail::runexec_op;
template <class T> friend struct detail::connect_with_timeout_op;
template <class T> friend struct detail::resolve_with_timeout_op;
template <class T> friend struct detail::check_idle_op;
template <class T> friend struct detail::start_op;
template <class T> friend struct detail::send_receive_op;
template <class CompletionToken>
auto async_run_one(CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_one_op<connection>{this}, token, resv_);
}
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->written && ptr->req->size() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (socket_ != nullptr && socket_->is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
auto make_dynamic_buffer()
{ return boost::asio::dynamic_buffer(read_buffer_, cfg_.max_read_size); }
template <class CompletionToken>
auto async_resolve_with_timeout(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_with_timeout_op<connection>{this},
token, resv_);
}
template <class CompletionToken>
auto async_connect_with_timeout(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_with_timeout_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<connection>{this}, token, resv_.get_executor());
}
template <class CompletionToken>
auto writer(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<connection>{this}, token, resv_.get_executor());
}
template <class CompletionToken>
auto
async_start(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::start_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto async_ping(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto async_check_idle(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<connection>{this}, token, check_idle_timer_);
}
template <class Adapter, class CompletionToken>
auto async_exec_read(Adapter adapter, std::size_t cmds, CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<connection, Adapter>{this, adapter, cmds}, token, resv_);
}
void coalesce_requests()
{
// Coalesces all requests: copies the requests into the variables
// that won't be touched while async_write is suspended.
BOOST_ASSERT(write_buffer_.empty());
BOOST_ASSERT(!reqs_.empty());
auto const size = cfg_.coalesce_requests ? reqs_.size() : 1;
for (auto i = 0UL; i < size; ++i) {
write_buffer_ += reqs_.at(i)->req->payload();
cmds_ += reqs_.at(i)->req->size();
reqs_.at(i)->written = true;
}
}
// IO objects
resolver_type resv_;
std::shared_ptr<AsyncReadWriteStream> socket_;
timer_type ping_timer_;
timer_type check_idle_timer_;
timer_type writer_timer_;
timer_type read_timer_;
push_channel_type push_channel_;
event_channel_type event_channel_;
config cfg_;
std::string read_buffer_;
std::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
// Last time we received data.
time_point_type last_data_;
// The result of async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
resp3::request req_;
};
/** @brief Converts a connection event to a string.
* @relates connection
*/
template <class T>
char const* to_string(typename connection<T>::event e)
{
using event_type = typename connection<T>::event;
switch (e) {
case event_type::resolve: return "resolve";
case event_type::connect: return "connect";
case event_type::hello: return "hello";
case event_type::push: return "push";
case event_type::invalid: return "invalid";
default: BOOST_ASSERT_MSG(false, "to_string: unhandled event.");
}
}
/** @brief Writes a connection event to the stream.
* @relates connection
*/
template <class T>
std::ostream& operator<<(std::ostream& os, typename connection<T>::event e)
{
os << to_string(e);
return os;
}
} // aedis
#endif // AEDIS_CONNECTION_HPP

View File

@@ -0,0 +1,664 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_OPS_HPP
#define AEDIS_CONNECTION_OPS_HPP
#include <array>
#include <algorithm>
#include <boost/assert.hpp>
#include <boost/system.hpp>
#include <boost/asio/write.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/adapt.hpp>
#include <aedis/error.hpp>
#include <aedis/detail/net.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/exec.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/request.hpp>
#define HANDLER_LOCATION \
BOOST_ASIO_HANDLER_LOCATION((__FILE__, __LINE__, __func__))
namespace aedis {
namespace detail {
#include <boost/asio/yield.hpp>
template <class Conn>
struct connect_with_timeout_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
BOOST_ASSERT(conn->socket_ != nullptr);
conn->ping_timer_.expires_after(conn->cfg_.connect_timeout);
yield
aedis::detail::async_connect(*conn->socket_, conn->ping_timer_, conn->endpoints_, std::move(self));
self.complete(ec);
}
}
};
template <class Conn>
struct resolve_with_timeout_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::resolver::results_type res = {})
{
reenter (coro)
{
conn->ping_timer_.expires_after(conn->cfg_.resolve_timeout);
yield
aedis::detail::async_resolve(
conn->resv_, conn->ping_timer_,
conn->cfg_.host, conn->cfg_.port, std::move(self));
conn->endpoints_ = res;
self.complete(ec);
}
}
};
template <class Conn, class Adapter>
struct receive_push_op {
Conn* conn = nullptr;
Adapter adapter;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield
conn->push_channel_.async_receive(std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
BOOST_ASSERT(conn->socket_ != nullptr);
yield
resp3::async_read(*conn->socket_, conn->make_dynamic_buffer(), adapter, std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec, 0);
return;
}
read_size = n;
yield
conn->push_channel_.async_send({}, 0, std::move(self));
self.complete(ec, read_size);
return;
}
}
};
template <class Conn, class Adapter>
struct exec_read_op {
Conn* conn;
Adapter adapter;
std::size_t cmds = 0;
std::size_t read_size = 0;
std::size_t index = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
// Loop reading the responses to this request.
BOOST_ASSERT(!conn->reqs_.empty());
while (cmds != 0) {
BOOST_ASSERT(conn->cmds_ != 0);
//-----------------------------------
// If we detect a push in the middle of a request we have
// to hand it to the push consumer. To do that we need
// some data in the read buffer.
if (conn->read_buffer_.empty()) {
BOOST_ASSERT(conn->socket_ != nullptr);
yield
boost::asio::async_read_until(*conn->socket_, conn->make_dynamic_buffer(), "\r\n", std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec, 0);
return;
}
}
// If the next response is a push we have to hand it to
// the receive_push_op, wait for it to be done and continue.
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) {
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
// Notice we don't call cancel_run() as that is the
// responsibility of the receive_push_op.
self.complete(ec, 0);
return;
}
continue;
}
//-----------------------------------
yield
resp3::async_read(*conn->socket_, conn->make_dynamic_buffer(),
[i = index, adpt = adapter] (resp3::node<boost::string_view> const& nd, boost::system::error_code& ec) mutable { adpt(i, nd, ec); },
std::move(self));
++index;
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec, 0);
return;
}
read_size += n;
BOOST_ASSERT(cmds != 0);
--cmds;
BOOST_ASSERT(conn->cmds_ != 0);
--conn->cmds_;
}
self.complete({}, read_size);
}
}
};
template <class Conn, class Adapter>
struct exec_op {
using req_info_type = typename Conn::req_info;
Conn* conn = nullptr;
resp3::request const* req = nullptr;
Adapter adapter{};
std::shared_ptr<req_info_type> info = nullptr;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), conn->resv_.get_executor());
info->timer.expires_at(std::chrono::steady_clock::time_point::max());
info->req = req;
info->cmds = req->size();
info->stop = false;
conn->add_request_info(info);
yield
info->timer.async_wait(std::move(self));
BOOST_ASSERT(conn->socket_ != nullptr);
BOOST_ASSERT(!!ec);
if (info->stop) {
self.complete(ec, 0);
return;
}
BOOST_ASSERT(conn->socket_->is_open());
if (req->size() == 0) {
self.complete({}, 0);
return;
}
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->cmds, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
read_size = n;
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.pop_front();
if (conn->cmds_ == 0) {
conn->read_timer_.cancel_one();
if (!conn->reqs_.empty())
conn->writer_timer_.cancel_one();
} else {
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.front()->timer.cancel_one();
}
self.complete({}, read_size);
}
}
};
template <class Conn>
struct ping_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro) for (;;)
{
conn->ping_timer_.expires_after(conn->cfg_.ping_interval);
yield
conn->ping_timer_.async_wait(std::move(self));
BOOST_ASSERT(conn->socket_ != nullptr);
if (ec || !conn->socket_->is_open()) {
self.complete(ec);
return;
}
conn->req_.clear();
conn->req_.push("PING");
conn->req_.close_on_run_completion = true;
yield
conn->async_exec(conn->req_, adapt(), std::move(self));
if (ec) {
// Notice we don't report the error but let the idle check
// time out. It is enough to finish the op.
self.complete({});
return;
}
}
}
};
template <class Conn>
struct check_idle_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro) for (;;)
{
conn->check_idle_timer_.expires_after(2 * conn->cfg_.ping_interval);
yield
conn->check_idle_timer_.async_wait(std::move(self));
BOOST_ASSERT(conn->socket_ != nullptr);
if (ec || !conn->socket_->is_open()) {
// Notice this is not an error, it was requested from an
// external op.
self.complete({});
return;
}
auto const now = std::chrono::steady_clock::now();
if (conn->last_data_ + (2 * conn->cfg_.ping_interval) < now) {
conn->cancel(Conn::operation::run);
self.complete(error::idle_timeout);
return;
}
conn->last_data_ = now;
}
}
};
template <class Conn>
struct start_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 4> order = {}
, boost::system::error_code ec0 = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, boost::system::error_code ec3 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(token);},
[this](auto token) { return conn->writer(token);},
[this](auto token) { return conn->async_check_idle(token);},
[this](auto token) { return conn->async_ping(token);}
).async_wait(
boost::asio::experimental::wait_for_one_error(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
case 2: self.complete(ec2); break;
case 3: self.complete(ec3); break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Conn>
struct run_one_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro)
{
yield
conn->async_resolve_with_timeout(std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
if (conn->cfg_.enable_events) {
yield
conn->event_channel_.async_send({}, Conn::event::resolve, std::move(self));
if (ec) {
self.complete(ec);
return;
}
}
conn->socket_ = std::make_shared<typename Conn::next_layer_type>(conn->resv_.get_executor());
yield
conn->async_connect_with_timeout(std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
if (conn->cfg_.enable_events) {
yield
conn->event_channel_.async_send({}, Conn::event::connect, std::move(self));
if (ec) {
self.complete(ec);
return;
}
}
conn->req_.clear();
if (!std::empty(conn->cfg_.username) && !std::empty(conn->cfg_.password))
conn->req_.push("AUTH", conn->cfg_.username, conn->cfg_.password);
conn->req_.push("HELLO", "3");
conn->ping_timer_.expires_after(conn->cfg_.ping_interval);
yield
async_exec(
*conn->socket_,
conn->ping_timer_,
conn->req_,
adapter::adapt2(),
conn->make_dynamic_buffer(),
std::move(self)
);
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
if (conn->cfg_.enable_events) {
yield
conn->event_channel_.async_send({}, Conn::event::hello, std::move(self));
if (ec) {
self.complete(ec);
return;
}
}
conn->write_buffer_.clear();
conn->cmds_ = 0;
std::for_each(std::begin(conn->reqs_), std::end(conn->reqs_), [](auto const& ptr) {
return ptr->written = false;
});
yield
conn->async_start(std::move(self));
self.complete(ec);
}
}
};
template <class Conn>
struct run_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro) for(;;)
{
yield
conn->async_run_one(std::move(self));
if (!conn->cfg_.enable_reconnect) {
self.complete(ec);
return;
}
// Consider communicating the return of async_run_one as an
// event here.
conn->ping_timer_.expires_after(conn->cfg_.reconnect_interval);
yield
conn->ping_timer_.async_wait(std::move(self));
}
}
};
template <class Conn>
struct writer_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
while (!conn->reqs_.empty() && conn->cmds_ == 0 && conn->write_buffer_.empty()) {
conn->coalesce_requests();
yield
boost::asio::async_write(*conn->socket_, boost::asio::buffer(conn->write_buffer_), std::move(self));
if (ec) {
self.complete(ec);
return;
}
// We have to clear the payload right after the write op in
// order to use it as a flag that informs there is no
// ongoing write.
conn->write_buffer_.clear();
conn->cancel_push_requests();
}
if (conn->socket_->is_open()) {
yield
conn->writer_timer_.async_wait(std::move(self));
// The timer may be canceled either to stop the write op
// or to proceed to the next write; the difference between
// the two is that in the former case the socket will be
// closed first. We check for that below.
}
if (!conn->socket_->is_open()) {
// Notice this is not an error of the op: stopping was
// requested from the outside, so we complete with
// success.
self.complete({});
return;
}
}
}
};
template <class Conn>
struct reader_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
BOOST_ASSERT(conn->socket_->is_open());
yield
boost::asio::async_read_until(*conn->socket_, conn->make_dynamic_buffer(), "\r\n", std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
conn->last_data_ = std::chrono::steady_clock::now();
// We handle unsolicited events in the following cases:
//
// 1. The RESP3 type of the response is a push.
//
// 2. A non-push type is received with an empty request
// queue. I have noticed this is possible (e.g. -MISCONF).
// I expected these to have type push so we could distinguish
// them from responses to commands, but they arrive as
// simple errors. If we are lucky enough to receive them
// when the command queue is empty we can treat them as
// server pushes, otherwise it is impossible to handle
// them properly.
//
// 3. The request does not expect any response but we got
// one. This may happen if, for example, SUBSCRIBE is used
// with the wrong syntax.
//
BOOST_ASSERT(!conn->read_buffer_.empty());
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push
|| conn->reqs_.empty()
|| (!conn->reqs_.empty() && conn->reqs_.front()->cmds == 0)) {
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
self.complete(ec);
return;
}
} else {
BOOST_ASSERT(conn->cmds_ != 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front()->cmds != 0);
conn->reqs_.front()->timer.cancel_one();
yield
conn->read_timer_.async_wait(std::move(self));
if (!conn->socket_->is_open()) {
self.complete({});
return;
}
}
}
}
};
template <class Conn, class Adapter>
struct runexec_op {
Conn* conn;
resp3::request const* req = nullptr;
Adapter adapter;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, std::size_t n = 0)
{
reenter (coro)
{
req->close_on_run_completion = true;
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->async_run(token);},
[this](auto token) { return conn->async_exec(*req, adapter, token);}
).async_wait(
boost::asio::experimental::wait_for_one_error(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1: self.complete(ec2, n); break;
default: BOOST_ASSERT(false);
}
}
}
};
#include <boost/asio/unyield.hpp>
} // detail
} // aedis
#endif // AEDIS_CONNECTION_OPS_HPP

View File

@@ -0,0 +1,208 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_NET_HPP
#define AEDIS_NET_HPP
#include <array>
#include <boost/system.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/connect.hpp>
#include <boost/assert.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
namespace aedis {
namespace detail {
template <class Executor>
using conn_timer_t = boost::asio::basic_waitable_timer<std::chrono::steady_clock, boost::asio::wait_traits<std::chrono::steady_clock>, Executor>;
#include <boost/asio/yield.hpp>
template <
class Protocol,
class Executor,
class EndpointSequence
>
struct connect_op {
boost::asio::basic_socket<Protocol, Executor>* socket;
conn_timer_t<Executor>* timer;
EndpointSequence* endpoints;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, typename Protocol::endpoint const& ep = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
auto f = [](boost::system::error_code const&, typename Protocol::endpoint const&) { return true; };
return boost::asio::async_connect(*socket, *endpoints, f, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, ep); break;
case 1:
{
if (ec2)
self.complete({}, ep);
else
self.complete(error::connect_timeout, ep);
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Resolver, class Timer>
struct resolve_op {
Resolver* resv;
Timer* timer;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::asio::ip::tcp::resolver::results_type res = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resv->async_resolve(host.data(), port.data(), token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1, {});
return;
}
} break;
case 1:
{
if (!ec2) {
self.complete(error::resolve_timeout, {});
return;
}
} break;
default: BOOST_ASSERT(false);
}
self.complete({}, res);
}
}
};
template <class Channel>
struct send_receive_op {
Channel* channel;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro)
{
yield
channel->async_send(boost::system::error_code{}, 0, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield
channel->async_receive(std::move(self));
self.complete(ec, 0);
}
}
};
#include <boost/asio/unyield.hpp>
template <
class Protocol,
class Executor,
class EndpointSequence,
class CompletionToken = boost::asio::default_completion_token_t<Executor>
>
auto async_connect(
boost::asio::basic_socket<Protocol, Executor>& socket,
conn_timer_t<Executor>& timer,
EndpointSequence ep,
CompletionToken&& token = boost::asio::default_completion_token_t<Executor>{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, typename Protocol::endpoint const&)
>(connect_op<Protocol, Executor, EndpointSequence>
{&socket, &timer, &ep}, token, socket, timer);
}
template <
class Resolver,
class Timer,
class CompletionToken =
boost::asio::default_completion_token_t<typename Resolver::executor_type>
>
auto async_resolve(
Resolver& resv,
Timer& timer,
boost::string_view host,
boost::string_view port,
CompletionToken&& token = CompletionToken{})
{
// TODO: Use static_assert to check that Resolver::executor_type and
// Timer::executor_type are the same.
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, boost::asio::ip::tcp::resolver::results_type)
>(resolve_op<Resolver, Timer>{&resv, &timer, host, port}, token, resv, timer);
}
template <
class Channel,
class CompletionToken =
boost::asio::default_completion_token_t<typename Channel::executor_type>
>
auto async_send_receive(Channel& channel, CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(send_receive_op<Channel>{&channel}, token, channel);
}
} // detail
} // aedis
#endif // AEDIS_NET_HPP

96
include/aedis/error.hpp Normal file
View File

@@ -0,0 +1,96 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ERROR_HPP
#define AEDIS_ERROR_HPP
#include <boost/system/error_code.hpp>
namespace aedis {
/** \brief Generic errors.
* \ingroup any
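*
* These errors typically show up in the completion handler of \c
* connection::async_run, for example (sketch, @c conn is illustrative)
*
* \code
* conn.async_run([](boost::system::error_code ec) {
*    if (ec == aedis::error::idle_timeout) {
*       // The server stopped answering pings in time.
*    }
* });
* \endcode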
*/
enum class error
{
/// Resolve timeout.
resolve_timeout = 1,
/// Connect timeout.
connect_timeout,
/// Idle timeout.
idle_timeout,
/// Exec timeout.
exec_timeout,
/// Invalid RESP3 type.
invalid_data_type,
/// Can't parse the string as a number.
not_a_number,
/// Received less bytes than expected.
unexpected_read_size,
/// The maximum depth of a nested response was exceeded.
exceeeds_max_nested_depth,
/// Got non boolean value.
unexpected_bool_value,
/// Expected field value is empty.
empty_field,
/// Expects a simple RESP3 type but got an aggregate.
expects_resp3_simple_type,
/// Expects aggregate.
expects_resp3_aggregate,
/// Expects a map but got other aggregate.
expects_resp3_map,
/// Expects a set aggregate but got something else.
expects_resp3_set,
/// Nested response not supported.
nested_aggregate_unsupported,
/// Got RESP3 simple error.
simple_error,
/// Got RESP3 blob_error.
blob_error,
/// Aggregate container has incompatible size.
incompatible_size,
/// Not a double
not_a_double,
/// Got RESP3 null.
null
};
/** \internal
* \brief Creates a error_code object from an error.
* \param e Error code.
* \ingroup any
*/
boost::system::error_code make_error_code(error e);
} // aedis
namespace std {
template<>
struct is_error_code_enum<::aedis::error> : std::true_type {};
} // std
#endif // AEDIS_ERROR_HPP

View File

@@ -4,34 +4,44 @@
* accompanying file LICENSE.txt)
*/
#include <system_error>
#include <boost/assert.hpp>
#include <aedis/error.hpp>
namespace aedis {
namespace adapter {
namespace detail {
struct error_category_impl : boost::system::error_category {
char const* name() const noexcept override
{
return "aedis.adapter";
return "aedis";
}
std::string message(int ev) const override
{
switch(static_cast<error>(ev)) {
case error::expects_simple_type: return "Expects a simple RESP3 type.";
case error::expects_aggregate_type: return "Expects aggregate type.";
case error::expects_map_type: return "Expects map type.";
case error::expects_set_type: return "Expects set type.";
case error::resolve_timeout: return "Resolve operation timeout.";
case error::connect_timeout: return "Connect operation timeout.";
case error::idle_timeout: return "Idle timeout.";
case error::exec_timeout: return "Exec timeout.";
case error::invalid_data_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number.";
case error::unexpected_read_size: return "Unexpected read size.";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
case error::unexpected_bool_value: return "Unexpected bool value.";
case error::empty_field: return "Expected field value is empty.";
case error::expects_resp3_simple_type: return "Expects a resp3 simple type.";
case error::expects_resp3_aggregate: return "Expects resp3 aggregate.";
case error::expects_resp3_map: return "Expects resp3 map.";
case error::expects_resp3_set: return "Expects resp3 set.";
case error::nested_aggregate_unsupported: return "Nested aggregate unsupported.";
case error::simple_error: return "Got RESP3 simple-error.";
case error::blob_error: return "Got RESP3 blob-error.";
case error::incompatible_size: return "Aggregate container has incompatible size.";
case error::not_a_double: return "Not a double.";
case error::null: return "Got RESP3 null.";
default: BOOST_ASSERT(false);
default:
BOOST_ASSERT(false);
return "Aedis error.";
}
}
};
@@ -49,5 +59,4 @@ boost::system::error_code make_error_code(error e)
return boost::system::error_code{static_cast<int>(e), detail::category()};
}
} // adapter
} // aedis

View File

@@ -0,0 +1,176 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_EXEC_HPP
#define AEDIS_RESP3_EXEC_HPP
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/request.hpp>
namespace aedis {
namespace resp3 {
namespace detail {
#include <boost/asio/yield.hpp>
template <
class AsyncStream,
class Adapter,
class DynamicBuffer
>
struct exec_op {
AsyncStream* socket = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
std::size_t n_cmds = 0;
std::size_t size = 0;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro) for (;;)
{
if (req) {
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
if (ec || n_cmds == 0) {
self.complete(ec, n);
return;
}
req = nullptr;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
size += n;
if (--n_cmds == 0) {
self.complete(ec, size);
return;
}
}
}
};
template <
class AsyncStream,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &req, adapter, dbuf, req.size()}, token, socket);
}
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer
>
struct exec_with_timeout_op {
AsyncStream* socket = nullptr;
Timer* timer = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return detail::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1:
{
if (ec2)
self.complete({}, n);
else
self.complete(aedis::error::exec_timeout, 0);
} break;
default: BOOST_ASSERT(false);
}
}
}
};
#include <boost/asio/unyield.hpp>
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
Timer& timer,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_with_timeout_op<AsyncStream, Timer, Adapter, DynamicBuffer>
{&socket, &timer, &req, adapter, dbuf}, token, socket, timer);
}
} // detail
} // resp3
} // aedis
#endif // AEDIS_RESP3_EXEC_HPP

View File

@@ -13,7 +13,7 @@
#include <boost/assert.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/error.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/node.hpp>
namespace aedis {
@@ -193,7 +193,7 @@ public:
} break;
default:
{
ec = error::invalid_type;
ec = error::invalid_data_type;
return 0;
}
}

View File

@@ -37,7 +37,7 @@ private:
parser<ResponseAdapter> parser_;
std::size_t consumed_;
std::size_t buffer_size_;
boost::asio::coroutine coro_;
boost::asio::coroutine coro_{};
public:
parse_op(AsyncReadStream& stream, DynamicBuffer buf, ResponseAdapter adapter)

View File

@@ -0,0 +1,23 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/resp3/request.hpp>
namespace aedis {
namespace resp3 {
namespace detail {
bool has_push_response(boost::string_view cmd)
{
if (cmd == "SUBSCRIBE") return true;
if (cmd == "PSUBSCRIBE") return true;
if (cmd == "UNSUBSCRIBE") return true;
return false;
}
} // detail
} // resp3
} // aedis

View File

@@ -12,27 +12,25 @@ namespace resp3 {
char const* to_string(type t)
{
static char const* table[] =
{ "array"
, "push"
, "set"
, "map"
, "attribute"
, "simple_string"
, "simple_error"
, "number"
, "doublean"
, "boolean"
, "big_number"
, "null"
, "blob_error"
, "verbatim_string"
, "blob_string"
, "streamed_string_part"
, "invalid"
};
return table[static_cast<int>(t)];
switch (t) {
case type::array: return "array";
case type::push: return "push";
case type::set: return "set";
case type::map: return "map";
case type::attribute: return "attribute";
case type::simple_string: return "simple_string";
case type::simple_error: return "simple_error";
case type::number: return "number";
case type::doublean: return "doublean";
case type::boolean: return "boolean";
case type::big_number: return "big_number";
case type::null: return "null";
case type::blob_error: return "blob_error";
case type::verbatim_string: return "verbatim_string";
case type::blob_string: return "blob_string";
case type::streamed_string_part: return "streamed_string_part";
default: return "invalid";
}
}
std::ostream& operator<<(std::ostream& os, type t)

View File

@@ -39,10 +39,10 @@ struct node {
String value;
};
/** \brief Converts the node to a string.
* \ingroup any
/** @brief Converts the node to a string.
* @relates node
*
* \param in The node object.
* @param in The node object.
*/
template <class String>
std::string to_string(node<String> const& in)
@@ -60,8 +60,11 @@ std::string to_string(node<String> const& in)
return out;
}
/** \brief Compares a node for equality.
* \ingroup any
/** @brief Compares a node for equality.
* @relates node
*
* @param a Left hand side node object.
* @param b Right hand side node object.
*/
template <class String>
bool operator==(node<String> const& a, node<String> const& b)
@@ -72,15 +75,18 @@ bool operator==(node<String> const& a, node<String> const& b)
&& a.value == b.value;
};
/** \brief Writes the node string to the stream.
* \ingroup any
/** @brief Writes the node string to the stream.
* @relates node
*
* NOTE: Binary data is not converted to text.
* @param os Output stream.
* @param node Node object.
*
* \remark Binary data is not converted to text.
*/
template <class String>
std::ostream& operator<<(std::ostream& os, node<String> const& o)
std::ostream& operator<<(std::ostream& os, node<String> const& node)
{
os << to_string(o);
os << to_string(node);
return os;
}

View File

@@ -14,12 +14,12 @@
#include <boost/asio/read.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/yield.hpp>
namespace aedis {
namespace resp3 {
/** \brief Reads a complete response to a command synchronously.
/** \internal
* \brief Reads a complete response to a command synchronously.
* \ingroup any
*
* This function reads a complete response to a command or a
@@ -103,7 +103,8 @@ read(
return consumed;
}
/** \brief Reads a complete response to a command synchronously.
/** \internal
* \brief Reads a complete response to a command synchronously.
* \ingroup any
*
* Same as the error_code overload but throws on error.
@@ -127,7 +128,8 @@ read(
return n;
}
/** @brief Reads a complete response to a Redis command asynchronously.
/** \internal
* \brief Reads a complete response to a Redis command asynchronously.
* \ingroup any
*
* This function reads a complete response to a command or a
@@ -189,6 +191,4 @@ auto async_read(
} // resp3
} // aedis
#include <boost/asio/unyield.hpp>
#endif // AEDIS_RESP3_READ_HPP

View File

@@ -0,0 +1,340 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_REQUEST_HPP
#define AEDIS_RESP3_REQUEST_HPP
#include <string>
#include <tuple>
#include <boost/hana.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/type.hpp>
// NOTE: Consider detecting tuples in the types of the parameter pack
// to calculate the header size correctly.
//
// NOTE: For some commands like hset it would be a good idea to assert
// the value type is a pair.
namespace aedis {
namespace resp3 {
constexpr char separator[] = "\r\n";
/** @brief Adds a bulk to the request.
* @relates request
*
* This function is useful in serialization of your own data
* structures in a request. For example
*
* @code
* void to_bulk(std::string& to, mystruct const& obj)
* {
* auto const str = // Convert obj to a string.
* resp3::to_bulk(to, str);
* }
* @endcode
*
* @param to Storage on which data will be copied into.
* @param data Data that will be serialized and stored in @c to.
*
* See more in \ref requests-serialization.
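*
* Once such an overload is visible via ADL, the type can be passed
* directly to @c request::push, for example (sketch, @c mystruct is a
* user-defined type):
*
* @code
* mystruct obj;
* request req;
* req.push("SET", "key", obj);
* @endcode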
*/
template <class Request>
void to_bulk(Request& to, boost::string_view data)
{
auto const str = std::to_string(data.size());
to += to_code(type::blob_string);
to.append(std::cbegin(str), std::cend(str));
to += separator;
to.append(std::cbegin(data), std::cend(data));
to += separator;
}
template <class Request, class T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, boost::string_view{s});
}
namespace detail {
bool has_push_response(boost::string_view cmd);
template <class T>
struct add_bulk_impl {
template <class Request>
static void add(Request& to, T const& from)
{
using namespace aedis::resp3;
to_bulk(to, from);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
static void add(Request& to, std::pair<U, V> const& from)
{
using namespace aedis::resp3;
to_bulk(to, from.first);
to_bulk(to, from.second);
}
};
template <class ...Ts>
struct add_bulk_impl<boost::hana::tuple<Ts...>> {
template <class Request>
static void add(Request& to, boost::hana::tuple<Ts...> const& from)
{
using boost::hana::for_each;
// Fold expressions are C++17, so we use hana.
//(detail::add_bulk(*request_, args), ...);
for_each(from, [&](auto const& e) {
using namespace aedis::resp3;
to_bulk(to, e);
});
}
};
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
auto const str = std::to_string(size);
to += to_code(t);
to.append(std::cbegin(str), std::cend(str));
to += separator;
}
template <class Request, class T>
void add_bulk(Request& to, T const& data)
{
detail::add_bulk_impl<T>::add(to, data);
}
template <class>
struct bulk_counter;
template <class>
struct bulk_counter {
static constexpr auto size = 1U;
};
template <class T, class U>
struct bulk_counter<std::pair<T, U>> {
static constexpr auto size = 2U;
};
template <class Request>
void add_blob(Request& to, boost::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
}
template <class Request>
void add_separator(Request& to)
{
to += separator;
}
} // detail
/** @brief Creates Redis requests.
* \ingroup any
*
* A request is composed of one or more Redis commands and is
* referred to in the Redis documentation as a pipeline, see
* https://redis.io/topics/pipelining. For example
*
* @code
* request r;
* r.push("HELLO", 3);
* r.push("FLUSHALL");
* r.push("PING");
* r.push("PING", "key");
* r.push("QUIT");
* co_await async_write(socket, buffer(r));
* @endcode
*
* \remarks Non-string types will be converted to string by using \c
* to_bulk, which must be made available over ADL.
*/
class request {
public:
/// Returns the number of commands contained in this request.
std::size_t size() const noexcept { return commands_; }
/// Returns the request payload.
auto const& payload() const noexcept { return payload_;}
/// Clears the request preserving allocated memory.
void clear()
{
payload_.clear();
commands_ = 0;
}
/** @brief Appends a new command to the end of the request.
*
* For example
*
* \code
* request req;
* req.push("SET", "key", "some string", "EX", "2");
* \endcode
*
* will add the \c set command with value "some string" and an
* expiration of 2 seconds.
*
* \param cmd The command, e.g. a Redis or Sentinel command.
* \param args Command arguments.
*/
template <class... Ts>
void push(boost::string_view cmd, Ts const&... args)
{
using boost::hana::for_each;
using boost::hana::make_tuple;
using resp3::type;
auto constexpr pack_size = sizeof...(Ts);
detail::add_header(payload_, type::array, 1 + pack_size);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, make_tuple(args...));
if (!detail::has_push_response(cmd))
++commands_;
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a key and have a
* dynamic range of arguments. For example
*
* @code
* std::map<std::string, std::string> map
* { {"key1", "value1"}
* , {"key2", "value2"}
* , {"key3", "value3"}
* };
*
* request req;
* req.push_range2("HSET", "key", std::cbegin(map), std::cend(map));
* @endcode
*
* \param cmd The command, e.g. a Redis or Sentinel command.
* \param key The command key.
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class Key, class ForwardIterator>
void push_range2(boost::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
detail::add_header(payload_, type::array, 2 + size * distance);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, key);
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a dynamic number
* of arguments and don't have a key. For example
*
* \code
* std::set<std::string> channels
* { "channel1" , "channel2" , "channel3" }
*
* request req;
* req.push("SUBSCRIBE", std::cbegin(channels), std::cedn(channels));
* \endcode
*
* \param cmd The Redis command
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class ForwardIterator>
void push_range2(boost::string_view cmd, ForwardIterator begin, ForwardIterator end)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
detail::add_header(payload_, type::array, 1 + size * distance);
detail::add_bulk(payload_, cmd);
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking a range (i.e. push_range2).
*
* \param cmd Redis command.
* \param key Redis key.
* \param range Range to send, e.g. a \c std::map.
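*
* For example (sketch):
*
* \code
* std::map<std::string, std::string> map
* { {"key1", "value1"}
* , {"key2", "value2"}
* };
*
* request req;
* req.push_range("HSET", "key", map);
* \endcode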
*/
template <class Key, class Range>
void push_range(boost::string_view cmd, Key const& key, Range const& range)
{
using std::begin;
using std::end;
push_range2(cmd, key, begin(range), end(range));
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking a range (i.e. push_range2).
*
* \param cmd Redis command.
* \param range Range to send, e.g. a \c std::set.
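*
* For example (sketch):
*
* \code
* std::set<std::string> channels
* { "channel1", "channel2", "channel3" };
*
* request req;
* req.push_range("SUBSCRIBE", channels);
* \endcode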
*/
template <class Range>
void push_range(boost::string_view cmd, Range const& range)
{
using std::begin;
using std::end;
push_range2(cmd, begin(range), end(range));
}
mutable bool close_on_run_completion = false;
private:
std::string payload_;
std::size_t commands_ = 0;
};
} // resp3
} // aedis
#endif // AEDIS_RESP3_REQUEST_HPP

View File

@@ -0,0 +1,53 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_WRITE_HPP
#define AEDIS_RESP3_WRITE_HPP
#include <boost/asio/write.hpp>
namespace aedis {
namespace resp3 {
template<
class SyncWriteStream,
class Request
>
std::size_t write(SyncWriteStream& stream, Request const& req)
{
return boost::asio::write(stream, boost::asio::buffer(req.payload()));
}
template<
class SyncWriteStream,
class Request
>
std::size_t write(
SyncWriteStream& stream,
Request const& req,
boost::system::error_code& ec)
{
return boost::asio::write(stream, boost::asio::buffer(req.payload()), ec);
}
template<
class AsyncWriteStream,
class Request,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncWriteStream::executor_type>
>
auto async_write(
AsyncWriteStream& stream,
Request const& req,
CompletionToken&& token =
boost::asio::default_completion_token_t<typename AsyncWriteStream::executor_type>{})
{
return boost::asio::async_write(stream, boost::asio::buffer(req.payload()), token);
}
} // resp3
} // aedis
#endif // AEDIS_RESP3_WRITE_HPP

View File

@@ -4,10 +4,7 @@
* accompanying file LICENSE.txt)
*/
#include <aedis/impl/error.ipp>
#include <aedis/resp3/impl/request.ipp>
#include <aedis/resp3/impl/type.ipp>
#include <aedis/resp3/detail/impl/parser.ipp>
#include <aedis/resp3/impl/error.ipp>
#include <aedis/redis/impl/command.ipp>
#include <aedis/adapter/impl/error.ipp>
#include <aedis/sentinel/impl/command.ipp>
#include <aedis/generic/impl/error.ipp>

245
include/aedis/sync.hpp Normal file
View File

@@ -0,0 +1,245 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SYNC_HPP
#define AEDIS_SYNC_HPP
#include <condition_variable>
#include <aedis/resp3/request.hpp>
namespace aedis {
/** @brief A high level synchronous connection to Redis.
* @ingroup any
*
* This class keeps a healthy and thread-safe connection to the Redis
* instance where commands can be sent at any time. For more details,
* please see the documentation of each individual function.
*
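* A rough usage sketch (names are illustrative and threading details
* such as work guards and joining are omitted):
*
* @code
* boost::asio::io_context ioc{1};
* aedis::sync<aedis::connection<>> db{ioc.get_executor()};
*
* std::thread ioc_thread{[&]() { ioc.run(); }};
* std::thread run_thread{[&]() { db.run(); }}; // Keeps the connection alive.
*
* resp3::request req;
* req.push("PING");
* db.exec(req); // Blocks until the response arrives.
* @endcode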
*/
template <class Connection>
class sync {
public:
using event = typename Connection::event;
using config = typename Connection::config;
/** @brief Constructor
*
* @param ex Executor
* @param cfg Config options.
*/
template <class Executor>
sync(Executor ex, config cfg = config{}) : conn_{ex, cfg} { }
/** @brief Executes a request synchronously.
*
* This function calls `connection::async_exec` and waits
* for its completion.
*
* @param req The request.
* @param adapter The response adapter.
* @param ec Error code in case of error.
* @returns The number of bytes of the response.
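*
* For example (sketch, assumes @c db is a running @c sync object):
*
* @code
* resp3::request req;
* req.push("PING");
*
* boost::system::error_code ec;
* auto const n = db.exec(req, aedis::adapt(), ec);
* @endcode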
*/
template <class ResponseAdapter>
std::size_t
exec(resp3::request const& req, ResponseAdapter adapter, boost::system::error_code& ec)
{
sync_helper sh;
std::size_t res = 0;
auto f = [this, &ec, &res, &sh, &req, adapter]()
{
conn_.async_exec(req, adapter, [&sh, &res, &ec](auto const& ecp, std::size_t n) {
std::unique_lock ul(sh.mutex);
ec = ecp;
res = n;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
return res;
}
/** @brief Executes a command synchronously.
*
* This function calls `connection::async_exec` and waits for its
* completion.
*
* @param req The request.
* @param adapter The response adapter.
* @throws std::system_error in case of error.
* @returns The number of bytes of the response.
*/
template <class ResponseAdapter = detail::response_traits<void>::adapter_type>
std::size_t exec(resp3::request const& req, ResponseAdapter adapter = aedis::adapt())
{
boost::system::error_code ec;
auto const res = exec(req, adapter, ec);
if (ec)
throw std::system_error(ec);
return res;
}
/** @brief Receives server pushes synchronously.
*
* This function calls `connection::async_receive_push` and
* waits for its completion.
*
* @param adapter The response adapter.
* @param ec Error code in case of error.
* @returns The number of bytes received.
*/
template <class ResponseAdapter>
auto receive_push(ResponseAdapter adapter, boost::system::error_code& ec)
{
sync_helper sh;
std::size_t res = 0;
auto f = [this, &ec, &res, &sh, adapter]()
{
conn_.async_receive_push(adapter, [&ec, &res, &sh](auto const& e, std::size_t n) {
std::unique_lock ul(sh.mutex);
ec = e;
res = n;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
return res;
}
/** @brief Receives server pushes synchronously.
*
* This function calls `connection::async_receive_push` and
* waits for its completion.
*
* @param adapter The response adapter.
* @throws std::system_error in case of error.
* @returns The number of bytes received.
*/
template <class ResponseAdapter = aedis::detail::response_traits<void>::adapter_type>
auto receive_push(ResponseAdapter adapter = aedis::adapt())
{
boost::system::error_code ec;
auto const res = receive_push(adapter, ec);
if (ec)
throw std::system_error(ec);
return res;
}
/** @brief Receives events synchronously.
*
* This function calls `connection::async_receive_event` and
* waits for its completion.
*
* @param ec Error code in case of error.
* @returns The event received.
*/
auto receive_event(boost::system::error_code& ec)
{
sync_helper sh;
auto res = event::invalid;
auto f = [this, &ec, &res, &sh]()
{
conn_.async_receive_event([&ec, &res, &sh](auto const& ecp, event ev) {
std::unique_lock ul(sh.mutex);
ec = ecp;
res = ev;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
return res;
}
/** @brief Receives events synchronously.
*
* This function calls `connection::async_receive_event` and
* waits for its completion.
*
* @throws std::system_error in case of error.
* @returns The event received.
*/
auto receive_event()
{
boost::system::error_code ec;
auto const res = receive_event(ec);
if (ec)
throw std::system_error(ec);
return res;
}
/** @brief Calls \c async_run from the underlying connection.
*
* This function calls `connection::async_run` and waits for its
* completion.
*
* @param ec Error code.
*/
void run(boost::system::error_code& ec)
{
sync_helper sh;
auto f = [this, &ec, &sh]()
{
conn_.async_run([&ec, &sh](auto const& e) {
std::unique_lock ul(sh.mutex);
ec = e;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
}
/** @brief Calls \c async_run from the underlying connection.
*
* This function calls `connection::async_run` and waits for its
* completion.
*
* @throws std::system_error.
*/
void run()
{
boost::system::error_code ec;
run(ec);
if (ec)
throw std::system_error(ec);
}
private:
struct sync_helper {
std::mutex mutex;
std::condition_variable cv;
bool ready = false;
};
Connection conn_;
};
} // aedis
#endif // AEDIS_SYNC_HPP

View File

@@ -1,43 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <stdlib.h>
template <class T>
void expect_eq(T const& a, T const& b, std::string const& msg = "")
{
if (a == b) {
std::cout << "Success: " << msg << std::endl;
} else {
std::cout << "Error: " << msg << std::endl;
exit(EXIT_FAILURE);
}
}
template <class T>
void expect_error(boost::system::error_code a, T expected = {})
{
if (a == expected) {
if (a)
std::cout << "Success: " << a.message() << " (" << a.category().name() << ")" << std::endl;
} else {
std::cout << "Error: " << a.message() << " (" << a.category().name() << ")" << std::endl;
exit(EXIT_FAILURE);
}
}
template <class T>
void check_empty(T const& t)
{
if (t.empty()) {
//std::cout << "Success: " << std::endl;
} else {
std::cout << "Error: Not empty" << std::endl;
exit(EXIT_FAILURE);
}
}

tests/connection.cpp Normal file

@@ -0,0 +1,491 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
// TODO: Avoid usage of co_await so the tests also run on compilers
// that don't support it.
// TODO: Add reconnect test that kills the server and waits some
// seconds.
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
bool is_host_not_found(boost::system::error_code ec)
{
if (ec == net::error::netdb_errors::host_not_found) return true;
if (ec == net::error::netdb_errors::host_not_found_try_again) return true;
return false;
}
//----------------------------------------------------------------
// Tests whether resolve fails with the correct error.
BOOST_AUTO_TEST_CASE(test_resolve)
{
connection::config cfg;
cfg.host = "Atibaia";
cfg.port = "6379";
cfg.resolve_timeout = std::chrono::seconds{100};
net::io_context ioc;
connection db{ioc, cfg};
db.async_run([](auto ec) {
BOOST_TEST(is_host_not_found(ec));
});
ioc.run();
}
//----------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_connect)
{
connection::config cfg;
cfg.host = "127.0.0.1";
cfg.port = "1";
cfg.connect_timeout = std::chrono::seconds{100};
net::io_context ioc;
connection db{ioc, cfg};
db.async_run([](auto ec) {
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::connection_refused);
});
ioc.run();
}
//----------------------------------------------------------------
// Test if quit causes async_run to exit.
void test_quit1(connection::config const& cfg)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("QUIT");
db->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
ioc.run();
}
void test_quit2(connection::config const& cfg)
{
std::cout << "test_quit2" << std::endl;
request req;
req.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(test_quit)
{
connection::config cfg;
cfg.coalesce_requests = true;
test_quit1(cfg);
cfg.coalesce_requests = false;
test_quit1(cfg);
cfg.coalesce_requests = true;
test_quit2(cfg);
cfg.coalesce_requests = false;
test_quit2(cfg);
}
// Checks whether we get idle timeout when no push reader is set.
void test_missing_push_reader1(connection::config const& cfg)
{
std::cout << "test_missing_push_reader1" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("SUBSCRIBE", "channel");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
void test_missing_push_reader2(connection::config const& cfg)
{
std::cout << "test_missing_push_reader2" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req; // Wrong command syntax.
req.push("SUBSCRIBE");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
void test_missing_push_reader3(connection::config const& cfg)
{
std::cout << "test_missing_push_reader3" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req; // Wrong command syntax.
req.push("PING", "Message");
req.push("SUBSCRIBE");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(test_idle)
{
std::chrono::milliseconds ms{5000};
{
std::cout << "test_idle" << std::endl;
connection::config cfg;
cfg.resolve_timeout = std::chrono::seconds{1};
cfg.connect_timeout = std::chrono::seconds{1};
cfg.ping_interval = std::chrono::seconds{1};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("CLIENT", "PAUSE", ms.count());
db->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
//----------------------------------------------------------------
// Since we have paused the server above, we have to wait until the
// server is responsive again, so as not to cause other tests to
// fail.
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->get_config().resolve_timeout = 2 * ms;
db->get_config().connect_timeout = 2 * ms;
db->get_config().ping_interval = 2 * ms;
request req;
req.push("QUIT");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> push_consumer1(std::shared_ptr<connection> db, bool& push_received)
{
{
auto [ec, ev] = co_await db->async_receive_push(adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_push(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
}
push_received = true;
}
net::awaitable<void> event_consumer1(std::shared_ptr<connection> db, bool& event_received)
{
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
auto const r = ev == connection::event::resolve;
BOOST_TEST(r);
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
auto const r = ev == connection::event::connect;
BOOST_TEST(r);
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
auto const r = ev == connection::event::hello;
BOOST_TEST(r);
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
}
event_received = true;
}
void test_push_is_received1(connection::config const& cfg)
{
std::cout << "test_push_is_received1" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->get_config().enable_events = true;
request req;
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
db->async_run(req, adapt(), [db](auto ec, auto){
BOOST_TEST(!ec);
db->cancel(connection::operation::receive_event);
db->cancel(connection::operation::receive_push);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(db, push_received),
net::detached);
bool event_received = false;
net::co_spawn(
ioc.get_executor(),
event_consumer1(db, event_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
BOOST_TEST(event_received);
}
void test_push_is_received2(connection::config const& cfg)
{
request req1;
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("PING", "Message2");
req3.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->get_config().enable_events = true;
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req3, adapt(), handler);
db->async_run([db](auto ec, auto...) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(connection::operation::receive_event);
db->cancel(connection::operation::receive_push);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(db, push_received),
net::detached);
bool event_received = false;
net::co_spawn(
ioc.get_executor(),
event_consumer1(db, event_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
BOOST_TEST(event_received);
}
net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
{
request req;
req.push("QUIT");
for (auto i = 0;;) {
auto ev = co_await db->async_receive_event(net::use_awaitable);
auto const r1 = ev == connection::event::resolve;
BOOST_TEST(r1);
ev = co_await db->async_receive_event(net::use_awaitable);
auto const r2 = ev == connection::event::connect;
BOOST_TEST(r2);
ev = co_await db->async_receive_event(net::use_awaitable);
auto const r3 = ev == connection::event::hello;
BOOST_TEST(r3);
co_await db->async_exec(req, adapt(), net::use_awaitable);
// Tests five reconnections and returns.
++i;
if (i == 5) {
db->get_config().enable_reconnect = false;
co_return;
}
}
co_return;
}
// Test whether the client works after a reconnect.
void test_reconnect()
{
std::cout << "Start: test_reconnect" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc.get_executor());
db->get_config().enable_events = true;
db->get_config().enable_reconnect = true;
db->get_config().reconnect_interval = std::chrono::milliseconds{100};
net::co_spawn(ioc, test_reconnect_impl(db), net::detached);
db->async_run([](auto ec) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
ioc.run();
std::cout << "End: test_reconnect()" << std::endl;
}
net::awaitable<void>
push_consumer3(std::shared_ptr<connection> db)
{
for (;;)
co_await db->async_receive_push(adapt(), net::use_awaitable);
}
// Test many subscribe requests.
void test_push_many_subscribes(connection::config const& cfg)
{
std::cout << "test_push_many_subscribes" << std::endl;
request req0;
req0.push("HELLO", 3);
request req1;
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("QUIT");
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->async_exec(req0, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req3, adapt(), handler);
db->async_run([db](auto ec, auto...) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(connection::operation::receive_push);
});
net::co_spawn(ioc.get_executor(), push_consumer3(db), net::detached);
ioc.run();
}
#endif
BOOST_AUTO_TEST_CASE(test_push)
{
connection::config cfg;
cfg.coalesce_requests = true;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
test_push_is_received1(cfg);
test_push_is_received2(cfg);
test_push_many_subscribes(cfg);
#endif
test_missing_push_reader1(cfg);
test_missing_push_reader3(cfg);
cfg.coalesce_requests = false;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
test_push_is_received1(cfg);
test_push_is_received2(cfg);
test_push_many_subscribes(cfg);
#endif
test_missing_push_reader2(cfg);
test_missing_push_reader3(cfg);
}


@@ -1,499 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <map>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
#include "check.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::node;
using aedis::redis::command;
using aedis::generic::make_serializer;
using aedis::adapter::adapt;
using aedis::adapter::adapter_t;
using node_type = aedis::resp3::node<std::string>;
using tcp = net::ip::tcp;
using client_type = aedis::generic::client<net::ip::tcp::socket, command>;
auto print_read = [](auto cmd, auto n)
{
std::cout << cmd << ": " << n << std::endl;
};
void test_resolve_error()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::netdb_errors::host_not_found);
};
net::io_context ioc;
client_type db(ioc.get_executor());
db.async_run("Atibaia", "6379", f);
ioc.run();
}
void test_connect_error()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::basic_errors::connection_refused);
};
net::io_context ioc;
client_type db(ioc.get_executor());
db.async_run("127.0.0.1", "1", f);
ioc.run();
}
struct receiver1 {
public:
receiver1(client_type& db) : db_{&db} {}
void on_read(command cmd, std::size_t)
{
// quit will be sent more than once. It doesn't matter.
db_->send(command::quit);
}
private:
client_type* db_;
};
// Test if a hello is automatically sent.
void test_hello()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::misc_errors::eof);
};
net::io_context ioc;
client_type db(ioc.get_executor());
receiver1 recv{db};
db.set_read_handler([&recv](command cmd, std::size_t n){recv.on_read(cmd, n);});
db.async_run("127.0.0.1", "6379", f);
ioc.run();
}
struct receiver2 {
public:
receiver2(client_type& db) : db_{&db} {}
void on_write(std::size_t)
{
// Notice this causes a loop, but since quit stops the
// connection it is not a problem.
db_->send(command::quit);
}
private:
client_type* db_;
};
// Tests whether a hello is automatically sent, but this time uses
// on_write to send the quit command. Notice quit will be sent twice.
void test_hello2()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::misc_errors::eof);
};
net::io_context ioc;
client_type db(ioc.get_executor());
receiver2 recv{db};
//db.set_read_handler(print_read);
db.set_write_handler([&recv](std::size_t n){recv.on_write(n);});
db.async_run("127.0.0.1", "6379", f);
ioc.run();
}
struct receiver3 {
public:
receiver3(client_type& db) : db_{&db} {}
void on_write(std::size_t)
{
// Notice this causes a loop.
db_->send(command::subscribe, "channel");
}
void on_push(std::size_t)
{
db_->send(command::quit);
}
private:
client_type* db_;
};
void test_push()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::misc_errors::eof);
};
net::io_context ioc;
client_type db(ioc.get_executor());
receiver3 recv{db};
db.set_write_handler([&recv](std::size_t n){recv.on_write(n);});
db.set_push_handler([&recv](std::size_t n){recv.on_push(n);});
db.async_run("127.0.0.1", "6379", f);
ioc.run();
}
struct receiver4 {
public:
receiver4(client_type& db) : db_{&db} {}
void on_read()
{
// Notice this causes a loop.
db_->send(command::subscribe, "channel");
}
void on_push()
{
db_->send(command::quit);
}
private:
client_type* db_;
};
void test_push2()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::misc_errors::eof);
};
net::io_context ioc;
client_type db(ioc.get_executor());
receiver4 recv{db};
db.set_read_handler([&recv](auto, auto){recv.on_read();});
db.set_push_handler([&recv](auto){recv.on_push();});
db.async_run("127.0.0.1", "6379", f);
ioc.run();
}
#include <boost/asio/yield.hpp>
struct receiver5 {
public:
int counter = 0;
receiver5(client_type& db)
: db_{&db}
, adapter_{adapt(counter)}
{}
void on_read(command) {}
void on_write()
{
if (counter == 0) {
// Avoid problems with previous runs.
db_->send(command::del, "receiver5-key");
db_->send(command::incr, "receiver5-key");
db_->send(command::quit);
}
if (counter == 1) {
db_->send(command::incr, "receiver5-key");
db_->send(command::quit);
}
}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
if (cmd == command::incr)
adapter_(nd, ec);
}
private:
client_type* db_;
adapter_t<int> adapter_;
};
template <class Receiver>
struct reconnect {
client_type db;
Receiver recv;
boost::asio::steady_timer timer;
net::coroutine coro;
reconnect(net::any_io_executor ex)
: db{ex}
, recv{db}
, timer{ex}
{
db.set_read_handler([this](auto cmd, auto){recv.on_read(cmd);});
db.set_write_handler([this](auto){recv.on_write();});
db.set_resp3_handler([this](auto a, auto b, auto c){recv.on_resp3(a, b, c);});
}
void on_event(boost::system::error_code ec)
{
reenter (coro) for (;;) {
yield db.async_run("127.0.0.1", "6379", [this](auto ec){ on_event(ec);});
expect_error(ec, net::error::misc_errors::eof);
expect_eq(recv.counter, 1, "Reconnect counter 1.");
yield db.async_run("127.0.0.1", "6379", [this](auto ec){ on_event(ec);});
expect_error(ec, net::error::misc_errors::eof);
expect_eq(recv.counter, 2, "Reconnect counter 2.");
yield db.async_run("127.0.0.1", "6379", [this](auto ec){ on_event(ec);});
expect_error(ec, net::error::misc_errors::eof);
expect_eq(recv.counter, 3, "Reconnect counter 3.");
return;
}
}
};
#include <boost/asio/unyield.hpp>
void test_reconnect()
{
net::io_context ioc;
reconnect<receiver5> rec{ioc.get_executor()};
rec.on_event({});
ioc.run();
}
struct receiver6 {
public:
int counter = 0;
receiver6(client_type& db)
: db_{&db}
, adapter_{adapt(counter)}
{}
void on_write() {}
void on_read(command cmd)
{
if (cmd == command::hello) {
db_->send(command::get, "receiver6-key");
if (counter == 0)
db_->send(command::del, "receiver6-key");
db_->send(command::incr, "receiver6-key");
db_->send(command::quit);
return;
}
}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
if (cmd == command::incr)
adapter_(nd, ec);
}
private:
client_type* db_;
adapter_t<int> adapter_;
};
void test_reconnect2()
{
net::io_context ioc;
reconnect<receiver6> rec{ioc.get_executor()};
rec.on_event({});
ioc.run();
}
struct receiver7 {
public:
int counter = 0;
receiver7(client_type& db)
: db_{&db}
, adapter_{adapt(counter)}
{}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
if (cmd == command::incr)
adapter_(nd, ec);
}
void on_write(std::size_t)
{
if (!std::exchange(sent_, true)) {
db_->send(command::del, "key");
db_->send(command::multi);
db_->send(command::ping, "aaa");
db_->send(command::incr, "key");
db_->send(command::ping, "bbb");
db_->send(command::discard);
db_->send(command::ping, "ccc");
db_->send(command::incr, "key");
db_->send(command::quit);
}
}
void on_read(command cmd, std::size_t)
{
}
private:
bool sent_ = false;
client_type* db_;
adapter_t<int> adapter_;
};
void test_discard()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::misc_errors::eof);
};
net::io_context ioc;
client_type db(ioc.get_executor());
receiver7 recv{db};
db.set_read_handler([&recv](auto cmd, std::size_t n){recv.on_read(cmd, n);});
db.set_write_handler([&recv](std::size_t n){recv.on_write(n);});
db.set_resp3_handler([&recv](auto a, auto b, auto c){recv.on_resp3(a, b, c);});
db.async_run("127.0.0.1", "6379", f);
ioc.run();
expect_eq(recv.counter, 1, "test_discard.");
}
struct receiver8 {
public:
receiver8(client_type& db) : db_{&db} {}
void on_write(std::size_t)
{
std::cout << "on_write" << std::endl;
if (!std::exchange(sent_, true)) {
db_->send(command::del, "key");
db_->send(command::client, "PAUSE", 5000);
}
}
private:
bool sent_ = false;
client_type* db_;
};
void test_idle()
{
auto f = [](auto ec)
{
expect_error(ec, aedis::generic::error::idle_timeout);
};
net::io_context ioc;
client_type::config cfg;
cfg.resolve_timeout = std::chrono::seconds{1};
cfg.connect_timeout = std::chrono::seconds{1};
cfg.read_timeout = std::chrono::seconds{1};
cfg.write_timeout = std::chrono::seconds{1};
cfg.idle_timeout = std::chrono::seconds{2};
client_type db(ioc.get_executor(), cfg);
receiver8 recv{db};
db.set_write_handler([&recv](std::size_t n){recv.on_write(n);});
db.async_run("127.0.0.1", "6379", f);
ioc.run();
}
struct receiver9 {
public:
bool ping = false;
receiver9(client_type& db) : db_{&db} , adapter_{adapt(counter_)} {}
void on_resp3(command cmd, node<boost::string_view> const& nd, boost::system::error_code& ec)
{
if (cmd == command::incr)
adapter_(nd, ec);
}
void on_push(std::size_t) {}
void on_write(std::size_t)
{
if (!std::exchange(sent_, true))
db_->send(command::del, "key");
db_->send(command::incr, "key");
db_->send(command::subscribe, "channel");
}
void on_read(command cmd, std::size_t)
{
db_->send(command::incr, "key");
db_->send(command::subscribe, "channel");
if (counter_ == 100000) {
std::cout << "Success: counter increase." << std::endl;
db_->send(command::quit);
}
if (cmd == command::ping)
ping = true;
}
private:
bool sent_ = false;
client_type* db_;
int counter_ = 0;
adapter_t<int> adapter_;
};
void test_no_ping()
{
auto f = [](auto ec)
{
expect_error(ec, net::error::misc_errors::eof);
};
net::io_context ioc;
client_type::config cfg;
cfg.idle_timeout = std::chrono::seconds{2};
client_type db(ioc.get_executor(), cfg);
auto recv = std::make_shared<receiver9>(db);
db.set_receiver(recv);
db.async_run("127.0.0.1", "6379", f);
ioc.run();
expect_eq(recv->ping, false, "No ping received.");
}
int main()
{
test_resolve_error();
test_connect_error();
test_hello();
test_hello2();
test_push();
test_push2();
test_reconnect();
test_reconnect2();
test_discard();
test_no_ping();
// Must come last as it sends a CLIENT PAUSE.
test_idle();
}


@@ -7,6 +7,7 @@
#include <map>
#include <iostream>
#include <optional>
#include <sstream>
#include <boost/system/errc.hpp>
#include <boost/asio/awaitable.hpp>
@@ -15,18 +16,17 @@
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/beast/_experimental/test/stream.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "check.hpp"
#include "config.h"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using test_stream = boost::beast::test::stream;
using aedis::adapter::adapt;
using aedis::adapter::adapt2;
using node_type = aedis::resp3::node<std::string>;
//-------------------------------------------------------------------
@@ -35,8 +35,8 @@ template <class Result>
struct expect {
std::string in;
Result expected;
std::string name;
boost::system::error_code ec;
std::string name; // Currently unused.
boost::system::error_code ec{};
};
template <class Result>
@@ -47,12 +47,13 @@ void test_sync(net::any_io_executor ex, expect<Result> e)
ts.append(e.in);
Result result;
boost::system::error_code ec;
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt(result), ec);
expect_error(ec, e.ec);
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt2(result), ec);
BOOST_CHECK_EQUAL(ec, e.ec);
if (e.ec)
return;
check_empty(rbuffer);
expect_eq(result, e.expected, e.name);
BOOST_TEST(rbuffer.empty());
auto const res = result == e.expected;
BOOST_TEST(res);
}
template <class Result>
@@ -74,19 +75,20 @@ public:
void run()
{
auto self = this->shared_from_this();
auto f = [self](auto ec, auto n)
auto f = [self](auto ec, auto)
{
expect_error(ec, self->data_.ec);
BOOST_CHECK_EQUAL(ec, self->data_.ec);
if (self->data_.ec)
return;
check_empty(self->rbuffer_);
expect_eq(self->result_, self->data_.expected, self->data_.name);
BOOST_TEST(self->rbuffer_.empty());
auto const res = self->result_ == self->data_.expected;
BOOST_TEST(res);
};
resp3::async_read(
ts_,
net::dynamic_buffer(rbuffer_),
adapt(result_),
adapt2(result_),
f);
}
};
@@ -97,8 +99,9 @@ void test_async(net::any_io_executor ex, expect<Result> e)
std::make_shared<async_test<Result>>(ex, e)->run();
}
void test_number(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_number)
{
net::io_context ioc;
boost::optional<int> ok;
ok = 11;
@@ -108,12 +111,13 @@ void test_number(net::io_context& ioc)
auto const in03 = expect<int>{":11\r\n", int{11}, "number.int"};
auto const in04 = expect<boost::optional<int>>{":11\r\n", ok, "number.optional.int"};
auto const in05 = expect<std::tuple<int>>{"*1\r\n:11\r\n", std::tuple<int>{11}, "number.tuple.int"};
auto const in06 = expect<boost::optional<int>>{"%11\r\n", boost::optional<int>{}, "number.optional.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_simple_type)};
auto const in07 = expect<std::set<std::string>>{":11\r\n", std::set<std::string>{}, "number.optional.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_set_type)};
auto const in08 = expect<std::unordered_set<std::string>>{":11\r\n", std::unordered_set<std::string>{}, "number.optional.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_set_type)};
auto const in09 = expect<std::map<std::string, std::string>>{":11\r\n", std::map<std::string, std::string>{}, "number.optional.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_map_type)};
auto const in10 = expect<std::unordered_map<std::string, std::string>>{":11\r\n", std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_map_type)};
auto const in11 = expect<std::list<std::string>>{":11\r\n", std::list<std::string>{}, "number.optional.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_aggregate_type)};
auto const in06 = expect<int>{"_\r\n", int{0}, "number.int", aedis::make_error_code(aedis::error::null)};
auto const in07 = expect<boost::optional<int>>{"%11\r\n", boost::optional<int>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_simple_type)};
auto const in08 = expect<std::set<std::string>>{":11\r\n", std::set<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in09 = expect<std::unordered_set<std::string>>{":11\r\n", std::unordered_set<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in10 = expect<std::map<std::string, std::string>>{":11\r\n", std::map<std::string, std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in11 = expect<std::unordered_map<std::string, std::string>>{":11\r\n", std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in12 = expect<std::list<std::string>>{":11\r\n", std::list<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_aggregate)};
auto ex = ioc.get_executor();
@@ -128,6 +132,7 @@ void test_number(net::io_context& ioc)
test_sync(ex, in09);
test_sync(ex, in10);
test_sync(ex, in11);
test_sync(ex, in12);
test_async(ex, in01);
test_async(ex, in02);
@@ -140,10 +145,13 @@ void test_number(net::io_context& ioc)
test_async(ex, in09);
test_async(ex, in10);
test_async(ex, in11);
test_async(ex, in12);
ioc.run();
}
void test_bool(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_bool)
{
net::io_context ioc;
boost::optional<bool> ok;
ok = true;
@@ -155,11 +163,11 @@ void test_bool(net::io_context& ioc)
auto const in13 = expect<boost::optional<bool>>{"#t\r\n", ok, "optional.int"};
// Error
auto const in01 = expect<boost::optional<bool>>{"#11\r\n", boost::optional<bool>{}, "bool.error", aedis::resp3::make_error_code(aedis::resp3::error::unexpected_bool_value)};
auto const in03 = expect<std::set<int>>{"#t\r\n", std::set<int>{}, "bool.error", aedis::adapter::make_error_code(aedis::adapter::error::expects_set_type)};
auto const in04 = expect<std::unordered_set<int>>{"#t\r\n", std::unordered_set<int>{}, "bool.error", aedis::adapter::make_error_code(aedis::adapter::error::expects_set_type)};
auto const in05 = expect<std::map<int, int>>{"#t\r\n", std::map<int, int>{}, "bool.error", aedis::adapter::make_error_code(aedis::adapter::error::expects_map_type)};
auto const in06 = expect<std::unordered_map<int, int>>{"#t\r\n", std::unordered_map<int, int>{}, "bool.error", aedis::adapter::make_error_code(aedis::adapter::error::expects_map_type)};
auto const in01 = expect<boost::optional<bool>>{"#11\r\n", boost::optional<bool>{}, "bool.error", aedis::make_error_code(aedis::error::unexpected_bool_value)};
auto const in03 = expect<std::set<int>>{"#t\r\n", std::set<int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in04 = expect<std::unordered_set<int>>{"#t\r\n", std::unordered_set<int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in05 = expect<std::map<int, int>>{"#t\r\n", std::map<int, int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in06 = expect<std::unordered_map<int, int>>{"#t\r\n", std::unordered_map<int, int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto ex = ioc.get_executor();
@@ -182,10 +190,12 @@ void test_bool(net::io_context& ioc)
test_async(ex, in09);
test_async(ex, in10);
test_async(ex, in11);
ioc.run();
}
void test_streamed_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_streamed_string)
{
net::io_context ioc;
std::string const wire = "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
std::vector<node_type> e1a
@@ -210,10 +220,12 @@ void test_streamed_string(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
ioc.run();
}
void test_push(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_push)
{
net::io_context ioc;
std::string const wire = ">4\r\n+pubsub\r\n+message\r\n+some-channel\r\n+some message\r\n";
std::vector<node_type> e1a
@@ -236,10 +248,12 @@ void test_push(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_map(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_map)
{
net::io_context ioc;
using map_type = std::map<std::string, std::string>;
using mmap_type = std::multimap<std::string, std::string>;
using umap_type = std::unordered_map<std::string, std::string>;
@@ -249,6 +263,7 @@ void test_map(net::io_context& ioc)
using op_vec_type = boost::optional<std::vector<std::string>>;
using tuple_type = std::tuple<std::string, std::string, std::string, std::string, std::string, std::string, std::string, std::string>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "%4\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n";
std::vector<node_type> expected_1a
@@ -319,8 +334,10 @@ void test_map(net::io_context& ioc)
auto const in07 = expect<op_map_type>{wire, expected_1d, "map.optional.map"};
auto const in08 = expect<op_vec_type>{wire, expected_1e, "map.optional.vector"};
auto const in09 = expect<std::tuple<op_map_type>>{"*1\r\n" + wire, std::tuple<op_map_type>{expected_1d}, "map.transaction.optional.map"};
auto const in10 = expect<int>{"%11\r\n", int{}, "map.invalid.int", aedis::adapter::make_error_code(aedis::adapter::error::expects_simple_type)};
auto const in10 = expect<int>{"%11\r\n", int{}, "map.invalid.int", aedis::make_error_code(aedis::error::expects_resp3_simple_type)};
auto const in11 = expect<tuple_type>{wire, e1f, "map.tuple"};
auto const in12 = expect<map_type>{wire2, map_type{}, "map.error", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in13 = expect<map_type>{"_\r\n", map_type{}, "map.null", aedis::make_error_code(aedis::error::null)};
auto ex = ioc.get_executor();
@@ -335,6 +352,8 @@ void test_map(net::io_context& ioc)
test_sync(ex, in09);
test_sync(ex, in00);
test_sync(ex, in11);
test_sync(ex, in12);
test_sync(ex, in13);
test_async(ex, in00);
test_async(ex, in01);
@@ -347,6 +366,9 @@ void test_map(net::io_context& ioc)
test_async(ex, in09);
test_async(ex, in00);
test_async(ex, in11);
test_async(ex, in12);
test_async(ex, in13);
ioc.run();
}
void test_attribute(net::io_context& ioc)
@@ -377,8 +399,9 @@ void test_attribute(net::io_context& ioc)
test_async(ex, in02);
}
void test_array(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_array)
{
net::io_context ioc;
char const* wire = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::vector<node_type> e1a
@@ -404,6 +427,8 @@ void test_array(net::io_context& ioc)
auto const in06 = expect<std::array<int, 3>>{wire, e1f, "array.array"};
auto const in07 = expect<std::list<int>>{wire, e1g, "array.list"};
auto const in08 = expect<std::deque<int>>{wire, e1h, "array.deque"};
auto const in09 = expect<std::vector<int>>{"_\r\n", std::vector<int>{}, "array.vector", aedis::make_error_code(aedis::error::null)};
auto const in10 = expect<std::list<int>>{"_\r\n", std::list<int>{}, "array.list", aedis::make_error_code(aedis::error::null)};
auto ex = ioc.get_executor();
@@ -415,6 +440,7 @@ void test_array(net::io_context& ioc)
test_sync(ex, in06);
test_sync(ex, in07);
test_sync(ex, in08);
test_sync(ex, in09);
test_async(ex, in01);
test_async(ex, in02);
@@ -424,10 +450,13 @@ void test_array(net::io_context& ioc)
test_async(ex, in06);
test_async(ex, in07);
test_async(ex, in08);
test_async(ex, in09);
ioc.run();
}
void test_set(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_set)
{
net::io_context ioc;
using set_type = std::set<std::string>;
using mset_type = std::multiset<std::string>;
using uset_type = std::unordered_set<std::string>;
@@ -435,7 +464,9 @@ void test_set(net::io_context& ioc)
using vec_type = std::vector<std::string>;
using op_vec_type = boost::optional<std::vector<std::string>>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "~6\r\n+orange\r\n+apple\r\n+one\r\n+two\r\n+three\r\n+orange\r\n";
std::vector<node_type> const expected1a
{ {resp3::type::set, 6UL, 0UL, {}}
, {resp3::type::simple_string, 1UL, 1UL, {"orange"}}
@@ -462,6 +493,7 @@ void test_set(net::io_context& ioc)
auto const in06 = expect<uset_type>{wire, e1c, "set.unordered_set"};
auto const in07 = expect<muset_type>{wire, e1g, "set.unordered_multiset"};
auto const in08 = expect<std::tuple<uset_type>>{"*1\r\n" + wire, std::tuple<uset_type>{e1c}, "set.tuple"};
auto const in09 = expect<set_type>{wire2, set_type{}, "set.error", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto ex = ioc.get_executor();
@@ -474,6 +506,7 @@ void test_set(net::io_context& ioc)
test_sync(ex, in06);
test_sync(ex, in07);
test_sync(ex, in08);
test_sync(ex, in09);
test_async(ex, in00);
test_async(ex, in01);
@@ -484,12 +517,15 @@ void test_set(net::io_context& ioc)
test_async(ex, in06);
test_async(ex, in07);
test_async(ex, in08);
test_async(ex, in09);
ioc.run();
}
void test_simple_error(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_simple_error)
{
auto const in01 = expect<node_type>{"-Error\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node"};
auto const in02 = expect<node_type>{"-\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty"};
net::io_context ioc;
auto const in01 = expect<node_type>{"-Error\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::make_error_code(aedis::error::simple_error)};
auto const in02 = expect<node_type>{"-\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::make_error_code(aedis::error::simple_error)};
auto ex = ioc.get_executor();
@@ -498,10 +534,12 @@ void test_simple_error(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_blob_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_blob_string)
{
net::io_context ioc;
std::string str(100000, 'a');
str[1000] = '\r';
str[1001] = '\n';
@@ -529,15 +567,17 @@ void test_blob_string(net::io_context& ioc)
test_async(ex, in02);
test_async(ex, in03);
test_async(ex, in04);
ioc.run();
}
void test_double(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_double)
{
// TODO: Add test for double.
net::io_context ioc;
auto const in01 = expect<node_type>{",1.23\r\n", node_type{resp3::type::doublean, 1UL, 0UL, {"1.23"}}, "double.node"};
auto const in02 = expect<node_type>{",inf\r\n", node_type{resp3::type::doublean, 1UL, 0UL, {"inf"}}, "double.node (inf)"};
auto const in03 = expect<node_type>{",-inf\r\n", node_type{resp3::type::doublean, 1UL, 0UL, {"-inf"}}, "double.node (-inf)"};
auto const in04 = expect<double>{",1.23\r\n", double{1.23}, "double.double"};
auto const in05 = expect<double>{",er\r\n", double{0}, "double.double", aedis::make_error_code(aedis::error::not_a_double)};
auto ex = ioc.get_executor();
@@ -545,17 +585,21 @@ void test_double(net::io_context& ioc)
test_sync(ex, in02);
test_sync(ex, in03);
test_sync(ex, in04);
test_sync(ex, in05);
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
test_async(ex, in04);
test_async(ex, in05);
ioc.run();
}
void test_blob_error(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_blob_error)
{
auto const in01 = expect<node_type>{"!21\r\nSYNTAX invalid syntax\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {"SYNTAX invalid syntax"}}, "blob_error"};
auto const in02 = expect<node_type>{"!0\r\n\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {}}, "blob_error.empty"};
net::io_context ioc;
auto const in01 = expect<node_type>{"!21\r\nSYNTAX invalid syntax\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {"SYNTAX invalid syntax"}}, "blob_error", aedis::make_error_code(aedis::error::blob_error)};
auto const in02 = expect<node_type>{"!0\r\n\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {}}, "blob_error.empty", aedis::make_error_code(aedis::error::blob_error)};
auto ex = ioc.get_executor();
@@ -564,10 +608,12 @@ void test_blob_error(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_verbatim_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_verbatim_string)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"=15\r\ntxt:Some string\r\n", node_type{resp3::type::verbatim_string, 1UL, 0UL, {"txt:Some string"}}, "verbatim_string"};
auto const in02 = expect<node_type>{"=0\r\n\r\n", node_type{resp3::type::verbatim_string, 1UL, 0UL, {}}, "verbatim_string.empty"};
@@ -578,12 +624,14 @@ void test_verbatim_string(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_big_number(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_big_number)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"(3492890328409238509324850943850943825024385\r\n", node_type{resp3::type::big_number, 1UL, 0UL, {"3492890328409238509324850943850943825024385"}}, "big_number.node"};
auto const in02 = expect<int>{"(\r\n", int{}, "big_number.error (empty field)", aedis::resp3::make_error_code(aedis::resp3::error::empty_field)};
auto const in02 = expect<int>{"(\r\n", int{}, "big_number.error (empty field)", aedis::make_error_code(aedis::error::empty_field)};
auto ex = ioc.get_executor();
@@ -592,10 +640,12 @@ void test_big_number(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_simple_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_simple_string)
{
net::io_context ioc;
boost::optional<std::string> ok1, ok2;
ok1 = "OK";
ok2 = "";
@@ -616,15 +666,17 @@ void test_simple_string(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
ioc.run();
}
void test_resp3(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_resp3)
{
auto const in01 = expect<int>{"s11\r\n", int{}, "number.error", aedis::resp3::make_error_code(aedis::resp3::error::invalid_type)};
auto const in02 = expect<int>{":adf\r\n", int{11}, "number.int", aedis::resp3::make_error_code(aedis::resp3::error::not_a_number)};
auto const in03 = expect<int>{":\r\n", int{}, "number.error (empty field)", aedis::resp3::make_error_code(aedis::resp3::error::empty_field)};
auto const in04 = expect<boost::optional<bool>>{"#\r\n", boost::optional<bool>{}, "bool.error", aedis::resp3::make_error_code(aedis::resp3::error::empty_field)};
auto const in05 = expect<std::string>{",\r\n", std::string{}, "double.error (empty field)", aedis::resp3::make_error_code(aedis::resp3::error::empty_field)};
net::io_context ioc;
auto const in01 = expect<int>{"s11\r\n", int{}, "number.error", aedis::make_error_code(aedis::error::invalid_data_type)};
auto const in02 = expect<int>{":adf\r\n", int{11}, "number.int", aedis::make_error_code(aedis::error::not_a_number)};
auto const in03 = expect<int>{":\r\n", int{}, "number.error (empty field)", aedis::make_error_code(aedis::error::empty_field)};
auto const in04 = expect<boost::optional<bool>>{"#\r\n", boost::optional<bool>{}, "bool.error", aedis::make_error_code(aedis::error::empty_field)};
auto const in05 = expect<std::string>{",\r\n", std::string{}, "double.error (empty field)", aedis::make_error_code(aedis::error::empty_field)};
auto ex = ioc.get_executor();
@@ -639,10 +691,12 @@ void test_resp3(net::io_context& ioc)
test_async(ex, in03);
test_async(ex, in04);
test_async(ex, in05);
ioc.run();
}
void test_null(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_null)
{
net::io_context ioc;
using op_type_01 = boost::optional<bool>;
using op_type_02 = boost::optional<int>;
using op_type_03 = boost::optional<std::string>;
@@ -684,34 +738,73 @@ void test_null(net::io_context& ioc)
test_async(ex, in07);
test_async(ex, in08);
test_async(ex, in09);
}
int main()
{
net::io_context ioc {1};
// Simple types.
test_simple_string(ioc);
test_simple_error(ioc);
test_blob_string(ioc);
test_blob_error(ioc);
test_number(ioc);
test_double(ioc);
test_bool(ioc);
test_null(ioc);
test_big_number(ioc);
test_verbatim_string(ioc);
// Aggregates.
test_array(ioc);
test_set(ioc);
test_map(ioc);
test_push(ioc);
test_streamed_string(ioc);
// RESP3
test_resp3(ioc);
ioc.run();
}
//-----------------------------------------------------------------------------------
void check_error(char const* name, aedis::error ev)
{
auto const ec = aedis::make_error_code(ev);
auto const& cat = ec.category();
BOOST_TEST(std::string(ec.category().name()) == name);
BOOST_TEST(!ec.message().empty());
BOOST_TEST(cat.equivalent(
static_cast<std::underlying_type<aedis::error>::type>(ev),
ec.category().default_error_condition(
static_cast<std::underlying_type<aedis::error>::type>(ev))));
BOOST_TEST(cat.equivalent(ec,
static_cast<std::underlying_type<aedis::error>::type>(ev)));
}
BOOST_AUTO_TEST_CASE(error)
{
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::connect_timeout);
check_error("aedis", aedis::error::idle_timeout);
check_error("aedis", aedis::error::exec_timeout);
check_error("aedis", aedis::error::invalid_data_type);
check_error("aedis", aedis::error::not_a_number);
check_error("aedis", aedis::error::unexpected_read_size);
check_error("aedis", aedis::error::exceeeds_max_nested_depth);
check_error("aedis", aedis::error::unexpected_bool_value);
check_error("aedis", aedis::error::empty_field);
check_error("aedis", aedis::error::expects_resp3_simple_type);
check_error("aedis", aedis::error::expects_resp3_aggregate);
check_error("aedis", aedis::error::expects_resp3_map);
check_error("aedis", aedis::error::expects_resp3_set);
check_error("aedis", aedis::error::nested_aggregate_unsupported);
check_error("aedis", aedis::error::simple_error);
check_error("aedis", aedis::error::blob_error);
check_error("aedis", aedis::error::incompatible_size);
check_error("aedis", aedis::error::not_a_double);
check_error("aedis", aedis::error::null);
}
std::string get_type_as_str(aedis::resp3::type t)
{
std::ostringstream ss;
ss << t;
return ss.str();
}
BOOST_AUTO_TEST_CASE(type)
{
BOOST_TEST(!get_type_as_str(aedis::resp3::type::array).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::push).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::set).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::map).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::attribute).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::simple_string).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::simple_error).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::number).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::doublean).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::boolean).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::big_number).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::null).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::blob_error).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::verbatim_string).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::blob_string).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::streamed_string_part).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::invalid).empty());
}

tests/low_level_sync.cpp Normal file

@@ -0,0 +1,53 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio/connect.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::request;
using aedis::adapter::adapt2;
using net::ip::tcp;
int main()
{
try {
net::io_context ioc;
tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp::socket socket{ioc};
net::connect(socket, res);
// Creates the request and writes to the socket.
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
resp3::write(socket, req);
// Responses
std::string buffer, resp;
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
resp3::read(socket, dbuffer);
resp3::read(socket, dbuffer, adapt2(resp));
resp3::read(socket, dbuffer);
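// With a local Redis server this prints "Ping: PONG" below, since
// PING without an argument replies with PONG.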
std::cout << "Ping: " << resp << std::endl;
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
exit(EXIT_FAILURE);
}
}


@@ -1,107 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <algorithm>
#include <cctype>
#include <boost/asio/connect.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::redis::command;
using aedis::generic::make_serializer;
using aedis::resp3::node;
using aedis::adapter::adapt;
using net::ip::tcp;
using net::write;
using net::buffer;
using net::dynamic_buffer;
std::string toupper(std::string s)
{
std::transform(std::begin(s), std::end(s), std::begin(s),
[](unsigned char c){ return std::toupper(c); });
return s;
}
std::vector<std::string>
get_cmd_names(std::vector<node<std::string>> const& resp)
{
if (resp.empty()) {
std::cerr << "Response is empty." << std::endl;
return {};
}
std::vector<std::string> ret;
for (auto i = 0ULL; i < resp.size(); ++i) {
if (resp.at(i).depth == 1)
ret.push_back(resp.at(i + 1).value);
}
std::sort(std::begin(ret), std::end(ret));
return ret;
}
void print_cmds_enum(std::vector<std::string> const& cmds)
{
std::cout << "enum class command {\n";
for (auto const& cmd : cmds) {
std::cout
<< " /// https://redis.io/commands/" << cmd << "\n"
<< " " << cmd << ",\n";
}
std::cout << " invalid\n};\n";
}
void print_cmds_strs(std::vector<std::string> const& cmds)
{
std::cout << " static char const* table[] = {\n";
for (auto const& cmd : cmds) {
std::cout << " \"" << toupper(cmd) << "\",\n";
}
std::cout << " };\n";
}
int main()
{
try {
net::io_context ioc;
tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp::socket socket{ioc};
net::connect(socket, res);
std::string request;
auto sr = make_serializer(request);
sr.push(command::hello, 3);
sr.push(command::command);
sr.push(command::quit);
write(socket, buffer(request));
std::vector<node<std::string>> resp;
std::string buffer;
resp3::read(socket, dynamic_buffer(buffer));
resp3::read(socket, dynamic_buffer(buffer), adapt(resp));
resp3::read(socket, dynamic_buffer(buffer));
auto const cmds = get_cmd_names(resp);
print_cmds_enum(cmds);
std::cout << "\n";
print_cmds_strs(cmds);
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}