mirror of https://github.com/boostorg/redis.git synced 2026-01-25 06:32:08 +00:00

Compare commits


56 Commits

Author SHA1 Message Date
Marcelo Zimbres
11807c82b7 Improves documentations of the connection class. 2022-08-21 12:50:29 +02:00
Marcelo Zimbres
24a215d78b First steps with cmake support. 2022-08-20 22:12:33 +02:00
Marcelo Zimbres
b7abe20703 CI fix. 2022-08-20 12:18:59 +02:00
Marcelo Zimbres
225095944c Commit of the following:
- Adds sync class the offer a thread-safe and synchronous API.
- Fixes documentation of adapt functions.
- Removes compose.hpp header.
- Adds test to aedis::error and resp3::type.
- Simplifies some code.
2022-08-20 11:56:31 +02:00
Marcelo Zimbres
a31d797e43 Moves sync functions from experimental to connection and improves code coverage. 2022-08-18 22:17:33 +02:00
Marcelo Zimbres
cca8d5d6dc Improvements in the examples, docs, sync functions and coverage. 2022-08-17 22:30:59 +02:00
Marcelo Zimbres
6c5bee6920 Fixes bug in the context of reconnecting and events. 2022-08-16 22:18:27 +02:00
Marcelo Zimbres
c4714d0037 Splits async_receive_event in two functions. 2022-08-15 22:45:55 +02:00
Marcelo Zimbres
38bf2395af Fix coverage and ports tests to boost.test. 2022-08-14 21:46:56 +02:00
Marcelo Zimbres
7511d6b4d8 Progress porting to boost.test. 2022-08-13 22:52:42 +02:00
Marcelo Zimbres
ddc2815fe5 Progress with coverage report. 2022-08-13 17:00:18 +02:00
Marcelo Zimbres
de6f5de655 Adds coverage file. 2022-08-12 22:43:37 +02:00
Marcelo Zimbres
8d454ada0e Simplifies the cancellation of some connection async_ functions. 2022-08-12 21:53:04 +02:00
Marcelo Zimbres
ebac88f2ca Improvementes in the CI script. 2022-08-07 18:44:28 +02:00
Marcelo Zimbres
d26ecb65ca Improvements in the docs. 2022-08-07 11:32:50 +02:00
Marcelo Zimbres
c57f97b8c1 Improvements in the examples. 2022-08-06 23:12:32 +02:00
Marcelo Zimbres
37ab1e7387 Support for reconnection. 2022-08-06 18:11:12 +02:00
Marcelo Zimbres
54d448cad4 Progresses with connection events. 2022-08-06 13:06:05 +02:00
Marcelo Zimbres
97428dedb3 Progresses with reconnection. 2022-08-04 23:58:04 +02:00
Marcelo Zimbres
83802f217a Prepares for events. 2022-08-03 21:40:43 +02:00
Marcelo Zimbres
08140f9186 Fixes async_exec function. 2022-08-02 21:52:34 +02:00
Marcelo Zimbres
3ddb017edb Adds automatic AUTH and HELLO. 2022-08-01 22:46:34 +02:00
Marcelo Zimbres
20328cd423 Don't cancel the push channel when async_run exits. 2022-08-01 21:50:38 +02:00
Marcelo Zimbres
6577ddbaab First steps. 2022-07-31 22:10:49 +02:00
Marcelo Zimbres
217d2bd87b Progresses with the synchronous exec functions. 2022-07-31 11:50:23 +02:00
Marcelo Zimbres
f96dd22153 Improves executor usage in sync wrapper. 2022-07-30 23:35:28 +02:00
Marcelo Zimbres
f1fd0cfa8c Removes warnings on g++. 2022-07-30 23:18:24 +02:00
Marcelo Zimbres
8728914109 Adds changelog, fixes CI file, improvements in the docs. 2022-07-30 09:31:11 +02:00
Marcelo Zimbres
e0041ac7ae Progresses with async_failover function. 2022-07-28 22:00:42 +02:00
Marcelo Zimbres
317a185eb0 Adds in the sync_wrapper class. 2022-07-27 22:18:00 +02:00
Marcelo Zimbres
aa81200a8f Adds assert to check the response tuple is compatible with the request size. 2022-07-26 22:10:31 +02:00
Marcelo Zimbres
55fc0e861c Fixes bug on reconnection. 2022-07-26 21:47:14 +02:00
Marcelo Zimbres
04271855b0 Adds example on how to use Aedis synchronously. 2022-07-25 22:53:03 +02:00
Marcelo Zimbres
700e0c823e First steps with the CI file. 2022-07-24 21:21:02 +02:00
Marcelo Zimbres
63c6465a4a Improvements in the docs and subscriber example with reconnection. 2022-07-24 16:33:13 +02:00
Marcelo Zimbres
c86422cf50 Moves files to include directory. 2022-07-24 00:03:19 +02:00
Marcelo Zimbres
0168ed5faf Fixes build for clang++-14,13,11. 2022-07-23 14:55:01 +02:00
Marcelo Zimbres
7bffa252f4 Improvements in the documentation. 2022-07-21 22:05:26 +02:00
Marcelo Zimbres
0bb65599c4 Simplifies the char_room example. 2022-07-21 21:35:52 +02:00
Marcelo Zimbres
edd538944f Uses the correct executor in the exec timer. 2022-07-19 22:01:09 +02:00
Marcelo Zimbres
42880e788b Simplifies aedis header. 2022-07-17 18:42:24 +02:00
Marcelo Zimbres
bcc3917174 Test improvements. 2022-07-17 10:47:12 +02:00
Marcelo Zimbres
b08dd63192 Updates benchmark doc. 2022-07-16 21:25:57 +02:00
Marcelo Zimbres
76b6106caa Fixes executor usage in connection class. 2022-07-16 21:21:13 +02:00
Marcelo Zimbres
ab68e8a31d Updates to a more recent Tokio version and uses single thread. 2022-07-16 20:00:36 +02:00
Marcelo Zimbres
2673557ce5 More corrections. 2022-07-16 14:30:16 +02:00
Marcelo Zimbres
2a302dcb65 Corrections to the benchmark document. 2022-07-16 14:25:38 +02:00
Marcelo Zimbres
ffc4230368 Fixes documentation. 2022-07-16 13:50:21 +02:00
Marcelo Zimbres
59b5d35672 Small corrections. 2022-07-16 12:33:02 +02:00
Marcelo Zimbres
835a1decf4 Progresses with benchmarks. 2022-07-16 11:03:48 +02:00
Marcelo Zimbres
3fb018ccc6 Some changes in the benchmarks. 2022-07-15 23:15:08 +02:00
Marcelo Zimbres
1fe4a87287 Adds go-redis 2022-07-14 22:13:58 +02:00
Marcelo Zimbres
70cdff41e0 Fixes some bugs. 2022-07-14 21:44:50 +02:00
Marcelo Zimbres
2edd9f3d87 Some improvements in the benchmarks. 2022-07-11 00:00:09 +02:00
Marcelo Zimbres
fa4181b197 New version. 2022-07-10 20:53:44 +02:00
Marcelo Zimbres
9e2cd8855e Small documentation improvements. 2022-07-10 19:19:42 +02:00
70 changed files with 3636 additions and 2349 deletions

.codecov.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
codecov:
max_report_age: off
require_ci_to_pass: yes
notify:
after_n_builds: 1
wait_for_ci: yes
ignore:
- "benchmarks/cpp/asio/*"
- "examples/*"
- "tests/*"
- "/usr/*"
- "**/boost/*"

.github/workflows/ci.yml (vendored, new file, 48 lines)

@@ -0,0 +1,48 @@
name: CI
on: [push, pull_request]
jobs:
posix:
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- { toolset: gcc, compiler: g++-10, install: g++-10, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++20' }
runs-on: ${{ matrix.os }}
env:
CXX: ${{ matrix.compiler }}
CXXFLAGS: -std=${{matrix.cxxstd}} -Wall -Wextra
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Autotools
run: sudo apt install automake
- name: Install compiler
run: sudo apt-get install -y ${{ matrix.install }}
- name: Install Redis
run: sudo apt-get install -y redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
platform_version: 22.04
- name: Configure
run: |
autoreconf -i
./configure --with-boost=${{ steps.install-boost.outputs.BOOST_ROOT }}
- name: Build
run: make
- name: Check
run: make check VERBOSE=1

.github/workflows/coverage.yml (vendored, new file, 50 lines)

@@ -0,0 +1,50 @@
name: Coverage
on:
push:
branches:
- master
jobs:
posix:
defaults:
run:
shell: bash
runs-on: ubuntu-22.04
env:
CXX: g++-11
CXXFLAGS: -std=c++20 -Wall -Wextra --coverage -fkeep-inline-functions -fkeep-static-functions -O0
LDFLAGS: --coverage
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Autotools
run: sudo apt install automake
- name: Install compiler
run: sudo apt-get install -y g++-11
- name: Install Redis
run: sudo apt-get install -y redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
platform_version: 22.04
- name: Configure
run: |
autoreconf -i
./configure --with-boost=${{ steps.install-boost.outputs.BOOST_ROOT }}
- name: Build
run: make
- name: Check
run: make check VERBOSE=1
# - name: Generate coverage report
# run: |
# lcov --base-directory . --directory tests/ --output-file aedis.info --capture
# lcov --remove aedis.info '/usr/*' "${{ steps.install-boost.outputs.BOOST_ROOT }}/include/boost/*" --output-file aedis.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
gcov: true
working_directory: ${{ env.GITHUB_WORKSPACE }}

CHANGELOG.md (new file, 102 lines)

@@ -0,0 +1,102 @@
# Changelog
## v1.0.0
* Adds experimental CMake support for Windows users.
* Adds a new class `aedis::sync` that wraps an `aedis::connection` in
a thread-safe and synchronous API. All free functions from
`sync.hpp` are now member functions of `aedis::sync`.
* Splits `aedis::connection::async_receive_event` into two functions: one
to receive events and another for server-side pushes, see
`aedis::connection::async_receive_push`.
* Removes collision between `aedis::adapter::adapt` and
`aedis::adapt`.
* Adds the `connection::operation` enum to replace the `cancel_*` member
functions with a single cancel function that takes the operations to be
cancelled as an argument (see the sketch after this list).
* Bugfix: fixes a bug when reconnecting from a state where the `connection`
object had unsent commands, which could cause `async_exec` to never
complete under certain conditions.
* Bugfix: documentation of the `adapt()` functions was missing from
Doxygen.
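A minimal sketch of how the new cancellation API described above might be used; the member-function name `cancel` follows from the entry, while the enumerator names and their mapping to the old `cancel_*` functions are assumptions, not taken from this changeset:

// Sketch only: operation::exec and operation::run are assumed enumerator
// names; the changelog states only that connection::operation replaces the
// old cancel_* member functions with a single cancel() taking the operation.
void cancel_pending(aedis::connection<>& conn)
{
   conn.cancel(aedis::connection<>::operation::exec); // roughly the old cancel_requests()
   conn.cancel(aedis::connection<>::operation::run);  // roughly the old cancel_run()
}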
## v0.3.0
* Adds `experimental::exec` and `receive_event` functions to offer a
thread-safe and synchronous way of executing requests across
threads. See `intro_sync.cpp` and `subscriber_sync.cpp` for
examples.
* `connection::async_read_push` was renamed to `async_receive_event`.
* `connection::async_receive_event` is now used to communicate
internal events to the user, such as resolve, connect, push, etc. For
examples see `subscriber.cpp` and `connection::event`.
* The `aedis` directory has been moved to `include` to look more
similar to Boost libraries. Users should now replace `-I/aedis-path`
with `-I/aedis-path/include` in the compiler flags.
* The `AUTH` and `HELLO` commands are now sent automatically. This change was
necessary to implement reconnection. The username and password
used in `AUTH` should be provided by the user in
`connection::config` (see the sketch after this list).
* Adds support for reconnection. See `connection::enable_reconnect`.
* Fixes a bug in the `connection::async_run(host, port)` overload
that was causing crashes on reconnection.
* Fixes the executor usage in the connection class. Before these
changes it imposed `any_io_executor` on users.
* `connection::async_receive_event` is no longer cancelled when
`connection::async_run` exits. This change makes user code simpler.
* The `connection::async_exec` overload taking host and port has been
removed. Use the other `connection::async_run` overload.
* The host and port parameters from `connection::async_run` have been
moved to `connection::config` to better support authentication and
failover.
* Many simplifications in the `chat_room` example.
* Fixes the build with clang compilers and makes some improvements to
the documentation.
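A sketch of what the configuration described in the entries above might look like; the field names below are assumptions, since the changelog states only that the `AUTH` credentials and the host/port now live in `connection::config` and that reconnection is enabled via `connection::enable_reconnect`:

// Sketch only: host, port, username and password are hypothetical field
// names; they do not appear anywhere in this changeset.
void configure(boost::asio::any_io_executor ex)
{
   aedis::connection<>::config cfg;
   cfg.host = "127.0.0.1";
   cfg.port = "6379";
   cfg.username = "aedis";
   cfg.password = "secret";

   aedis::connection<> conn{ex, cfg};
   conn.enable_reconnect(); // named in the v0.3.0 entry above
}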
## v0.2.1
* Fixes a bug that happens on very high load.
## v0.2.0
* Major rewrite of the high-level API. There is no longer any need to use the low-level API.
* No more callbacks: sending requests follows the Asio asynchronous model.
* Support for reconnection: Pending requests are not canceled when a connection is lost and are re-sent when a new one is established.
* The library no longer sends HELLO-3 on the user's behalf. This is important to support AUTH properly.
## v0.1.2
* Adds reconnect coroutine in the `echo_server` example.
* Corrects `client::async_wait_for_data` with `make_parallel_group` to launch the operation.
* Improvements in the documentation.
* Avoids dynamic memory allocation in the client class after reconnection.
## v0.1.1
* Improves the documentation and adds some features to the high-level client.
## v0.1.0
* Improvements in the design and documentation.
## v0.0.1
* First release to collect design feedback.

CMakeLists.txt (new file, 50 lines)

@@ -0,0 +1,50 @@
# At the moment the official build system is still autotools and this
# file is meant to support Aedis on Windows.
cmake_minimum_required(VERSION 3.14)
project(
Aedis
VERSION 1.0.0
DESCRIPTION "An async redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
)
add_library(aedis INTERFACE)
target_include_directories(aedis INTERFACE include)
find_package(Boost 1.79 REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
enable_testing()
include_directories(include)
add_executable(chat_room examples/chat_room.cpp)
add_executable(containers examples/containers.cpp)
add_executable(echo_server examples/echo_server.cpp)
add_executable(intro examples/intro.cpp)
add_executable(intro_sync examples/intro_sync.cpp)
add_executable(serialization examples/serialization.cpp)
add_executable(subscriber examples/subscriber.cpp)
add_executable(subscriber_sync examples/subscriber_sync.cpp)
add_executable(test_low_level tests/low_level.cpp)
add_executable(test_connection tests/connection.cpp)
add_executable(low_level_sync tests/low_level_sync.cpp)
add_test(containers containers)
add_test(intro intro)
add_test(intro_sync intro_sync)
add_test(serialization serialization)
add_test(test_low_level test_low_level)
add_test(test_connection test_connection)
add_test(low_level_sync low_level_sync)
include(GNUInstallDirs)
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/boost
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
FILES_MATCHING
PATTERN "*.hpp"
PATTERN "*.ipp"
)


@@ -1 +0,0 @@
See https://mzimbres.github.io/aedis/#using-aedis


@@ -6,24 +6,26 @@ DISTCHECK_CONFIGURE_FLAGS = CPPFLAGS="$(BOOST_CPPFLAGS) $(CPPFLAGS)" LDFLAGS="$(
AM_CPPFLAGS =
AM_CPPFLAGS += $(BOOST_CPPFLAGS)
#AM_CPPFLAGS += -I$(top_srcdir)/include
AM_CPPFLAGS += -I$(top_srcdir)/include
AM_LDFLAGS =
AM_LDFLAGS += -pthread
SUBDIRS = include
check_PROGRAMS =
check_PROGRAMS += intro_sync
check_PROGRAMS += low_level_sync
check_PROGRAMS += intro
check_PROGRAMS += intro_sync
check_PROGRAMS += containers
check_PROGRAMS += serialization
check_PROGRAMS += test_low_level
if HAVE_CXX20
check_PROGRAMS += test_high_level
endif
check_PROGRAMS += test_connection
EXTRA_PROGRAMS =
if HAVE_COROUTINES
EXTRA_PROGRAMS += subscriber
if HAVE_CXX20
EXTRA_PROGRAMS += subscriber_sync
EXTRA_PROGRAMS += echo_server
EXTRA_PROGRAMS += echo_server_direct
EXTRA_PROGRAMS += chat_room
@@ -36,49 +38,23 @@ CLEANFILES += $(EXTRA_PROGRAMS)
.PHONY: all
all: $(check_PROGRAMS) $(EXTRA_PROGRAMS)
intro_sync_SOURCES = $(top_srcdir)/tests/intro_sync.cpp
subscriber_SOURCES = $(top_srcdir)/examples/subscriber.cpp
low_level_sync_SOURCES = $(top_srcdir)/tests/low_level_sync.cpp
test_low_level_SOURCES = $(top_srcdir)/tests/low_level.cpp
intro_SOURCES = $(top_srcdir)/examples/intro.cpp
intro_sync_SOURCES = $(top_srcdir)/examples/intro_sync.cpp
containers_SOURCES = $(top_srcdir)/examples/containers.cpp
serialization_SOURCES = $(top_srcdir)/examples/serialization.cpp
if HAVE_CXX20
test_high_level_SOURCES = $(top_srcdir)/tests/high_level.cpp
test_connection_SOURCES = $(top_srcdir)/tests/connection.cpp
subscriber_sync_SOURCES = $(top_srcdir)/examples/subscriber_sync.cpp
if HAVE_COROUTINES
subscriber_SOURCES = $(top_srcdir)/examples/subscriber.cpp
chat_room_SOURCES = $(top_srcdir)/examples/chat_room.cpp
echo_server_SOURCES = $(top_srcdir)/examples/echo_server.cpp
echo_server_direct_SOURCES = $(top_srcdir)/benchmarks/cpp/asio/echo_server_direct.cpp
echo_server_client_SOURCES = $(top_srcdir)/benchmarks/cpp/asio/echo_server_client.cpp
endif
nobase_include_HEADERS =\
$(top_srcdir)/aedis/src.hpp\
$(top_srcdir)/aedis/error.hpp\
$(top_srcdir)/aedis/impl/error.ipp\
$(top_srcdir)/aedis/detail/net.hpp\
$(top_srcdir)/aedis/command.hpp\
$(top_srcdir)/aedis/impl/command.ipp\
$(top_srcdir)/aedis/connection.hpp\
$(top_srcdir)/aedis/adapt.hpp\
$(top_srcdir)/aedis/detail/connection_ops.hpp\
$(top_srcdir)/aedis/aedis.hpp\
$(top_srcdir)/aedis/adapter/detail/adapters.hpp\
$(top_srcdir)/aedis/adapter/adapt.hpp\
$(top_srcdir)/aedis/adapter/detail/response_traits.hpp\
$(top_srcdir)/aedis/resp3/node.hpp\
$(top_srcdir)/aedis/resp3/compose.hpp\
$(top_srcdir)/aedis/resp3/detail/read_ops.hpp\
$(top_srcdir)/aedis/resp3/detail/parser.hpp\
$(top_srcdir)/aedis/resp3/type.hpp\
$(top_srcdir)/aedis/resp3/read.hpp\
$(top_srcdir)/aedis/resp3/exec.hpp\
$(top_srcdir)/aedis/resp3/write.hpp\
$(top_srcdir)/aedis/resp3/request.hpp\
$(top_srcdir)/aedis/resp3/detail/impl/parser.ipp\
$(top_srcdir)/aedis/resp3/impl/type.ipp
nobase_noinst_HEADERS =\
$(top_srcdir)/examples/print.hpp\
$(top_srcdir)/tests/check.hpp
nobase_noinst_HEADERS = $(top_srcdir)/examples/print.hpp
TESTS = $(check_PROGRAMS)
@@ -88,22 +64,33 @@ EXTRA_DIST += $(top_srcdir)/doc/DoxygenLayout.xml
EXTRA_DIST += $(top_srcdir)/doc/aedis.css
EXTRA_DIST += $(top_srcdir)/doc/htmlfooter.html
EXTRA_DIST += $(top_srcdir)/doc/htmlheader.html
EXTRA_DIST += $(top_srcdir)/benchmarks/cpp/libuv/echo_server_direct.c
EXTRA_DIST += $(top_srcdir)/benchmarks/cpp/libuv/README.md
EXTRA_DIST += $(top_srcdir)/benchmarks/benchmarks.md
EXTRA_DIST += $(top_srcdir)/benchmarks/benchmarks.tex
EXTRA_DIST += $(top_srcdir)/benchmarks/c/libuv/echo_server_direct.c
EXTRA_DIST += $(top_srcdir)/benchmarks/c/libuv/README.md
EXTRA_DIST += $(top_srcdir)/benchmarks/go/echo_server_direct.go
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/package-lock.json
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/Cargo.lock
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct/echo_server_direct.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis/echo_server_over_redis.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/Cargo.toml
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/src/main.rs
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/Cargo.lock
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/Cargo.toml
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/src/main.rs
EXTRA_DIST += $(top_srcdir)/CMakeLists.txt
.PHONY: doc
doc:
rm -rf ../aedis-gh-pages/*
doxygen doc/Doxyfile
.PHONY: coverage
coverage:
lcov --base-directory . --directory tests/ --output-file aedis.info --capture
lcov --remove aedis.info '/usr/*' '/opt/boost_1_79_0/include/boost/*' --output-file aedis.info
genhtml --output-directory html aedis.info
.PHONY: bench
bench:
pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex


@@ -1 +1,20 @@
See https://mzimbres.github.io/aedis/
Branch | GH Actions | codecov.io |
:-------------: | ---------- | ---------- |
[`master`](https://github.com/mzimbres/aedis/tree/master) | [![CI](https://github.com/mzimbres/aedis/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/mzimbres/aedis/actions/workflows/ci.yml) | [![codecov](https://codecov.io/gh/mzimbres/aedis/branch/master/graph/badge.svg)](https://codecov.io/gh/mzimbres/aedis/branch/master)
## Aedis
An async redis client designed for performance and scalability
### License
Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
### More information
* See the official github-pages for documentation: https://mzimbres.github.io/aedis
### Installation
See https://mzimbres.github.io/aedis/#using-aedis


@@ -1,18 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_COMMAND_HPP
#define AEDIS_COMMAND_HPP
#include <boost/utility/string_view.hpp>
namespace aedis {
bool has_push_response(boost::string_view cmd);
} // aedis
#endif // AEDIS_COMMAND_HPP


@@ -1,472 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_HPP
#define AEDIS_CONNECTION_HPP
#include <vector>
#include <queue>
#include <limits>
#include <chrono>
#include <memory>
#include <type_traits>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <aedis/adapt.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
namespace aedis {
// https://redis.io/docs/reference/sentinel-clients
/** \brief A high level Redis connection.
* \ingroup any
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
using default_completion_token_type = boost::asio::default_completion_token_t<executor_type>;
/** @brief Configuration parameters.
*/
struct config {
/// Timeout of the resolve operation.
std::chrono::milliseconds resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::milliseconds connect_timeout = std::chrono::seconds{10};
/// Time interval between ping operations.
std::chrono::milliseconds ping_interval = std::chrono::seconds{1};
/// The maximum size allowed for read operations.
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)();
/// Whether to coalesce requests or not.
bool coalesce_requests = true;
};
/** \brief Constructor.
*
* \param ex The executor.
* \param cfg Configuration parameters.
*/
connection(boost::asio::any_io_executor ex, config cfg = config{})
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, cfg_{cfg}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
{
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
connection(boost::asio::io_context& ioc, config cfg = config{})
: connection(ioc.get_executor(), cfg)
{ }
/// Returns the executor.
auto get_executor() {return resv_.get_executor();}
/** @brief Starts communication with the Redis server asynchronously.
*
* This function performs the following steps
*
* \li Resolves the Redis host as of \c async_resolve with the
* timeout passed in connection::config::resolve_timeout.
*
* \li Connects to one of the endpoints returned by the resolve
* operation with the timeout passed in connection::config::connect_timeout.
*
* \li Starts the idle check operation with the timeout of twice
* the value of connection::config::ping_interval. If no data is
* received during that time interval \c async_run completes with
* error::idle_timeout.
*
* \li Starts the health check operation that sends command::ping
* to Redis with a frequency equal to
* connection::config::ping_interval.
*
* \li Starts reading from the socket and delivering events to the
* request started with \c async_exec or \c async_read_push.
*
* For an example see echo_server.cpp.
*
* \param host Redis address.
* \param port Redis port.
* \param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code);
* @endcode
*
* \return This function returns only when there is an error.
*/
template <class CompletionToken = default_completion_token_type>
auto
async_run(
boost::string_view host = "127.0.0.1",
boost::string_view port = "6379",
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<connection>{this, host, port}, token, resv_);
}
/** @brief Executes a request on the redis server.
*
* \param req Request object.
* \param adapter Response adapter.
* \param token Asio completion token.
*
* For an example see containers.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response that has
* just been read.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = default_completion_token_type>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<connection, Adapter>{this, &req, adapter}, token, resv_);
}
/** @brief Connects and executes a single request.
*
* Combines \c async_run and the other \c async_exec overload in a
* single function. This function is useful for users that want to
* send a single request to the server.
*
* \param host Address of the Redis server.
* \param port Port of the Redis server.
* \param req Request object.
* \param adapter Response adapter.
* \param token Asio completion token.
*
* For an example see intro.cpp. The completion token must have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response that has
* just been read.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = default_completion_token_type>
auto async_exec(
boost::string_view host,
boost::string_view port,
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::runexec_op<connection, Adapter>
{this, host, port, &req, adapter}, token, resv_);
}
/** @brief Receives Redis unsolicited events like pushes.
*
* Users that expect unsolicited events should call this function
* in a loop. If an unsolicited event comes in and there is no
* reader, the connection will hang and eventually time out.
*
* \param adapter The response adapter.
* \param token The Asio completion token.
*
* For an example see subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response that has
* just been read.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = default_completion_token_type>
auto async_read_push(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
auto f =
[adapter]
(resp3::node<boost::string_view> const& node, boost::system::error_code& ec) mutable
{
adapter(std::size_t(-1), node, ec);
};
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::read_push_op<connection, decltype(f)>{this, f}, token, resv_);
}
/** @brief Cancels all pending requests, causing their pending operations to return.
*
* \returns The number of requests that have been canceled.
*/
std::size_t cancel_requests()
{
for (auto& e: reqs_) {
e->stop = true;
e->timer.cancel_one();
}
auto const ret = reqs_.size();
reqs_ = {};
return ret;
}
/** @brief Closes the connection with the database.
*
* Calling this function will cause \c async_run to return. It is
* safe to try a reconnect after that, i.e. to call it again.
*
* Note however that the preferred way to close a connection is to
* send a \c quit command if you are actively closing it.
* Otherwise an unresponsive Redis server will cause the
* idle-checks to fail, which will also lead to \c async_run
* returning.
*
* @remark This function won't cancel pending requests, see
* \c cancel_requests.
*/
void cancel_run()
{
socket_->close();
read_timer_.cancel();
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
// TODO: How to avoid calling this here.
push_channel_.cancel();
// Cancel own pings if there is any waiting.
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !ptr->req->is_internal();
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop = true;
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
}
private:
struct req_info {
req_info(boost::asio::any_io_executor ex) : timer{ex} {}
boost::asio::steady_timer timer;
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
};
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
using reqs_type = std::deque<std::shared_ptr<req_info>>;
template <class T, class U> friend struct detail::read_push_op;
template <class T> friend struct detail::reader_op;
template <class T> friend struct detail::writer_op;
template <class T> friend struct detail::ping_op;
template <class T> friend struct detail::run_op;
template <class T, class U> friend struct detail::exec_op;
template <class T, class U> friend struct detail::exec_read_op;
template <class T, class U> friend struct detail::runexec_op;
template <class T> friend struct detail::connect_with_timeout_op;
template <class T> friend struct detail::resolve_with_timeout_op;
template <class T> friend struct detail::check_idle_op;
template <class T> friend struct detail::start_op;
void cancel_push_requests(typename reqs_type::iterator end)
{
auto point = std::stable_partition(std::begin(reqs_), end, [](auto const& ptr) {
return ptr->req->commands() != 0;
});
std::for_each(point, end, [](auto const& ptr) {
ptr->timer.cancel();
});
reqs_.erase(point, end);
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (socket_ != nullptr && socket_->is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
auto make_dynamic_buffer()
{ return boost::asio::dynamic_buffer(read_buffer_, cfg_.max_read_size); }
template <class CompletionToken>
auto
async_resolve_with_timeout(
boost::string_view host,
boost::string_view port,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_with_timeout_op<connection>{this, host, port},
token, resv_);
}
template <class CompletionToken>
auto async_connect_with_timeout(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_with_timeout_op<connection>{this}, token, resv_);
}
// Loops on async_read_with_timeout described above.
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<connection>{this}, token, resv_.get_executor());
}
template <class CompletionToken>
auto writer(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<connection>{this}, token, resv_.get_executor());
}
template <class CompletionToken>
auto
async_start(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::start_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto async_ping(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto async_check_idle(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<connection>{this}, token, check_idle_timer_);
}
template <class Adapter, class CompletionToken>
auto async_exec_read(Adapter adapter, CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<connection, Adapter>{this, adapter}, token, resv_);
}
void coalesce_requests()
{
// Coalesces all requests: copies the requests to the variables
// that won't be touched while async_write is suspended.
BOOST_ASSERT(write_buffer_.empty());
BOOST_ASSERT(!reqs_.empty());
auto const size = cfg_.coalesce_requests ? reqs_.size() : 1;
for (auto i = 0UL; i < size; ++i) {
write_buffer_ += reqs_.at(i)->req->payload();
cmds_ += reqs_.at(i)->req->commands();
}
}
using channel_type = boost::asio::experimental::channel<void(boost::system::error_code, std::size_t)>;
// IO objects
boost::asio::ip::tcp::resolver resv_;
std::shared_ptr<AsyncReadWriteStream> socket_;
boost::asio::steady_timer ping_timer_;
boost::asio::steady_timer check_idle_timer_;
boost::asio::steady_timer writer_timer_;
boost::asio::steady_timer read_timer_;
channel_type push_channel_;
config cfg_;
std::string read_buffer_;
std::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
// Last time we received data.
time_point_type last_data_;
// The result of async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
resp3::request req_;
};
} // aedis
#endif // AEDIS_CONNECTION_HPP
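The doc comments above fully specify the completion signatures, so a minimal usage sketch can be reconstructed from them. It is illustrative only and not part of this changeset; the include paths are assumptions based on the header layout listed in Makefile.am:

#include <iostream>
#include <boost/asio.hpp>
#include <aedis/connection.hpp>
#include <aedis/src.hpp> // assumption: implementation header, included in exactly one translation unit

int main()
{
   boost::asio::io_context ioc{1};
   aedis::connection<> conn{ioc.get_executor()};

   // async_run resolves, connects, starts the ping and idle checks and
   // completes only on error, with the documented signature
   // void(boost::system::error_code).
   conn.async_run("127.0.0.1", "6379", [](boost::system::error_code ec) {
      std::cout << "async_run finished: " << ec.message() << std::endl;
   });

   ioc.run();
}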


@@ -1,161 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_COMPOSE_HPP
#define AEDIS_RESP3_COMPOSE_HPP
#include <string>
#include <tuple>
#include <boost/hana.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/type.hpp>
namespace aedis {
namespace resp3 {
constexpr char separator[] = "\r\n";
/** @brief Adds a bulk to the request.
* @ingroup any
*
* This function is useful for serializing your own data
* structures into a request. For example
*
* @code
* void to_bulk(std::string& to, mystruct const& obj)
* {
* auto const str = // Convert obj to a string.
* resp3::to_bulk(to, str);
* }
* @endcode
*
* See more in \ref requests-serialization.
*/
template <class Request>
void to_bulk(Request& to, boost::string_view data)
{
auto const str = std::to_string(data.size());
to += to_code(type::blob_string);
to.append(std::cbegin(str), std::cend(str));
to += separator;
to.append(std::cbegin(data), std::cend(data));
to += separator;
}
template <class Request, class T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, boost::string_view{s});
}
namespace detail {
template <class T>
struct add_bulk_impl {
template <class Request>
static void add(Request& to, T const& from)
{
using namespace aedis::resp3;
to_bulk(to, from);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
static void add(Request& to, std::pair<U, V> const& from)
{
using namespace aedis::resp3;
to_bulk(to, from.first);
to_bulk(to, from.second);
}
};
template <class ...Ts>
struct add_bulk_impl<boost::hana::tuple<Ts...>> {
template <class Request>
static void add(Request& to, boost::hana::tuple<Ts...> const& from)
{
using boost::hana::for_each;
// Fold expressions are a C++17 feature, so we use hana.
//(resp3::add_bulk(*request_, args), ...);
for_each(from, [&](auto const& e) {
using namespace aedis::resp3;
to_bulk(to, e);
});
}
};
} // detail
/** \internal
* \brief Adds a resp3 header to the request.
* \ingroup any
*
* See mystruct.hpp for an example.
*/
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
auto const str = std::to_string(size);
to += to_code(t);
to.append(std::cbegin(str), std::cend(str));
to += separator;
}
/* Adds a resp3 bulk to the request.
*
* This function adds \c data as a bulk string to the request \c to.
*/
template <class Request, class T>
void add_bulk(Request& to, T const& data)
{
detail::add_bulk_impl<T>::add(to, data);
}
template <class>
struct bulk_counter;
template <class>
struct bulk_counter {
static constexpr auto size = 1U;
};
template <class T, class U>
struct bulk_counter<std::pair<T, U>> {
static constexpr auto size = 2U;
};
template <class Request>
void add_blob(Request& to, boost::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
}
/** \internal
* \brief Adds a separator to the request.
* \ingroup any
*
* See mystruct.hpp for an example.
*/
template <class Request>
void add_separator(Request& to)
{
to += separator;
}
} // resp3
} // aedis
#endif // AEDIS_RESP3_COMPOSE_HPP
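For illustration only (not part of this changeset, which removes compose.hpp): the helpers above write the RESP3 wire format directly into a string, so a serialized PING command can be produced as below. This assumes to_code maps type::array to '*' and type::blob_string to '$', as in RESP3:

#include <iostream>
#include <string>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/compose.hpp> // the header shown above, prior to its removal

int main()
{
   std::string buf;
   // Header of a one-element array followed by a blob string, giving
   // "*1\r\n$4\r\nPING\r\n".
   aedis::resp3::add_header(buf, aedis::resp3::type::array, 1);
   aedis::resp3::add_bulk(buf, boost::string_view{"PING"});
   std::cout << buf;
}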

benchmarks/benchmarks.md (new file, 87 lines)

@@ -0,0 +1,87 @@
# TCP echo server performance
This document benchmarks the performance of TCP echo servers I
implemented in different languages using different Redis clients. The
main motivations for choosing an echo server are
* Simple to implement and does not require expert-level knowledge in most languages.
* I/O bound: echo servers have very low CPU consumption in general
and are therefore excellent for measuring how a program handles concurrent requests.
* It simulates a typical backend very well in regard to concurrency.
I also imposed some constraints on the implementations:
* They should be simple enough and not require writing too much code.
* Favor the use of standard idioms and avoid optimizations that require expert-level knowledge.
* Avoid the use of complex machinery like connection and thread pools.
## No Redis
First I tested a pure TCP echo server, i.e. one that sends the messages
directly to the client without interacting with Redis. The result can
be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-direct.png)
The tests were performed with 1000 concurrent TCP connections on
localhost, where latency is 0.07ms on average on my machine. On
higher-latency networks the difference among libraries is expected to
decrease.
### Remarks
* I expected Libuv to have similar performance to Asio and Tokio.
* I did expect nodejs to come a little behind given that it is
JavaScript code. Otherwise I expected it to have performance similar
to Libuv, since that is the framework behind it.
* Go performance did not surprise me: decent and not that far behind nodejs.
The code used in the benchmarks can be found at
* [Asio](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/mzimbres/aedis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [this](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) Libuv example.
* [Tokio](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
## Echo over Redis
This is similar to the echo server described above but messages are
echoed by Redis and not by the echo-server itself, which acts
as a proxy between the client and the Redis server. The results
can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average; otherwise they use the same number of TCP connections
as the previous example.
### Remarks
As the reader can see, the Libuv and Rust tests are not depicted
in the graph. The reasons are:
* [redis-rs](https://github.com/redis-rs/redis-rs): This client
comes so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant.
I don't know for sure why it is so slow; I suppose it has
something to do with its lack of proper
[pipelining](https://redis.io/docs/manual/pipelining/) support.
In fact, the more TCP connections I launch, the worse its
performance gets.
* Libuv: I left it out because it would require too much work to
write and to achieve good performance. More specifically,
I would have to use hiredis and implement support for pipelining
manually.
The code used in the benchmarks can be found at
* [Aedis](https://github.com/mzimbres/aedis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
## Running the benchmarks
Run one of the echo-server programs in one terminal and the [echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp) in another, e.g. `./echo_server_client 1000 5000`, the invocation used for the first plot in `benchmarks.tex`.

benchmarks/benchmarks.tex (new file, 73 lines)

@@ -0,0 +1,73 @@
\documentclass{article}
\usepackage{pgfplots}
\pgfrealjobname{echo}
\pgfplotsset{compat=newest}
\begin{document}
\beginpgfgraphicnamed{echo-f0}
% time ./echo_server_client 1000 5000
\begin{tikzpicture}[scale=1.0]
\begin{axis}[
y dir=reverse,
%xbar stacked,
xbar, xmin=0,
%hide x axis,
bar shift=0pt,
width=15cm, height=6cm, enlarge y limits=0.5,
title={TCP Echo Server Performance},
xlabel={Seconds},
symbolic y coords={Asio,Tokio,Libuv,Nodejs,Go},
ytick=data,
%bar width=1cm,
nodes near coords,
nodes near coords align={horizontal},
]
\addplot coordinates {
(31.1,Asio)
(30.7,Tokio)
(43.6,Libuv)
(74.2,Nodejs)
(81.0,Go)
};
\end{axis}
\end{tikzpicture}
\endpgfgraphicnamed
\beginpgfgraphicnamed{echo-f1}
%debian2[0]$ time ./echo_server_client 1000 1000
%Go (1): 1.000s
%C++ (1): 0.07s
\begin{tikzpicture}[scale=1.0]
\begin{axis}[
y dir=reverse,
%xbar stacked,
xbar, xmin=0,
%hide x axis,
bar shift=0pt,
width=12cm, height=6cm, enlarge y limits=0.5,
title={TCP Echo Server Performance (over Redis)},
xlabel={Seconds},
symbolic y coords={Aedis,Rust-rs,Libuv,Node-redis,Go-redis},
ytick=data,
%bar width=1cm,
nodes near coords,
nodes near coords align={horizontal},
]
\addplot coordinates {
(12.6,Aedis)
(28.8,Node-redis)
(352.4,Go-redis)
};
%\addplot coordinates {
% (30.0,Asio)
% (90.6,Rust-rs)
% (0.0,Libuv)
% (68.9,Nodejs)
% (0.0,Go)
%};
\end{axis}
\end{tikzpicture}
\endpgfgraphicnamed
\end{document}


@@ -4,7 +4,7 @@
#include <uv.h>
#define DEFAULT_PORT 55555
#define DEFAULT_BACKLOG 128
#define DEFAULT_BACKLOG 1024
uv_loop_t *loop;
struct sockaddr_in addr;


@@ -1,26 +1,29 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
//
// echo_server.cpp
// ~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2022 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/write.hpp>
#include <cstdio>
#include <boost/asio.hpp>
namespace net = boost::asio;
namespace this_coro = net::this_coro;
using net::ip::tcp;
using net::awaitable;
using net::co_spawn;
using net::detached;
using net::use_awaitable;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
constexpr net::use_awaitable_t<executor_type> use_awaitable;
awaitable<void> echo(tcp::socket socket)
awaitable_type echo(tcp_socket socket)
{
try {
char data[1024];
@@ -33,20 +36,20 @@ awaitable<void> echo(tcp::socket socket)
}
}
awaitable<void> listener()
awaitable_type listener()
{
auto executor = co_await this_coro::executor;
tcp::acceptor acceptor(executor, {tcp::v4(), 55555});
auto ex = co_await this_coro::executor;
tcp_acceptor acceptor(ex, {tcp::v4(), 55555});
for (;;) {
tcp::socket socket = co_await acceptor.async_accept(use_awaitable);
co_spawn(executor, echo(std::move(socket)), detached);
tcp_socket socket = co_await acceptor.async_accept(use_awaitable);
co_spawn(ex, echo(std::move(socket)), detached);
}
}
int main()
{
try {
net::io_context io_context(1);
net::io_context io_context{BOOST_ASIO_CONCURRENCY_HINT_UNSAFE_IO};
co_spawn(io_context, listener(), detached);
io_context.run();
} catch (std::exception const& e) {


@@ -0,0 +1,54 @@
package main
import (
"context"
"github.com/go-redis/redis/v8"
"bufio"
"fmt"
"io"
"net"
"os"
)
var ctx = context.Background()
var rdb = redis.NewClient(&redis.Options{Addr: "db.occase.de:6379", Password: "", DB: 0,})
func echo(conn net.Conn) {
r := bufio.NewReader(conn)
for {
line, err := r.ReadBytes(byte('\n'))
switch err {
case nil:
break
case io.EOF:
default:
fmt.Println("ERROR", err)
}
err2 := rdb.Ping(ctx).Err()
if err2 != nil {
fmt.Println("ERROR", err2)
panic(err2)
}
conn.Write(line)
}
}
func main() {
l, err := net.Listen("tcp", "0.0.0.0:55555")
if err != nil {
fmt.Println("ERROR", err)
os.Exit(1)
}
for {
conn, err := l.Accept()
if err != nil {
fmt.Println("ERROR", err)
continue
}
go echo(conn)
}
}


@@ -0,0 +1,105 @@
import java.nio.*;
import java.nio.channels.*;
import java.net.*;
import java.util.*;
import java.io.IOException;
public class TcpEchoServer {
public static int DEFAULT_PORT = 55555;
public static void main(String[] args) {
int port;
try {
port = Integer.parseInt(args[0]);
}
catch (Exception ex) {
port = DEFAULT_PORT;
}
//System.out.println("Listening for connections on port " + port);
ServerSocketChannel serverChannel;
Selector selector;
try {
serverChannel = ServerSocketChannel.open( );
ServerSocket ss = serverChannel.socket( );
InetSocketAddress address = new InetSocketAddress(port);
ss.bind(address);
serverChannel.configureBlocking(false);
selector = Selector.open( );
serverChannel.register(selector, SelectionKey.OP_ACCEPT);
}
catch (IOException ex) {
ex.printStackTrace( );
return;
}
while (true) {
try {
selector.select( );
}
catch (IOException ex) {
ex.printStackTrace( );
break;
}
Set readyKeys = selector.selectedKeys( );
Iterator iterator = readyKeys.iterator( );
while (iterator.hasNext( )) {
SelectionKey key = (SelectionKey) iterator.next( );
iterator.remove( );
try {
if (key.isAcceptable( )) {
ServerSocketChannel server = (ServerSocketChannel ) key.channel( );
SocketChannel client = server.accept( );
//System.out.println("Accepted connection from " + client);
client.configureBlocking(false);
SelectionKey clientKey = client.register(
selector, SelectionKey.OP_WRITE | SelectionKey.OP_READ);
ByteBuffer buffer = ByteBuffer.allocate(100);
clientKey.attach(buffer);
//System.out.println(buffer.toString());
}
if (key.isReadable( )) {
SocketChannel client = (SocketChannel) key.channel( );
ByteBuffer output = (ByteBuffer) key.attachment( );
client.read(output);
}
if (key.isWritable( )) {
SocketChannel client = (SocketChannel) key.channel( );
ByteBuffer output = (ByteBuffer) key.attachment( );
output.flip( );
client.write(output);
output.compact( );
}
}
catch (IOException ex) {
key.cancel( );
try {
key.channel().close();
}
catch (IOException cex) {}
}
}
}
}
}


@@ -0,0 +1,2 @@
{
}


@@ -1,7 +1,7 @@
import { createClient } from 'redis';
import * as net from 'net';
const client = createClient();
const client = createClient({url: 'redis://db.occase.de:6379' });
client.on('error', (err) => console.log('Redis Client Error', err));
await client.connect();


@@ -0,0 +1,169 @@
{
"name": "echo_server_over_redis",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"redis": "^4.2.0"
}
},
"node_modules/@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
"integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
"dependencies": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/redis": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
"integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
"dependencies": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.2.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
},
"dependencies": {
"@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"requires": {}
},
"@redis/client": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
"integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
"requires": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
}
},
"@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"requires": {}
},
"@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"requires": {}
},
"@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"requires": {}
},
"@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"requires": {}
},
"cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw=="
},
"generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg=="
},
"redis": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
"integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
"requires": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.2.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
}
}


@@ -0,0 +1,6 @@
{
"type": "module",
"dependencies": {
"redis": "^4.2.0"
}
}


@@ -1,169 +0,0 @@
{
"name": "aedis",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"redis": "^4.1.0"
}
},
"node_modules/@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.1.0.tgz",
"integrity": "sha512-xO9JDIgzsZYDl3EvFhl6LC52DP3q3GCMUer8zHgKV6qSYsq1zB+pZs9+T80VgcRogrlRYhi4ZlfX6A+bHiBAgA==",
"dependencies": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/redis": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.1.0.tgz",
"integrity": "sha512-5hvJ8wbzpCCiuN1ges6tx2SAh2XXCY0ayresBmu40/SGusWHFW86TAlIPpbimMX2DFHOX7RN34G2XlPA1Z43zg==",
"dependencies": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.1.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
},
"dependencies": {
"@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"requires": {}
},
"@redis/client": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.1.0.tgz",
"integrity": "sha512-xO9JDIgzsZYDl3EvFhl6LC52DP3q3GCMUer8zHgKV6qSYsq1zB+pZs9+T80VgcRogrlRYhi4ZlfX6A+bHiBAgA==",
"requires": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
}
},
"@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"requires": {}
},
"@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"requires": {}
},
"@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"requires": {}
},
"@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"requires": {}
},
"cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw=="
},
"generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg=="
},
"redis": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.1.0.tgz",
"integrity": "sha512-5hvJ8wbzpCCiuN1ges6tx2SAh2XXCY0ayresBmu40/SGusWHFW86TAlIPpbimMX2DFHOX7RN34G2XlPA1Z43zg==",
"requires": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.1.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
}
}

View File

@@ -1,4 +0,0 @@
{
"type": "module",
"dependencies": { "redis": "^4.1.0" }
}

View File

@@ -2,6 +2,12 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -10,15 +16,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bytes"
version = "0.5.6"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
[[package]]
name = "cfg-if"
@@ -33,34 +33,6 @@ dependencies = [
"tokio",
]
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
dependencies = [
"bitflags",
"fuchsia-zircon-sys",
]
[[package]]
name = "fuchsia-zircon-sys"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
[[package]]
name = "futures-core"
version = "0.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3"
[[package]]
name = "hermit-abi"
version = "0.1.19"
@@ -70,44 +42,29 @@ dependencies = [
"libc",
]
[[package]]
name = "iovec"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
dependencies = [
"libc",
]
[[package]]
name = "kernel32-sys"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
dependencies = [
"winapi 0.2.8",
"winapi-build",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
[[package]]
name = "lock_api"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
]
[[package]]
@@ -118,76 +75,14 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "mio"
version = "0.6.23"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4"
checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf"
dependencies = [
"cfg-if 0.1.10",
"fuchsia-zircon",
"fuchsia-zircon-sys",
"iovec",
"kernel32-sys",
"libc",
"log",
"miow 0.2.2",
"net2",
"slab",
"winapi 0.2.8",
]
[[package]]
name = "mio-named-pipes"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656"
dependencies = [
"log",
"mio",
"miow 0.3.7",
"winapi 0.3.9",
]
[[package]]
name = "mio-uds"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0"
dependencies = [
"iovec",
"libc",
"mio",
]
[[package]]
name = "miow"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d"
dependencies = [
"kernel32-sys",
"net2",
"winapi 0.2.8",
"ws2_32-sys",
]
[[package]]
name = "miow"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21"
dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "net2"
version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae"
dependencies = [
"cfg-if 0.1.10",
"libc",
"winapi 0.3.9",
"wasi",
"windows-sys",
]
[[package]]
@@ -201,10 +96,39 @@ dependencies = [
]
[[package]]
name = "pin-project-lite"
version = "0.1.12"
name = "once_cell"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "pin-project-lite"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
[[package]]
name = "proc-macro2"
@@ -224,6 +148,21 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42"
dependencies = [
"bitflags",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "signal-hook-registry"
version = "1.4.0"
@@ -234,10 +173,20 @@ dependencies = [
]
[[package]]
name = "slab"
version = "0.4.6"
name = "smallvec"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32"
checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
[[package]]
name = "socket2"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "syn"
@@ -252,33 +201,30 @@ dependencies = [
[[package]]
name = "tokio"
version = "0.2.25"
version = "1.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092"
checksum = "57aec3cfa4c296db7255446efb4928a6be304b431a806216105542a67b6ca82e"
dependencies = [
"autocfg",
"bytes",
"fnv",
"futures-core",
"iovec",
"lazy_static",
"libc",
"memchr",
"mio",
"mio-named-pipes",
"mio-uds",
"num_cpus",
"once_cell",
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"slab",
"socket2",
"tokio-macros",
"winapi 0.3.9",
"winapi",
]
[[package]]
name = "tokio-macros"
version = "0.2.6"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a"
checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484"
dependencies = [
"proc-macro2",
"quote",
@@ -292,10 +238,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c"
[[package]]
name = "winapi"
version = "0.2.8"
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
@@ -307,12 +253,6 @@ dependencies = [
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-build"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
@@ -326,11 +266,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "ws2_32-sys"
version = "0.2.1"
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"winapi 0.2.8",
"winapi-build",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"

View File

@@ -6,4 +6,4 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "0.2", features = ["full"] }
tokio = { version = "1.0", features = ["full"] }

View File

@@ -1,9 +1,10 @@
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
#[tokio::main]
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut listener = TcpListener::bind("127.0.0.1:55555").await?;
let listener = TcpListener::bind("127.0.0.1:55555").await?;
loop {
let (mut socket, _) = listener.accept().await?;

View File

@@ -7,7 +7,7 @@ use std::sync::{Arc};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let listener = TcpListener::bind("127.0.0.1:55555").await?;
let client = redis::Client::open("redis://127.0.0.1/").unwrap();
let client = redis::Client::open("redis://db.occase.de/").unwrap();
let con = Arc::new(Mutex::new(client.get_async_connection().await?));
loop {

View File

@@ -1,9 +1,10 @@
AC_PREREQ([2.69])
AC_INIT([Aedis], [0.1.2], [mzimbres@gmail.com])
AC_INIT([Aedis], [1.0.0], [mzimbres@gmail.com])
AC_CONFIG_MACRO_DIR([m4])
#AC_CONFIG_SRCDIR([src/aedis.cpp])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_SRCDIR(include/aedis.hpp)
AM_INIT_AUTOMAKE([-Wall foreign])
AC_LANG(C++)
# Checks for programs.
AC_PROG_CXX
@@ -18,10 +19,32 @@ AC_CHECK_HEADER_STDBOOL
AC_TYPE_UINT64_T
AC_CHECK_TYPES([ptrdiff_t])
AX_CXX_COMPILE_STDCXX(14, , mandatory)
AX_CXX_COMPILE_STDCXX(20, , optional)
# This check has been stolen from Asio
AC_MSG_CHECKING([whether coroutines are enabled])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM(
[[#if defined(__clang__)]]
[[# if (__cplusplus >= 201703) && (__cpp_coroutines >= 201703)]]
[[# if __has_include(<experimental/coroutine>)]]
[[# define AEDIS_HAS_CO_AWAIT 1]]
[[# endif]]
[[# endif]]
[[#elif defined(__GNUC__)]]
[[# if (__cplusplus >= 201709) && (__cpp_impl_coroutine >= 201902)]]
[[# if __has_include(<coroutine>)]]
[[# define AEDIS_HAS_CO_AWAIT 1]]
[[# endif]]
[[# endif]]
[[#endif]]
[[#ifndef AEDIS_HAS_CO_AWAIT]]
[[# error coroutines not available]]
[[#endif]])],
[AC_MSG_RESULT([yes])
HAVE_COROUTINES=yes;],
[AC_MSG_RESULT([no])
HAVE_COROUTINES=no;])
AM_CONDITIONAL(HAVE_CXX20,[test x$HAVE_CXX20 == x1])
AM_CONDITIONAL(HAVE_COROUTINES,test x$HAVE_COROUTINES = xyes)
AC_CONFIG_FILES([Makefile doc/Doxyfile])
AC_CONFIG_FILES([Makefile include/Makefile doc/Doxyfile])
AC_OUTPUT

View File

@@ -44,7 +44,7 @@ PROJECT_NUMBER = "@PACKAGE_VERSION@"
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "High level Redis client library"
PROJECT_BRIEF = "High level Redis client"
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -823,7 +823,7 @@ WARN_LOGFILE =
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = aedis examples
INPUT = include benchmarks/benchmarks.md CHANGELOG.md examples
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses

View File

@@ -26,5 +26,5 @@ div.contents {
code
{
background-color:#f0e9ce;
background-color:#fffbeb;
}

View File

@@ -4,156 +4,93 @@
* accompanying file LICENSE.txt)
*/
#include <queue>
#include <vector>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
#include "unistd.h"
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
#if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using connection = aedis::connection<tcp_socket>;
using response_type = std::vector<aedis::resp3::node<std::string>>;
class user_session:
public std::enable_shared_from_this<user_session> {
public:
user_session(tcp_socket socket)
: socket_(std::move(socket))
, timer_(socket_.get_executor())
{ timer_.expires_at(std::chrono::steady_clock::time_point::max()); }
// Chat over redis pubsub. To test, run this program from different
// terminals and type messages to stdin. Use
//
// $ redis-cli monitor
//
// to monitor the message traffic.
void start(std::shared_ptr<connection> db)
{
co_spawn(socket_.get_executor(),
[self = shared_from_this(), db]{ return self->reader(db); },
net::detached);
co_spawn(socket_.get_executor(),
[self = shared_from_this()]{ return self->writer(); },
net::detached);
}
void deliver(std::string const& msg)
{
write_msgs_.push_back(msg);
timer_.cancel_one();
}
private:
net::awaitable<void> reader(std::shared_ptr<connection> db)
{
try {
std::string msg;
request req;
auto dbuffer = net::dynamic_buffer(msg, 1024);
for (;;) {
auto const n = co_await net::async_read_until(socket_, dbuffer, "\n");
req.push("PUBLISH", "channel", msg);
co_await db->async_exec(req);
req.clear();
msg.erase(0, n);
}
} catch (std::exception&) {
stop();
}
}
net::awaitable<void> writer()
{
try {
while (socket_.is_open()) {
if (write_msgs_.empty()) {
boost::system::error_code ec;
co_await timer_.async_wait(net::redirect_error(net::use_awaitable, ec));
} else {
co_await net::async_write(socket_, net::buffer(write_msgs_.front()));
write_msgs_.pop_front();
}
}
} catch (std::exception&) {
stop();
}
}
void stop()
{
socket_.close();
timer_.cancel();
}
tcp_socket socket_;
net::steady_timer timer_;
std::deque<std::string> write_msgs_;
};
using sessions_type = std::vector<std::shared_ptr<user_session>>;
net::awaitable<void>
reader(
std::shared_ptr<connection> db,
std::shared_ptr<sessions_type> sessions)
// Receives messages from other users.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
{
request req;
req.push("SUBSCRIBE", "channel");
co_await db->async_exec(req);
for (response_type resp;;) {
co_await db->async_read_push(adapt(resp));
for (auto& session: *sessions)
session->deliver(resp.at(3).value);
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
net::awaitable<void>
listener(
std::shared_ptr<tcp_acceptor> acc,
std::shared_ptr<connection> db,
std::shared_ptr<sessions_type> sessions)
// Subscribes to the channels when a new connection is established.
net::awaitable<void> event_receiver(std::shared_ptr<connection> db)
{
request req;
req.push("HELLO", 3);
co_await db->async_exec(req);
req.push("SUBSCRIBE", "chat-channel");
for (;;) {
auto socket = co_await acc->async_accept();
auto session = std::make_shared<user_session>(std::move(socket));
sessions->push_back(session);
session->start(db);
auto ev = co_await db->async_receive_event();
if (ev == connection::event::hello)
co_await db->async_exec(req);
}
}
auto handler =[](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
// Publishes messages to other users.
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> db)
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(in, net::dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "chat-channel", msg);
co_await db->async_exec(req);
msg.erase(0, n);
}
}
int main()
{
try {
net::io_context ioc{1};
stream_descriptor in{ioc, ::dup(STDIN_FILENO)};
// Redis client and receiver.
auto db = std::make_shared<connection>(ioc);
db->async_run("127.0.0.1", "6379", handler);
db->get_config().enable_events = true;
db->get_config().enable_reconnect = true;
auto sessions = std::make_shared<sessions_type>();
net::co_spawn(ioc, reader(db, sessions), net::detached);
co_spawn(ioc, publisher(in, db), net::detached);
co_spawn(ioc, push_receiver(db), net::detached);
co_spawn(ioc, event_receiver(db), net::detached);
db->async_run(net::detached);
// TCP acceptor.
auto endpoint = net::ip::tcp::endpoint{net::ip::tcp::v4(), 55555};
auto acc = std::make_shared<tcp_acceptor>(ioc, endpoint);
co_spawn(ioc, listener(acc, db, sessions), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
int main() {}
#endif // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)

View File

@@ -7,20 +7,18 @@
#include <map>
#include <vector>
#include <iostream>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using boost::optional;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::connection<>;
// $ redis-cli
// > ACL SETUSER mzimbres on >Jabuticaba ~* +@all
// OK
int main()
{
std::vector<int> vec
@@ -30,19 +28,16 @@ int main()
{{"key1", 10}, {"key2", 20}, {"key3", 30}};
request req;
req.push("AUTH", "mzimbres", "Jabuticaba");
req.push("HELLO", 3);
req.push_range("RPUSH", "rpush-key", vec);
req.push_range("HSET", "hset-key", map);
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1);
req.push("HGETALL", "hset-key");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
req.push("QUIT");
std::tuple<
aedis::ignore, // auth
aedis::ignore, // hello
aedis::ignore, // rpush
aedis::ignore, // hset
aedis::ignore, // multi
@@ -54,10 +49,11 @@ int main()
net::io_context ioc;
connection db{ioc};
db.async_exec("127.0.0.1", "6379", req, aedis::adapt(resp),
[](auto ec, auto) { std::cout << ec.message() << std::endl; });
db.async_run(req, adapt(resp), [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
print(std::get<0>(std::get<7>(resp)).value());
print(std::get<1>(std::get<7>(resp)).value());
print(std::get<0>(std::get<5>(resp)).value());
print(std::get<1>(std::get<5>(resp)).value());
}

View File

@@ -7,46 +7,43 @@
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
using connection = aedis::connection<tcp_socket>;
net::awaitable<void> echo_loop(tcp_socket socket, std::shared_ptr<connection> db)
awaitable_type echo_loop(tcp_socket socket, std::shared_ptr<connection> db)
{
try {
request req;
std::tuple<std::string> resp;
std::string buffer;
request req;
std::tuple<std::string> resp;
for (;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(resp));
co_await net::async_write(socket, net::buffer(std::get<0>(resp)));
std::get<0>(resp).clear();
req.clear();
buffer.erase(0, n);
}
} catch (std::exception const& e) {
std::cout << e.what() << std::endl;
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(resp));
co_await net::async_write(socket, net::buffer(std::get<0>(resp)));
std::get<0>(resp).clear();
req.clear();
buffer.erase(0, n);
}
}
net::awaitable<void> listener()
awaitable_type listener()
{
auto ex = co_await net::this_coro::executor;
auto db = std::make_shared<connection>(ex);
db->async_run("127.0.0.1", "6379", net::detached);
request req;
req.push("HELLO", 3);
co_await db->async_exec(req);
db->async_run(net::detached);
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
@@ -56,7 +53,7 @@ net::awaitable<void> listener()
int main()
{
try {
net::io_context ioc;
net::io_context ioc{BOOST_ASIO_CONCURRENCY_HINT_UNSAFE_IO};
co_spawn(ioc, listener(), net::detached);
ioc.run();
} catch (std::exception const& e) {

View File

@@ -7,7 +7,9 @@
#include <tuple>
#include <string>
#include <boost/asio.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
@@ -18,20 +20,19 @@ using connection = aedis::connection<>;
int main()
{
net::io_context ioc;
connection db{ioc};
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
net::io_context ioc;
connection db{ioc};
db.async_exec("127.0.0.1", "6379", req, adapt(resp),
[](auto ec, auto) { std::cout << ec.message() << std::endl; });
std::tuple<std::string, aedis::ignore> resp;
db.async_run(req, adapt(resp), [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
std::cout << std::get<1>(resp) << std::endl;
std::cout << std::get<0>(resp) << std::endl;
}

46
examples/intro_sync.cpp Normal file
View File

@@ -0,0 +1,46 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <thread>
#include <boost/asio/io_context.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using connection = aedis::sync<aedis::connection<>>;
int main()
{
try {
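// The io_context runs on thread t1 below while this (main) thread makes
// blocking calls through the sync wrapper; the work guard keeps ioc.run()
// from returning until we release it.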
net::io_context ioc{1};
auto work = net::make_work_guard(ioc);
std::thread t1{[&]() { ioc.run(); }};
connection conn{work.get_executor()};
std::thread t2{[&]() { boost::system::error_code ec; conn.run(ec); }};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
conn.exec(req, adapt(resp));
std::cout << "Response: " << std::get<0>(resp) << std::endl;
work.reset();
t1.join();
t2.join();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}

View File

@@ -17,8 +17,6 @@
namespace net = boost::asio;
using aedis::resp3::node;
using aedis::adapter::adapt;
using aedis::adapter::adapter_t;
void print_aggr(std::vector<aedis::resp3::node<std::string>>& v)
{
@@ -54,3 +52,11 @@ void print(std::string const& e)
std::cout << e << std::endl;
}
void print_push(std::vector<aedis::resp3::node<std::string>>& resp)
{
std::cout
<< "Push type: " << resp.at(1).value << "\n"
<< "Channel: " << resp.at(2).value << "\n"
<< "Message: " << resp.at(3).value << "\n"
<< std::endl;
}

View File

@@ -12,12 +12,15 @@
#include <string>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using namespace boost::json;
@@ -59,7 +62,7 @@ void to_bulk(std::string& to, user const& u)
}
// Deserializes
void from_bulk(user& u, boost::string_view sv, boost::system::error_code& ec)
void from_bulk(user& u, boost::string_view sv, boost::system::error_code&)
{
value jv = parse(sv);
u = value_to<user>(jv);
@@ -84,22 +87,20 @@ int main()
net::io_context ioc;
connection db{ioc};
// Request that sends the containers.
std::set<user> users
{ {"Joao", "56", "Brazil"}
, {"Serge", "60", "France"}
};
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
request req;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users);
req.push("SMEMBERS", "sadd-key");
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
db.async_exec("127.0.0.1", "6379", req, aedis::adapt(resp),
[](auto ec, auto) { std::cout << ec.message() << std::endl; });
db.async_run(req, adapt(resp), [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();

View File

@@ -7,8 +7,12 @@
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
@@ -18,46 +22,63 @@ using aedis::resp3::node;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using connection = aedis::connection<tcp_socket>;
/* In this example we send a subscription to a channel and start
* reading server side messages indefinitely.
/* This example will subscribe and read pushes indefinitely.
*
* After starting the example you can test it by sending messages with
* redis-cli like this
* To test, send messages with redis-cli
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel1 some-message
* 127.0.0.1:6379> PUBLISH channel some-message
* (integer) 3
* 127.0.0.1:6379>
*
* The messages will then appear on the terminal you are running the
* example.
* To test reconnection, try closing all clients currently
* connected to the Redis instance
*
* $ redis-cli
* > CLIENT kill TYPE pubsub
*/
net::awaitable<void> reader(std::shared_ptr<connection> db)
// Receives pushes.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
{
for (std::vector<node<std::string>> resp;;) {
auto n = co_await db->async_read_push(adapt(resp));
std::cout
<< "Size: " << n << "\n"
<< "Event: " << resp.at(1).value << "\n"
<< "Channel: " << resp.at(2).value << "\n"
<< "Message: " << resp.at(3).value << "\n"
<< std::endl;
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
auto handler = [](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
// Receives events.
net::awaitable<void> event_receiver(std::shared_ptr<connection> db)
{
request req;
req.push("SUBSCRIBE", "channel");
for (;;) {
auto ev = co_await db->async_receive_event();
if (ev == connection::event::hello)
co_await db->async_exec(req);
}
}
int main()
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
db->async_exec("127.0.0.1", "6379", req, adapt(), handler);
net::co_spawn(ioc, reader(db), net::detached);
ioc.run();
try {
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->get_config().enable_events = true;
db->get_config().enable_reconnect = true;
net::co_spawn(ioc, push_receiver(db), net::detached);
net::co_spawn(ioc, event_receiver(db), net::detached);
db->async_run(net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}

View File

@@ -0,0 +1,67 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <thread>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::node;
using aedis::resp3::request;
using connection = aedis::sync<aedis::connection<>>;
using event = connection::event;
// See subscriber.cpp for more info about how to run this example.
// Subscribe again every time there is a disconnection.
void event_receiver(connection& conn)
{
request req;
req.push("SUBSCRIBE", "channel");
for (;;) {
auto ev = conn.receive_event();
if (ev == connection::event::hello)
conn.exec(req);
}
}
int main()
{
try {
net::io_context ioc{1};
auto work = net::make_work_guard(ioc);
connection::config cfg;
cfg.enable_events = true;
cfg.enable_reconnect = true;
connection conn{work.get_executor(), cfg};
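// Thread t1 runs the io_context, t2 drives the connection (reconnecting
// automatically thanks to the config above) and t3 re-subscribes on every
// hello event; the main thread below blocks on receive_push and prints.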
std::thread t1{[&]() { ioc.run(); }};
std::thread t2{[&]() { boost::system::error_code ec; conn.run(ec); }};
std::thread t3{[&]() { event_receiver(conn); }};
for (std::vector<node<std::string>> resp;;) {
conn.receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
t1.join();
t2.join();
t3.join();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}

25
include/Makefile.am Normal file
View File

@@ -0,0 +1,25 @@
nobase_include_HEADERS =\
$(top_srcdir)/include/aedis/src.hpp\
$(top_srcdir)/include/aedis/error.hpp\
$(top_srcdir)/include/aedis/impl/error.ipp\
$(top_srcdir)/include/aedis/detail/net.hpp\
$(top_srcdir)/include/aedis/connection.hpp\
$(top_srcdir)/include/aedis/adapt.hpp\
$(top_srcdir)/include/aedis/sync.hpp\
$(top_srcdir)/include/aedis/detail/connection_ops.hpp\
$(top_srcdir)/include/aedis.hpp\
$(top_srcdir)/include/aedis/adapter/detail/adapters.hpp\
$(top_srcdir)/include/aedis/adapter/adapt.hpp\
$(top_srcdir)/include/aedis/adapter/detail/response_traits.hpp\
$(top_srcdir)/include/aedis/resp3/node.hpp\
$(top_srcdir)/include/aedis/resp3/detail/read_ops.hpp\
$(top_srcdir)/include/aedis/resp3/detail/parser.hpp\
$(top_srcdir)/include/aedis/resp3/detail/exec.hpp\
$(top_srcdir)/include/aedis/resp3/type.hpp\
$(top_srcdir)/include/aedis/resp3/read.hpp\
$(top_srcdir)/include/aedis/resp3/write.hpp\
$(top_srcdir)/include/aedis/resp3/request.hpp\
$(top_srcdir)/include/aedis/resp3/impl/request.ipp\
$(top_srcdir)/include/aedis/resp3/detail/impl/parser.ipp\
$(top_srcdir)/include/aedis/resp3/impl/type.ipp

View File

@@ -8,64 +8,209 @@
#define AEDIS_HPP
#include <aedis/error.hpp>
#include <aedis/command.hpp>
#include <aedis/adapt.hpp>
#include <aedis/connection.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/exec.hpp>
#include <aedis/sync.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/adapter/adapt.hpp>
// \li Support for Redis [sentinel](https://redis.io/docs/manual/sentinel).
// TODO: Reconnect support.
// TODO: Remove conflicts of the adapt function.
/** \mainpage Documentation
\tableofcontents
Useful links: \subpage any, [Changelog](CHANGELOG.md) and [Benchmarks](benchmarks/benchmarks.md).
\section Overview
Aedis is a high-level [Redis](https://redis.io/) client library
built on top of [Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html)
that provides simple and efficient communication with a Redis
server. Some of its distinctive features are
built on top of
[Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html).
Some of its distinctive features are
\li Support for the latest version of the Redis communication protocol [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
\li First class support for STL containers and C++ built-in types.
\li Serialization and deserialization of your own data types.
\li Zero asymptotic allocations by means of memory reuse.
\li Health checks, back pressure and low latency.
\li Hides most of the low level asynchronous operations away from the user.
The Aedis API hides most of the low level asynchronous operations
away from the user, for example, the code below pings a message to
the server
Let us have a look at some code snippets
\code
@subsection Async
The code below sends a ping command to Redis and quits (see intro.cpp)
@code
int main()
{
net::io_context ioc;
connection db{ioc};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, std::string> resp;
net::io_context ioc;
connection db{ioc};
db.async_exec("127.0.0.1", "6379", req, adapt(resp),
[](auto ec, auto) { std::cout << ec.message() << std::endl; });
std::tuple<std::string, aedis::ignore> resp;
db.async_run(req, adapt(resp), net::detached);
ioc.run();
// Print
std::cout << std::get<0>(resp) << std::endl;
std::cout << std::get<1>(resp) << std::endl;
}
\endcode
@endcode
For a detailed comparison of Redis clients and the design
rationale behind Aedis jump to \ref why-aedis.
The connection class maintains a healthy connection with Redis
over which users can execute their commands, without any need of
queuing. For example, to execute more than one request
@code
int main()
{
...
net::io_context ioc;
connection db{ioc};
db.async_exec(req1, adapt(resp1), handler1);
db.async_exec(req2, adapt(resp2), handler2);
db.async_exec(req3, adapt(resp3), handler3);
db.async_run(net::detached);
ioc.run();
...
}
@endcode
The `connection::async_exec` functions above can be called from different
places in the code without knowing about each other; see for
example echo_server.cpp. Server-side pushes are supported on the
same connection where commands are executed; a typical subscriber
looks like this
(see subscriber.cpp)
@code
net::awaitable<void> reader(std::shared_ptr<connection> db)
{
for (std::vector<node_type> resp;;) {
co_await db->async_receive_event(adapt(resp));
// Use resp and clear it.
resp.clear();
}
}
@endcode
@subsection Sync
The `connection` class offers only an asynchronous API.
Synchronous communication with Redis is provided by the `aedis::sync`
wrapper class (see intro_sync.cpp).
@code
int main()
{
net::io_context ioc{1};
auto work = net::make_work_guard(ioc);
std::thread t1{[&]() { ioc.run(); }};
sync<connection> conn{work.get_executor()};
std::thread t2{[&]() { boost::system::error_code ec; conn.run(ec); }};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
conn.exec(req, adapt(resp));
std::cout << "Response: " << std::get<0>(resp) << std::endl;
work.reset();
t1.join();
t2.join();
}
@endcode
\subsection using-aedis Installation
To install and use Aedis you will need
- Boost 1.78 or greater.
- C++17. Some examples require C++20 with coroutine support.
- Redis 6 or higher. Optionally also redis-cli and Redis Sentinel.
For a simple installation run
```
$ git clone --branch v1.0.0 https://github.com/mzimbres/aedis.git
$ cd aedis
# Option 1: Direct compilation.
$ g++ -std=c++17 -pthread examples/intro.cpp -I./include -I/path/boost_1_79_0/include/
# Option 2: Use cmake.
$ BOOST_ROOT=/opt/boost_1_79_0/ cmake -DCMAKE_CXX_FLAGS=-std=c++20 .
```
@note CMake support is still experimental.
For a proper full installation on the system run
```
# Download and unpack the latest release
$ wget https://github.com/mzimbres/aedis/releases/download/v1.0.0/aedis-1.0.0.tar.gz
$ tar -xzvf aedis-1.0.0.tar.gz
# Configure, build and install
$ CXXFLAGS="-std=c++17" ./configure --prefix=/opt/aedis-1.0.0 --with-boost=/opt/boost_1_78_0
$ sudo make install
```
To build examples and tests
```
$ make
```
@subsubsection using_aedis Using Aedis
When writing your own applications include the following header
```cpp
#include <aedis/src.hpp>
```
in no more than one source file in your applications.
@subsubsection sup-comp Supported compilers
Aedis has been tested with the following compilers
- Tested with gcc: 12, 11.
- Tested with clang: 14, 13, 11.
\subsubsection Developers
To generate the build system clone the repository and run
```
# git clone https://github.com/mzimbres/aedis.git
$ autoreconf -i
```
After that we get a configure script that can be run as explained
above, for example, to build with a compiler other than the system
compiler with coverage support run
```
$ CXX=clang++-14 \
CXXFLAGS="-g -std=c++20 -Wall -Wextra --coverage -fkeep-inline-functions -fkeep-static-functions" \
LDFLAGS="--coverage" \
./configure --with-boost=/opt/boost_1_79_0
```
To generate release tarballs run
```
$ make distcheck
```
\section requests Requests
@@ -79,7 +224,7 @@
// Command with variable length of arguments.
req.push("SET", "key", "some value", value, "EX", "2");
// Pushes a set.
// Pushes a list.
std::list<std::string> list
{"channel1", "channel2", "channel3"};
req.push_range("SUBSCRIBE", list);
@@ -87,7 +232,7 @@
// Same as above but as an iterator range.
req.push_range2("SUBSCRIBE", std::cbegin(list), std::cend(list));
// Sends a map.
// Pushes a map.
std::map<std::string, mystruct> map
{ {"key1", "value1"}
, {"key2", "value2"}
@@ -101,8 +246,6 @@
co_await db->async_exec(req, adapt(resp));
@endcode
The second argument \c adapt(resp) will be explained in \ref requests.
\subsection requests-serialization Serialization
The \c push and \c push_range functions above work with integers
@@ -116,7 +259,6 @@
void to_bulk(std::string& to, mystruct const& obj)
{
// Convert to obj string and call to_bulk.
std::string dummy = "Dummy serialization string.";
aedis::resp3::to_bulk(to, dummy);
}
@@ -133,7 +275,7 @@
req.push_range("HSET", "key", map);
@endcode
It is quite common to store json string in Redis for example.
Example serialization.cpp shows how to store JSON strings in Redis.
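As a minimal sketch (the \c mystruct fields and the hand-rolled JSON below
are only illustrative; serialization.cpp uses Boost.JSON for the real
thing), a complete \c to_bulk could look like
@code
struct mystruct {
   std::string name;
   int age;
};
void to_bulk(std::string& to, mystruct const& obj)
{
   // Any textual representation works; here a hand-written JSON string.
   std::string payload =
      "{\"name\":\"" + obj.name + "\",\"age\":" + std::to_string(obj.age) + "}";
   aedis::resp3::to_bulk(to, payload);
}
@endcode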
\section low-level-responses Responses
@@ -154,7 +296,7 @@
proper C++ data structure to receive it in. Fortunately, this is a
simple task for most types. The table below summarises the options
RESP3 type | C++ | Type
RESP3 type | Possible C++ type | Type
---------------|--------------------------------------------------------------|------------------
Simple-string | \c std::string | Simple
Simple-error | \c std::string | Simple
@@ -168,47 +310,42 @@
Set | \c std::vector, \c std::set, \c std::unordered_set | Aggregate
Push | \c std::vector, \c std::map, \c std::unordered_map | Aggregate
For example
@code
request req;
req.push("HELLO", 3);
req.push_range("RPUSH", "key1", vec);
req.push_range("HSET", "key2", map);
req.push("LRANGE", "key3", 0, -1);
req.push("HGETALL", "key4");
req.push("QUIT");
std::tuple<
aedis::ignore, // hello
int, // rpush
int, // hset
std::vector<T>, // lrange
std::map<U, V>, // hgetall
std::string // quit
> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
The tag @c aedis::ignore can be used to ignore individual
elements in the responses. If the intention is to ignore the
response to all commands in the request use @c adapt()
@code
co_await db->async_exec(req, adapt());
@endcode
Responses that contain nested aggregates or heterogeneous data
types will be given special treatment later in \ref gen-case. As
of this writing, not all RESP3 types are used by the Redis server,
which means in practice users will be concerned with a reduced
subset of the RESP3 specification. Now let us see some examples
@code
auto dbuffer = dynamic_buffer(buffer);
// To ignore the response.
co_await resp3::async_read(socket, dbuffer, adapt());
// Read in a std::string e.g. get.
std::string str;
co_await resp3::async_read(socket, dbuffer, adapt(str));
// Read in a long long e.g. rpush.
long long number;
co_await resp3::async_read(socket, dbuffer, adapt(number));
// Read in a std::set e.g. smembers.
std::set<T, U> set;
co_await resp3::async_read(socket, dbuffer, adapt(set));
// Read in a std::map e.g. hgetall.
std::map<T, U> set;
co_await resp3::async_read(socket, dbuffer, adapt(map));
// Read in a std::unordered_map e.g. hgetall.
std::unordered_map<T, U> umap;
co_await resp3::async_read(socket, dbuffer, adapt(umap));
// Read in a std::vector e.g. lrange.
std::vector<T> vec;
co_await resp3::async_read(socket, dbuffer, adapt(vec));
@endcode
In other words, it is straightforward, just pass the result of \c
adapt to the read function and make sure the response data type is
compatible with the data structure you are calling @c adapter(...)
with. All standard C++ containers are supported by Aedis.
subset of the RESP3 specification.
\subsection Optional
@@ -218,36 +355,18 @@
wrap your type around \c boost::optional like this
@code
boost::optional<std::unordered_map<T, U>> umap;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(umap));
boost::optional<std::unordered_map<T, U>> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
Everything else stays the same, before accessing data, users will
have to check or assert the optional contains a value.
Everything else stays the same.
\subsection heterogeneous_aggregates Heterogeneous aggregates
\subsection transactions Transactions
There are cases where Redis returns aggregates that
contain heterogeneous data, for example, an array that contains
integers, strings nested sets etc. Aedis supports reading such
aggregates in a \c std::tuple efficiently as long as the they
don't contain 3-order nested aggregates e.g. an array that
contains an array of arrays. For example, to read the response to
a \c hello command we can use the following response type.
@code
using hello_type = std::tuple<
std::string, std::string,
std::string, std::string,
std::string, int,
std::string, int,
std::string, std::string,
std::string, std::string,
std::string, std::vector<std::string>>;
@endcode
Transactions are another example where this feature is useful, for
example, the response to the transaction below
To read the response to transactions we have to observe that Redis
queues the commands as they arrive and sends the responses back to
the user as an array, in the response to the @c exec command.
For example, to read the response to this request
@code
db.send("MULTI");
@@ -257,52 +376,42 @@
db.send("EXEC");
@endcode
can be read in the following way
use the following response type
@code
std::tuple<
boost::optional<std::string>, // Response to get
boost::optional<std::vector<std::string>>, // Response to lrange
boost::optional<std::map<std::string, std::string>> // Response to hgetall
> trans;
using aedis::ignore;
using boost::optional;
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore multi
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore get
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore lrange
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore hgetall
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(trans));
using exec_resp_type =
std::tuple<
optional<std::string>, // get
optional<std::vector<std::string>>, // lrange
optional<std::map<std::string, std::string>> // hgetall
>;
std::tuple<
ignore, // multi
ignore, // get
ignore, // lrange
ignore, // hgetall
exec_resp_type, // exec
> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
Note that above we are not ignoring the response to the commands
themselves but whether they have been successfully queued. Only
after @c exec is received Redis will execute them in sequence and
send all responses together in an array.
themselves but whether they have been successfully queued. For a
complete example see containers.cpp.
\subsection Serialization
\subsection Deserialization
As mentioned in \ref requests-serialization, it is common for
users to serialized data before sending it to Redis e.g. json
strings, for example
@code
sr.push("SET", "key", "{"Server": "Redis"}"); // Unquoted for readability.
sr.push("GET", "key")
@endcode
For performance and convenience reasons, we may want to avoid
receiving the response to the \c get command above as a string
just to convert it later to a e.g. deserialized json. To support
this, Aedis calls a user defined \c from_bulk function while
parsing the response. In simple terms, define your type
@code
struct mystruct {
// struct fields.
};
@endcode
and deserialize it from a string in a function \c from_bulk with
the following signature
As mentioned in \ref requests-serialization, it is common to
serialize data before sending it to Redis e.g. to json strings.
For performance and convenience reasons, we may also want to
deserialize it directly in its final data structure. Aedis
supports this use case by calling a user provided \c from_bulk
function while parsing the response. For example
@code
void from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
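{
   // Sketch only (not from the original docs): assumes mystruct keeps the raw
   // payload in a std::string member; a real implementation would parse it
   // (e.g. with Boost.JSON) and set ec on failure instead of ignoring it.
   obj.payload.assign(p, size);
   (void)ec;
}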
@@ -316,10 +425,10 @@
\subsection gen-case The general case
As already mentioned, there are cases where responses to Redis
There are cases where responses to Redis
commands won't fit in the model presented above, some examples are
@li Commands (like \c set) whose response don't have a fixed
@li Commands (like \c set) whose responses don't have a fixed
RESP3 type. Expecting an \c int and receiving a blob-string
will result in error.
@li RESP3 aggregates that contain nested aggregates can't be read in STL containers.
@@ -350,125 +459,48 @@
std::vector<node<std::string>>. The vector can be seen as a
pre-order view of the response tree
(https://en.wikipedia.org/wiki/Tree_traversal#Pre-order,_NLR).
Using it is no different that using other types
Using it is no different than using other types
@code
// Receives any RESP3 simple data type.
node<std::string> resp;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
co_await db->async_exec(req, adapt(resp));
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
co_await db->async_exec(req, adapt(resp));
@endcode
For example, suppose we want to retrieve a hash data structure
from Redis with \c hgetall, some of the options are
from Redis with `HGETALL`, some of the options are
@li \c std::vector<node<std::string>>: Works always.
@li \c std::vector<std::string>: Efficient and flat, all elements as string.
@li \c std::map<std::string, std::string>: Efficient if you need the data as a \c std::map
@li \c std::map<U, V>: Efficient if you are storing serialized data. Avoids temporaries and requires \c from_bulk for \c U and \c V.
In addition to the above users can also use unordered versions of the containers. The same reasoning also applies to sets e.g. \c smembers.
In addition to the above users can also use unordered versions of the containers. The same reasoning also applies to sets e.g. `SMEMBERS`.
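For instance, a minimal sketch (the key name `my-hash` is only
illustrative) reading the hash directly into a `std::map`
@code
request req;
req.push("HGETALL", "my-hash");
std::tuple<std::map<std::string, std::string>> resp;
co_await db->async_exec(req, adapt(resp));
// std::get<0>(resp) now holds the field/value pairs.
@endcode
Swapping the tuple element for e.g. `std::vector<node<std::string>>` or an
unordered map follows the same pattern.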
\section examples Examples
The examples listed below cover most use cases presented in the documentation above.
@li intro.cpp: Basic steps with Aedis.
@li intro_sync.cpp: Synchronous version of intro.cpp.
@li containers.cpp: Shows how to send and receive stl containers.
@li serialization.cpp: Shows the \c request support to serialization of user types.
@li subscriber.cpp: Shows how channel subscription works.
@li echo_server.cpp: A simple TCP echo server that users coroutines.
@li serialization.cpp: Shows how to serialize your own types.
@li subscriber.cpp: Shows how to use pubsub.
@li subscriber_sync.cpp: Synchronous version of subscriber.cpp.
@li echo_server.cpp: A simple TCP echo server that uses coroutines.
@li chat_room.cpp: A simple chat room that uses coroutines.
\section using-aedis Using Aedis
To install and use Aedis you will need
- Boost 1.78 or greater.
- Unix Shell and Make (for linux users).
- C++14. Some examples require C++20 with coroutine support.
- Redis server.
Some examples will also require interaction with
- redis-cli: Used in one example.
- Redis Sentinel Server: used in some examples.
Aedis has been tested with the following compilers
- Tested with gcc: 7.5.0, 8.4.0, 9.3.0, 10.3.0.
- Tested with clang: 11.0.0, 10.0.0, 9.0.1, 8.0.1, 7.0.1.
\section Installation
The first thing to do is to download and unpack Aedis
```
# Download the latest release on github
$ wget https://github.com/mzimbres/aedis/releases
# Uncompress the tarball and cd into the dir
$ tar -xzvf aedis-version.tar.gz
```
If you can't use \c configure and \c make (e.g. Windows users)
add the directory where you unpacked Aedis to the
include directories in your project, otherwise run
```
# See configure --help for all options.
$ ./configure --prefix=/opt/aedis-version --with-boost=/opt/boost_1_78_0
# Install Aedis in the path specified in --prefix
$ sudo make install
```
and include the following header
```cpp
#include <aedis/src.hpp>
```
in exactly one source file in your applications. At this point you
can start using Aedis. To build the examples and run the tests run
```
# Build aedis examples.
$ make examples
# Test aedis in your machine.
$ make check
```
\section Developers
To generate the build system run
```
$ autoreconf -i
```
After that you will have a configure script
that you can run as explained above, for example, to use a
compiler other that the system compiler run
```
$ CC=/opt/gcc-10.2.0/bin/gcc-10.2.0 CXX=/opt/gcc-10.2.0/bin/g++-10.2.0 CXXFLAGS="-g -Wall -Werror" ./configure ...
$ make distcheck
```
\section why-aedis Why Aedis
At the time of this writing there are seventeen Redis clients
listed in the [official](https://redis.io/docs/clients/#cpp) list.
With so many clients available it is not unlikely that users are
asking themselves why yet another one. In this section I will try
to compare Aedis to the most popular clients and why we need
to compare Aedis with the most popular clients and why we need
Aedis. Notice however that this is ongoing work as comparing
clients objectively is difficult and time consuming.
@@ -481,8 +513,8 @@
not support
@li RESP3. Without RESP3 is impossible to support some important Redis features like client side caching, among other things.
@li The Asio asynchronous model.
@li Reading response diretly in user data structures avoiding temporaries.
@li Coroutines.
@li Reading responses directly in user data structures avoiding temporaries.
@li Error handling with error-code and exception overloads.
@li Health checks.
@@ -528,11 +560,11 @@
Some of the problems with this API are
@li Heterogeneous treatment of commands, pipelines and transaction.
@li Heterogeneous treatment of commands, pipelines and transactions. This makes auto-pipelining impossible.
@li Any API that sends individual commands has a very restricted scope of usability and should be avoided for performance reasons.
@li The API imposes exceptions on users; no error-code overload is provided.
@li No way to reuse the buffer for new calls to e.g. \c redis.get in order to avoid further dynamic memory allocations.
@li Error handling of resolve and connection no clear.
@li Error handling of resolve and connection not clear.
According to the documentation, pipelines in redis-plus-plus have
the following characteristics
@@ -543,55 +575,14 @@
This is clearly a downside of the API as pipelines should be the
default way of communicating and not an exception; paying such a
high price for each pipeline imposes a severe cost in performance.
Transactions also suffer from the very same problem
Transactions also suffer from the very same problem.
> NOTE: Creating a Transaction object is NOT cheap, since it
> creates a new connection.
In Aedis there is no difference between sending one command, a
pipeline or a transaction because creating the request is decoupled
from the IO objects, for example
@code
std::string request;
auto sr = make_serializer(request);
sr.push("HELLO", 3);
sr.push("MULTI");
sr.push("PING");
sr.push("SET", "low-level-key", "some content", "EX", "2");
sr.push("EXEC");
sr.push("PING", "Another message.");
net::write(socket, net::buffer(request));
@endcode
The request created above will be sent to Redis in a single
pipeline and imposes no restriction on what it contains e.g. the
number of commands, transactions etc. The problems mentioned above
simply do not exist in Aedis. The way responses are read is
also more flexible
@code
std::string buffer;
auto dbuffer = net::dynamic_buffer(buffer);
std::tuple<std::string, boost::optional<std::string>> response;
resp3::read(socket, dbuffer); // hello
resp3::read(socket, dbuffer); // multi
resp3::read(socket, dbuffer); // ping
resp3::read(socket, dbuffer); // set
resp3::read(socket, dbuffer, adapt(response));
resp3::read(socket, dbuffer); // ping
@endcode
@li The response objects are passed by the caller to the read
functions so that the caller has fine control over memory allocations and
object lifetime.
@li The user can either use error-code or exceptions.
@li Each response can be read individually in the response object
avoiding temporaries.
@li It is possible to ignore responses.
This was the blocking API; now let us compare the async interface.
> redis-plus-plus also supports async interface, however, async
> support for Transaction and Subscriber is still on the way.
@@ -625,11 +616,6 @@
@li Richard Hodges ([madmongo1](https://github.com/madmongo1)): For helping me with Asio and the design of asynchronous programs in general.
@li Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation (among other things).
@li Petr Dannhofer ([Eddie-cz](https://github.com/Eddie-cz)): For helping me understand how the `AUTH` and `HELLO` command can influence each other.
\section Reference
See \subpage any.
*/
/** \defgroup any Reference
@@ -637,4 +623,14 @@
* This page contains the documentation of all user facing code.
*/
// Add sentinel support as described in
//
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
//
// Avoid conflicts between
//
// - aedis::adapt
// - aedis::resp3::adapt.
#endif // AEDIS_HPP

View File

@@ -20,6 +20,18 @@
namespace aedis {
/** @brief Tag used to ignore responses.
* @ingroup any
*
* For example
*
* @code
* std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
* @endcode
*
* will cause only the second tuple element to be parsed; the others
* will be ignored.
*/
using ignore = adapter::detail::ignore;
namespace detail {
@@ -27,11 +39,13 @@ namespace detail {
struct ignore_adapter {
void
operator()(
std::size_t i,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
std::size_t,
resp3::node<boost::string_view> const&,
boost::system::error_code&)
{
}
auto supported_response_size() const noexcept { return std::size_t(-1);}
};
template <class Tuple>
@@ -47,9 +61,11 @@ private:
public:
static_adapter(Tuple& r = nullptr)
{
adapter::detail::assigner<std::tuple_size<Tuple>::value - 1>::assign(adapters_, r);
adapter::detail::assigner<size - 1>::assign(adapters_, r);
}
auto supported_response_size() const noexcept { return size;}
void
operator()(
std::size_t i,
@@ -69,11 +85,13 @@ private:
adapter_type adapter_;
public:
vector_adapter(Vector& v) : adapter_{adapter::adapt(v)} { }
vector_adapter(Vector& v) : adapter_{adapter::adapt2(v)} { }
auto supported_response_size() const noexcept { return std::size_t(-1);}
void
operator()(
std::size_t i,
std::size_t,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
@@ -113,11 +131,23 @@ struct response_traits<std::tuple<Ts...>> {
} // detail
/** @brief Creates an adapter that ignores responses.
* @ingroup any
*
* This function can be used to create adapters that ignore
* responses.
*/
auto adapt() noexcept
{
return detail::response_traits<void>::adapt();
}
/** @brief Adapts a type to be used as a response.
* @ingroup any
*
* The type T can be any STL container, any integer type and
* \c std::string
*/
template<class T>
auto adapt(T& t) noexcept
{

View File

@@ -39,7 +39,7 @@ using adapter_t = typename detail::adapter_t<T>;
@endcode
*/
inline
auto adapt() noexcept
auto adapt2() noexcept
{ return detail::response_traits<void>::adapt(); }
/** \internal
@@ -75,7 +75,7 @@ auto adapt() noexcept
* @endcode
*/
template<class T>
auto adapt(T& t) noexcept
auto adapt2(T& t) noexcept
{ return detail::response_traits<T>::adapt(t); }
} // adapter

View File

@@ -63,7 +63,7 @@ from_bulk(
void from_bulk(
bool& t,
boost::string_view sv,
boost::system::error_code& ec)
boost::system::error_code&)
{
t = *sv.data() == 't';
}
@@ -146,7 +146,7 @@ public:
return;
if (is_aggregate(n.data_type)) {
ec = error::expects_simple_type;
ec = error::expects_resp3_simple_type;
return;
}
@@ -175,14 +175,14 @@ public:
if (is_aggregate(nd.data_type)) {
if (nd.data_type != resp3::type::set)
ec = error::expects_set_type;
ec = error::expects_resp3_set;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_set_type;
ec = error::expects_resp3_set;
return;
}
@@ -214,14 +214,14 @@ public:
if (is_aggregate(nd.data_type)) {
if (element_multiplicity(nd.data_type) != 2)
ec = error::expects_map_type;
ec = error::expects_resp3_map;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_map_type;
ec = error::expects_resp3_map;
return;
}
@@ -294,7 +294,7 @@ public:
}
} else {
if (i_ == -1) {
ec = error::expects_aggregate_type;
ec = error::expects_resp3_aggregate;
return;
}
@@ -324,7 +324,7 @@ struct list_impl {
if (!is_aggregate(nd.data_type)) {
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_aggregate_type;
ec = error::expects_resp3_aggregate;
return;
}

View File

@@ -90,17 +90,15 @@ struct assigner<0> {
}
};
// TODO: I am not sure we need the mp_unique below.
template <class Tuple>
class static_aggregate_adapter {
private:
using adapters_array_type =
std::array<
boost::mp11::mp_unique<
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>>,
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>,
std::tuple_size<Tuple>::value>;
std::size_t i_ = 0;

View File

@@ -0,0 +1,616 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_HPP
#define AEDIS_CONNECTION_HPP
#include <vector>
#include <queue>
#include <limits>
#include <chrono>
#include <memory>
#include <type_traits>
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <aedis/adapt.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
namespace aedis {
/** @brief A high level connection to Redis.
* @ingroup any
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @remarks This class exposes only asynchronous member functions;
* synchronous communication with the Redis server is provided by
* the sync class.
*
* @tparam AsyncReadWriteStream A stream that supports
* `async_read_some` and `async_write_some`.
*
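* A minimal usage sketch (illustrative; error handling omitted and a
* Redis server is assumed to be reachable with the default config):
*
* @code
* boost::asio::io_context ioc;
* aedis::connection<> conn{ioc};
*
* aedis::resp3::request req;
* req.push("PING");
*
* conn.async_exec(req, aedis::adapt(), [](auto ec, std::size_t) { });
* conn.async_run([](auto ec) { });
* ioc.run();
* @endcode
*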
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
/** @brief Connection configuration parameters.
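*
* For example, enabling authentication and automatic reconnection
* (illustrative sketch; the credentials are placeholders and \c ioc is
* a \c boost::asio::io_context):
*
* @code
* aedis::connection<>::config cfg;
* cfg.username = "aedis";
* cfg.password = "aedis";
* cfg.enable_reconnect = true;
* aedis::connection<> conn{ioc, cfg};
* @endcode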
*/
struct config {
/// Redis server address.
std::string host = "127.0.0.1";
/// Redis server port.
std::string port = "6379";
/// Username if authentication is required.
std::string username;
/// Password if authentication is required.
std::string password;
/// Timeout of the resolve operation.
std::chrono::milliseconds resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::milliseconds connect_timeout = std::chrono::seconds{10};
/// Time interval of ping operations.
std::chrono::milliseconds ping_interval = std::chrono::seconds{1};
/// Time waited before trying a reconnection (see config::enable_reconnect).
std::chrono::milliseconds reconnect_interval = std::chrono::seconds{1};
/// The maximum size of read operations.
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)();
/// Whether to coalesce requests (see [pipelines](https://redis.io/topics/pipelining)).
bool coalesce_requests = true;
/// Enable internal events, see connection::async_receive_event.
bool enable_events = false;
/// Enable automatic reconnection (see also config::reconnect_interval).
bool enable_reconnect = false;
};
/// Events that are communicated by `connection::async_receive_event`.
enum class event {
/// Resolve operation was successful.
resolve,
/// Connect operation was successful.
connect,
/// Success sending AUTH and HELLO.
hello,
/// Used internally.
invalid
};
/** @brief Async operations exposed by this class.
*
* The operations listed below can be cancelled with the `cancel`
* member function.
*/
enum class operation {
/// `connection::async_exec` operations.
exec,
/// `connection::async_run` operations.
run,
/// `connection::async_receive_event` operations.
receive_event,
/// `connection::async_receive_push` operations.
receive_push,
};
/** \brief Constructor
*
* \param ex The executor.
* \param cfg Configuration parameters.
*/
connection(executor_type ex, config cfg = config{})
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, event_channel_{ex}
, cfg_{cfg}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
{
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
/** \brief Constructor
*
* \param ioc The io_context.
* \param cfg Configuration parameters.
*/
connection(boost::asio::io_context& ioc, config cfg = config{})
: connection(ioc.get_executor(), cfg)
{ }
/// Returns the executor.
auto get_executor() {return resv_.get_executor();}
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with `async_exec`.
*
* @li operation::run: Cancels `async_run`. Notice that the
* preferred way to close a connection is to ensure
* `config::enable_reconnect` is set to `false` and send `QUIT`
* to the server. An unresponsive Redis server will also cause
* the idle-checks to kick in and lead to
* `connection::async_run` completing with
* `error::idle_timeout`. Calling `cancel(operation::run)`
* directly should be seen as the last option.
*
* @li operation::receive_event: Cancels `connection::async_receive_event`.
*
* @param op: The operation to be cancelled.
* @returns The number of operations that have been canceled.
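*
* For example, to cancel all outstanding `async_exec` operations
* (illustrative):
*
* @code
* conn.cancel(aedis::connection<>::operation::exec);
* @endcode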
*/
std::size_t cancel(operation op)
{
switch (op) {
case operation::exec:
{
for (auto& e: reqs_) {
e->stop = true;
e->timer.cancel_one();
}
auto const ret = reqs_.size();
reqs_ = {};
return ret;
}
case operation::run:
{
if (socket_)
socket_->close();
read_timer_.cancel();
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !ptr->req->close_on_run_completion;
});
// Cancel own pings if there are any waiting.
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop = true;
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
return 1U;
}
case operation::receive_event:
{
event_channel_.cancel();
return 1U;
}
case operation::receive_push:
{
push_channel_.cancel();
return 1U;
}
}
return 0;
}
/// Get the config object.
config& get_config() noexcept { return cfg_;}
/// Gets the config object.
config const& get_config() const noexcept { return cfg_;}
/** @name Asynchronous functions
*
* Each of these operations is individually cancellable.
**/
/// @{
/** @brief Starts communication with the Redis server asynchronously.
*
* This function performs the following steps
*
* @li Resolves the Redis host as of `async_resolve` with the
* timeout passed in `config::resolve_timeout`.
*
* @li Connects to one of the endpoints returned by the resolve
* operation with the timeout passed in `config::connect_timeout`.
*
* @li Starts the idle check with a timeout of twice
* the value of `config::ping_interval`. If no data is
* received during that time interval, `connection::async_run` completes with
* `error::idle_timeout`.
*
* @li Starts the health-check operation that sends `PING`s to
* Redis at intervals given by `config::ping_interval`.
*
* @li Starts reading from the socket and executes all requests
* that have been started prior to this function call.
*
* @remark When a timeout occurs and `config::enable_reconnect` is
* set, this function will automatically try a reconnection
* without returning control to the user.
*
* For an example see echo_server.cpp.
*
* \param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code);
* @endcode
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<connection>{this}, token, resv_);
}
/** @brief Connects and executes a request asynchronously.
*
* Combines the other `async_run` overload with `async_exec` in a
* single function. This function is useful for users that want to
* send a single request to the server and then close the connection.
*
* \param req Request object.
* \param adapter Response adapter.
* \param token Asio completion token.
*
* For an example see intro.cpp. The completion token must have
* the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in bytes.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::runexec_op<connection, Adapter>
{this, &req, adapter}, token, resv_);
}
/** @brief Executes a command on the redis server asynchronously.
*
* There is no need to synchronize multiple calls to this
* function as it keeps an internal queue.
*
* \param req Request object.
* \param adapter Response adapter.
* \param token Asio completion token.
*
* For an example see echo_server.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in
* bytes.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
BOOST_ASSERT_MSG(req.size() <= adapter.supported_response_size(), "Request and adapter have incompatible sizes.");
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<connection, Adapter>{this, &req, adapter}, token, resv_);
}
/** @brief Receives server side pushes asynchronously.
*
* Users that expect server pushes have to call this function in a
* loop. If an unsolicited event comes in and there is no reader,
* the connection will hang and eventually time out.
*
* \param adapter The response adapter.
* \param token The Asio completion token.
*
* For an example see subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the push in
* bytes.
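*
* A minimal receive loop (illustrative sketch; assumes a coroutine
* context and \c net aliased to \c boost::asio):
*
* @code
* std::vector<aedis::resp3::node<std::string>> resp;
* for (;;) {
*    co_await conn.async_receive_push(aedis::adapt(resp), net::use_awaitable);
*    // Consume the push stored in resp, then clear it.
*    resp.clear();
* }
* @endcode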
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
auto f =
[adapter]
(resp3::node<boost::string_view> const& node, boost::system::error_code& ec) mutable
{
adapter(std::size_t(-1), node, ec);
};
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::receive_push_op<connection, decltype(f)>{this, f}, token, resv_);
}
/** @brief Receives internal events.
*
* See the \c event enum for the list of events.
*
* \param token The Asio completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code, event);
* @endcode
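*
* For example, in a coroutine (illustrative; requires
* `config::enable_events` to be set to `true`):
*
* @code
* for (;;) {
*    auto ev = co_await conn.async_receive_event(net::use_awaitable);
*    // React to ev, e.g. event::hello after a (re)connection.
* }
* @endcode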
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_event(CompletionToken token = CompletionToken{})
{
return event_channel_.async_receive(token);
}
/// @}
private:
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
using push_channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
using event_channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, event)>;
struct req_info {
req_info(executor_type ex) : timer{ex} {}
timer_type timer;
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
bool written = false;
};
using reqs_type = std::deque<std::shared_ptr<req_info>>;
template <class T, class U> friend struct detail::receive_push_op;
template <class T> friend struct detail::reader_op;
template <class T> friend struct detail::writer_op;
template <class T> friend struct detail::ping_op;
template <class T> friend struct detail::run_op;
template <class T> friend struct detail::run_one_op;
template <class T, class U> friend struct detail::exec_op;
template <class T, class U> friend struct detail::exec_read_op;
template <class T, class U> friend struct detail::runexec_op;
template <class T> friend struct detail::connect_with_timeout_op;
template <class T> friend struct detail::resolve_with_timeout_op;
template <class T> friend struct detail::check_idle_op;
template <class T> friend struct detail::start_op;
template <class T> friend struct detail::send_receive_op;
template <class CompletionToken>
auto async_run_one(CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_one_op<connection>{this}, token, resv_);
}
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->written && ptr->req->size() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (socket_ != nullptr && socket_->is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
auto make_dynamic_buffer()
{ return boost::asio::dynamic_buffer(read_buffer_, cfg_.max_read_size); }
template <class CompletionToken>
auto async_resolve_with_timeout(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_with_timeout_op<connection>{this},
token, resv_);
}
template <class CompletionToken>
auto async_connect_with_timeout(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_with_timeout_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<connection>{this}, token, resv_.get_executor());
}
template <class CompletionToken>
auto writer(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<connection>{this}, token, resv_.get_executor());
}
template <class CompletionToken>
auto
async_start(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::start_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto async_ping(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_op<connection>{this}, token, resv_);
}
template <class CompletionToken>
auto async_check_idle(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<connection>{this}, token, check_idle_timer_);
}
template <class Adapter, class CompletionToken>
auto async_exec_read(Adapter adapter, std::size_t cmds, CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<connection, Adapter>{this, adapter, cmds}, token, resv_);
}
void coalesce_requests()
{
// Coalesces all requests: copies the requests to variables
// that won't be touched while async_write is suspended.
BOOST_ASSERT(write_buffer_.empty());
BOOST_ASSERT(!reqs_.empty());
auto const size = cfg_.coalesce_requests ? reqs_.size() : 1;
for (auto i = 0UL; i < size; ++i) {
write_buffer_ += reqs_.at(i)->req->payload();
cmds_ += reqs_.at(i)->req->size();
reqs_.at(i)->written = true;
}
}
// IO objects
resolver_type resv_;
std::shared_ptr<AsyncReadWriteStream> socket_;
timer_type ping_timer_;
timer_type check_idle_timer_;
timer_type writer_timer_;
timer_type read_timer_;
push_channel_type push_channel_;
event_channel_type event_channel_;
config cfg_;
std::string read_buffer_;
std::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
// Last time we received data.
time_point_type last_data_;
// The result of async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
resp3::request req_;
};
/** @brief Converts a connection event to a string.
* @relates connection
*/
template <class T>
char const* to_string(typename connection<T>::event e)
{
using event_type = typename connection<T>::event;
switch (e) {
case event_type::resolve: return "resolve";
case event_type::connect: return "connect";
case event_type::hello: return "hello";
case event_type::invalid: return "invalid";
default: BOOST_ASSERT_MSG(false, "to_string: unhandled event.");
}
}
/** @brief Writes a connection event to the stream.
* @relates connection
*/
template <class T>
std::ostream& operator<<(std::ostream& os, typename connection<T>::event e)
{
os << to_string(e);
return os;
}
} // aedis
#endif // AEDIS_CONNECTION_HPP

View File

@@ -20,9 +20,9 @@
#include <aedis/error.hpp>
#include <aedis/detail/net.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/exec.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/exec.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/request.hpp>
@@ -37,18 +37,19 @@ namespace detail {
template <class Conn>
struct connect_with_timeout_op {
Conn* conn;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& ep = {})
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
BOOST_ASSERT(conn->socket_ != nullptr);
conn->ping_timer_.expires_after(conn->cfg_.connect_timeout);
yield aedis::detail::async_connect(*conn->socket_, conn->ping_timer_, conn->endpoints_, std::move(self));
yield
aedis::detail::async_connect(*conn->socket_, conn->ping_timer_, conn->endpoints_, std::move(self));
self.complete(ec);
}
}
@@ -57,9 +58,7 @@ struct connect_with_timeout_op {
template <class Conn>
struct resolve_with_timeout_op {
Conn* conn;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -69,7 +68,10 @@ struct resolve_with_timeout_op {
reenter (coro)
{
conn->ping_timer_.expires_after(conn->cfg_.resolve_timeout);
yield aedis::detail::async_resolve(conn->resv_, conn->ping_timer_, host, port, std::move(self));
yield
aedis::detail::async_resolve(
conn->resv_, conn->ping_timer_,
conn->cfg_.host, conn->cfg_.port, std::move(self));
conn->endpoints_ = res;
self.complete(ec);
}
@@ -77,11 +79,11 @@ struct resolve_with_timeout_op {
};
template <class Conn, class Adapter>
struct read_push_op {
Conn* conn;
struct receive_push_op {
Conn* conn = nullptr;
Adapter adapter;
std::size_t read_size;
boost::asio::coroutine coro;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
@@ -91,23 +93,26 @@ struct read_push_op {
{
reenter (coro)
{
yield conn->push_channel_.async_receive(std::move(self));
yield
conn->push_channel_.async_receive(std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
BOOST_ASSERT(conn->socket_ != nullptr);
yield resp3::async_read(*conn->socket_, conn->make_dynamic_buffer(), adapter, std::move(self));
yield
resp3::async_read(*conn->socket_, conn->make_dynamic_buffer(), adapter, std::move(self));
if (ec) {
conn->cancel_run();
conn->cancel(Conn::operation::run);
self.complete(ec, 0);
return;
}
read_size = n;
yield conn->push_channel_.async_send({}, 0, std::move(self));
yield
conn->push_channel_.async_send({}, 0, std::move(self));
self.complete(ec, read_size);
return;
}
@@ -118,9 +123,10 @@ template <class Conn, class Adapter>
struct exec_read_op {
Conn* conn;
Adapter adapter;
std::size_t cmds = 0;
std::size_t read_size = 0;
std::size_t index = 0;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void
@@ -132,7 +138,7 @@ struct exec_read_op {
{
// Loop reading the responses to this request.
BOOST_ASSERT(!conn->reqs_.empty());
while (conn->reqs_.front()->cmds != 0) {
while (cmds != 0) {
BOOST_ASSERT(conn->cmds_ != 0);
//-----------------------------------
@@ -141,24 +147,28 @@ struct exec_read_op {
// some data in the read buffer.
if (conn->read_buffer_.empty()) {
BOOST_ASSERT(conn->socket_ != nullptr);
yield boost::asio::async_read_until(*conn->socket_, conn->make_dynamic_buffer(), "\r\n", std::move(self));
yield
boost::asio::async_read_until(*conn->socket_, conn->make_dynamic_buffer(), "\r\n", std::move(self));
if (ec) {
conn->cancel_run();
conn->cancel(Conn::operation::run);
self.complete(ec, 0);
return;
}
}
// If the next message is a push we have to hand it to
// the receive_push_op, wait for it to be done and continue.
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) {
yield async_send_receive(conn->push_channel_, std::move(self));
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
// Notice we don't call cancel(Conn::operation::run) as that is the
// responsibility of the receive_push_op.
self.complete(ec, 0);
return;
}
continue;
}
//-----------------------------------
@@ -170,15 +180,15 @@ struct exec_read_op {
++index;
if (ec) {
conn->cancel_run();
conn->cancel(Conn::operation::run);
self.complete(ec, 0);
return;
}
read_size += n;
BOOST_ASSERT(conn->reqs_.front()->cmds != 0);
--conn->reqs_.front()->cmds;
BOOST_ASSERT(cmds != 0);
--cmds;
BOOST_ASSERT(conn->cmds_ != 0);
--conn->cmds_;
@@ -193,12 +203,12 @@ template <class Conn, class Adapter>
struct exec_op {
using req_info_type = typename Conn::req_info;
Conn* conn;
resp3::request const* req;
Adapter adapter;
std::shared_ptr<req_info_type> info;
Conn* conn = nullptr;
resp3::request const* req = nullptr;
Adapter adapter{};
std::shared_ptr<req_info_type> info = nullptr;
std::size_t read_size = 0;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void
@@ -211,11 +221,12 @@ struct exec_op {
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), conn->resv_.get_executor());
info->timer.expires_at(std::chrono::steady_clock::time_point::max());
info->req = req;
info->cmds = req->commands();
info->cmds = req->size();
info->stop = false;
conn->add_request_info(info);
yield info->timer.async_wait(std::move(self));
yield
info->timer.async_wait(std::move(self));
BOOST_ASSERT(conn->socket_ != nullptr);
BOOST_ASSERT(!!ec);
if (info->stop) {
@@ -225,12 +236,16 @@ struct exec_op {
BOOST_ASSERT(conn->socket_->is_open());
if (req->commands() == 0) {
if (req->size() == 0) {
self.complete({}, 0);
return;
}
yield conn->async_exec_read(adapter, std::move(self));
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->cmds, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
@@ -242,9 +257,9 @@ struct exec_op {
conn->reqs_.pop_front();
if (conn->cmds_ == 0) {
conn->read_timer_.cancel();
conn->read_timer_.cancel_one();
if (!conn->reqs_.empty())
conn->writer_timer_.cancel();
conn->writer_timer_.cancel_one();
} else {
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.front()->timer.cancel_one();
@@ -258,18 +273,19 @@ struct exec_op {
template <class Conn>
struct ping_op {
Conn* conn;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t read_size = 0)
, std::size_t = 0)
{
reenter (coro) for (;;)
{
conn->ping_timer_.expires_after(conn->cfg_.ping_interval);
yield conn->ping_timer_.async_wait(std::move(self));
yield
conn->ping_timer_.async_wait(std::move(self));
BOOST_ASSERT(conn->socket_ != nullptr);
if (ec || !conn->socket_->is_open()) {
self.complete(ec);
@@ -278,8 +294,9 @@ struct ping_op {
conn->req_.clear();
conn->req_.push("PING");
conn->req_.set_internal();
yield conn->async_exec(conn->req_, aedis::adapt(), std::move(self));
conn->req_.close_on_run_completion = true;
yield
conn->async_exec(conn->req_, adapt(), std::move(self));
if (ec) {
// Notice we don't report the error but let the idle check
// time out. It is enough to finish the op.
@@ -293,7 +310,7 @@ struct ping_op {
template <class Conn>
struct check_idle_op {
Conn* conn;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
@@ -301,7 +318,8 @@ struct check_idle_op {
reenter (coro) for (;;)
{
conn->check_idle_timer_.expires_after(2 * conn->cfg_.ping_interval);
yield conn->check_idle_timer_.async_wait(std::move(self));
yield
conn->check_idle_timer_.async_wait(std::move(self));
BOOST_ASSERT(conn->socket_ != nullptr);
if (ec || !conn->socket_->is_open()) {
// Notice this is not an error, it was requested from an
@@ -312,7 +330,7 @@ struct check_idle_op {
auto const now = std::chrono::steady_clock::now();
if (conn->last_data_ + (2 * conn->cfg_.ping_interval) < now) {
conn->cancel_run();
conn->cancel(Conn::operation::run);
self.complete(error::idle_timeout);
return;
}
@@ -325,7 +343,7 @@ struct check_idle_op {
template <class Conn>
struct start_op {
Conn* conn;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -348,22 +366,10 @@ struct start_op {
std::move(self));
switch (order[0]) {
case 0:
{
self.complete(ec0);
} break;
case 1:
{
self.complete(ec1);
} break;
case 2:
{
self.complete(ec2);
} break;
case 3:
{
self.complete(ec3);
} break;
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
case 2: self.complete(ec2); break;
case 3: self.complete(ec3); break;
default: BOOST_ASSERT(false);
}
}
@@ -371,42 +377,135 @@ struct start_op {
};
template <class Conn>
struct run_op {
struct run_one_op {
Conn* conn;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro)
{
yield conn->async_resolve_with_timeout(host, port, std::move(self));
yield
conn->async_resolve_with_timeout(std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
if (conn->cfg_.enable_events) {
yield
conn->event_channel_.async_send({}, Conn::event::resolve, std::move(self));
if (ec) {
self.complete(ec);
return;
}
}
conn->socket_ = std::make_shared<typename Conn::next_layer_type>(conn->resv_.get_executor());
yield conn->async_connect_with_timeout(std::move(self));
yield
conn->async_connect_with_timeout(std::move(self));
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
yield conn->async_start(std::move(self));
if (conn->cfg_.enable_events) {
yield
conn->event_channel_.async_send({}, Conn::event::connect, std::move(self));
if (ec) {
self.complete(ec);
return;
}
}
conn->req_.clear();
if (!std::empty(conn->cfg_.username) && !std::empty(conn->cfg_.password))
conn->req_.push("AUTH", conn->cfg_.username, conn->cfg_.password);
conn->req_.push("HELLO", "3");
conn->ping_timer_.expires_after(conn->cfg_.ping_interval);
yield
async_exec(
*conn->socket_,
conn->ping_timer_,
conn->req_,
adapter::adapt2(),
conn->make_dynamic_buffer(),
std::move(self)
);
if (ec) {
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
if (conn->cfg_.enable_events) {
yield
conn->event_channel_.async_send({}, Conn::event::hello, std::move(self));
if (ec) {
self.complete(ec);
return;
}
}
conn->write_buffer_.clear();
conn->cmds_ = 0;
std::for_each(std::begin(conn->reqs_), std::end(conn->reqs_), [](auto const& ptr) {
return ptr->written = false;
});
yield
conn->async_start(std::move(self));
self.complete(ec);
}
}
};
template <class Conn>
struct run_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro) for(;;)
{
yield
conn->async_run_one(std::move(self));
if (!conn->cfg_.enable_reconnect) {
self.complete(ec);
return;
}
// Consider communicating the return of async_run_one as an
// event here.
conn->ping_timer_.expires_after(conn->cfg_.reconnect_interval);
yield
conn->ping_timer_.async_wait(std::move(self));
}
}
};
template <class Conn>
struct writer_op {
Conn* conn;
typename Conn::reqs_type::iterator end;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -419,8 +518,8 @@ struct writer_op {
{
while (!conn->reqs_.empty() && conn->cmds_ == 0 && conn->write_buffer_.empty()) {
conn->coalesce_requests();
end = conn->reqs_.end();
yield boost::asio::async_write(*conn->socket_, boost::asio::buffer(conn->write_buffer_), std::move(self));
yield
boost::asio::async_write(*conn->socket_, boost::asio::buffer(conn->write_buffer_), std::move(self));
if (ec) {
self.complete(ec);
return;
@@ -430,11 +529,12 @@ struct writer_op {
// order to to use it as a flag that informs there is no
// ongoing write.
conn->write_buffer_.clear();
conn->cancel_push_requests(end);
conn->cancel_push_requests();
}
if (conn->socket_->is_open()) {
yield conn->writer_timer_.async_wait(std::move(self));
yield
conn->writer_timer_.async_wait(std::move(self));
// The timer may be canceled either to stop the write op
// or to proceed to the next write, the difference between
// the two is that for the former the socket will be
@@ -455,7 +555,7 @@ struct writer_op {
template <class Conn>
struct reader_op {
Conn* conn;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -467,9 +567,10 @@ struct reader_op {
reenter (coro) for (;;)
{
BOOST_ASSERT(conn->socket_->is_open());
yield boost::asio::async_read_until(*conn->socket_, conn->make_dynamic_buffer(), "\r\n", std::move(self));
yield
boost::asio::async_read_until(*conn->socket_, conn->make_dynamic_buffer(), "\r\n", std::move(self));
if (ec) {
conn->cancel_run();
conn->cancel(Conn::operation::run);
self.complete(ec);
return;
}
@@ -497,7 +598,8 @@ struct reader_op {
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push
|| conn->reqs_.empty()
|| (!conn->reqs_.empty() && conn->reqs_.front()->cmds == 0)) {
yield async_send_receive(conn->push_channel_, std::move(self));
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
self.complete(ec);
return;
@@ -506,8 +608,9 @@ struct reader_op {
BOOST_ASSERT(conn->cmds_ != 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front()->cmds != 0);
conn->reqs_.front()->timer.cancel();
yield conn->read_timer_.async_wait(std::move(self));
conn->reqs_.front()->timer.cancel_one();
yield
conn->read_timer_.async_wait(std::move(self));
if (!conn->socket_->is_open()) {
self.complete({});
return;
@@ -521,11 +624,9 @@ struct reader_op {
template <class Conn, class Adapter>
struct runexec_op {
Conn* conn;
boost::string_view host;
boost::string_view port;
resp3::request const* req;
resp3::request const* req = nullptr;
Adapter adapter;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -536,20 +637,20 @@ struct runexec_op {
{
reenter (coro)
{
req->close_on_run_completion = true;
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->async_run(host, port, token);},
[this](auto token) { return conn->async_run(token);},
[this](auto token) { return conn->async_exec(*req, adapter, token);}
).async_wait(
boost::asio::experimental::wait_for_one_error(),
std::move(self));
if (ec2) {
self.complete(ec2, n);
} else {
// If there was no error in the async_exec we complete
// with the async_run error, if any.
self.complete(ec1, n);
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1: self.complete(ec2, n); break;
default: BOOST_ASSERT(false);
}
}
}

View File

@@ -18,6 +18,9 @@
namespace aedis {
namespace detail {
template <class Executor>
using conn_timer_t = boost::asio::basic_waitable_timer<std::chrono::steady_clock, boost::asio::wait_traits<std::chrono::steady_clock>, Executor>;
#include <boost/asio/yield.hpp>
template <
@@ -27,9 +30,9 @@ template <
>
struct connect_op {
boost::asio::basic_socket<Protocol, Executor>* socket;
boost::asio::steady_timer* timer;
conn_timer_t<Executor>* timer;
EndpointSequence* endpoints;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -53,36 +56,29 @@ struct connect_op {
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1, ep);
return;
}
} break;
case 0: self.complete(ec1, ep); break;
case 1:
{
if (!ec2) {
if (ec2)
self.complete({}, ep);
else
self.complete(error::connect_timeout, ep);
return;
}
} break;
default: BOOST_ASSERT(false);
}
self.complete({}, ep);
}
}
};
template <class Resolver, class Timer>
struct resolve_op {
boost::asio::ip::tcp::resolver* resv;
boost::asio::steady_timer* timer;
Resolver* resv;
Timer* timer;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -129,22 +125,24 @@ struct resolve_op {
template <class Channel>
struct send_receive_op {
Channel* channel;
boost::asio::coroutine coro;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
, std::size_t = 0)
{
reenter (coro)
{
yield channel->async_send(boost::system::error_code{}, 0, std::move(self));
yield
channel->async_send(boost::system::error_code{}, 0, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield channel->async_receive(std::move(self));
yield
channel->async_receive(std::move(self));
self.complete(ec, 0);
}
}
@@ -160,7 +158,7 @@ template <
>
auto async_connect(
boost::asio::basic_socket<Protocol, Executor>& socket,
boost::asio::steady_timer& timer,
conn_timer_t<Executor>& timer,
EndpointSequence ep,
CompletionToken&& token = boost::asio::default_completion_token_t<Executor>{})
{
@@ -172,20 +170,24 @@ auto async_connect(
}
template <
class Resolver,
class Timer,
class CompletionToken =
boost::asio::default_completion_token_t<boost::asio::ip::tcp::resolver::executor_type>
boost::asio::default_completion_token_t<typename Resolver::executor_type>
>
auto async_resolve(
boost::asio::ip::tcp::resolver& resv,
boost::asio::steady_timer& timer,
Resolver& resv,
Timer& timer,
boost::string_view host,
boost::string_view port,
CompletionToken&& token = CompletionToken{})
{
// TODO: Use static_assert to check Resolver::executor_type and
// Timer::executor_type are the same.
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, boost::asio::ip::tcp::resolver::results_type)
>(resolve_op{&resv, &timer, host, port}, token, resv, timer);
>(resolve_op<Resolver, Timer>{&resv, &timer, host, port}, token, resv, timer);
}
template <

View File

@@ -25,6 +25,9 @@ enum class error
/// Idle timeout.
idle_timeout,
/// Exec timeout.
exec_timeout,
/// Invalid RESP3 type.
invalid_data_type,
@@ -44,16 +47,16 @@ enum class error
empty_field,
/// Expects a simple RESP3 type but got an aggregate.
expects_simple_type,
expects_resp3_simple_type,
/// Expects aggregate type.
expects_aggregate_type,
/// Expects aggregate.
expects_resp3_aggregate,
/// Expects a map but got other aggregate.
expects_map_type,
expects_resp3_map,
/// Expects a set aggregate but got something else.
expects_set_type,
expects_resp3_set,
/// Nested response not supported.
nested_aggregate_unsupported,
@@ -70,7 +73,7 @@ enum class error
/// Not a double
not_a_double,
/// Got RESP3 null type.
/// Got RESP3 null.
null
};

View File

@@ -22,16 +22,17 @@ struct error_category_impl : boost::system::error_category {
case error::resolve_timeout: return "Resolve operation timeout.";
case error::connect_timeout: return "Connect operation timeout.";
case error::idle_timeout: return "Idle timeout.";
case error::exec_timeout: return "Exec timeout.";
case error::invalid_data_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number.";
case error::unexpected_read_size: return "Unexpected read size.";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
case error::unexpected_bool_value: return "Unexpected bool value.";
case error::empty_field: return "Expected field value is empty.";
case error::expects_simple_type: return "Expects a simple RESP3 type.";
case error::expects_aggregate_type: return "Expects aggregate type.";
case error::expects_map_type: return "Expects map type.";
case error::expects_set_type: return "Expects set type.";
case error::expects_resp3_simple_type: return "Expects a resp3 simple type.";
case error::expects_resp3_aggregate: return "Expects resp3 aggregate.";
case error::expects_resp3_map: return "Expects resp3 map.";
case error::expects_resp3_set: return "Expects resp3 set.";
case error::nested_aggregate_unsupported: return "Nested aggregate unsupported.";
case error::simple_error: return "Got RESP3 simple-error.";
case error::blob_error: return "Got RESP3 blob-error.";

View File

@@ -32,40 +32,51 @@ template <
class DynamicBuffer
>
struct exec_op {
AsyncStream* socket;
request const* req;
AsyncStream* socket = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf;
boost::asio::coroutine coro;
DynamicBuffer dbuf{};
std::size_t n_cmds = 0;
std::size_t size = 0;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
reenter (coro) for (;;)
{
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
if (req) {
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
if (ec || n_cmds == 0) {
self.complete(ec, n);
return;
}
req = nullptr;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
self.complete(ec, n);
size += n;
if (--n_cmds == 0) {
self.complete(ec, size);
return;
}
}
}
};
#include <boost/asio/unyield.hpp>
} // detail
template <
class AsyncStream,
class Adapter,
@@ -83,25 +94,22 @@ auto async_exec(
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &req, adapter, dbuf}, token, socket);
{&socket, &req, adapter, dbuf, req.size()}, token, socket);
}
namespace detail {
#include <boost/asio/yield.hpp>
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer
>
struct exec_with_timeout_op {
AsyncStream* socket;
boost::asio::steady_timer* timer;
request const* req;
AsyncStream* socket = nullptr;
Timer* timer = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf;
boost::asio::coroutine coro;
DynamicBuffer dbuf{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
@@ -114,50 +122,41 @@ struct exec_with_timeout_op {
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resp3::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return detail::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1, 0);
return;
}
} break;
case 0: self.complete(ec1, n); break;
case 1:
{
if (!ec2) {
self.complete(aedis::error::idle_timeout, 0);
return;
}
if (ec2)
self.complete({}, n);
else
self.complete(aedis::error::exec_timeout, 0);
} break;
default: BOOST_ASSERT(false);
}
self.complete({}, n);
}
}
};
#include <boost/asio/unyield.hpp>
} // detail
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
boost::asio::steady_timer& timer,
Timer& timer,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
@@ -166,10 +165,11 @@ auto async_exec(
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_with_timeout_op<AsyncStream, Adapter, DynamicBuffer>
>(detail::exec_with_timeout_op<AsyncStream, Timer, Adapter, DynamicBuffer>
{&socket, &timer, &req, adapter, dbuf}, token, socket, timer);
}
} // detail
} // resp3
} // aedis

View File

@@ -37,7 +37,7 @@ private:
parser<ResponseAdapter> parser_;
std::size_t consumed_;
std::size_t buffer_size_;
boost::asio::coroutine coro_;
boost::asio::coroutine coro_{};
public:
parse_op(AsyncReadStream& stream, DynamicBuffer buf, ResponseAdapter adapter)

View File

@@ -4,9 +4,11 @@
* accompanying file LICENSE.txt)
*/
#include <aedis/command.hpp>
#include <aedis/resp3/request.hpp>
namespace aedis {
namespace resp3 {
namespace detail {
bool has_push_response(boost::string_view cmd)
{
@@ -16,4 +18,6 @@ bool has_push_response(boost::string_view cmd)
return false;
}
} // detail
} // resp3
} // aedis

View File

@@ -12,27 +12,25 @@ namespace resp3 {
char const* to_string(type t)
{
static char const* table[] =
{ "array"
, "push"
, "set"
, "map"
, "attribute"
, "simple_string"
, "simple_error"
, "number"
, "doublean"
, "boolean"
, "big_number"
, "null"
, "blob_error"
, "verbatim_string"
, "blob_string"
, "streamed_string_part"
, "invalid"
};
return table[static_cast<int>(t)];
switch (t) {
case type::array: return "array";
case type::push: return "push";
case type::set: return "set";
case type::map: return "map";
case type::attribute: return "attribute";
case type::simple_string: return "simple_string";
case type::simple_error: return "simple_error";
case type::number: return "number";
case type::doublean: return "doublean";
case type::boolean: return "boolean";
case type::big_number: return "big_number";
case type::null: return "null";
case type::blob_error: return "blob_error";
case type::verbatim_string: return "verbatim_string";
case type::blob_string: return "blob_string";
case type::streamed_string_part: return "streamed_string_part";
default: return "invalid";
}
}
std::ostream& operator<<(std::ostream& os, type t)

View File

@@ -39,10 +39,10 @@ struct node {
String value;
};
/** \brief Converts the node to a string.
* \ingroup any
/** @brief Converts the node to a string.
* @relates node
*
* \param in The node object.
* @param in The node object.
*/
template <class String>
std::string to_string(node<String> const& in)
@@ -60,8 +60,11 @@ std::string to_string(node<String> const& in)
return out;
}
/** \brief Compares a node for equality.
* \ingroup any
/** @brief Compares a node for equality.
* @relates node
*
* @param a Left hand side node object.
* @param b Right hand side node object.
*/
template <class String>
bool operator==(node<String> const& a, node<String> const& b)
@@ -72,15 +75,18 @@ bool operator==(node<String> const& a, node<String> const& b)
&& a.value == b.value;
};
/** \brief Writes the node string to the stream.
* \ingroup any
/** @brief Writes the node string to the stream.
* @relates node
*
* NOTE: Binary data is not converted to text.
* @param os Output stream.
* @param node Node object.
*
* \remark Binary data is not converted to text.
*/
template <class String>
std::ostream& operator<<(std::ostream& os, node<String> const& o)
std::ostream& operator<<(std::ostream& os, node<String> const& node)
{
os << to_string(o);
os << to_string(node);
return os;
}

View File

@@ -7,8 +7,13 @@
#ifndef AEDIS_RESP3_REQUEST_HPP
#define AEDIS_RESP3_REQUEST_HPP
#include <string>
#include <tuple>
#include <boost/hana.hpp>
#include <aedis/resp3/compose.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/type.hpp>
// NOTE: Consider detecting tuples in the type in the parameter pack
// to calculate the header size correctly.
@@ -19,14 +24,137 @@
namespace aedis {
namespace resp3 {
/** @brief Creates Redis requests from user data.
constexpr char separator[] = "\r\n";
/** @brief Adds a bulk to the request.
* @relates request
*
* This function is useful when serializing your own data
* structures into a request. For example
*
* @code
* void to_bulk(std::string& to, mystruct const& obj)
* {
* auto const str = // Convert obj to a string.
* resp3::to_bulk(to, str);
* }
* @endcode
*
* @param to Storage on which data will be copied into.
* @param data Data that will be serialized and stored in @c to.
*
* See more in \ref requests-serialization.
*/
template <class Request>
void to_bulk(Request& to, boost::string_view data)
{
auto const str = std::to_string(data.size());
to += to_code(type::blob_string);
to.append(std::cbegin(str), std::cend(str));
to += separator;
to.append(std::cbegin(data), std::cend(data));
to += separator;
}
template <class Request, class T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, boost::string_view{s});
}
namespace detail {
bool has_push_response(boost::string_view cmd);
template <class T>
struct add_bulk_impl {
template <class Request>
static void add(Request& to, T const& from)
{
using namespace aedis::resp3;
to_bulk(to, from);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
static void add(Request& to, std::pair<U, V> const& from)
{
using namespace aedis::resp3;
to_bulk(to, from.first);
to_bulk(to, from.second);
}
};
template <class ...Ts>
struct add_bulk_impl<boost::hana::tuple<Ts...>> {
template <class Request>
static void add(Request& to, boost::hana::tuple<Ts...> const& from)
{
using boost::hana::for_each;
// Fold expressions are C++17, so we use hana.
//(detail::add_bulk(*request_, args), ...);
for_each(from, [&](auto const& e) {
using namespace aedis::resp3;
to_bulk(to, e);
});
}
};
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
auto const str = std::to_string(size);
to += to_code(t);
to.append(std::cbegin(str), std::cend(str));
to += separator;
}
template <class Request, class T>
void add_bulk(Request& to, T const& data)
{
detail::add_bulk_impl<T>::add(to, data);
}
template <class>
struct bulk_counter;
template <class>
struct bulk_counter {
static constexpr auto size = 1U;
};
template <class T, class U>
struct bulk_counter<std::pair<T, U>> {
static constexpr auto size = 2U;
};
template <class Request>
void add_blob(Request& to, boost::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
}
template <class Request>
void add_separator(Request& to)
{
to += separator;
}
} // detail
/** @brief Creates Redis requests.
* \ingroup any
*
* A request is composed of one or more Redis commands and is
* referred to in the redis documentation as a pipeline, see
* https://redis.io/topics/pipelining. For example
*
* @code
* request r;
@@ -44,9 +172,9 @@ namespace resp3 {
class request {
public:
/// Returns the number of commands contained in this request.
std::size_t size() const noexcept { return commands_;}
/// Returns the request payload.
auto const& payload() const noexcept { return payload_;}
/// Clears the request preserving allocated memory.
@@ -78,14 +206,12 @@ public:
using boost::hana::make_tuple;
using resp3::type;
auto const before = payload_.size();
auto constexpr pack_size = sizeof...(Ts);
resp3::add_header(payload_, type::array, 1 + pack_size);
resp3::add_bulk(payload_, cmd);
resp3::add_bulk(payload_, make_tuple(args...));
detail::add_header(payload_, type::array, 1 + pack_size);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, make_tuple(args...));
auto const after = payload_.size();
if (!has_push_response(cmd))
if (!detail::has_push_response(cmd))
++commands_;
}
@@ -119,19 +245,16 @@ public:
if (begin == end)
return;
auto const before = payload_.size();
auto constexpr size = resp3::bulk_counter<value_type>::size;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
resp3::add_header(payload_, type::array, 2 + size * distance);
resp3::add_bulk(payload_, cmd);
resp3::add_bulk(payload_, key);
detail::add_header(payload_, type::array, 2 + size * distance);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, key);
for (; begin != end; ++begin)
resp3::add_bulk(payload_, *begin);
detail::add_bulk(payload_, *begin);
auto const after = payload_.size();
if (!has_push_response(cmd))
if (!detail::has_push_response(cmd))
++commands_;
}
@@ -161,17 +284,15 @@ public:
if (begin == end)
return;
auto const before = payload_.size();
auto constexpr size = resp3::bulk_counter<value_type>::size;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
resp3::add_header(payload_, type::array, 1 + size * distance);
resp3::add_bulk(payload_, cmd);
detail::add_header(payload_, type::array, 1 + size * distance);
detail::add_bulk(payload_, cmd);
for (; begin != end; ++begin)
resp3::add_bulk(payload_, *begin);
detail::add_bulk(payload_, *begin);
auto const after = payload_.size();
if (!has_push_response(cmd))
if (!detail::has_push_response(cmd))
++commands_;
}
@@ -206,13 +327,11 @@ public:
push_range2(cmd, begin(range), end(range));
}
void set_internal() noexcept { is_internal_ = true;}
bool is_internal() const noexcept { return is_internal_;}
mutable bool close_on_run_completion = false;
private:
std::string payload_;
std::size_t commands_ = 0;
bool is_internal_ = true;
};
} // resp3
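For orientation, a minimal usage sketch of the request class after this change. It is not taken from the diff and assumes the push/push_range overloads and the renamed size() accessor shown above:
#include <map>
#include <string>
#include <aedis/resp3/request.hpp>
int main()
{
   using aedis::resp3::request;
   request req;
   req.push("PING", "Hello");                    // simple command with one argument
   std::map<std::string, std::string> const data =
      { {"key1", "value1"}, {"key2", "value2"} };
   req.push_range("HSET", "hkey", data);         // command with a key and a range
   return req.size() == 2 ? 0 : 1;               // number of commands queued
}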


@@ -5,6 +5,6 @@
*/
#include <aedis/impl/error.ipp>
#include <aedis/impl/command.ipp>
#include <aedis/resp3/impl/request.ipp>
#include <aedis/resp3/impl/type.ipp>
#include <aedis/resp3/detail/impl/parser.ipp>

include/aedis/sync.hpp Normal file

@@ -0,0 +1,245 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SYNC_HPP
#define AEDIS_SYNC_HPP
#include <condition_variable>
#include <aedis/resp3/request.hpp>
namespace aedis {
/** @brief A high level synchronous connection to Redis.
* @ingroup any
*
* This class keeps a healthy, thread-safe connection to the Redis
* instance, to which commands can be sent at any time. For more details,
* please see the documentation of each individual function.
*
*/
template <class Connection>
class sync {
public:
using event = typename Connection::event;
using config = typename Connection::config;
/** @brief Constructor
*
* @param ex Executor
* @param cfg Config options.
*/
template <class Executor>
sync(Executor ex, config cfg = config{}) : conn_{ex, cfg} { }
/** @brief Executes a request synchronously.
*
* The function calls `connection::async_exec` and waits
* for its completion.
*
* @param req The request.
* @param adapter The response adapter.
* @param ec Error code in case of error.
* @returns The number of bytes of the response.
*/
template <class ResponseAdapter>
std::size_t
exec(resp3::request const& req, ResponseAdapter adapter, boost::system::error_code& ec)
{
sync_helper sh;
std::size_t res = 0;
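// Pattern used by all synchronous calls in this class: start the async
// operation on the connection's executor via dispatch() and block the
// calling thread on a condition variable until the completion handler
// sets sh.ready and notifies it.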
auto f = [this, &ec, &res, &sh, &req, adapter]()
{
conn_.async_exec(req, adapter, [&sh, &res, &ec](auto const& ecp, std::size_t n) {
std::unique_lock ul(sh.mutex);
ec = ecp;
res = n;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
return res;
}
/** @brief Executes a command synchronously
*
* The function calls `connection::async_exec` and waits for its
* completion.
*
* @param req The request.
* @param adapter The response adapter.
* @throws std::system_error in case of error.
* @returns The number of bytes of the response.
*/
template <class ResponseAdapter = detail::response_traits<void>::adapter_type>
std::size_t exec(resp3::request const& req, ResponseAdapter adapter = aedis::adapt())
{
boost::system::error_code ec;
auto const res = exec(req, adapter, ec);
if (ec)
throw std::system_error(ec);
return res;
}
/** @brief Receives server pushes synchronously.
*
* The function calls `connection::async_receive_push` and
* waits for its completion.
*
* @param adapter The response adapter.
* @param ec Error code in case of error.
* @returns The number of bytes received.
*/
template <class ResponseAdapter>
auto receive_push(ResponseAdapter adapter, boost::system::error_code& ec)
{
sync_helper sh;
std::size_t res = 0;
auto f = [this, &ec, &res, &sh, adapter]()
{
conn_.async_receive_push(adapter, [&ec, &res, &sh](auto const& e, std::size_t n) {
std::unique_lock ul(sh.mutex);
ec = e;
res = n;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
return res;
}
/** @brief Receives server pushes synchronously.
*
* The function calls `connection::async_receive_push` and
* waits for its completion.
*
* @param adapter The response adapter.
* @throws std::system_error in case of error.
* @returns The number of bytes received.
*/
template <class ResponseAdapter = aedis::detail::response_traits<void>::adapter_type>
auto receive_push(ResponseAdapter adapter = aedis::adapt())
{
boost::system::error_code ec;
auto const res = receive_push(adapter, ec);
if (ec)
throw std::system_error(ec);
return res;
}
/** @brief Receives events synchronously.
*
* The function calls `connection::async_receive_event` and
* waits for its completion.
*
* @param ec Error code in case of error.
* @returns The event received.
*/
auto receive_event(boost::system::error_code& ec)
{
sync_helper sh;
auto res = event::invalid;
auto f = [this, &ec, &res, &sh]()
{
conn_.async_receive_event([&ec, &res, &sh](auto const& ecp, event ev) {
std::unique_lock ul(sh.mutex);
ec = ecp;
res = ev;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
return res;
}
/** @brief Receives events synchronously
*
* The function calls `connection::async_receive_event` and
* waits for its completion.
*
* @throws std::system_error in case of error.
* @returns The event received.
*/
auto receive_event()
{
boost::system::error_code ec;
auto const res = receive_event(ec);
if (ec)
throw std::system_error(ec);
return res;
}
/** @brief Calls \c async_run from the underlying connection.
*
* The function calls `connection::async_run` and waits for its
* completion.
*
* @param ec Error code.
*/
void run(boost::system::error_code& ec)
{
sync_helper sh;
auto f = [this, &ec, &sh]()
{
conn_.async_run([&ec, &sh](auto const& e) {
std::unique_lock ul(sh.mutex);
ec = e;
sh.ready = true;
ul.unlock();
sh.cv.notify_one();
});
};
boost::asio::dispatch(boost::asio::bind_executor(conn_.get_executor(), f));
std::unique_lock lk(sh.mutex);
sh.cv.wait(lk, [&sh]{return sh.ready;});
}
/** @brief Calls \c async_run from the underlying connection.
*
* The function calls `connection::async_run` and waits for its
* completion.
*
* @throws std::system_error.
*/
void run()
{
boost::system::error_code ec;
run(ec);
if (ec)
throw std::system_error(ec);
}
private:
struct sync_helper {
std::mutex mutex;
std::condition_variable cv;
bool ready = false;
};
Connection conn_;
};
} // aedis
#endif // AEDIS_SYNC_HPP
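To make the intended use of the wrapper concrete, here is a hedged sketch (not part of this commit): it assumes the default config points at a local Redis on 127.0.0.1:6379 and that the io_context is driven from background threads, since exec() and run() block the calling thread.
#include <thread>
#include <tuple>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis.hpp>
#include <aedis/sync.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using sync_conn = aedis::sync<aedis::connection<>>;
int main()
{
   net::io_context ioc{1};
   sync_conn db{ioc.get_executor()};
   // The dispatched async operations only make progress while the
   // io_context is running; keep it alive with a work guard.
   auto guard = net::make_work_guard(ioc);
   std::thread ioc_thread{[&]() { ioc.run(); }};
   // run() blocks until the connection terminates, so give it its own thread.
   std::thread run_thread{[&]() {
      boost::system::error_code ec;
      db.run(ec);
   }};
   request req;
   req.push("PING", "Hello");
   req.push("QUIT");
   std::tuple<std::string, std::string> resp;
   db.exec(req, aedis::adapt(resp));
   std::cout << std::get<0>(resp) << std::endl; // prints "Hello"
   run_thread.join();
   guard.reset();
   ioc_thread.join();
}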


@@ -1,62 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <stdlib.h>
template <class T>
void expect_eq(T const& a, T const& b, std::string const& msg = "")
{
if (a == b) {
if (!msg.empty())
std::cout << "Success: " << msg << std::endl;
} else {
std::cout << "Error: " << msg << std::endl;
exit(EXIT_FAILURE);
}
}
template <class T>
void expect_neq(T const& a, T const& b, std::string const& msg = "")
{
if (a != b) {
if (!msg.empty())
std::cout << "Success: " << msg << std::endl;
} else {
std::cout << "Error: " << msg << std::endl;
exit(EXIT_FAILURE);
}
}
template <class T>
void expect_error(boost::system::error_code a, T expected = {}, std::string const& msg = "")
{
if (a == expected) {
if (a)
std::cout << "Success: " << a.message() << " (" << a.category().name() << ") " << msg << std::endl;
} else {
std::cout << "Error: " << a.message() << " (" << a.category().name() << ") " << msg << std::endl;
exit(EXIT_FAILURE);
}
}
inline
void expect_no_error(boost::system::error_code ec, std::string const& msg)
{
expect_error(ec, boost::system::error_code{}, msg);
}
template <class T>
void check_empty(T const& t)
{
if (t.empty()) {
//std::cout << "Success: " << std::endl;
} else {
std::cout << "Error: Not empty" << std::endl;
exit(EXIT_FAILURE);
}
}

tests/connection.cpp Normal file

@@ -0,0 +1,491 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
// TODO: Avoid usage of co_await so the tests also build on compilers that
// don't support it.
// TODO: Add reconnect test that kills the server and waits some
// seconds.
#include <iostream>
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using connection = aedis::connection<>;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
bool is_host_not_found(boost::system::error_code ec)
{
if (ec == net::error::netdb_errors::host_not_found) return true;
if (ec == net::error::netdb_errors::host_not_found_try_again) return true;
return false;
}
//----------------------------------------------------------------
// Tests whether resolve fails with the correct error.
BOOST_AUTO_TEST_CASE(test_resolve)
{
connection::config cfg;
cfg.host = "Atibaia";
cfg.port = "6379";
cfg.resolve_timeout = std::chrono::seconds{100};
net::io_context ioc;
connection db{ioc, cfg};
db.async_run([](auto ec) {
BOOST_TEST(is_host_not_found(ec));
});
ioc.run();
}
//----------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_connect)
{
connection::config cfg;
cfg.host = "127.0.0.1";
cfg.port = "1";
cfg.connect_timeout = std::chrono::seconds{100};
net::io_context ioc;
connection db{ioc, cfg};
db.async_run([](auto ec) {
BOOST_CHECK_EQUAL(ec, net::error::basic_errors::connection_refused);
});
ioc.run();
}
//----------------------------------------------------------------
// Test if quit causes async_run to exit.
void test_quit1(connection::config const& cfg)
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("QUIT");
db->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
ioc.run();
}
void test_quit2(connection::config const& cfg)
{
std::cout << "test_quit2" << std::endl;
request req;
req.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(test_quit)
{
connection::config cfg;
cfg.coalesce_requests = true;
test_quit1(cfg);
cfg.coalesce_requests = false;
test_quit1(cfg);
cfg.coalesce_requests = true;
test_quit2(cfg);
cfg.coalesce_requests = false;
test_quit2(cfg);
}
// Checks whether we get idle timeout when no push reader is set.
void test_missing_push_reader1(connection::config const& cfg)
{
std::cout << "test_missing_push_reader1" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("SUBSCRIBE", "channel");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
void test_missing_push_reader2(connection::config const& cfg)
{
std::cout << "test_missing_push_reader2" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req; // Wrong command syntax.
req.push("SUBSCRIBE");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
void test_missing_push_reader3(connection::config const& cfg)
{
std::cout << "test_missing_push_reader3" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req; // Wrong command syntax.
req.push("PING", "Message");
req.push("SUBSCRIBE");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
BOOST_AUTO_TEST_CASE(test_idle)
{
std::chrono::milliseconds ms{5000};
{
std::cout << "test_idle" << std::endl;
connection::config cfg;
cfg.resolve_timeout = std::chrono::seconds{1};
cfg.connect_timeout = std::chrono::seconds{1};
cfg.ping_interval = std::chrono::seconds{1};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("CLIENT", "PAUSE", ms.count());
db->async_exec(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
db->async_run([](auto ec){
BOOST_CHECK_EQUAL(ec, aedis::error::idle_timeout);
});
ioc.run();
}
//----------------------------------------------------------------
// Since we have paused the server above, we have to wait until the
// server is responsive again, so as not to cause other tests to
// fail.
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->get_config().ping_interval = 2* ms;
db->get_config().resolve_timeout = 2 * ms;
db->get_config().connect_timeout = 2 * ms;
db->get_config().ping_interval = 2 * ms;
request req;
req.push("QUIT");
db->async_run(req, adapt(), [](auto ec, auto){
BOOST_TEST(!ec);
});
ioc.run();
}
}
#ifdef BOOST_ASIO_HAS_CO_AWAIT
net::awaitable<void> push_consumer1(std::shared_ptr<connection> db, bool& push_received)
{
{
auto [ec, ev] = co_await db->async_receive_push(adapt(), as_tuple(net::use_awaitable));
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_push(adapt(), as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
}
push_received = true;
}
net::awaitable<void> event_consumer1(std::shared_ptr<connection> db, bool& event_received)
{
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
auto const r = ev == connection::event::resolve;
BOOST_TEST(r);
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
auto const r = ev == connection::event::connect;
BOOST_TEST(r);
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
auto const r = ev == connection::event::hello;
BOOST_TEST(r);
BOOST_TEST(!ec);
}
{
auto [ec, ev] = co_await db->async_receive_event(as_tuple(net::use_awaitable));
BOOST_CHECK_EQUAL(ec, boost::asio::experimental::channel_errc::channel_cancelled);
}
event_received = true;
}
void test_push_is_received1(connection::config const& cfg)
{
std::cout << "test_push_is_received1" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->get_config().enable_events = true;
request req;
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
db->async_run(req, adapt(), [db](auto ec, auto){
BOOST_TEST(!ec);
db->cancel(connection::operation::receive_event);
db->cancel(connection::operation::receive_push);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(db, push_received),
net::detached);
bool event_received = false;
net::co_spawn(
ioc.get_executor(),
event_consumer1(db, event_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
BOOST_TEST(event_received);
}
void test_push_is_received2(connection::config const& cfg)
{
request req1;
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("PING", "Message2");
req3.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->get_config().enable_events = true;
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req3, adapt(), handler);
db->async_run([db](auto ec, auto...) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(connection::operation::receive_event);
db->cancel(connection::operation::receive_push);
});
bool push_received = false;
net::co_spawn(
ioc.get_executor(),
push_consumer1(db, push_received),
net::detached);
bool event_received = false;
net::co_spawn(
ioc.get_executor(),
event_consumer1(db, event_received),
net::detached);
ioc.run();
BOOST_TEST(push_received);
BOOST_TEST(event_received);
}
net::awaitable<void> test_reconnect_impl(std::shared_ptr<connection> db)
{
request req;
req.push("QUIT");
for (auto i = 0;;) {
auto ev = co_await db->async_receive_event(net::use_awaitable);
auto const r1 = ev == connection::event::resolve;
BOOST_TEST(r1);
ev = co_await db->async_receive_event(net::use_awaitable);
auto const r2 = ev == connection::event::connect;
BOOST_TEST(r2);
ev = co_await db->async_receive_event(net::use_awaitable);
auto const r3 = ev == connection::event::hello;
BOOST_TEST(r3);
co_await db->async_exec(req, adapt(), net::use_awaitable);
// Tests 5 reconnections and then returns.
++i;
if (i == 5) {
db->get_config().enable_reconnect = false;
co_return;
}
}
co_return;
}
// Test whether the client works after a reconnect.
void test_reconnect()
{
std::cout << "Start: test_reconnect" << std::endl;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc.get_executor());
db->get_config().enable_events = true;
db->get_config().enable_reconnect = true;
db->get_config().reconnect_interval = std::chrono::milliseconds{100};
net::co_spawn(ioc, test_reconnect_impl(db), net::detached);
db->async_run([](auto ec) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
});
ioc.run();
std::cout << "End: test_reconnect()" << std::endl;
}
net::awaitable<void>
push_consumer3(std::shared_ptr<connection> db)
{
for (;;)
co_await db->async_receive_push(adapt(), net::use_awaitable);
}
// Test many subscribe requests.
void test_push_many_subscribes(connection::config const& cfg)
{
std::cout << "test_push_many_subscribes" << std::endl;
request req0;
req0.push("HELLO", 3);
request req1;
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("QUIT");
auto handler =[](auto ec, auto...)
{
BOOST_TEST(!ec);
};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
db->async_exec(req0, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req1, adapt(), handler);
db->async_exec(req2, adapt(), handler);
db->async_exec(req3, adapt(), handler);
db->async_run([db](auto ec, auto...) {
BOOST_CHECK_EQUAL(ec, net::error::misc_errors::eof);
db->cancel(connection::operation::receive_push);
});
net::co_spawn(ioc.get_executor(), push_consumer3(db), net::detached);
ioc.run();
}
#endif
BOOST_AUTO_TEST_CASE(test_push)
{
connection::config cfg;
cfg.coalesce_requests = true;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
test_push_is_received1(cfg);
test_push_is_received2(cfg);
test_push_many_subscribes(cfg);
#endif
test_missing_push_reader1(cfg);
test_missing_push_reader3(cfg);
cfg.coalesce_requests = false;
#ifdef BOOST_ASIO_HAS_CO_AWAIT
test_push_is_received1(cfg);
test_push_is_received2(cfg);
test_push_many_subscribes(cfg);
#endif
test_missing_push_reader2(cfg);
test_missing_push_reader3(cfg);
}


@@ -1,391 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <iostream>
#include <map>
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include <boost/asio.hpp>
#include <boost/system/errc.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#include <aedis/aedis.hpp>
#include <aedis/src.hpp>
#include "check.hpp"
//std::cout << "aaaa " << ec.message() << " " << cmd << " " << n << std::endl;
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::request;
using connection = aedis::connection<>;
using error_code = boost::system::error_code;
using net::experimental::as_tuple;
using tcp = net::ip::tcp;
using boost::system::error_code;
auto print_read = [](auto cmd, auto n)
{
std::cout << cmd << ": " << n << std::endl;
};
//----------------------------------------------------------------
void test_resolve()
{
connection::config cfg;
cfg.resolve_timeout = std::chrono::seconds{100};
net::io_context ioc;
connection db{ioc, cfg};
db.async_run("Atibaia", "6379", [](auto ec) {
expect_error(ec, net::error::netdb_errors::host_not_found, "test_resolve");
});
ioc.run();
}
//----------------------------------------------------------------
void test_connect()
{
connection::config cfg;
cfg.connect_timeout = std::chrono::seconds{100};
net::io_context ioc;
connection db{ioc, cfg};
db.async_run("127.0.0.1", "1", [](auto ec) {
expect_error(ec, net::error::basic_errors::connection_refused, "test_connect");
});
ioc.run();
}
//----------------------------------------------------------------
// Test if quit causes async_run to exit.
void test_quit1()
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req;
req.push("HELLO", 3);
req.push("QUIT");
db->async_exec(req, aedis::adapt(), [](auto ec, auto r){
expect_no_error(ec, "test_quit1");
});
db->async_run("127.0.0.1", "6379", [](auto ec){
expect_error(ec, net::error::misc_errors::eof, "test_quit1");
});
ioc.run();
}
void test_quit2()
{
request req;
req.push("HELLO", 3);
req.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec("127.0.0.1", "6379", req, aedis::adapt(), [](auto ec, auto n){
expect_error(ec, net::error::misc_errors::eof, "test_quit2");
});
ioc.run();
}
//----------------------------------------------------------------
net::awaitable<void>
push_consumer1(std::shared_ptr<connection> db)
{
{
auto [ec, n] = co_await db->async_read_push(aedis::adapt(), as_tuple(net::use_awaitable));
expect_no_error(ec, "push_consumer1");
}
{
auto [ec, n] = co_await db->async_read_push(aedis::adapt(), as_tuple(net::use_awaitable));
expect_error(ec, boost::asio::experimental::channel_errc::channel_cancelled, "push_consumer1");
}
}
// Tests whether a push is indeed delivered.
void test_push1()
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
req.push("QUIT");
db->async_exec("127.0.0.1", "6379", req, aedis::adapt(), [](auto ec, auto r){
expect_error(ec, net::error::misc_errors::eof, "test_push1");
});
net::co_spawn(ioc.get_executor(), push_consumer1(db), net::detached);
ioc.run();
}
//----------------------------------------------------------------
net::awaitable<void> run5(std::shared_ptr<connection> db)
{
{
request req;
req.push("QUIT");
db->async_exec(req, aedis::adapt(), [](auto ec, auto n){
expect_no_error(ec, "test_quit1");
});
auto [ec] = co_await db->async_run("127.0.0.1", "6379", as_tuple(net::use_awaitable));
expect_error(ec, net::error::misc_errors::eof, "run5a");
}
{
request req;
req.push("QUIT");
db->async_exec(req, aedis::adapt(), [](auto ec, auto n){
expect_no_error(ec, "test_quit1");
});
auto [ec] = co_await db->async_run("127.0.0.1", "6379", as_tuple(net::use_awaitable));
expect_error(ec, net::error::misc_errors::eof, "run5a");
}
co_return;
}
// Test whether the client works after a reconnect.
void test_reconnect()
{
net::io_context ioc;
auto db = std::make_shared<connection>(ioc.get_executor());
net::co_spawn(ioc, run5(db), net::detached);
ioc.run();
std::cout << "Success: test_reconnect()" << std::endl;
}
// Checks whether we get idle timeout when no push reader is set.
void test_no_push_reader1()
{
connection::config cfg;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
db->async_exec("127.0.0.1", "6379", req, aedis::adapt(), [](auto ec, auto r){
expect_error(ec, aedis::error::idle_timeout, "test_no_push_reader1");
});
ioc.run();
}
void test_no_push_reader2()
{
connection::config cfg;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req; // Wrong command syntax.
req.push("HELLO", 3);
req.push("SUBSCRIBE");
db->async_exec("127.0.0.1", "6379", req, aedis::adapt(), [](auto ec, auto r){
expect_error(ec, aedis::error::idle_timeout, "test_no_push_reader2");
});
ioc.run();
}
void test_no_push_reader3()
{
connection::config cfg;
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req; // Wrong command syntax.
req.push("HELLO", 3);
req.push("PING", "Message");
req.push("SUBSCRIBE");
db->async_exec("127.0.0.1", "6379", req, aedis::adapt(), [](auto ec, auto r){
expect_error(ec, aedis::error::idle_timeout, "test_no_push_reader3");
});
ioc.run();
}
void test_idle()
{
connection::config cfg;
cfg.resolve_timeout = std::chrono::seconds{1};
cfg.connect_timeout = std::chrono::seconds{1};
cfg.ping_interval = std::chrono::seconds{1};
net::io_context ioc;
auto db = std::make_shared<connection>(ioc, cfg);
request req;
req.push("HELLO", 3);
req.push("CLIENT", "PAUSE", 5000);
db->async_exec(req, aedis::adapt(), [](auto ec, auto r){
expect_no_error(ec, "test_idle");
});
db->async_run("127.0.0.1", "6379", [](auto ec){
expect_error(ec, aedis::error::idle_timeout, "test_idle");
});
ioc.run();
}
auto handler =[](auto ec, auto...)
{ std::cout << ec.message() << std::endl; };
void test_push2()
{
request req1;
req1.push("HELLO", 3);
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("PING", "Message2");
req3.push("QUIT");
std::tuple<std::string, std::string> resp;
net::io_context ioc;
connection db{ioc};
db.async_exec(req1, aedis::adapt(resp), handler);
db.async_exec(req2, aedis::adapt(resp), handler);
db.async_exec(req3, aedis::adapt(resp), handler);
db.async_run("127.0.0.1", "6379", [&db](auto ec, auto...) {
std::cout << ec.message() << std::endl;
db.cancel_requests();
});
ioc.run();
}
net::awaitable<void>
push_consumer3(std::shared_ptr<connection> db)
{
for (;;)
co_await db->async_read_push(aedis::adapt(), net::use_awaitable);
}
void test_push3()
{
request req1;
req1.push("HELLO", 3);
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req1, aedis::adapt(), handler);
db->async_exec(req2, aedis::adapt(), handler);
db->async_exec(req2, aedis::adapt(), handler);
db->async_exec(req1, aedis::adapt(), handler);
db->async_exec(req2, aedis::adapt(), handler);
db->async_exec(req1, aedis::adapt(), handler);
db->async_exec(req2, aedis::adapt(), handler);
db->async_exec(req2, aedis::adapt(), handler);
db->async_exec(req1, aedis::adapt(), handler);
db->async_exec(req2, aedis::adapt(), handler);
db->async_exec(req3, aedis::adapt(), handler);
db->async_run("127.0.0.1", "6379", [db](auto ec, auto...) {
db->cancel_requests();
});
net::co_spawn(ioc.get_executor(), push_consumer3(db), net::detached);
ioc.run();
}
void test_exec_while_processing()
{
request req1;
req1.push("HELLO", 3);
req1.push("PING", "Message1");
request req2;
req2.push("SUBSCRIBE", "channel");
request req3;
req3.push("QUIT");
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
db->async_exec(req1, aedis::adapt(), [db, &req1](auto ec, auto) {
db->async_exec(req1, aedis::adapt(), handler);
});
db->async_exec(req1, aedis::adapt(), [db, &req2](auto ec, auto) {
db->async_exec(req2, aedis::adapt(), handler);
});
db->async_exec(req2, aedis::adapt(), [db, &req2](auto ec, auto) {
db->async_exec(req2, aedis::adapt(), handler);
});
db->async_exec(req1, aedis::adapt(), [db, &req1](auto ec, auto) {
db->async_exec(req1, aedis::adapt(), handler);
});
db->async_exec(req2, aedis::adapt(), [db, &req3](auto ec, auto) {
db->async_exec(req3, aedis::adapt(), handler);
});
db->async_run("127.0.0.1", "6379", [db](auto ec, auto...) {
db->cancel_requests();
});
net::co_spawn(ioc.get_executor(), push_consumer3(db), net::detached);
ioc.run();
std::cout << "Success: test_exec_while_processing()" << std::endl;
}
int main()
{
test_resolve();
test_connect();
test_quit1();
test_quit2();
test_push1();
test_push2();
test_push3();
test_no_push_reader1();
test_no_push_reader2();
test_no_push_reader3();
test_reconnect();
test_exec_while_processing();
// Must come last as it send a client pause.
test_idle();
}


@@ -7,6 +7,7 @@
#include <map>
#include <iostream>
#include <optional>
#include <sstream>
#include <boost/system/errc.hpp>
#include <boost/asio/awaitable.hpp>
@@ -15,18 +16,17 @@
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/beast/_experimental/test/stream.hpp>
#define BOOST_TEST_MODULE low level
#include <boost/test/included/unit_test.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
#include "check.hpp"
#include "config.h"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using test_stream = boost::beast::test::stream;
using aedis::adapter::adapt;
using aedis::adapter::adapt2;
using node_type = aedis::resp3::node<std::string>;
//-------------------------------------------------------------------
@@ -35,8 +35,8 @@ template <class Result>
struct expect {
std::string in;
Result expected;
std::string name;
boost::system::error_code ec;
std::string name; // Currently unused.
boost::system::error_code ec{};
};
template <class Result>
@@ -47,12 +47,13 @@ void test_sync(net::any_io_executor ex, expect<Result> e)
ts.append(e.in);
Result result;
boost::system::error_code ec;
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt(result), ec);
expect_error(ec, e.ec);
resp3::read(ts, net::dynamic_buffer(rbuffer), adapt2(result), ec);
BOOST_CHECK_EQUAL(ec, e.ec);
if (e.ec)
return;
check_empty(rbuffer);
expect_eq(result, e.expected, e.name);
BOOST_TEST(rbuffer.empty());
auto const res = result == e.expected;
BOOST_TEST(res);
}
template <class Result>
@@ -74,19 +75,20 @@ public:
void run()
{
auto self = this->shared_from_this();
auto f = [self](auto ec, auto n)
auto f = [self](auto ec, auto)
{
expect_error(ec, self->data_.ec);
BOOST_CHECK_EQUAL(ec, self->data_.ec);
if (self->data_.ec)
return;
check_empty(self->rbuffer_);
expect_eq(self->result_, self->data_.expected, self->data_.name);
BOOST_TEST(self->rbuffer_.empty());
auto const res = self->result_ == self->data_.expected;
BOOST_TEST(res);
};
resp3::async_read(
ts_,
net::dynamic_buffer(rbuffer_),
adapt(result_),
adapt2(result_),
f);
}
};
@@ -97,8 +99,9 @@ void test_async(net::any_io_executor ex, expect<Result> e)
std::make_shared<async_test<Result>>(ex, e)->run();
}
void test_number(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_number)
{
net::io_context ioc;
boost::optional<int> ok;
ok = 11;
@@ -108,12 +111,13 @@ void test_number(net::io_context& ioc)
auto const in03 = expect<int>{":11\r\n", int{11}, "number.int"};
auto const in04 = expect<boost::optional<int>>{":11\r\n", ok, "number.optional.int"};
auto const in05 = expect<std::tuple<int>>{"*1\r\n:11\r\n", std::tuple<int>{11}, "number.tuple.int"};
auto const in06 = expect<boost::optional<int>>{"%11\r\n", boost::optional<int>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_simple_type)};
auto const in07 = expect<std::set<std::string>>{":11\r\n", std::set<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_set_type)};
auto const in08 = expect<std::unordered_set<std::string>>{":11\r\n", std::unordered_set<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_set_type)};
auto const in09 = expect<std::map<std::string, std::string>>{":11\r\n", std::map<std::string, std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_map_type)};
auto const in10 = expect<std::unordered_map<std::string, std::string>>{":11\r\n", std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_map_type)};
auto const in11 = expect<std::list<std::string>>{":11\r\n", std::list<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_aggregate_type)};
auto const in06 = expect<int>{"_\r\n", int{0}, "number.int", aedis::make_error_code(aedis::error::null)};
auto const in07 = expect<boost::optional<int>>{"%11\r\n", boost::optional<int>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_simple_type)};
auto const in08 = expect<std::set<std::string>>{":11\r\n", std::set<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in09 = expect<std::unordered_set<std::string>>{":11\r\n", std::unordered_set<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in10 = expect<std::map<std::string, std::string>>{":11\r\n", std::map<std::string, std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in11 = expect<std::unordered_map<std::string, std::string>>{":11\r\n", std::unordered_map<std::string, std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in12 = expect<std::list<std::string>>{":11\r\n", std::list<std::string>{}, "number.optional.int", aedis::make_error_code(aedis::error::expects_resp3_aggregate)};
auto ex = ioc.get_executor();
@@ -128,6 +132,7 @@ void test_number(net::io_context& ioc)
test_sync(ex, in09);
test_sync(ex, in10);
test_sync(ex, in11);
test_sync(ex, in12);
test_async(ex, in01);
test_async(ex, in02);
@@ -140,10 +145,13 @@ void test_number(net::io_context& ioc)
test_async(ex, in09);
test_async(ex, in10);
test_async(ex, in11);
test_async(ex, in12);
ioc.run();
}
void test_bool(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_bool)
{
net::io_context ioc;
boost::optional<bool> ok;
ok = true;
@@ -156,10 +164,10 @@ void test_bool(net::io_context& ioc)
// Error
auto const in01 = expect<boost::optional<bool>>{"#11\r\n", boost::optional<bool>{}, "bool.error", aedis::make_error_code(aedis::error::unexpected_bool_value)};
auto const in03 = expect<std::set<int>>{"#t\r\n", std::set<int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_set_type)};
auto const in04 = expect<std::unordered_set<int>>{"#t\r\n", std::unordered_set<int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_set_type)};
auto const in05 = expect<std::map<int, int>>{"#t\r\n", std::map<int, int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_map_type)};
auto const in06 = expect<std::unordered_map<int, int>>{"#t\r\n", std::unordered_map<int, int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_map_type)};
auto const in03 = expect<std::set<int>>{"#t\r\n", std::set<int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in04 = expect<std::unordered_set<int>>{"#t\r\n", std::unordered_set<int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto const in05 = expect<std::map<int, int>>{"#t\r\n", std::map<int, int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in06 = expect<std::unordered_map<int, int>>{"#t\r\n", std::unordered_map<int, int>{}, "bool.error", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto ex = ioc.get_executor();
@@ -182,10 +190,12 @@ void test_bool(net::io_context& ioc)
test_async(ex, in09);
test_async(ex, in10);
test_async(ex, in11);
ioc.run();
}
void test_streamed_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_streamed_string)
{
net::io_context ioc;
std::string const wire = "$?\r\n;4\r\nHell\r\n;5\r\no wor\r\n;1\r\nd\r\n;0\r\n";
std::vector<node_type> e1a
@@ -210,10 +220,12 @@ void test_streamed_string(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
ioc.run();
}
void test_push(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_push)
{
net::io_context ioc;
std::string const wire = ">4\r\n+pubsub\r\n+message\r\n+some-channel\r\n+some message\r\n";
std::vector<node_type> e1a
@@ -236,10 +248,12 @@ void test_push(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_map(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_map)
{
net::io_context ioc;
using map_type = std::map<std::string, std::string>;
using mmap_type = std::multimap<std::string, std::string>;
using umap_type = std::unordered_map<std::string, std::string>;
@@ -249,6 +263,7 @@ void test_map(net::io_context& ioc)
using op_vec_type = boost::optional<std::vector<std::string>>;
using tuple_type = std::tuple<std::string, std::string, std::string, std::string, std::string, std::string, std::string, std::string>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "%4\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n$4\r\nkey3\r\n$6\r\nvalue3\r\n";
std::vector<node_type> expected_1a
@@ -319,8 +334,10 @@ void test_map(net::io_context& ioc)
auto const in07 = expect<op_map_type>{wire, expected_1d, "map.optional.map"};
auto const in08 = expect<op_vec_type>{wire, expected_1e, "map.optional.vector"};
auto const in09 = expect<std::tuple<op_map_type>>{"*1\r\n" + wire, std::tuple<op_map_type>{expected_1d}, "map.transaction.optional.map"};
auto const in10 = expect<int>{"%11\r\n", int{}, "map.invalid.int", aedis::make_error_code(aedis::error::expects_simple_type)};
auto const in10 = expect<int>{"%11\r\n", int{}, "map.invalid.int", aedis::make_error_code(aedis::error::expects_resp3_simple_type)};
auto const in11 = expect<tuple_type>{wire, e1f, "map.tuple"};
auto const in12 = expect<map_type>{wire2, map_type{}, "map.error", aedis::make_error_code(aedis::error::expects_resp3_map)};
auto const in13 = expect<map_type>{"_\r\n", map_type{}, "map.null", aedis::make_error_code(aedis::error::null)};
auto ex = ioc.get_executor();
@@ -335,6 +352,8 @@ void test_map(net::io_context& ioc)
test_sync(ex, in09);
test_sync(ex, in00);
test_sync(ex, in11);
test_sync(ex, in12);
test_sync(ex, in13);
test_async(ex, in00);
test_async(ex, in01);
@@ -347,6 +366,9 @@ void test_map(net::io_context& ioc)
test_async(ex, in09);
test_async(ex, in00);
test_async(ex, in11);
test_async(ex, in12);
test_async(ex, in13);
ioc.run();
}
void test_attribute(net::io_context& ioc)
@@ -377,8 +399,9 @@ void test_attribute(net::io_context& ioc)
test_async(ex, in02);
}
void test_array(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_array)
{
net::io_context ioc;
char const* wire = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::vector<node_type> e1a
@@ -404,6 +427,8 @@ void test_array(net::io_context& ioc)
auto const in06 = expect<std::array<int, 3>>{wire, e1f, "array.array"};
auto const in07 = expect<std::list<int>>{wire, e1g, "array.list"};
auto const in08 = expect<std::deque<int>>{wire, e1h, "array.deque"};
auto const in09 = expect<std::vector<int>>{"_\r\n", std::vector<int>{}, "array.vector", aedis::make_error_code(aedis::error::null)};
auto const in10 = expect<std::list<int>>{"_\r\n", std::list<int>{}, "array.list", aedis::make_error_code(aedis::error::null)};
auto ex = ioc.get_executor();
@@ -415,6 +440,7 @@ void test_array(net::io_context& ioc)
test_sync(ex, in06);
test_sync(ex, in07);
test_sync(ex, in08);
test_sync(ex, in09);
test_async(ex, in01);
test_async(ex, in02);
@@ -424,10 +450,13 @@ void test_array(net::io_context& ioc)
test_async(ex, in06);
test_async(ex, in07);
test_async(ex, in08);
test_async(ex, in09);
ioc.run();
}
void test_set(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_set)
{
net::io_context ioc;
using set_type = std::set<std::string>;
using mset_type = std::multiset<std::string>;
using uset_type = std::unordered_set<std::string>;
@@ -435,7 +464,9 @@ void test_set(net::io_context& ioc)
using vec_type = std::vector<std::string>;
using op_vec_type = boost::optional<std::vector<std::string>>;
std::string const wire2 = "*3\r\n$2\r\n11\r\n$2\r\n22\r\n$1\r\n3\r\n";
std::string const wire = "~6\r\n+orange\r\n+apple\r\n+one\r\n+two\r\n+three\r\n+orange\r\n";
std::vector<node_type> const expected1a
{ {resp3::type::set, 6UL, 0UL, {}}
, {resp3::type::simple_string, 1UL, 1UL, {"orange"}}
@@ -462,6 +493,7 @@ void test_set(net::io_context& ioc)
auto const in06 = expect<uset_type>{wire, e1c, "set.unordered_set"};
auto const in07 = expect<muset_type>{wire, e1g, "set.unordered_multiset"};
auto const in08 = expect<std::tuple<uset_type>>{"*1\r\n" + wire, std::tuple<uset_type>{e1c}, "set.tuple"};
auto const in09 = expect<set_type>{wire2, set_type{}, "set.error", aedis::make_error_code(aedis::error::expects_resp3_set)};
auto ex = ioc.get_executor();
@@ -474,6 +506,7 @@ void test_set(net::io_context& ioc)
test_sync(ex, in06);
test_sync(ex, in07);
test_sync(ex, in08);
test_sync(ex, in09);
test_async(ex, in00);
test_async(ex, in01);
@@ -484,10 +517,13 @@ void test_set(net::io_context& ioc)
test_async(ex, in06);
test_async(ex, in07);
test_async(ex, in08);
test_async(ex, in09);
ioc.run();
}
void test_simple_error(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_simple_error)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"-Error\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {"Error"}}, "simple_error.node", aedis::make_error_code(aedis::error::simple_error)};
auto const in02 = expect<node_type>{"-\r\n", node_type{resp3::type::simple_error, 1UL, 0UL, {""}}, "simple_error.node.empty", aedis::make_error_code(aedis::error::simple_error)};
@@ -498,10 +534,12 @@ void test_simple_error(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_blob_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_blob_string)
{
net::io_context ioc;
std::string str(100000, 'a');
str[1000] = '\r';
str[1001] = '\n';
@@ -529,15 +567,17 @@ void test_blob_string(net::io_context& ioc)
test_async(ex, in02);
test_async(ex, in03);
test_async(ex, in04);
ioc.run();
}
void test_double(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_double)
{
// TODO: Add test for double.
net::io_context ioc;
auto const in01 = expect<node_type>{",1.23\r\n", node_type{resp3::type::doublean, 1UL, 0UL, {"1.23"}}, "double.node"};
auto const in02 = expect<node_type>{",inf\r\n", node_type{resp3::type::doublean, 1UL, 0UL, {"inf"}}, "double.node (inf)"};
auto const in03 = expect<node_type>{",-inf\r\n", node_type{resp3::type::doublean, 1UL, 0UL, {"-inf"}}, "double.node (-inf)"};
auto const in04 = expect<double>{",1.23\r\n", double{1.23}, "double.double"};
auto const in05 = expect<double>{",er\r\n", double{0}, "double.double", aedis::make_error_code(aedis::error::not_a_double)};
auto ex = ioc.get_executor();
@@ -545,15 +585,19 @@ void test_double(net::io_context& ioc)
test_sync(ex, in02);
test_sync(ex, in03);
test_sync(ex, in04);
test_sync(ex, in05);
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
test_async(ex, in04);
test_async(ex, in05);
ioc.run();
}
void test_blob_error(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_blob_error)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"!21\r\nSYNTAX invalid syntax\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {"SYNTAX invalid syntax"}}, "blob_error", aedis::make_error_code(aedis::error::blob_error)};
auto const in02 = expect<node_type>{"!0\r\n\r\n", node_type{resp3::type::blob_error, 1UL, 0UL, {}}, "blob_error.empty", aedis::make_error_code(aedis::error::blob_error)};
@@ -564,10 +608,12 @@ void test_blob_error(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_verbatim_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_verbatim_string)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"=15\r\ntxt:Some string\r\n", node_type{resp3::type::verbatim_string, 1UL, 0UL, {"txt:Some string"}}, "verbatim_string"};
auto const in02 = expect<node_type>{"=0\r\n\r\n", node_type{resp3::type::verbatim_string, 1UL, 0UL, {}}, "verbatim_string.empty"};
@@ -578,10 +624,12 @@ void test_verbatim_string(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_big_number(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_big_number)
{
net::io_context ioc;
auto const in01 = expect<node_type>{"(3492890328409238509324850943850943825024385\r\n", node_type{resp3::type::big_number, 1UL, 0UL, {"3492890328409238509324850943850943825024385"}}, "big_number.node"};
auto const in02 = expect<int>{"(\r\n", int{}, "big_number.error (empty field)", aedis::make_error_code(aedis::error::empty_field)};
@@ -592,10 +640,12 @@ void test_big_number(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
ioc.run();
}
void test_simple_string(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_simple_string)
{
net::io_context ioc;
boost::optional<std::string> ok1, ok2;
ok1 = "OK";
ok2 = "";
@@ -616,10 +666,12 @@ void test_simple_string(net::io_context& ioc)
test_async(ex, in01);
test_async(ex, in02);
test_async(ex, in03);
ioc.run();
}
void test_resp3(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_resp3)
{
net::io_context ioc;
auto const in01 = expect<int>{"s11\r\n", int{}, "number.error", aedis::make_error_code(aedis::error::invalid_data_type)};
auto const in02 = expect<int>{":adf\r\n", int{11}, "number.int", aedis::make_error_code(aedis::error::not_a_number)};
auto const in03 = expect<int>{":\r\n", int{}, "number.error (empty field)", aedis::make_error_code(aedis::error::empty_field)};
@@ -639,10 +691,12 @@ void test_resp3(net::io_context& ioc)
test_async(ex, in03);
test_async(ex, in04);
test_async(ex, in05);
ioc.run();
}
void test_null(net::io_context& ioc)
BOOST_AUTO_TEST_CASE(test_null)
{
net::io_context ioc;
using op_type_01 = boost::optional<bool>;
using op_type_02 = boost::optional<int>;
using op_type_03 = boost::optional<std::string>;
@@ -684,34 +738,73 @@ void test_null(net::io_context& ioc)
test_async(ex, in07);
test_async(ex, in08);
test_async(ex, in09);
}
int main()
{
net::io_context ioc {1};
// Simple types.
test_simple_string(ioc);
test_simple_error(ioc);
test_blob_string(ioc);
test_blob_error(ioc);
test_number(ioc);
test_double(ioc);
test_bool(ioc);
test_null(ioc);
test_big_number(ioc);
test_verbatim_string(ioc);
// Aggregates.
test_array(ioc);
test_set(ioc);
test_map(ioc);
test_push(ioc);
test_streamed_string(ioc);
// RESP3
test_resp3(ioc);
ioc.run();
}
//-----------------------------------------------------------------------------------
void check_error(char const* name, aedis::error ev)
{
auto const ec = aedis::make_error_code(ev);
auto const& cat = ec.category();
BOOST_TEST(std::string(ec.category().name()) == name);
BOOST_TEST(!ec.message().empty());
BOOST_TEST(cat.equivalent(
static_cast<std::underlying_type<aedis::error>::type>(ev),
ec.category().default_error_condition(
static_cast<std::underlying_type<aedis::error>::type>(ev))));
BOOST_TEST(cat.equivalent(ec,
static_cast<std::underlying_type<aedis::error>::type>(ev)));
}
BOOST_AUTO_TEST_CASE(error)
{
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::resolve_timeout);
check_error("aedis", aedis::error::connect_timeout);
check_error("aedis", aedis::error::idle_timeout);
check_error("aedis", aedis::error::exec_timeout);
check_error("aedis", aedis::error::invalid_data_type);
check_error("aedis", aedis::error::not_a_number);
check_error("aedis", aedis::error::unexpected_read_size);
check_error("aedis", aedis::error::exceeeds_max_nested_depth);
check_error("aedis", aedis::error::unexpected_bool_value);
check_error("aedis", aedis::error::empty_field);
check_error("aedis", aedis::error::expects_resp3_simple_type);
check_error("aedis", aedis::error::expects_resp3_aggregate);
check_error("aedis", aedis::error::expects_resp3_map);
check_error("aedis", aedis::error::expects_resp3_set);
check_error("aedis", aedis::error::nested_aggregate_unsupported);
check_error("aedis", aedis::error::simple_error);
check_error("aedis", aedis::error::blob_error);
check_error("aedis", aedis::error::incompatible_size);
check_error("aedis", aedis::error::not_a_double);
check_error("aedis", aedis::error::null);
}
std::string get_type_as_str(aedis::resp3::type t)
{
std::ostringstream ss;
ss << t;
return ss.str();
}
BOOST_AUTO_TEST_CASE(type)
{
BOOST_TEST(!get_type_as_str(aedis::resp3::type::array).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::push).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::set).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::map).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::attribute).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::simple_string).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::simple_error).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::number).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::doublean).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::boolean).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::big_number).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::null).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::blob_error).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::verbatim_string).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::blob_string).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::streamed_string_part).empty());
BOOST_TEST(!get_type_as_str(aedis::resp3::type::invalid).empty());
}


@@ -9,14 +9,14 @@
#include <boost/asio/connect.hpp>
#include <aedis/aedis.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::request;
using aedis::adapter::adapt;
using aedis::adapter::adapt2;
using net::ip::tcp;
int main()
@@ -41,7 +41,7 @@ int main()
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
resp3::read(socket, dbuffer);
resp3::read(socket, dbuffer, adapt(resp));
resp3::read(socket, dbuffer, adapt2(resp));
resp3::read(socket, dbuffer);
std::cout << "Ping: " << resp << std::endl;