mirror of https://github.com/boostorg/redis.git synced 2026-01-26 19:02:08 +00:00

Compare commits


182 Commits

Author SHA1 Message Date
Marcelo Zimbres
b6e1280075 Fixes narrowing conversion.
NOTE: I had to disable the TLS tests because I shut down the server I was
running on my domain occase.de. Once this ticket is merged I will open a
new one to fix that and re-enable the tests.
2024-03-20 23:08:15 +01:00
Marcelo Zimbres
5d553f5d71 Some fixes in the article about the costs of async abstractions [skip ci] 2024-03-20 23:08:15 +01:00
Marcelo Zimbres
78792199ef Adds endian to the list of dependencies 2024-02-17 21:34:01 +01:00
Marcelo Zimbres
f5793ac9bc Refactors add_hello and adds unit tests. 2024-02-17 21:34:01 +01:00
Marcelo Zimbres
dfc2bd1ac2 Fixes issue 181. 2024-02-17 21:34:01 +01:00
Marcelo Zimbres
0445e74fa3 Fixes the CMake file. 2024-01-21 21:52:31 +01:00
Marcelo Zimbres
234f961e87 Provides a way of passing a custom ssl context to the connection. 2024-01-21 21:52:31 +01:00
Marcelo Zimbres
8bb0004188 Adds missing ssl-context getters. 2024-01-21 21:52:31 +01:00
Marcelo Zimbres
4257b2eaec In-tree cmake builds instead of FindBoost. 2024-01-21 21:52:31 +01:00
Marcelo Zimbres
96da11a2cc Article about the costs of async abstractions. 2024-01-21 21:52:31 +01:00
Marcelo Zimbres
3861c5de74 Accepts responses to staged requests as valid.
Before these changes a request had to be marked as written before incoming
responses would be interpreted as belonging to it. On fast networks,
however, such as localhost under load, the response might arrive before
the write operation has completed.
2024-01-21 21:52:31 +01:00
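
Below is a small, self-contained plain-Asio sketch of the race this commit describes (illustrative only, not library code; POSIX-only because of `local::connect_pair`): the write of a request and the read of its reply are outstanding at the same time, so nothing guarantees the write's completion handler runs before the reply is available.

```cpp
#include <boost/asio.hpp>

#include <iostream>
#include <memory>
#include <string>

namespace net = boost::asio;

int main()
{
    net::io_context ioc;

    // Two connected local sockets stand in for client and server.
    net::local::stream_protocol::socket client{ioc}, server{ioc};
    net::local::connect_pair(client, server);

    // "Server": reply as soon as a full line arrives.
    auto in = std::make_shared<std::string>();
    net::async_read_until(server, net::dynamic_buffer(*in), '\n',
        [&server, in](auto, auto) {
            net::async_write(server, net::buffer("+PONG\r\n", 7), [](auto, auto) {});
        });

    // "Client": the write of the request and the read of its reply are
    // started together, so the reply may be ready before the write's
    // completion handler has run.
    auto out   = std::make_shared<std::string>("PING\r\n");
    auto reply = std::make_shared<std::string>();
    net::async_write(client, net::buffer(*out),
        [out](auto, auto) { std::cout << "write completed\n"; });
    net::async_read_until(client, net::dynamic_buffer(*reply), '\n',
        [reply](auto, auto) { std::cout << "reply arrived\n"; });

    ioc.run();
}
```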
Marcelo
168ee6148a Merge pull request #162 from boostorg/develop
Merge latest css improvements into master
2023-12-14 23:22:03 +01:00
Marcelo Zimbres
723e72797f Output test error. 2023-12-02 20:23:16 +01:00
Marcelo
7caea928af Merge pull request #167 from boostorg/165-support-containers-in-ci
165 support containers in ci
2023-11-12 15:08:42 +01:00
Marcelo Zimbres
71b9a4f428 Reduces the number of messages in the stress test. 2023-11-12 13:00:43 +01:00
Marcelo Zimbres
d89a976729 Build gcc11 builds in container. 2023-11-11 14:12:33 +01:00
Marcelo Zimbres
154d0b106d Obtains the Redis host from env variables. 2023-11-04 12:41:53 +01:00
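
Purely illustrative sketch of such a lookup; the variable name `BOOST_REDIS_TEST_SERVER` is taken from the CI workflow shown further down, and the localhost fallback is an assumption.

```cpp
#include <cstdlib>
#include <string>

// Read the Redis host from the environment, falling back to localhost.
inline std::string redis_host()
{
    const char* host = std::getenv("BOOST_REDIS_TEST_SERVER");
    return host ? host : "127.0.0.1";
}
```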
Marcelo Zimbres
2b12525206 Adds container to matrix. 2023-10-30 22:59:04 +01:00
Marcelo Zimbres
0bcbf6d4e4 Fix the build of boost dependencies. 2023-10-28 09:29:51 +02:00
Marcelo
6389daa783 Merge pull request #161 from anarthal/develop
Docs styles minor tweaks
2023-10-11 12:19:16 +02:00
Ruben Perez
ab2d6cdea8 Docs styles minor tweaks
Magnifying glass showing twice.
Sidebar scrolling with the content.
Header not being hidden correctly for small devices.

close #160
2023-10-11 11:57:43 +02:00
Marcelo
63ce40e365 Merge pull request #159 from anarthal/feature/158-boost-docs-formatting-problems
Fixed CSS formatting for Boost docs
2023-10-10 23:10:37 +02:00
Ruben Perez
f2a005a8c4 Fixed CSS formatting for Boost docs 2023-10-10 22:36:09 +02:00
Marcelo Zimbres
0c06be66de Fixes Boost.Redis version.
[skip ci]
2023-10-08 10:04:54 +02:00
Marcelo
0380e643ed Merge pull request #157 from boostorg/develop
Latest develop changes
2023-10-08 09:10:07 +02:00
Marcelo
ff734694ab Merge pull request #156 from boostorg/prepare_for_first_boost_release
Prepare for first boost release
2023-10-07 21:51:28 +02:00
Marcelo Zimbres
548e3d4cb6 Updates the copyright notice. 2023-10-07 16:44:17 +02:00
Marcelo Zimbres
66b632b13d Small fixes in the docs. 2023-10-07 16:40:13 +02:00
Marcelo
11c9c1b787 Merge pull request #155 from anarthal/feature/118-boost-integration
118 boost integration
2023-10-05 22:42:31 +02:00
Ruben Perez
d6f9e435c7 Revert "Fixed libc++ link flags"
This reverts commit 199fb6c261.
2023-10-05 16:55:53 +02:00
Ruben Perez
9a7816dbf4 switched to default installation of openssl 2023-10-05 16:22:59 +02:00
Ruben Perez
199fb6c261 Fixed libc++ link flags 2023-10-05 16:19:24 +02:00
Ruben Perez
4d30d1e0c0 split cmake_test 2023-10-05 16:10:25 +02:00
Ruben Perez
92be6d958f Reduced ci.py verbosity 2023-10-05 16:08:49 +02:00
Ruben Perez
14d3c0232e Removed unnecessary checks fom jamfile 2023-10-05 16:08:32 +02:00
Ruben Perez
7412b37e08 choco => vcpkg 2023-10-05 16:03:32 +02:00
Ruben Perez
60ba5b62af Missing packages in coverage build 2023-10-05 14:13:19 +02:00
Ruben Perez
0303ae0dbc Simplified & documented Jamfile 2023-10-05 13:47:14 +02:00
Ruben Perez
ea6c5536c1 CMAKE_BUILD_PARALLEL_LEVEL for coverage 2023-10-05 13:26:00 +02:00
Ruben Perez
d386b30c3a Simplified ci.py 2023-10-05 13:25:49 +02:00
Ruben Perez
2951acc80f Merge branch 'feature/118-boost-integration' of github.com:anarthal/boost-redis into feature/118-boost-integration 2023-10-05 12:53:52 +02:00
Ruben Perez
faf15fe7e8 Initial coverage workflow 2023-10-05 12:45:35 +02:00
Ruben Perez
7f3f8b0c13 Relaxed cxx17 requirement in Jamfile 2023-10-05 12:33:49 +02:00
Ruben Perez
f37e514961 Link error fix in win b2 2023-10-05 12:02:47 +02:00
Ruben Perez
b7b4f8f449 OpenSSL win fix in CI 2023-10-05 12:02:31 +02:00
Ruben Perez
686cb306ea README now states Boost requirements 2023-10-04 19:13:50 +02:00
Ruben Perez
fcbe2c431c Canonical project name 2023-10-04 19:10:24 +02:00
Ruben Perez
a7b3fbdd9a Protect min/max 2023-10-04 19:06:31 +02:00
Ruben Perez
5ea0d3c467 Fixed OPENSSL_ROOT on win 2023-10-04 19:06:13 +02:00
Ruben Perez
2cd487784b Attempt to solve b2 openssl problem in win 2023-10-04 18:58:07 +02:00
Ruben Perez
b41e2704a1 choco no progress 2023-10-04 18:50:51 +02:00
Ruben Perez
765f0d45e8 Improved CI build names 2023-10-04 18:48:37 +02:00
Ruben Perez
84c8649d66 Bad b2 command 2023-10-04 18:48:29 +02:00
Ruben Perez
0bf4e76981 B2 CI 2023-10-04 18:41:48 +02:00
Ruben Perez
1d329df81b test jamfile 2023-10-04 18:31:47 +02:00
Ruben Perez
56f7d5af69 examples => example 2023-10-04 17:47:03 +02:00
Ruben Perez
d0c3b3f7ee generator fix 2023-10-04 17:38:50 +02:00
Ruben Perez
87ebc6cf4a protobuf fix 2023-10-04 17:38:34 +02:00
Ruben Perez
ffc35e8e3e copytree and cxxstd 2023-10-04 17:23:48 +02:00
Ruben Perez
a02837ab33 Explicit Python & typos 2023-10-04 17:12:11 +02:00
Ruben Perez
4a39a0d20a Toolset 2023-10-04 17:07:26 +02:00
Ruben Perez
56d9a2778f Typo fix 2023-10-04 12:50:41 +02:00
Ruben Perez
c732f33b48 New CI 2023-10-04 12:49:13 +02:00
Ruben Perez
221016f1c9 subdir tests 2023-10-04 12:29:59 +02:00
Ruben Perez
cb9fdba0a4 New cmakes 2023-10-04 11:28:55 +02:00
Ruben Perez
1c96a60709 ci.py first version 2023-10-03 23:09:34 +02:00
Ruben Perez
b66d067af8 tests => test 2023-10-03 23:08:59 +02:00
Ruben Perez
bc08a8d411 Trigger CI 2023-10-03 21:04:43 +02:00
Ruben Perez
53ef947cf3 Doc install and redirection 2023-10-03 18:59:21 +02:00
Ruben Perez
ecfe51c7ae Doc fixes 2023-10-03 17:27:31 +02:00
Ruben Perez
be20c0d48c Docs via b2 2023-10-03 16:51:05 +02:00
Ruben Perez
d5031c3f69 libraries.json 2023-10-02 17:17:44 +02:00
Marcelo
6748f7682a Merge pull request #153 from boostorg/152-enable-reading-server-pushes-in-batches
152 enable reading server pushes in batches
2023-09-10 22:28:28 +02:00
Marcelo Zimbres
2a4936a9e1 Implements batch reads for server pushes. 2023-09-10 12:05:37 +02:00
Marcelo Zimbres
4547e1ac07 First steps with using adapters to process a generic_response. 2023-09-04 14:00:12 +02:00
Marcelo
44a608c0ba Merge pull request #151 from boostorg/150-remove-resp3read-and-resp3async_read
Removes resp3::async_read.
2023-09-02 14:52:36 +02:00
Marcelo Zimbres
1ed8e0182c Removes resp3::async_read. 2023-09-02 13:05:06 +02:00
Marcelo
d8cf431dc2 Merge pull request #149 from boostorg/144-implement-connection-usage-information
Adds connection usage information.
2023-08-30 09:30:30 +02:00
Marcelo Zimbres
401dd24419 Adds connection usage information. 2023-08-29 16:31:23 +02:00
Marcelo
509635f222 Merge pull request #145 from boostorg/138-use-stdfunction-to-type-erase-the-adapter
Uses std::function to type erase the response adapter
2023-08-26 15:39:49 +02:00
Marcelo Zimbres
4fbd0c6853 Progresses with the adapter type erasure. 2023-08-26 13:09:48 +02:00
Marcelo
b8899ecdc7 Merge pull request #143 from mrichmon/develop
Fix cmake find_package
2023-08-22 08:26:07 +02:00
Michael Richmond
7d09040646 Bump version number 2023-08-21 16:42:41 -07:00
Michael Richmond
0de26fb0ce Fix out of date filename 2023-08-21 16:42:17 -07:00
Marcelo
84ee2f37f1 Merge pull request #141 from boostorg/develop
Pre-Boost release of latest changes in develop
2023-08-18 23:08:05 +02:00
Marcelo
81927deda4 Merge pull request #134 from boostorg/123-automatically-select-database-after-hello
123 automatically select database after hello
2023-08-06 11:00:34 +02:00
Marcelo Zimbres
34ff1cea63 Fixes https://github.com/boostorg/redis/issues/121 2023-08-06 10:07:17 +02:00
Marcelo Zimbres
10603b7d3a Sends SELECT right after HELLO when a connection is established. 2023-08-06 10:07:17 +02:00
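
The library now issues this pair itself; purely as an illustration of the command order, here is a sketch built with the `request` type that appears in the README diff below (the database index is an arbitrary example, and header names may differ between versions).

```cpp
#include <boost/redis/request.hpp>

// Upgrade to RESP3 with HELLO, then immediately SELECT the logical database.
boost::redis::request make_handshake_request()
{
    boost::redis::request req;
    req.push("HELLO", 3);
    req.push("SELECT", 2); // index 2 is only an example
    return req;
}
```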
Marcelo
ad3c2914db Merge pull request #133 from boostorg/122-health-check-not-restart-after-connection-lost
Simplifies parse ops and fixes health-check on reconnection.
2023-08-05 23:04:50 +02:00
Marcelo Zimbres
91014b13bf Simplifies parse ops and fixes health-check on reconnection. 2023-08-05 22:20:39 +02:00
Marcelo
4f6f8b454d Merge pull request #132 from cbodley/wip-exec-forward
connection: async_exec forwards completion token
2023-08-02 22:06:27 +02:00
Casey Bodley
9ebcc544ae connection: async_exec forwards completion token
async operations should support move-only completion handlers. forward
the CompletionToken argument to avoid an unnecessary copy

Fixes: #131

Signed-off-by: Casey Bodley <cbodley@redhat.com>
2023-08-02 11:45:57 -04:00
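
A generic Asio sketch of the pattern this commit applies to `async_exec` (the wrapper below is hypothetical, not the library's signature): taking the token as a forwarding reference and forwarding it on lets move-only completion handlers through without an extra copy.

```cpp
#include <boost/asio.hpp>

#include <utility>

namespace net = boost::asio;

// The token is perfectly forwarded instead of taken by value, so a
// move-only completion handler is moved, never copied.
template <class CompletionToken>
auto async_wait_later(net::steady_timer& timer, CompletionToken&& token)
{
    return timer.async_wait(std::forward<CompletionToken>(token));
}
```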
Marcelo
7d16259749 Merge pull request #130 from boostorg/119-simplify-the-parser-op
Simplifications in the parser
2023-07-30 11:44:58 +02:00
Marcelo Zimbres
9dec63515e Simplifications in the parser. 2023-07-30 08:18:17 +02:00
Marcelo
46525371b9 Merge pull request #117 from cthulhu-irl/build/add-cmake-options
build: add cmake options
2023-07-01 13:03:31 +02:00
Cthulhu
b5f8348598 build: add cmake options
Adds CMake options for installing, tests, examples, and docs instead of
always building them.

The options are enabled by default when the project is built directly;
when it is included via add_subdirectory (directly or through
FetchContent, etc.), the unnecessary options are disabled.

issue #115
2023-06-25 19:26:27 +03:30
Marcelo
69d12421e2 Merge pull request #116 from boostorg/113-create-an-experimental-connection-class-that-has-fast-compilation-times
113 create an experimental connection class that has fast compilation times
2023-06-24 09:52:48 +02:00
Marcelo Zimbres
a715c251bf Improvements in the docs. 2023-06-23 22:24:34 +02:00
Marcelo Zimbres
d29a057fa6 Uses composition instead of inheritance in the connection class. 2023-06-20 23:01:17 +02:00
Marcelo Zimbres
82430afc8b Make the connection non-generic on the executor type. 2023-06-18 09:34:40 +02:00
Marcelo
607946f00e Merge pull request #112 from boostorg/111-simplify-the-serialization-examples
111 simplify the serialization examples
2023-06-11 11:12:06 +02:00
Marcelo Zimbres
c99790ab5c Uses choco instead of cinst. 2023-06-11 09:28:12 +02:00
Marcelo Zimbres
635b3608ad Removes unnecessary files. 2023-06-08 22:00:07 +02:00
Marcelo
a8a78c38c6 Merge pull request #109 from boostorg/103-cant-read-response-of-hello-+-ping-+-hello
Removes payload rotation from request.
2023-06-04 16:50:12 +02:00
Marcelo Zimbres
e09a53ff08 Removes payload rotation from request.
The user can simply call HELLO before other commands. Altering the order
of requests makes it impossible to declare responses.
2023-06-04 16:18:35 +02:00
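
A sketch of what the commit body means by declaring responses, using the type names that appear in the README diff further down (exact headers may differ between versions): when the command order is preserved, each tuple element lines up with the command at the same position.

```cpp
#include <boost/redis/ignore.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/response.hpp>

#include <string>

void declare_positional_responses()
{
    boost::redis::request req;
    req.push("HELLO", 3);
    req.push("PING", "hi");

    // One tuple element per command, in the same order: HELLO's reply is
    // ignored, PING's reply lands in the std::string.
    boost::redis::response<boost::redis::ignore_t, std::string> resp;
    (void)resp; // passed to async_exec in real code
}
```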
Marcelo
ec8a1c7286 Merge pull request #105 from boostorg/95-improve-the-performance-of-connectionasync_receive
95 improve the performance of connectionasync receive
2023-05-28 11:14:14 +02:00
Marcelo Zimbres
3c02a7662b Replaces connection channel with a timer. 2023-05-27 23:20:15 +02:00
Marcelo Zimbres
538ab8f35f Reduces the number of rescheduling needed to process a server sent push.
Performance improved by close to 10%.
2023-05-21 21:17:13 +02:00
Marcelo Zimbres
f5f57e370b Improvements in the redis-push stress test. 2023-05-21 21:17:13 +02:00
Marcelo
7abfc5fd8d Merge pull request #101 from boostorg/100-runhpp-no-longer-present-but-still-referred-to-from-redishpp
Fixes redis.hpp and slightly improves compilation times.
2023-05-20 14:16:19 +02:00
Marcelo Zimbres
11eebcf771 Fixes redis.hpp and slightly improves compilation times. 2023-05-20 13:19:31 +02:00
Marcelo
c21f70bc07 Merge pull request #99 from boostorg/93-use-cmake-foreach-to-simplify-cmakeliststxt
Simplifies the CMakeLists.txt.
2023-05-14 13:55:46 +02:00
Marcelo Zimbres
22bacbd52c Simplifies the CMakeLists.txt. 2023-05-14 10:42:16 +02:00
Marcelo
2982f831f6 Merge pull request #98 from boostorg/94-unify-redisconnection-and-redissslconnection
94 unify redisconnection and redissslconnection
2023-05-13 18:22:53 +02:00
Marcelo Zimbres
663e9ac671 Simplifications. 2023-05-13 10:22:11 +02:00
Marcelo Zimbres
c0aa4356ea The ssl::context is now owned by the connection. 2023-05-10 23:25:09 +02:00
Marcelo Zimbres
6f9fd5b2fb Unifies ssl and plain connections. 2023-05-09 23:12:16 +02:00
Marcelo
30a6e34e4e Merge pull request #97 from boostorg/85-added-example-cpp20_streams-which-reproduces-an-assertion
85 added example cpp20 streams which reproduces an assertion
2023-05-06 21:26:09 +02:00
Marcelo Zimbres
1f9b3e8008 Rebase the branch on develop. 2023-05-06 20:48:32 +02:00
bram
3808fec0e3 Cleaned up a bit
Removed unused stuff
Using request and response as shared_ptrs.
Removed (unnecessary?) calls to net::post.
2023-05-06 17:14:10 +02:00
bram
607a9e9dd6 Added example cpp20_streams, which reproduces an assertion. 2023-05-06 16:55:15 +02:00
Marcelo
2d53bb748e Merge pull request #92 from boostorg/90-add-support-for-reconnection
90 add support for reconnection
2023-05-06 15:39:29 +02:00
Marcelo Zimbres
a6cb4ca323 Adds high-level functionality to connection::async_run. 2023-05-02 23:15:08 +02:00
Marcelo Zimbres
5ac4f7e8ad Removes dependency on asio::promise as it does not compile on windows. 2023-03-31 04:00:20 +02:00
Marcelo Zimbres
7a08588808 Progresses with the subscriber. 2023-03-31 04:00:19 +02:00
Marcelo
e7ff1cedf3 Merge pull request #89 from boostorg/88-simplify-async_check_health-with-asioconsign
Uses consign to simplify the check-health operation.
2023-03-20 01:53:40 +01:00
Marcelo Zimbres
0bcb7dcf16 Uses consign to simplify the check-health operation. 2023-03-20 01:26:50 +01:00
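
`asio::consign`, named in the commit, attaches extra values to a completion token so they are kept alive until the operation completes. A minimal generic sketch using a timer (not the library's health-check code):

```cpp
#include <boost/asio.hpp>
#include <boost/asio/consign.hpp>

#include <chrono>
#include <memory>

namespace net = boost::asio;

int main()
{
    net::io_context ioc;
    auto timer = std::make_shared<net::steady_timer>(ioc, std::chrono::seconds(1));

    // consign(detached, timer) behaves like net::detached but keeps the
    // shared_ptr alive until async_wait completes, with no manual bookkeeping.
    timer->async_wait(net::consign(net::detached, timer));

    ioc.run();
}
```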
Marcelo
c28969674b Merge pull request #86 from boostorg/83-fix-reconnect-loop-in-the-subscriber-example
83 fix reconnect loop in the subscriber example
2023-03-16 18:05:40 +01:00
Marcelo Zimbres
c7f49c6677 Adds address struct. 2023-03-16 17:01:49 +01:00
Marcelo Zimbres
90bcd621fb Including only necessary headers. 2023-03-14 20:11:22 +01:00
Marcelo Zimbres
fd967204df Implements non-member async_run for plain connections.
This function will resolve and connect before calling member async_run.
2023-03-13 21:37:25 +01:00
Marcelo
cd00047a49 Merge pull request #82 from boostorg/81-add-support-for-protobuf
First steps with protobuf support.
2023-03-11 13:10:39 +01:00
Marcelo Zimbres
728b35cfe0 Adds protobuf example. 2023-03-11 12:24:35 +01:00
Marcelo
52e62ba78c Merge pull request #80 from boostorg/79-missing-header-in-resulthpp
Adds missing include header.
2023-03-08 12:10:37 +01:00
Marcelo Zimbres
bb18ff4891 Adds missing include header. 2023-03-07 20:24:53 +01:00
Marcelo
6ce793e413 Merge pull request #78 from boostorg/69-move-health-check-functionality-from-examples-to-the-library
Implements a function that checks Redis health.
2023-03-05 21:18:12 +01:00
Marcelo Zimbres
a83c0e7803 Trying to fix build on MSVC by including tuple before asio. 2023-03-05 20:54:02 +01:00
Marcelo Zimbres
64820bd25b Implements a function that checks Redis health. 2023-03-04 15:54:23 +01:00
Marcelo
16b5c8d1ba Merge pull request #77 from boostorg/72-cannot-build-on-macos
Adds support for libc++.
2023-03-02 08:12:36 +01:00
Marcelo Zimbres
8ef4d3cf0b Adds support for libc++. 2023-03-02 07:46:21 +01:00
Marcelo
d01a9acf3b Merge pull request #75 from boostorg/51-more-suggestions-for-documentation-improvements
Addresses issue #45, #47, #51 and #74.
2023-02-26 20:58:14 +01:00
Marcelo Zimbres
ac7e425d47 Addresses issue #45, #47, #51 and #74. 2023-02-26 20:34:43 +01:00
Marcelo
d620cdee59 Merge pull request #71 from boostorg/70-rename-node-to-basic_node-and-add-typedef-for-nodestdstring
70 rename node to basic node and add typedef for nodestdstring
2023-02-25 15:30:04 +01:00
Marcelo Zimbres
5f07b730f7 Upgrades to Boost 1.81. 2023-02-25 10:26:07 +01:00
Marcelo Zimbres
6d3a112f94 Removes json serialization boilerplate. 2023-02-25 10:14:28 +01:00
Marcelo Zimbres
1f3ef6b486 Renames node to basic_node. 2023-02-25 10:14:28 +01:00
Marcelo
a850a6ed63 Merge pull request #68 from boostorg/67-make-the-connection-full-duplex
67 make the connection full duplex
2023-02-18 21:39:13 +01:00
Marcelo Zimbres
c8b73c2fe8 Removes coalesce property of the requests.
It doesn't make any sense after the implementation of full-duplex
communication.
2023-02-18 20:08:55 +01:00
Marcelo Zimbres
8b02268182 Uses boost.describe to simplify json serialization. 2023-02-16 21:44:45 +01:00
Marcelo Zimbres
1b60eeb352 Makes the connection full-duplex. 2023-02-12 19:14:07 +01:00
Marcelo
b93f36163d Merge pull request #66 from boostorg/65-move-redisresp3async_read-to-redis-namespace
Moves read functions from resp3:: to redis::.
2023-02-12 07:44:33 +01:00
Marcelo Zimbres
071f9a93aa Moves read functions from resp3:: to redis::. 2023-02-11 19:17:05 +01:00
Marcelo
5a6ca14a67 Merge pull request #64 from boostorg/40-improve-support-to-redis-error-messages
Uses system::result to implement per request error handling.
2023-02-11 14:56:12 +01:00
Marcelo Zimbres
a5c86107f8 Uses system::result to implement per request error handling. 2023-02-11 11:53:44 +01:00
Marcelo
3a4445022e Merge pull request #63 from boostorg/62-let-the-implementation-call-adaptresp-automatically
async_exec accepts response now instead of adapter.
2023-02-05 10:08:05 +01:00
Marcelo Zimbres
bfb26f2602 async_exec accepts response now instead of adapter. 2023-02-04 20:43:36 +01:00
Marcelo
7e70cb4ad7 Merge pull request #61 from boostorg/60-we-need-response-typedefs
Adds response typedefs.
2023-02-04 10:29:22 +01:00
Marcelo Zimbres
886561409a Adds response typedefs. 2023-02-04 10:03:08 +01:00
Marcelo
0c5ff09685 Merge pull request #59 from boostorg/42-the-names-from_bulk-and-to_bulk-are-too-generic-for-adl-customization-points
Prefix to_ and from_bulk with boost_redis_ (boost review).
2023-02-02 08:33:44 +01:00
Marcelo Zimbres
4b07b6d516 Prefix to_ and from_bulk with boost_redis_ (boost review). 2023-02-01 22:48:33 +01:00
Marcelo
c1ce8358c7 Merge pull request #57 from boostorg/56-remove-memory_resource-usage
Removes memory_resource.
2023-01-29 21:14:37 +01:00
Marcelo Zimbres
13e16b7a60 Removes memory_resource. 2023-01-29 20:21:30 +01:00
Marcelo
e11502e0df Merge pull request #55 from boostorg/54-rename-aedis-to-boost-redis
Renames Aedis to Boost.Redis.
2023-01-28 21:43:57 +01:00
Marcelo Zimbres
b2344384cf Renames Aedis to Boost.Redis. 2023-01-28 17:57:35 +01:00
Marcelo Zimbres
56c0b28003 Fixes issue 50 and 44. 2023-01-28 09:35:36 +01:00
Marcelo Zimbres
c88fcfb9ed Adds more doc to node class. 2023-01-08 21:51:41 +01:00
Marcelo
a56bf982ab Merge pull request #41 from Hailios/remove_duplicate_cmake
remove duplicate line in cmake
2023-01-07 20:11:50 +01:00
Jakob Lövhall
5d0ed0e986 remove duplicate line in cmake 2023-01-07 16:58:50 +01:00
Marcelo Zimbres
15deaa637d Doc improvements. 2023-01-07 00:14:29 +01:00
Marcelo Zimbres
bb8ff90351 Fixes issue 39. 2023-01-06 17:38:10 +01:00
Marcelo Zimbres
7d4902369a Doc improvements and renames async_main to co_main. 2023-01-05 23:37:55 +01:00
Marcelo Zimbres
607ca17a89 Improvements in the documentation. 2023-01-04 22:51:53 +01:00
Marcelo Zimbres
3849ba42fd Changes:
- Fix include header order.
- Removes default completion token where it is not needed.
- Replaces yield with BOOST_ macros.
2023-01-02 23:51:50 +01:00
Marcelo Zimbres
56bcdb7914 Improvements in the docs. 2022-12-31 15:58:31 +01:00
Marcelo Zimbres
73ad66eb93 Adds example that does not use awaitable ops. 2022-12-30 18:13:09 +01:00
Marcelo Zimbres
9cf00d6a23 Adds cpp17 async example. 2022-12-30 00:04:41 +01:00
Marcelo Zimbres
a00c9e7439 Doc improvements. 2022-12-27 21:21:43 +01:00
Marcelo Zimbres
0520791100 Renames request flag. 2022-12-27 18:46:27 +01:00
Marcelo Zimbres
14b376e36e Fixes cancelation of async_exec (2). 2022-12-26 11:02:13 +01:00
Marcelo Zimbres
4f9dcc7dc5 Fixes async_exec terminal cancellation. 2022-12-25 20:01:35 +01:00
Marcelo Zimbres
ad5dd8c30b Refactors the parser so it is not header-only. 2022-12-22 21:42:41 +01:00
Marcelo Zimbres
842f864689 Using doxygen-awesome css. 2022-12-19 21:40:44 +01:00
147 changed files with 13789 additions and 11157 deletions


@@ -7,7 +7,7 @@ codecov:
ignore:
- "benchmarks/cpp/asio/*"
- "examples/*"
- "example/*"
- "tests/*"
- "/usr/*"
- "**/boost/*"


@@ -1,10 +1,18 @@
# CI script to verify that CMake and B2 builds work.
# B2 builds include only tests that don't require a DB server, to avoid race conditions.
# CMake tests include the actual project tests and all the CMake integration workflows
# recommended by Boost.CI.
# Windows CMake jobs build the code but don't run the tests,
# since we don't have a way to set up a Redis server on Windows (yet).
# Subcommands are implemented by the tools/ci.py script in a platform-independent manner.
name: CI
on: [push, pull_request]
jobs:
windows:
name: "${{matrix.generator}} ${{matrix.toolset}} Boost ${{matrix.boost_version}} ${{matrix.build_type}} ${{matrix.name_args}}"
windows-cmake:
name: "CMake ${{matrix.toolset}} ${{matrix.build-type}} C++${{matrix.cxxstd}}"
runs-on: ${{matrix.os}}
defaults:
run:
@@ -12,84 +20,109 @@ jobs:
strategy:
fail-fast: false
matrix:
boost_version: ["1.80.0"]
os: [windows-2019, windows-2022]
toolset: [v142, v143]
build_type: [Release]
generator: ["Visual Studio 16 2019", "Visual Studio 17 2022"]
config_args: [""]
build_args: [""]
name_args: [""]
exclude:
- { os: windows-2019, toolset: v143 }
- { os: windows-2019, generator: "Visual Studio 17 2022" }
- { os: windows-2022, generator: "Visual Studio 16 2019" }
# The following combinations are not available through install-boost
- { boost_version: "1.80.0", toolset: v143 }
include:
- { toolset: msvc-14.2, os: windows-2019, generator: "Visual Studio 16 2019", cxxstd: '17', build-type: 'Debug', build-shared-libs: 1 }
- { toolset: msvc-14.2, os: windows-2019, generator: "Visual Studio 16 2019", cxxstd: '17', build-type: 'Release', build-shared-libs: 0 }
- { toolset: msvc-14.3, os: windows-2022, generator: "Visual Studio 17 2022", cxxstd: '20', build-type: 'Debug', build-shared-libs: 0 }
- { toolset: msvc-14.3, os: windows-2022, generator: "Visual Studio 17 2022", cxxstd: '20', build-type: 'Release', build-shared-libs: 1 }
env:
CMAKE_BUILD_PARALLEL_LEVEL: 4
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Add boost toolset to environment
if: contains(fromJson('["1.80.0"]'), matrix.boost_version)
run: echo BOOST_TOOLSET=$(echo "msvc") >> $GITHUB_ENV
- name: Setup Boost
run: python3 tools/ci.py setup-boost --source-dir=$(pwd)
# The platform_version passed to boost-install determines the msvc toolset version for which static libs are installed.
- name: Add boost platform version to environment
- name: Build a Boost distribution using B2
run: |
declare -A toolset_to_platform_version=( [v142]="2019" [v143]="2022" )
key=$(echo "${{matrix.toolset}}")
echo BOOST_PLATFORM_VERSION="${toolset_to_platform_version[$key]}" >> $GITHUB_ENV
python3 tools/ci.py build-b2-distro \
--toolset ${{ matrix.toolset }}
- name: Add boost install path to environment
run: echo BOOST_INSTALL_PATH="${GITHUB_WORKSPACE}/boost-${{matrix.boost_version}}${BOOST_TOOLSET}${BOOST_PLATFORM_VERSION}" >> $GITHUB_ENV
- name: Add build type configuration to environment
run: echo BUILD_CONFIG_ARG="--config ${{matrix.build_type}}" >> $GITHUB_ENV
- name: Cache Boost installation
id: cache-boost
uses: actions/cache@v3
with:
path: ${{env.BOOST_INSTALL_PATH}}
key: ${{matrix.boost_version}}${{env.BOOST_TOOLSET}}${{env.BOOST_PLATFORM_VERSION}}
- name: Install Boost
if: steps.cache-boost.outputs.cache-hit != 'true'
uses: MarkusJx/install-boost@v2.4.1
with:
boost_version: ${{matrix.boost_version}}
toolset: ${{env.BOOST_TOOLSET}}
boost_install_dir: ${{env.BOOST_INSTALL_PATH}}
platform_version: ${{env.BOOST_PLATFORM_VERSION}}
arch: null
- name: Install packages
run: cinst openssl
- name: Create build directory
run: mkdir build
- name: Configure
working-directory: build
- name: Build a Boost distribution using CMake
run: |
cmake -T "${{matrix.toolset}}" \
-G "${{matrix.generator}}" \
${{matrix.config_args}} \
${BOOST_COMPILER_ARG}\
"${GITHUB_WORKSPACE}"
env:
BOOST_ROOT: ${{env.BOOST_INSTALL_PATH}}/boost
- name: Build
working-directory: build
python3 tools/ci.py build-cmake-distro \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
- name: Build the project tests
run: |
cmake --build . ${BUILD_CONFIG_ARG} ${{matrix.build_args}}
python3 tools/ci.py build-cmake-standalone-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
posix:
# # TODO: re-enable this when a Redis server is available for this job
# - name: Run the project tests
# run: |
# python3 tools/ci.py run-cmake-standalone-tests \
# --build-type ${{ matrix.build-type }}
- name: Run add_subdirectory tests
run: |
python3 tools/ci.py run-cmake-add-subdirectory-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
- name: Run find_package tests with the built cmake distribution
run: |
python3 tools/ci.py run-cmake-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
- name: Run find_package tests with the built b2 distribution
run: |
python3 tools/ci.py run-cmake-b2-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
windows-b2:
name: "B2 ${{matrix.toolset}}"
runs-on: ${{matrix.os}}
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- { toolset: msvc-14.2, os: windows-2019 }
- { toolset: msvc-14.3, os: windows-2022 }
env:
OPENSSL_ROOT: "C:\\Program Files\\OpenSSL"
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup user-config.jam
run: cp tools/user-config.jam "${HOMEDRIVE}${HOMEPATH}/"
- name: Setup Boost
run: python3 tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build and run project tests using B2
run: |
python3 tools/ci.py run-b2-tests \
--toolset ${{ matrix.toolset }} \
--cxxstd 17,20 \
--variant debug,release
posix-cmake:
name: "CMake ${{ matrix.toolset }} ${{ matrix.cxxstd }} ${{ matrix.build-type }} ${{ matrix.cxxflags }}"
defaults:
run:
shell: bash
@@ -98,34 +131,190 @@ jobs:
fail-fast: false
matrix:
include:
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++20' }
- toolset: gcc-11
install: g++-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Debug'
ldflags: ''
- toolset: gcc-11
install: g++-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Release'
ldflags: ''
- toolset: clang-11
install: clang-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Debug'
ldflags: ''
- toolset: clang-11
install: clang-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Debug'
ldflags: ''
- toolset: clang-13
install: clang-13
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Release'
ldflags: ''
- toolset: clang-13
install: clang-13
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Release'
ldflags: ''
- toolset: clang-14
install: 'clang-14 libc++-14-dev libc++abi-14-dev'
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Debug'
cxxflags: '-stdlib=libc++'
ldflags: '-lc++'
- toolset: clang-14
install: 'clang-14 libc++-14-dev libc++abi-14-dev'
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Release'
cxxflags: '-stdlib=libc++'
ldflags: '-lc++'
runs-on: ${{ matrix.os }}
container: ${{matrix.container}}
env:
CXXFLAGS: -g -O0 -std=${{matrix.cxxstd}} -Wall -Wextra
CXXFLAGS: ${{matrix.cxxflags}} -Wall -Wextra
LDFLAGS: ${{matrix.ldflags}}
CMAKE_BUILD_PARALLEL_LEVEL: 4
BOOST_REDIS_TEST_SERVER: redis
services:
redis:
image: redis
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install CMake
run: sudo apt-get -y install cmake
- name: Install compiler
run: sudo apt-get install -y ${{ matrix.install }}
- name: Install Redis
run: sudo apt-get install -y redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.80.0
platform_version: 22.04
- name: Run CMake
- name: Setup container environment
if: matrix.container
run: |
BOOST_ROOT=${{steps.install-boost.outputs.BOOST_ROOT}} cmake -DCMAKE_CXX_COMPILER="${{matrix.compiler}}" -DCMAKE_CXX_FLAGS="${{env.CXXFLAGS}}"
- name: Build
run: make
- name: Check
run: ctest --output-on-failure
apt-get update
apt-get -y install sudo python3 git g++ libssl-dev protobuf-compiler redis-server
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get -y install cmake protobuf-compiler redis-server python3 ${{ matrix.install }}
- name: Setup Boost
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build a Boost distribution using B2
run: |
./tools/ci.py build-b2-distro \
--toolset ${{ matrix.toolset }}
- name: Build a Boost distribution using CMake
run: |
./tools/ci.py build-cmake-distro \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Build the project tests
run: |
./tools/ci.py build-cmake-standalone-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Run the project tests
run: |
./tools/ci.py run-cmake-standalone-tests \
--build-type ${{ matrix.build-type }}
- name: Run add_subdirectory tests
run: |
./tools/ci.py run-cmake-add-subdirectory-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Run find_package tests with the built cmake distribution
run: |
./tools/ci.py run-cmake-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Run find_package tests with the built b2 distribution
run: |
./tools/ci.py run-cmake-b2-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
posix-b2:
name: "B2 ${{ matrix.toolset }}"
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- toolset: gcc-11
install: g++-11
cxxstd: "11,17,20" # Having C++11 shouldn't break the build
os: ubuntu-latest
container: ubuntu:22.04
- toolset: clang-14
install: clang-14
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: "17,20"
runs-on: ${{ matrix.os }}
container: ${{matrix.container}}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup container environment
if: matrix.container
run: |
apt-get update
apt-get -y install sudo python3 git g++ libssl-dev
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get -y install python3 ${{ matrix.install }}
- name: Setup Boost
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build and run project tests using B2
run: |
python3 tools/ci.py run-b2-tests \
--toolset ${{ matrix.toolset }} \
--cxxstd ${{ matrix.cxxstd }} \
--variant debug,release


@@ -3,7 +3,8 @@ name: Coverage
on:
push:
branches:
- master
- develop
jobs:
posix:
defaults:
@@ -15,33 +16,36 @@ jobs:
CXX: g++-11
CXXFLAGS: -g -O0 -std=c++20 --coverage -fkeep-inline-functions -fkeep-static-functions
LDFLAGS: --coverage
CMAKE_BUILD_PARALLEL_LEVEL: 4
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install CMake
run: sudo apt-get -y install cmake
- name: Install lcov
run: sudo apt-get -y install lcov
- name: Install compiler
run: sudo apt-get -y install g++-11
- name: Install Redis
run: sudo apt-get -y install redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.80.0
platform_version: 22.04
- name: Install dependencies
run: sudo apt-get --no-install-recommends -y install cmake lcov g++-11 redis-server python3 libgd-perl
- name: Setup Boost
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build Boost
run: ./tools/ci.py build-b2-distro --toolset=gcc-11
# Having our library there confuses the coverage reports
- name: Remove Boost.Redis from the b2 distro
run: rm -rf ~/boost-b2-distro/include/boost/redis
- name: Run CMake
run: |
BOOST_ROOT=${{steps.install-boost.outputs.BOOST_ROOT}} cmake --preset coverage .
run: cmake -DCMAKE_PREFIX_PATH=$HOME/boost-b2-distro --preset coverage .
- name: Build
run: cmake --build --preset coverage
- name: Test
run: ctest --preset coverage
- name: Make the coverage file
run: cmake --build --preset coverage --target coverage
- name: Upload to codecov
run: |
bash <(curl -s https://codecov.io/bash) -f ./build/coverage/coverage.info || echo "Codecov did not collect coverage reports"
bash <(curl -s https://codecov.io/bash) -f ./build/coverage/coverage.info


@@ -1,355 +1,142 @@
# At the moment the official build system is still autotools and this
# file is meant to support Aedis on windows.
cmake_minimum_required(VERSION 3.8...3.20)
# BOOST_ROOT=/opt/boost_1_79/ cmake -DCMAKE_CXX_FLAGS="-g -O0
# -std=c++20 -Wall -Wextra --coverage -fkeep-inline-functions
# -fkeep-static-functions" -DCMAKE_EXE_LINKER_FLAGS="--coverage"
# ~/my/aedis
cmake_minimum_required(VERSION 3.14)
project(
Aedis
VERSION 1.4.0
DESCRIPTION "A redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
)
add_library(aedis INTERFACE)
target_include_directories(aedis INTERFACE
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
target_link_libraries(
aedis
INTERFACE
Boost::asio
Boost::assert
Boost::config
Boost::core
Boost::mp11
Boost::system
Boost::utility
)
target_compile_features(aedis INTERFACE cxx_std_17)
# Asio bases C++ feature detection on __cplusplus. Make MSVC
# define it correctly
if (MSVC)
target_compile_options(aedis INTERFACE /Zc:__cplusplus)
# determine whether it's main/root project
# or being built under another project.
if (NOT DEFINED BOOST_REDIS_MAIN_PROJECT)
set(BOOST_REDIS_MAIN_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(BOOST_REDIS_MAIN_PROJECT ON)
endif()
endif()
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${PROJECT_BINARY_DIR}/AedisConfigVersion.cmake"
COMPATIBILITY AnyNewerVersion
)
project(boost_redis VERSION "${BOOST_SUPERPROJECT_VERSION}" LANGUAGES CXX)
find_package(Boost 1.80 REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
# Library
add_library(boost_redis INTERFACE)
add_library(Boost::redis ALIAS boost_redis)
target_include_directories(boost_redis INTERFACE include)
target_compile_features(boost_redis INTERFACE cxx_std_17)
find_package(OpenSSL REQUIRED)
# Dependencies
if (BOOST_REDIS_MAIN_PROJECT)
# TODO: Understand why we have to list all dependencies below
# instead of
#set(BOOST_INCLUDE_LIBRARIES redis)
#set(BOOST_EXCLUDE_LIBRARIES redis)
#add_subdirectory(../.. boostorg/boost EXCLUDE_FROM_ALL)
enable_testing()
include_directories(include)
set(deps
system
assert
config
throw_exception
asio
variant2
mp11
winapi
predef
align
context
core
coroutine
static_assert
pool
date_time
smart_ptr
exception
integer
move
type_traits
algorithm
utility
io
lexical_cast
numeric/conversion
mpl
range
tokenizer
tuple
array
bind
concept_check
function
iterator
regex
unordered
preprocessor
container
conversion
container_hash
detail
optional
function_types
fusion
intrusive
describe
typeof
functional
test
json
endian
)
# Main function for the examples.
#=======================================================================
foreach(dep IN LISTS deps)
add_subdirectory(../${dep} boostorg/${dep})
endforeach()
add_library(common STATIC
examples/common/common.cpp
examples/common/main.cpp
examples/common/aedis.cpp
)
target_compile_features(common PUBLIC cxx_std_20)
if (MSVC)
target_compile_options(common PRIVATE /bigobj)
target_compile_definitions(common PRIVATE _WIN32_WINNT=0x0601)
find_package(Threads REQUIRED)
find_package(OpenSSL REQUIRED)
target_link_libraries(boost_redis
INTERFACE
Boost::system
Boost::asio
Threads::Threads
OpenSSL::Crypto
OpenSSL::SSL
)
else()
# If we're in the superproject or called from add_subdirectory,
# Boost dependencies should be already available.
# If other dependencies are not found, we bail out
find_package(Threads)
if(NOT Threads_FOUND)
message(STATUS "Boost.Redis has been disabled, because the required package Threads hasn't been found")
return()
endif()
find_package(OpenSSL)
if(NOT OpenSSL_FOUND)
message(STATUS "Boost.Redis has been disabled, because the required package OpenSSL hasn't been found")
return()
endif()
# This is generated by boostdep
target_link_libraries(boost_redis
INTERFACE
Boost::asio
Boost::assert
Boost::core
Boost::mp11
Boost::system
Boost::throw_exception
Threads::Threads
OpenSSL::Crypto
OpenSSL::SSL
)
endif()
# Executables
#=======================================================================
add_executable(intro examples/intro.cpp)
target_link_libraries(intro common)
target_compile_features(intro PUBLIC cxx_std_20)
add_test(intro intro)
if (MSVC)
target_compile_options(intro PRIVATE /bigobj)
target_compile_definitions(intro PRIVATE _WIN32_WINNT=0x0601)
# Enable testing. If we're being called from the superproject, this has already been done
if (BOOST_REDIS_MAIN_PROJECT)
include(CTest)
endif()
add_executable(intro_sync examples/intro_sync.cpp)
target_compile_features(intro_sync PUBLIC cxx_std_20)
add_test(intro_sync intro_sync)
add_test(intro_sync intro_sync)
if (MSVC)
target_compile_options(intro_sync PRIVATE /bigobj)
target_compile_definitions(intro_sync PRIVATE _WIN32_WINNT=0x0601)
# Most tests require a running Redis server, so we only run them if we're the main project
if(BOOST_REDIS_MAIN_PROJECT AND BUILD_TESTING)
# Tests and common utilities
add_subdirectory(test)
# Benchmarks. Build them with tests to prevent code rotting
add_subdirectory(benchmarks)
# Examples
add_subdirectory(example)
endif()
add_executable(chat_room examples/chat_room.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_link_libraries(chat_room common)
if (MSVC)
target_compile_options(chat_room PRIVATE /bigobj)
target_compile_definitions(chat_room PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(containers examples/containers.cpp)
target_compile_features(containers PUBLIC cxx_std_20)
target_link_libraries(containers common)
add_test(containers containers)
if (MSVC)
target_compile_options(containers PRIVATE /bigobj)
target_compile_definitions(containers PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(echo_server examples/echo_server.cpp)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_link_libraries(echo_server common)
if (MSVC)
target_compile_options(echo_server PRIVATE /bigobj)
target_compile_definitions(echo_server PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(resolve_with_sentinel examples/resolve_with_sentinel.cpp)
target_compile_features(resolve_with_sentinel PUBLIC cxx_std_20)
target_link_libraries(resolve_with_sentinel common)
#add_test(resolve_with_sentinel resolve_with_sentinel)
if (MSVC)
target_compile_options(resolve_with_sentinel PRIVATE /bigobj)
target_compile_definitions(resolve_with_sentinel PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(serialization examples/serialization.cpp)
target_compile_features(serialization PUBLIC cxx_std_20)
target_link_libraries(serialization common)
add_test(serialization serialization)
if (MSVC)
target_compile_options(serialization PRIVATE /bigobj)
target_compile_definitions(serialization PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(subscriber examples/subscriber.cpp)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_link_libraries(subscriber common)
if (MSVC)
target_compile_options(subscriber PRIVATE /bigobj)
target_compile_definitions(subscriber PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(intro_tls examples/intro_tls.cpp)
target_compile_features(intro_tls PUBLIC cxx_std_20)
add_test(intro_tls intro_tls)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(intro_tls common)
if (MSVC)
target_compile_options(intro_tls PRIVATE /bigobj)
target_compile_definitions(intro_tls PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(low_level_async examples/low_level_async.cpp)
target_compile_features(low_level_async PUBLIC cxx_std_20)
add_test(low_level_async low_level_async)
target_link_libraries(low_level_async common)
if (MSVC)
target_compile_options(low_level_async PRIVATE /bigobj)
target_compile_definitions(low_level_async PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(echo_server_client benchmarks/cpp/asio/echo_server_client.cpp)
target_compile_features(echo_server_client PUBLIC cxx_std_20)
if (MSVC)
target_compile_options(echo_server_client PRIVATE /bigobj)
target_compile_definitions(echo_server_client PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(echo_server_direct benchmarks/cpp/asio/echo_server_direct.cpp)
target_compile_features(echo_server_direct PUBLIC cxx_std_20)
if (MSVC)
target_compile_options(echo_server_direct PRIVATE /bigobj)
target_compile_definitions(echo_server_direct PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(low_level_sync examples/low_level_sync.cpp)
target_compile_features(low_level_sync PUBLIC cxx_std_17)
add_test(low_level_sync low_level_sync)
if (MSVC)
target_compile_options(low_level_sync PRIVATE /bigobj)
target_compile_definitions(low_level_sync PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_exec tests/conn_exec.cpp)
target_compile_features(test_conn_exec PUBLIC cxx_std_20)
add_test(test_conn_exec test_conn_exec)
if (MSVC)
target_compile_options(test_conn_exec PRIVATE /bigobj)
target_compile_definitions(test_conn_exec PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_push tests/conn_push.cpp)
target_compile_features(test_conn_push PUBLIC cxx_std_20)
add_test(test_conn_push test_conn_push)
if (MSVC)
target_compile_options(test_conn_push PRIVATE /bigobj)
target_compile_definitions(test_conn_push PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_quit tests/conn_quit.cpp)
target_compile_features(test_conn_quit PUBLIC cxx_std_17)
add_test(test_conn_quit test_conn_quit)
if (MSVC)
target_compile_options(test_conn_quit PRIVATE /bigobj)
target_compile_definitions(test_conn_quit PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_quit_coalesce tests/conn_quit_coalesce.cpp)
add_test(test_conn_quit_coalesce test_conn_quit_coalesce)
target_compile_features(test_conn_quit_coalesce PUBLIC cxx_std_17)
if (MSVC)
target_compile_options(test_conn_quit_coalesce PRIVATE /bigobj)
target_compile_definitions(test_conn_quit_coalesce PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_reconnect tests/conn_reconnect.cpp)
target_compile_features(test_conn_reconnect PUBLIC cxx_std_20)
target_link_libraries(test_conn_reconnect common)
add_test(test_conn_reconnect test_conn_reconnect)
if (MSVC)
target_compile_options(test_conn_reconnect PRIVATE /bigobj)
target_compile_definitions(test_conn_reconnect PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_tls tests/conn_tls.cpp)
add_test(test_conn_tls test_conn_tls)
target_compile_features(test_conn_tls PUBLIC cxx_std_17)
target_link_libraries(test_conn_tls OpenSSL::Crypto OpenSSL::SSL)
if (MSVC)
target_compile_options(test_conn_tls PRIVATE /bigobj)
target_compile_definitions(test_conn_tls PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_low_level tests/low_level.cpp)
target_compile_features(test_low_level PUBLIC cxx_std_17)
add_test(test_low_level test_low_level)
if (MSVC)
target_compile_options(test_low_level PRIVATE /bigobj)
target_compile_definitions(test_low_level PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_run_cancel tests/conn_run_cancel.cpp)
target_compile_features(test_conn_run_cancel PUBLIC cxx_std_20)
add_test(test_conn_run_cancel test_conn_run_cancel)
if (MSVC)
target_compile_options(test_conn_run_cancel PRIVATE /bigobj)
target_compile_definitions(test_conn_run_cancel PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_exec_cancel tests/conn_exec_cancel.cpp)
target_compile_features(test_conn_exec_cancel PUBLIC cxx_std_20)
target_link_libraries(test_conn_exec_cancel common)
add_test(test_conn_exec_cancel test_conn_exec_cancel)
if (MSVC)
target_compile_options(test_conn_exec_cancel PRIVATE /bigobj)
target_compile_definitions(test_conn_exec_cancel PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_conn_echo_stress tests/conn_echo_stress.cpp)
target_compile_features(test_conn_echo_stress PUBLIC cxx_std_20)
target_link_libraries(test_conn_echo_stress common)
add_test(test_conn_echo_stress test_conn_echo_stress)
if (MSVC)
target_compile_options(test_conn_echo_stress PRIVATE /bigobj)
target_compile_definitions(test_conn_echo_stress PRIVATE _WIN32_WINNT=0x0601)
endif()
add_executable(test_request tests/request.cpp)
target_compile_features(test_request PUBLIC cxx_std_17)
add_test(test_request test_request)
if (MSVC)
target_compile_options(test_request PRIVATE /bigobj)
target_compile_definitions(test_request PRIVATE _WIN32_WINNT=0x0601)
endif()
# Install
#=======================================================================
install(TARGETS aedis
EXPORT aedis
PUBLIC_HEADER DESTINATION include COMPONENT Development
)
include(CMakePackageConfigHelpers)
configure_package_config_file(
"${PROJECT_SOURCE_DIR}/cmake/AedisConfig.cmake.in"
"${PROJECT_BINARY_DIR}/AedisConfig.cmake"
INSTALL_DESTINATION lib/cmake/aedis
)
install(EXPORT aedis DESTINATION lib/cmake/aedis)
install(FILES "${PROJECT_BINARY_DIR}/AedisConfigVersion.cmake"
"${PROJECT_BINARY_DIR}/AedisConfig.cmake"
DESTINATION lib/cmake/aedis)
install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/ DESTINATION include)
# Doxygen
#=======================================================================
set(DOXYGEN_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/doc")
configure_file(doc/Doxyfile.in doc/Doxyfile @ONLY)
add_custom_target(
doc
COMMAND doxygen "${PROJECT_BINARY_DIR}/doc/Doxyfile"
COMMENT "Building documentation using Doxygen"
WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}"
VERBATIM
)
# Coverage
#=======================================================================
set(
COVERAGE_TRACE_COMMAND
lcov --capture
-output-file "${PROJECT_BINARY_DIR}/coverage.info"
--directory "${PROJECT_BINARY_DIR}"
--include "${PROJECT_SOURCE_DIR}/include/*"
)
set(
COVERAGE_HTML_COMMAND
genhtml --legend -f -q
"${PROJECT_BINARY_DIR}/coverage.info"
--prefix "${PROJECT_SOURCE_DIR}"
--output-directory "${PROJECT_BINARY_DIR}/coverage_html"
)
add_custom_target(
coverage
COMMAND ${COVERAGE_TRACE_COMMAND}
COMMAND ${COVERAGE_HTML_COMMAND}
COMMENT "Generating coverage report"
VERBATIM
)
# Distribution
#=======================================================================
include(CPack)
# TODO
#=======================================================================
#.PHONY: bench
#bench:
# pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
# pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex
# pdftoppm {input.pdf} {output.file} -png


@@ -12,7 +12,7 @@
"warnings": {
"dev": true,
"deprecated": true,
"uninitialized": true,
"uninitialized": false,
"unusedCli": true,
"systemVars": false
},
@@ -40,11 +40,11 @@
}
},
{
"name": "dev",
"name": "g++-11",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/dev",
"binaryDir": "${sourceDir}/build/g++-11",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
@@ -52,15 +52,96 @@
"CMAKE_CXX_COMPILER": "g++-11",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/dev",
"DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/dev/doc/"
"PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11"
}
},
{
"name": "g++-11-release",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/g++-11-release",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Release",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra",
"CMAKE_CXX_COMPILER": "g++-11",
"CMAKE_SHARED_LINKER_FLAGS": "",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11-release"
}
},
{
"name": "clang++-13",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/clang++-13",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_CXX_COMPILER": "clang++-13",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-13"
}
},
{
"name": "clang++-14",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/clang++-14",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_CXX_COMPILER": "clang++-14",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-14"
}
},
{
"name": "libc++-14-cpp17",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/libc++-14-cpp17",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -stdlib=libc++ -std=c++17",
"CMAKE_EXE_LINKER_FLAGS": "-lc++",
"CMAKE_CXX_COMPILER": "clang++-14",
"CMAKE_SHARED_LINKER_FLAGS": "",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp17"
}
},
{
"name": "libc++-14-cpp20",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/libc++-14-cpp20",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -stdlib=libc++ -std=c++17",
"CMAKE_EXE_LINKER_FLAGS": "-lc++",
"CMAKE_CXX_COMPILER": "clang++-14",
"CMAKE_SHARED_LINKER_FLAGS": "",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp20"
}
},
{
"name": "clang-tidy",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["dev"],
"inherits": ["g++-11"],
"binaryDir": "${sourceDir}/build/clang-tidy",
"cacheVariables": {
"CMAKE_CXX_CLANG_TIDY": "clang-tidy;--header-filter=${sourceDir}/include/*",
@@ -70,7 +151,12 @@
],
"buildPresets": [
{ "name": "coverage", "configurePreset": "coverage" },
{ "name": "dev", "configurePreset": "dev" },
{ "name": "g++-11", "configurePreset": "g++-11" },
{ "name": "g++-11-release", "configurePreset": "g++-11-release" },
{ "name": "clang++-13", "configurePreset": "clang++-13" },
{ "name": "clang++-14", "configurePreset": "clang++-14" },
{ "name": "libc++-14-cpp17", "configurePreset": "libc++-14-cpp17" },
{ "name": "libc++-14-cpp20", "configurePreset": "libc++-14-cpp20" },
{ "name": "clang-tidy", "configurePreset": "clang-tidy" }
],
"testPresets": [
@@ -80,8 +166,13 @@
"output": {"outputOnFailure": true},
"execution": {"noTestsAction": "error", "stopOnFailure": true}
},
{ "name": "coverage", "configurePreset": "coverage", "inherits": ["test"] },
{ "name": "dev", "configurePreset": "dev", "inherits": ["test"] },
{ "name": "clang-tidy", "configurePreset": "clang-tidy", "inherits": ["test"] }
{ "name": "coverage", "configurePreset": "coverage", "inherits": ["test"] },
{ "name": "g++-11", "configurePreset": "g++-11", "inherits": ["test"] },
{ "name": "g++-11-release", "configurePreset": "g++-11-release", "inherits": ["test"] },
{ "name": "clang++-13", "configurePreset": "clang++-13", "inherits": ["test"] },
{ "name": "clang++-14", "configurePreset": "clang++-14", "inherits": ["test"] },
{ "name": "libc++-14-cpp17", "configurePreset": "libc++-14-cpp17", "inherits": ["test"] },
{ "name": "libc++-14-cpp20", "configurePreset": "libc++-14-cpp20", "inherits": ["test"] },
{ "name": "clang-tidy", "configurePreset": "clang-tidy", "inherits": ["test"] }
]
}

README.md (707 lines changed)

@@ -1,211 +1,122 @@
# Boost.Redis
# Aedis
## Overview
Aedis is a [Redis](https://redis.io/) client library built on top of
[Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html)
that implements the latest version of the Redis communication
protocol
Boost.Redis is a high-level [Redis](https://redis.io/) client library built on top of
[Boost.Asio](https://www.boost.org/doc/libs/release/doc/html/boost_asio.html)
that implements the Redis protocol
[RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md).
It makes communication with a Redis server easy by hiding most of
the low-level Asio-related code away from the user, which in the majority of
the cases will be concerned with only three library entities
The requirements for using Boost.Redis are:
* `aedis::connection`: A connection to the Redis server.
* `aedis::resp3::request`: A container of Redis commands.
* `aedis::adapt()`: Adapts data structures to receive responses.
* Boost. The library is included in Boost distributions starting with 1.84.
* C++17 or higher.
* Redis 6 or higher (must support RESP3).
* Gcc (10, 11, 12), Clang (11, 13, 14) and Visual Studio (16 2019, 17 2022).
* Have basic-level knowledge about [Redis](https://redis.io/docs/)
and [Boost.Asio](https://www.boost.org/doc/libs/1_82_0/doc/html/boost_asio/overview.html).
For example, the coroutine below uses a short-lived connection to read Redis
[hashes](https://redis.io/docs/data-types/hashes/)
in a `std::map` (see intro.cpp and containers.cpp)
The latest release can be downloaded from
https://github.com/boostorg/redis/releases. The library headers can be
found in the `include` subdirectory and a compilation of the source
```cpp
auto async_main() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// From examples/common.hpp to avoid verbosity
co_await connect(conn, "127.0.0.1", "6379");
// A request contains multiple commands.
resp3::request req;
req.push("HELLO", 3);
req.push("HGETALL", "hset-key");
req.push("QUIT");
// Responses as tuple elements.
std::tuple<aedis::ignore, std::map<std::string, std::string>, aedis::ignore> resp;
// Executes the request and reads the response.
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
// Use the map from std::get<1>(resp) ...
}
#include <boost/redis/src.hpp>
```
The execution of `connection::async_exec` above is composed with
`connection::async_run` with the aid of the Asio awaitable operator `||`,
which ensures that one operation is cancelled as soon as the other
completes. These functions play the following roles
is required. The simplest way to do this is to include that header in
no more than one source file in your application. To build the
examples and tests, CMake is supported, for example
* `connection::async_exec`: Executes commands (i.e. writes the request and reads the response).
* `connection::async_run`: Coordinates read and write operations and remains suspended until the connection is lost.
```cpp
# Linux
$ BOOST_ROOT=/opt/boost_1_84_0 cmake --preset g++-11
Let us dig in.
# Windows
$ cmake -G "Visual Studio 17 2022" -A x64 -B bin64 -DCMAKE_TOOLCHAIN_FILE=C:/vcpkg/scripts/buildsystems/vcpkg.cmake
```
<a name="connection"></a>
## Connection
In general we will want to reuse the same connection for multiple
requests, we can do this with the example above by decoupling the
HELLO command and the call to `async_run` in a separate coroutine
Let us start with a simple application that uses a short-lived
connection to send a [ping](https://redis.io/commands/ping/) command
to Redis
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
co_await connect(conn, "127.0.0.1", "6379");
resp3::request req;
req.push("HELLO", 3); // Upgrade to RESP3
// Notice we use && instead of || so async_run is not cancelled
// when the response to HELLO comes.
co_await (conn->async_run() && conn->async_exec(req));
}
```
We can now let `run` run detached in the background while other
coroutines perform requests on the connection
```cpp
auto async_main() -> net::awaitable<void>
auto co_main(config const& cfg) -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
conn->async_run(cfg, {}, net::consign(net::detached, conn));
// Calls async_run detached.
net::co_spawn(ex, run(conn), net::detached)
// A request containing only a ping command.
request req;
req.push("PING", "Hello world");
// Here we can pass conn around to other coroutines so they can make requests.
...
// Response where the PONG response will be stored.
response<std::string> resp;
// Executes the request.
co_await conn->async_exec(req, resp, net::deferred);
conn->cancel();
std::cout << "PING: " << std::get<0>(resp).value() << std::endl;
}
```
With this separation, it is now easy to incorporate other operations
in our application, for example, to cancel the connection on `SIGINT`
and `SIGTERM` we can extend `run` as follows
The roles played by the `async_run` and `async_exec` functions are
* `async_exec`: Execute the commands contained in the
request and store the individual responses in the `resp` object. Can
be called from multiple places in your code concurrently.
* `async_run`: Resolve, connect, ssl-handshake,
resp3-handshake, health-checks, reconnection and coordinate low-level
read and write operations (among other things).
### Server pushes
Redis servers can also send a variety of pushes to the client, some of
them are
* [Pubsub](https://redis.io/docs/manual/pubsub/)
* [Keyspace notification](https://redis.io/docs/manual/keyspace-notifications/)
* [Client-side caching](https://redis.io/docs/manual/client-side-caching/)
The connection class supports server pushes by means of the
`boost::redis::connection::async_receive` function, which can be
called in the same connection that is being used to execute commands.
The coroutine below shows how to use it
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
auto
receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
co_await connect(conn, "127.0.0.1", "6379");
signal_set sig{ex, SIGINT, SIGTERM};
request req;
req.push("SUBSCRIBE", "channel");
resp3::request req;
req.push("HELLO", 3);
generic_response resp;
conn->set_receive_response(resp);
co_await ((conn->async_run() || sig.async_wait()) && conn->async_exec(req));
}
```
// Loop while reconnection is enabled
while (conn->will_reconnect()) {
Likewise we can incorporate support for server pushes, health checks and pubsub
// Reconnect to channels.
co_await conn->async_exec(req, ignore, net::deferred);
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
co_await connect(conn, "127.0.0.1", "6379");
signal_set sig{ex, SIGINT, SIGTERM};
// Loop reading Redis pushes.
for (;;) {
error_code ec;
co_await conn->async_receive(resp, net::redirect_error(net::use_awaitable, ec));
if (ec)
break; // Connection lost, break so we can reconnect to channels.
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel1", "channel2");
// Use the response resp in some way and then clear it.
...
co_await ((conn->async_run() || sig.async_wait() || receiver(conn) || healthy_checker(conn))
&& conn->async_exec(req));
}
```
The definition of `receiver` and `healthy_checker` above can be found
in subscriber.cpp. Adding a loop around `async_run` produces a simple
way to support reconnection _while there are pending operations on the connection_,
for example, to reconnect to the same address
```cpp
auto run(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
steady_timer timer{ex};
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel1", "channel2");
for (;;) {
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || healthy_checker(conn) || receiver(conn))
&& conn->async_exec(req));
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
consume_one(resp);
}
}
}
```
For failover with sentinels see `resolve_with_sentinel.cpp`. At
this point the reasons why `async_run` was introduced in Aedis
might have become apparent to the reader
* Provide quick reaction to disconnections and hence faster failover.
* Support server pushes and requests in the same connection object, concurrently.
* Separate requests, handling of server pushes and reconnection operations.
### Cancellation
Aedis supports both implicit and explicit cancellation of connection
operations. Explicit cancellation is supported by means of the
`aedis::connection::cancel` member function. Implicit cancellation,
like that which happens when using the Asio awaitable operators && and
||, will be discussed in more detail below.
```cpp
co_await (conn.async_run(...) && conn.async_exec(...))
```
* Provide a simple way to send HELLO and perform channel subscription.
```cpp
co_await (conn.async_run(...) || conn.async_exec(...))
```
* Useful for short-lived connections that are meant to be closed after
a command has been executed.
```cpp
co_await (conn.async_exec(...) || time.async_wait(...))
```
* Provides a way to limit how long the execution of a single request
should last.
* The cancellation will be ignored if the request has already
been written to the socket.
* It is usually a better idea to have a health checker than adding
per-request timeouts, see subscriber.cpp for an example.
```cpp
co_await (conn.async_run(...) || time.async_wait(...))
```
* Sets a limit on how long the connection should live.
```cpp
co_await (conn.async_exec(...) || conn.async_exec(...) || ... || conn.async_exec(...))
```
* This works but is unnecessary. Unless the user has set
`aedis::resp3::request::config::coalesce` to `false`, which they
shouldn't, the connection will automatically merge the individual
requests into a single payload anyway.
<a name="requests"></a>
## Requests
@@ -218,6 +129,7 @@ Redis documentation they are called
std::list<std::string> list {...};
std::map<std::string, mystruct> map { ...};
// The request can contain multiple commands.
request req;
// Command with variable length of arguments.
@@ -233,58 +145,23 @@ req.push_range("SUBSCRIBE", std::cbegin(list), std::cend(list));
req.push_range("HSET", "key", map);
```
Sending a request to Redis is performed with `aedis::connection::async_exec` as already stated.
<a name="serialization"></a>
### Serialization
The `resp3::request::push` and `resp3::request::push_range` member functions work
with integer data types e.g. `int` and `std::string` out of the box.
To send your own data type define a `to_bulk` function like this
```cpp
// Example struct.
struct mystruct {...};
// Serialize your data structure here.
void to_bulk(std::pmr::string& to, mystruct const& obj)
{
std::string dummy = "Dummy serialization string.";
aedis::resp3::to_bulk(to, dummy);
}
```
Once `to_bulk` is defined and visible over ADL, `mystruct` can
be passed to the `request`
```cpp
request req;
std::map<std::string, mystruct> map {...};
req.push_range("HSET", "key", map);
```
Example serialization.cpp shows how to store JSON strings in Redis.
<a name="responses"></a>
Sending a request to Redis is performed with `boost::redis::connection::async_exec` as already stated.
### Config flags
The `aedis::resp3::request::config` object inside the request dictates how the
`aedis::connection` should handle the request in some important situations. The
The `boost::redis::request::config` object inside the request dictates how the
`boost::redis::connection` should handle the request in some important situations. The
reader is advised to read it carefully.
<a name="responses"></a>
## Responses
Aedis uses the following strategy to support Redis responses
Boost.Redis uses the following strategy to support Redis responses
* **Static**: For `aedis::resp3::request` whose sizes are known at compile time
std::tuple is supported.
* **Dynamic**: Otherwise use `std::vector<aedis::resp3::node<std::string>>`.
* `boost::redis::request` is used for requests whose number of commands are not dynamic.
* **Dynamic**: Otherwise use `boost::redis::generic_response`.
For example, below is a request with a compile time size
For example, the request below has three commands
```cpp
request req;
@@ -293,22 +170,23 @@ req.push("INCR", "key");
req.push("QUIT");
```
To read the response to this request users can use the following tuple
and its response also has three commands and can be read in the
following response object
```cpp
std::tuple<std::string, int, std::string>
response<std::string, int, std::string>
```
The pattern might have become apparent to the reader: the tuple must
The response behaves as a tuple and must
have as many elements as the request has commands (exceptions below).
It is also necessary that each tuple element is capable of storing the
response to the command it refers to, otherwise an error will occur.
To ignore responses to individual commands in the request use the tag
`aedis::ignore`
`boost::redis::ignore_t`, for example
```cpp
// Ignore the second and last responses.
std::tuple<std::string, aedis::ignore, std::string, aedis::ignore>
response<std::string, boost::redis::ignore_t, std::string, boost::redis::ignore_t>
```
The following table provides the resp3-types returned by some Redis
@@ -355,31 +233,27 @@ req.push("QUIT");
can be read in the tuple below
```cpp
std::tuple<
aedis::ignore, // hello
int, // rpush
int, // hset
std::vector<T>, // lrange
std::map<U, V>, // hgetall
std::string // quit
response<
redis::ignore_t, // hello
int, // rpush
int, // hset
std::vector<T>, // lrange
std::map<U, V>, // hgetall
std::string // quit
> resp;
```
Where both are passed to `async_exec` as showed elsewhere
```cpp
co_await conn->async_exec(req, adapt(resp));
co_await conn->async_exec(req, resp, net::deferred);
```
If the intention is to ignore the response to all commands altogether
use `adapt()` without arguments instead
If the intention is to ignore responses altogether use `ignore`
```cpp
// Uses the ignore adapter explicitly.
co_await conn->async_exec(req, adapt());
// Ignore adapter is also the default argument.
co_await conn->async_exec(req);
// Ignores the response
co_await conn->async_exec(req, ignore, net::deferred);
```
Responses that contain nested aggregates or heterogeneous data
@@ -390,13 +264,13 @@ subset of the RESP3 specification.
### Pushes
Commands that have push response like
Commands that have no response like
* `"SUBSCRIBE"`
* `"PSUBSCRIBE"`
* `"UNSUBSCRIBE"`
must be **NOT** be included in the tuple. For example, the request below
must **NOT** be included in the response tuple. For example, the request below
```cpp
request req;
@@ -405,34 +279,34 @@ req.push("SUBSCRIBE", "channel");
req.push("QUIT");
```
must be read in this tuple `std::tuple<std::string, std::string>`,
that has size two.
must be read in this tuple `response<std::string, std::string>`,
that has static size two.
### Null
It is not uncommon for apps to access keys that do not exist or
that have already expired in the Redis server, to deal with these
cases Aedis provides support for `std::optional`. To use it,
cases Boost.Redis provides support for `std::optional`. To use it,
wrap your type around `std::optional` like this
```cpp
std::tuple<
response<
std::optional<A>,
std::optional<B>,
...
> resp;
co_await conn->async_exec(req, adapt(resp));
co_await conn->async_exec(req, resp, net::deferred);
```
Everything else stays pretty much the same.
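As a small usage sketch, assuming the first element above is a
`std::optional<std::string>`: each element is wrapped in
`boost::redis::adapter::result` (see the changelog), hence the extra
`.value()` before reaching the optional

```cpp
if (std::get<0>(resp).value().has_value())
   std::cout << "Value: " << std::get<0>(resp).value().value() << std::endl;
else
   std::cout << "Key does not exist." << std::endl;
```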
### Transactions
To read responses to transactions we must first observe that Redis will
queue the transaction commands and send their individual responses as elements
of an array, the array is itself the response to the `EXEC` command.
For example, to read the response to this request
To read responses to transactions we must first observe that Redis
will queue the transaction commands and send their individual
responses as elements of an array; the array is itself the response to
the `EXEC` command. For example, to read the response to this request
```cpp
req.push("MULTI");
@@ -445,46 +319,27 @@ req.push("EXEC");
use the following response type
```cpp
using aedis::ignore;
using boost::redis::ignore;
using exec_resp_type =
std::tuple<
response<
std::optional<std::string>, // get
std::optional<std::vector<std::string>>, // lrange
std::optional<std::map<std::string, std::string>> // hgetall
>;
std::tuple<
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
exec_resp_type, // exec
response<
boost::redis::ignore_t, // multi
boost::redis::ignore_t, // get
boost::redis::ignore_t, // lrange
boost::redis::ignore_t, // hgetall
exec_resp_type // exec
> resp;
co_await conn->async_exec(req, adapt(resp));
co_await conn->async_exec(req, resp, net::deferred);
```
For a complete example see containers.cpp.
### Deserialization
As mentioned in the serialization section, it is common practice to
serialize data before sending it to Redis e.g. as json strings. For
performance and convenience reasons, we may also want to deserialize
responses directly in their final data structure. Aedis supports this
use case by calling a user provided `from_bulk` function while parsing
the response. For example
```cpp
void from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
{
// Deserializes p into obj.
}
```
After that, you can start receiving data efficiently in the desired
types e.g. `mystruct`, `std::map<std::string, mystruct>` etc.
For a complete example see cpp20_containers.cpp.
<a name="the-general-case"></a>
@@ -494,19 +349,19 @@ There are cases where responses to Redis
commands won't fit in the model presented above, some examples are
* Commands (like `set`) whose responses don't have a fixed
RESP3 type. Expecting an `int` and receiving a blob-string
will result in error.
RESP3 type. Expecting an `int` and receiving a blob-string
will result in error.
* RESP3 aggregates that contain nested aggregates can't be read in STL containers.
* Transactions with a dynamic number of commands can't be read in a `std::tuple`.
* Transactions with a dynamic number of commands can't be read in a `response`.
To deal with these cases Aedis provides the `aedis::resp3::node` type
To deal with these cases Boost.Redis provides the `boost::redis::resp3::node` type
abstraction, that is the most general form of an element in a
response, be it a simple RESP3 type or the element of an aggregate. It
is defined like this
```cpp
template <class String>
struct node {
struct basic_node {
// The RESP3 type of the data in this node.
type data_type;
@@ -522,47 +377,67 @@ struct node {
```
Any response to a Redis command can be received in a
`std::vector<node<std::string>>`. The vector can be seen as a
`boost::redis::generic_response`. The vector can be seen as a
pre-order view of the response tree. Using it is not different than
using other types
```cpp
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await conn->async_exec(req, adapt(resp));
boost::redis::generic_response resp;
co_await conn->async_exec(req, resp, net::deferred);
```
For example, suppose we want to retrieve a hash data structure
from Redis with `HGETALL`, some of the options are
* `std::vector<node<std::string>`: Works always.
* `boost::redis::generic_response`: Works always.
* `std::vector<std::string>`: Efficient and flat, all elements as string.
* `std::map<std::string, std::string>`: Efficient if you need the data as a `std::map`.
* `std::map<U, V>`: Efficient if you are storing serialized data. Avoids temporaries and requires `from_bulk` for `U` and `V`.
* `std::map<U, V>`: Efficient if you are storing serialized data. Avoids temporaries and requires `boost_redis_from_bulk` for `U` and `V`.
In addition to the above users can also use unordered versions of the
containers. The same reasoning also applies to sets e.g. `SMEMBERS`
containers. The same reasoning applies to sets e.g. `SMEMBERS`
and other data structures in general.
<a name="serialization"></a>
## Serialization
Boost.Redis supports serialization of user defined types by means of
the following customization points
```cpp
// Serialize.
void boost_redis_to_bulk(std::string& to, mystruct const& obj);
// Deserialize
void boost_redis_from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
```
These functions are accessed over ADL and therefore they must be
imported in the global namespace by the user. In the
[Examples](#examples) section the reader can find examples showing how
to serialize using json and [protobuf](https://protobuf.dev/).
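As a rough sketch only, and assuming the string overload of
`boost_redis_to_bulk` that lives in `boost::redis::resp3` (used by the
library's serialization examples), the customization points for a
hypothetical `mystruct` could look like this

```cpp
struct mystruct { std::string payload; };

// Serialize: frame the payload as a RESP3 bulk string by delegating
// to the string overload provided by the library.
void boost_redis_to_bulk(std::string& to, mystruct const& obj)
{
   boost::redis::resp3::boost_redis_to_bulk(to, obj.payload);
}

// Deserialize: this sketch simply copies the received bytes, a real
// implementation would parse them and set ec on failure.
void boost_redis_from_bulk(mystruct& obj, char const* p, std::size_t size, boost::system::error_code& ec)
{
   obj.payload.assign(p, size);
}
```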
<a name="examples"></a>
## Examples
These examples demonstrate what has been discussed so far.
The examples below show how to use the features discussed so far
* intro.cpp: The Aedis hello-world program. Sends one command and quits the connection.
* intro_tls.cpp: Same as intro.cpp but over TLS.
* intro_sync.cpp: Shows how to use the connection class synchronously.
* containers.cpp: Shows how to send and receive STL containers and how to use transactions.
* serialization.cpp: Shows how to serialize types using Boost.Json.
* resolve_with_sentinel.cpp: Shows how to resolve a master address using sentinels.
* subscriber.cpp: Shows how to implement pubsub with reconnection re-subscription.
* echo_server.cpp: A simple TCP echo server.
* chat_room.cpp: A command line chat built on Redis pubsub.
* low_level_sync.cpp: Sends a ping synchronously using the low-level API.
* low_level_async.cpp: Sends a ping asynchronously using the low-level API.
* cpp20_intro.cpp: Does not use awaitable operators.
* cpp20_intro_tls.cpp: Communicates over TLS.
* cpp20_containers.cpp: Shows how to send and receive STL containers and how to use transactions.
* cpp20_json.cpp: Shows how to serialize types using Boost.Json.
* cpp20_protobuf.cpp: Shows how to serialize types using protobuf.
* cpp20_resolve_with_sentinel.cpp: Shows how to resolve a master address using sentinels.
* cpp20_subscriber.cpp: Shows how to implement pubsub with reconnection re-subscription.
* cpp20_echo_server.cpp: A simple TCP echo server.
* cpp20_chat_room.cpp: A command line chat built on Redis pubsub.
* cpp17_intro.cpp: Uses callbacks and requires C++17.
* cpp17_intro_sync.cpp: Runs `async_run` in a separate thread and performs synchronous calls to `async_exec`.
To avoid repetition code that is common to all examples has been
grouped in common.hpp. The main function used in some async examples
has been factored out in the main.cpp file.
The main function used in some async examples has been factored out in
the main.cpp file.
## Echo server benchmark
@@ -583,7 +458,7 @@ I also imposed some constraints on the implementations
To reproduce these results run one of the echo-server programs in one
terminal and the
[echo-server-client](https://github.com/mzimbres/aedis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp)
[echo-server-client](https://github.com/boostorg/redis/blob/42880e788bec6020dd018194075a211ad9f339e8/benchmarks/cpp/asio/echo_server_client.cpp)
in another.
### Without Redis
@@ -592,7 +467,7 @@ First I tested a pure TCP echo server, i.e. one that sends the messages
directly to the client without interacting with Redis. The result can
be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-direct.png)
![](https://boostorg.github.io/redis/tcp-echo-direct.png)
The tests were performed with a 1000 concurrent TCP connections on the
localhost where latency is 0.07ms on average on my machine. On higher
@@ -607,11 +482,11 @@ decrease.
The code used in the benchmarks can be found at
* [Asio](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/mzimbres/aedis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [here](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) Libuv example .
* [Tokio](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
* [Asio](https://github.com/boostorg/redis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/boostorg/redis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [this](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) Libuv example.
* [Tokio](https://github.com/boostorg/redis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/boostorg/redis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/boostorg/redis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
### With Redis
@@ -620,7 +495,7 @@ echoed by Redis and not by the echo-server itself, which acts
as a proxy between the client and the Redis server. The results
can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
![](https://boostorg.github.io/redis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average, otherwise it uses the same number of TCP connections
@@ -644,17 +519,17 @@ in the graph, the reasons are
The code used in the benchmarks can be found at
* [Aedis](https://github.com/mzimbres/aedis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
* [Boost.Redis](https://github.com/boostorg/redis): [code](https://github.com/boostorg/redis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/boostorg/redis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/boostorg/redis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
### Conclusion
Redis clients have to support automatic pipelining to have competitive performance. For updates to this document follow https://github.com/mzimbres/aedis.
Redis clients have to support automatic pipelining to have competitive performance. For updates to this document follow https://github.com/boostorg/redis.
## Comparison
The main reason for why I started writing Aedis was to have a client
The main reason why I started writing Boost.Redis was to have a client
compatible with the Asio asynchronous model. As I made progress I could
also address what I considered weaknesses in other libraries. Due to
time constraints I won't be able to give a detailed comparison with
@@ -665,12 +540,12 @@ stars, namely
* https://github.com/sewenew/redis-plus-plus
### Aedis vs Redis-plus-plus
### Boost.Redis vs Redis-plus-plus
Before we start it is important to mentioning some of the things
Before we start it is important to mention some of the things
redis-plus-plus does not support
* The latest version of the communication protocol RESP3. Without it it is impossible to support some important Redis features like client side caching, among other things.
* The latest version of the communication protocol RESP3. Without that it is impossible to support some important Redis features like client side caching, among other things.
* Coroutines.
* Reading responses directly in user data structures to avoid creating temporaries.
* Error handling with support for error-code.
@@ -735,7 +610,7 @@ Transactions also suffer from the very same problem.
> NOTE: Creating a Transaction object is NOT cheap, since it
> creates a new connection.
In Aedis there is no difference between sending one command, a
In Boost.Redis there is no difference between sending one command, a
pipeline or a transaction because requests are decoupled
from the IO objects.
@@ -766,55 +641,161 @@ It is also not clear how are pipelines realised with this design
<a name="api-reference"></a>
## Reference
* [High-Level](#high-level-api): Covers the topics discussed in this document.
* [Low-Level](#low-level-api): Covers low-level building blocks. Provided mostly for developers, most users won't need any information provided here.
## Installation
Download the latest release on
https://github.com/mzimbres/aedis/releases. Aedis is a header only
library, so you can start using it right away by adding the
`include` subdirectory to your project and including
```cpp
#include <aedis/src.hpp>
```
in no more than one source file in your applications. To build the
examples and test cmake is supported, for example
```cpp
BOOST_ROOT=/opt/boost_1_80_0 cmake --preset dev
```
The requirements for using Aedis are
- Boost 1.80 or greater.
- C++17 minimum.
- Redis 6 or higher (must support RESP3).
- Optionally also redis-cli and Redis Sentinel.
The following compilers are supported
- Gcc: 10, 11, 12.
- Clang: 11, 13, 14.
- Visual Studio 17 2022, Visual Studio 16 2019.
The [High-Level](#high-level-api) page documents all public types.
## Acknowledgement
Acknowledgement to people that helped shape Aedis
Acknowledgement to people that helped shape Boost.Redis
* Richard Hodges ([madmongo1](https://github.com/madmongo1)): For very helpful support with Asio, the design of asynchronous programs, etc.
* Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Aedis consumes buffers in the read operation.
* Vinícius dos Santos Oliveira ([vinipsmaker](https://github.com/vinipsmaker)): For useful discussion about how Boost.Redis consumes buffers in the read operation.
* Petr Dannhofer ([Eddie-cz](https://github.com/Eddie-cz)): For helping me understand how the `AUTH` and `HELLO` commands can influence each other.
* Mohammad Nejati ([ashtum](https://github.com/ashtum)): For pointing out scenarios where calls to `async_exec` should fail when the connection is lost.
* Klemens Morgenstern ([klemens-morgenstern](https://github.com/klemens-morgenstern)): For useful discussion about timeouts, cancellation, synchronous interfaces and general help with Asio.
* Vinnie Falco ([vinniefalco](https://github.com/vinniefalco)): For general suggestions about how to improve the code and the documentation.
* Bram Veldhoen ([bveldhoen](https://github.com/bveldhoen)): For contributing a Redis-streams example.
Also many thanks to all individuals that participated in the Boost
review
* Zach Laine: https://lists.boost.org/Archives/boost/2023/01/253883.php
* Vinnie Falco: https://lists.boost.org/Archives/boost/2023/01/253886.php
* Christian Mazakas: https://lists.boost.org/Archives/boost/2023/01/253900.php
* Ruben Perez: https://lists.boost.org/Archives/boost/2023/01/253915.php
* Dmitry Arkhipov: https://lists.boost.org/Archives/boost/2023/01/253925.php
* Alan de Freitas: https://lists.boost.org/Archives/boost/2023/01/253927.php
* Mohammad Nejati: https://lists.boost.org/Archives/boost/2023/01/253929.php
* Sam Hartsfield: https://lists.boost.org/Archives/boost/2023/01/253931.php
* Miguel Portilla: https://lists.boost.org/Archives/boost/2023/01/253935.php
* Robert A.H. Leahy: https://lists.boost.org/Archives/boost/2023/01/253928.php
The Reviews can be found at:
https://lists.boost.org/Archives/boost/2023/01/date.php. The thread
with the ACCEPT from the review manager can be found here:
https://lists.boost.org/Archives/boost/2023/01/253944.php.
## Changelog
### v1.4.0
### Boost 1.85
* Removes dependency on Boost.Hana, boost::string_view, Boost.Variant2 and Boost.Spirit.
* ([Issue 170](https://github.com/boostorg/redis/issues/170))
Under load and on low-latency networks it is possible to start
receiving responses before the write operation has completed and while
the request is still marked as staged and not written. This messes
up the heuristics that classify responses as unsolicited or
not.
* ([Issue 168](https://github.com/boostorg/redis/issues/168)).
Provides a way of passing a custom SSL context to the connection.
The design here differs from that of Boost.Beast and Boost.MySQL
since in Boost.Redis the connection owns the context instead of only
storing a reference to a user-provided one. This is acceptable because
apps need only one connection for their entire application, which
makes the overhead of one ssl-context per connection negligible.
* ([Issue 181](https://github.com/boostorg/redis/issues/181)).
See a detailed description of this bug in
[this](https://github.com/boostorg/redis/issues/181#issuecomment-1913346983)
comment.
* ([Issue 182](https://github.com/boostorg/redis/issues/182)).
Sets `"default"` as the default value of `config::username`. This
makes it simpler to use the `requirepass` configuration in Redis.
* ([Issue 189](https://github.com/boostorg/redis/issues/189)).
Fixes a narrowing conversion by using `std::size_t` instead of
`std::uint64_t` for the sizes of bulks and aggregates. The code
now relies on `std::from_chars` returning an error if a value
greater than what 32 bits can represent is received on platforms
where the size of `std::size_t` is 32 bits.
### Boost 1.84 (First release in Boost)
* Deprecates the `async_receive` overload that takes a response. Users
should now first call `set_receive_response` to avoid constantly and
unnecessarily setting the same response.
* Uses `std::function` to type-erase the response adapter. This change
should not influence users in any way but allowed an important
simplification of the connection internals. This resulted in a
massive performance improvement.
* The connection has a new member `get_usage()` that returns the
connection usage information, such as number of bytes written,
received etc.
* There are massive performance improvements in the consumption of
server pushes, which are now communicated with an `asio::channel` and
therefore can be buffered, which avoids blocking the socket read-loop.
Batch reads are also supported by means of `channel.try_send` and
buffered messages can be consumed synchronously with
`connection::receive`. The function `boost::redis::cancel_one` has
been added to simplify processing multiple server pushes contained
in the same `generic_response`. *IMPORTANT*: These changes may
result in more than one push in the response when
`connection::async_receive` resumes. The user must therefore be
careful when calling `resp.clear()`: either ensure that all messages
have been processed or just use `consume_one`.
### v1.4.2 (incorporates changes to conform to the Boost review and more)
* Adds `boost::redis::config::database_index` to make it possible to
choose a database before starting running commands e.g. after an
automatic reconnection.
* Massive performance improvement. One of my tests went from
140k req/s to 390k/s. This was possible after a parser
simplification that reduced the number of reschedules and buffer
rotations.
* Adds Redis stream example.
* Renames the project to Boost.Redis and moves the code into namespace
`boost::redis`.
* As pointed out in the reviews the `to_bulk` and `from_bulk` names were too
generic for ADL customization points. They gained the prefix `boost_redis_`.
* Moves `boost::redis::resp3::request` to `boost::redis::request`.
* Adds new typedef `boost::redis::response` that should be used instead of
`std::tuple`.
* Adds new typedef `boost::redis::generic_response` that should be used instead
of `std::vector<resp3::node<std::string>>`.
* Renames `redis::ignore` to `redis::ignore_t`.
* Changes `async_exec` to receive a `redis::response` instead of an adapter,
namely, instead of passing `adapt(resp)` users should pass `resp` directly.
* Introduces `boost::redis::adapter::result` to store responses to commands
including possible resp3 errors without losing the error diagnostic part. To
access values now use `std::get<N>(resp).value()` instead of
`std::get<N>(resp)`.
* Implements full-duplex communication. Before these changes the connection
would wait for a response to arrive before sending the next one. Now requests
are continuously coalesced and written to the socket. `request::coalesce`
became unnecessary and was removed. I could measure significant performance
gains with these changes.
* Improves serialization examples using Boost.Describe to serialize to JSON and protobuf. See
cpp20_json.cpp and cpp20_protobuf.cpp for more details.
* Upgrades to Boost 1.81.0.
* Fixes build with libc++.
* Adds high-level functionality to the connection classes. For
example, `boost::redis::connection::async_run` will automatically
resolve, connect, reconnect and perform health checks.
### v1.4.0-1
* Renames `retry_on_connection_lost` to `cancel_if_unresponded`. (v1.4.1)
* Removes dependency on Boost.Hana, `boost::string_view`, Boost.Variant2 and Boost.Spirit.
* Fixes build and setup CI on windows.
### v1.3.0-1
@@ -825,7 +806,7 @@ Acknowledgement to people that helped shape Aedis
implemented properly without bloating the connection class. It is
now a user responsibility to send HELLO. Requests that contain it have
priority over other requests and will be moved to the front of the
queue, see `aedis::resp3::request::config`
queue, see `aedis::request::config`
* Automatic name resolving and connecting have been removed from
`aedis::connection::async_run`. Users have to do this step manually
@@ -856,21 +837,21 @@ Acknowledgement to people that helped shape Aedis
asio::error::eof is received. This makes it easier to write
composed operations with awaitable operators.
* Adds allocator support in the `aedis::resp3::request` (a
* Adds allocator support in the `aedis::request` (a
contribution from Klemens Morgenstern).
* Renames `aedis::resp3::request::push_range2` to `push_range`. The
* Renames `aedis::request::push_range2` to `push_range`. The
suffix 2 was used for disambiguation. Klemens fixed it with SFINAE.
* Renames `fail_on_connection_lost` to
`aedis::resp3::request::config::cancel_on_connection_lost`. Now, it will
`aedis::request::config::cancel_on_connection_lost`. Now, it will
only cause connections to be canceled when `async_run` completes.
* Introduces `aedis::resp3::request::config::cancel_if_not_connected` which will
* Introduces `aedis::request::config::cancel_if_not_connected` which will
cause a request to be canceled if `async_exec` is called before a
connection has been established.
* Introduces new request flag `aedis::resp3::request::config::retry` that if
* Introduces new request flag `aedis::request::config::retry` that if
set to true will cause the request to not be canceled when it was
sent to Redis but remained unresponded after `async_run` completed.
It provides a way to avoid executing commands twice.
@@ -900,7 +881,7 @@ Acknowledgement to people that helped shape Aedis
### v1.1.0-1
* Removes `coalesce_requests` from the `aedis::connection::config`, it
became a request property now, see `aedis::resp3::request::config::coalesce`.
became a request property now, see `aedis::request::config::coalesce`.
* Removes `max_read_size` from the `aedis::connection::config`. The maximum
read size can be specified now as a parameter of the
@@ -989,7 +970,7 @@ Acknowledgement to people that helped shape Aedis
* `connection::async_receive_event` is now being used to communicate
internal events to the user, such as resolve, connect, push etc. For
examples see subscriber.cpp and `connection::event`.
examples see cpp20_subscriber.cpp and `connection::event`.
* The `aedis` directory has been moved to `include` to look more
similar to Boost libraries. Users should now replace `-I/aedis-path`

benchmarks/CMakeLists.txt Normal file

@@ -0,0 +1,20 @@
add_library(benchmarks_options INTERFACE)
target_link_libraries(benchmarks_options INTERFACE boost_redis_src)
target_link_libraries(benchmarks_options INTERFACE boost_redis_project_options)
target_compile_features(benchmarks_options INTERFACE cxx_std_20)
add_executable(echo_server_client cpp/asio/echo_server_client.cpp)
target_link_libraries(echo_server_client PRIVATE benchmarks_options)
add_executable(echo_server_direct cpp/asio/echo_server_direct.cpp)
target_link_libraries(echo_server_direct PRIVATE benchmarks_options)
# TODO
#=======================================================================
#.PHONY: bench
#bench:
# pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
# pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex
# pdftoppm {input.pdf} {output.file} -png


@@ -0,0 +1,4 @@
$ npm install
$ node echo_server_over_redis.js


@@ -1,7 +1,7 @@
import { createClient } from 'redis';
import * as net from 'net';
const client = createClient({url: 'redis://db.occase.de:6379' });
const client = createClient({url: 'redis://aedis.occase.de:63799' });
client.on('error', (err) => console.log('Redis Client Error', err));
await client.connect();


@@ -1,4 +0,0 @@
@PACKAGE_INIT@
include("${CMAKE_CURRENT_LIST_DIR}/Aedis.cmake")
check_required_components("@PROJECT_NAME@")

File diff suppressed because it is too large

doc/Jamfile Normal file

@@ -0,0 +1,92 @@
project redis/doc ;
import doxygen ;
import path ;
import sequence ;
# All paths must be absolute to work well with the Doxygen rules.
path-constant this_dir : . ;
path-constant target_dir : html ;
path-constant redis_root_dir : .. ;
path-constant include_dir : ../include ;
path-constant examples_dir : ../example ;
path-constant readme : ../README.md ;
path-constant layout_file : DoxygenLayout.xml ;
path-constant header : header.html ;
path-constant footer : footer.html ;
local stylesheet_files = [ path.glob $(this_dir) : *.css ] ;
local includes = [ path.glob-tree $(include_dir) : *.hpp *.cpp ] ;
local examples = [ path.glob-tree $(examples_dir) : *.hpp *.cpp ] ;
# If passed directly, several HTML_EXTRA_STYLESHEET tags are generated,
# which is not correct.
local stylesheet_arg = [ sequence.join "\"$(stylesheet_files)\"" : " " ] ;
# The doxygen rule requires the target name to end in .html to generate HTML files
doxygen doc.html
:
$(includes) $(examples) $(readme)
:
<doxygen:param>"PROJECT_NAME=Boost.Redis"
<doxygen:param>PROJECT_NUMBER="1.84.0"
<doxygen:param>PROJECT_BRIEF="A redis client library"
<doxygen:param>"STRIP_FROM_PATH=\"$(redis_root_dir)\""
<doxygen:param>"STRIP_FROM_INC_PATH=\"$(include_dir)\""
<doxygen:param>BUILTIN_STL_SUPPORT=YES
<doxygen:param>INLINE_SIMPLE_STRUCTS=YES
<doxygen:param>HIDE_UNDOC_MEMBERS=YES
<doxygen:param>HIDE_UNDOC_CLASSES=YES
<doxygen:param>SHOW_HEADERFILE=YES
<doxygen:param>SORT_BRIEF_DOCS=YES
<doxygen:param>SORT_MEMBERS_CTORS_1ST=YES
<doxygen:param>SHOW_FILES=NO
<doxygen:param>SHOW_NAMESPACES=NO
<doxygen:param>"LAYOUT_FILE=\"$(layout_file)\""
<doxygen:param>WARN_IF_INCOMPLETE_DOC=YES
<doxygen:param>FILE_PATTERNS="*.hpp *.cpp"
<doxygen:param>EXCLUDE_SYMBOLS=std
<doxygen:param>"USE_MDFILE_AS_MAINPAGE=\"$(readme)\""
<doxygen:param>SOURCE_BROWSER=YES
<doxygen:param>"HTML_HEADER=\"$(header)\""
<doxygen:param>"HTML_FOOTER=\"$(footer)\""
<doxygen:param>"HTML_EXTRA_STYLESHEET=$(stylesheet_arg)"
<doxygen:param>HTML_TIMESTAMP=YES
<doxygen:param>GENERATE_TREEVIEW=YES
<doxygen:param>FULL_SIDEBAR=YES
<doxygen:param>DISABLE_INDEX=YES
<doxygen:param>ENUM_VALUES_PER_LINE=0
<doxygen:param>OBFUSCATE_EMAILS=YES
<doxygen:param>USE_MATHJAX=YES
<doxygen:param>MATHJAX_VERSION=MathJax_2
<doxygen:param>MATHJAX_RELPATH="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/"
<doxygen:param>MACRO_EXPANSION=YES
<doxygen:param>HAVE_DOT=NO
<doxygen:param>CLASS_GRAPH=NO
<doxygen:param>DIRECTORY_GRAPH=NO
;
explicit doc.html ;
# The doxygen rule only informs b2 about the main HTML file, and not about
# all the doc directory that gets generated. Using the install rule copies
# only a single file, which is incorrect. This is a workaround to copy
# the generated docs to the doc/html directory, where they should be.
make copyhtml.tag : doc.html : @copy_html_dir ;
explicit copyhtml.tag ;
actions copy_html_dir
{
rm -rf $(target_dir)
mkdir -p $(target_dir)
cp -r $(<:D)/html/doc/* $(target_dir)/
echo "Stamped" > "$(<)"
}
# These are used to inform the build system of the
# means to build the integrated and stand-alone docs.
alias boostdoc ;
explicit boostdoc ;
alias boostrelease : copyhtml.tag ;
explicit boostrelease ;

File diff suppressed because it is too large


@@ -0,0 +1,145 @@
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
MIT License
Copyright (c) 2021 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
html {
/* side nav width. MUST be = `TREEVIEW_WIDTH`.
* Make sure it is wide enough to contain the page title (logo + title + version)
*/
--side-nav-fixed-width: 335px;
}
#projectname {
white-space: nowrap;
}
#page-wrapper {
height: calc(100vh - 100px);
display: flex;
flex-direction: column;
}
#content-wrapper {
display: flex;
flex-direction: row;
min-height: 0;
}
#doc-content {
overflow-y: scroll;
flex: 1;
height: auto !important;
}
@media (min-width: 768px) {
html {
--searchbar-background: var(--page-background-color);
}
#sidebar-wrapper {
display: flex;
flex-direction: column;
min-width: var(--side-nav-fixed-width);
max-width: var(--side-nav-fixed-width);
background-color: var(--side-nav-background);
border-right: 1px solid rgb(222, 222, 222);
}
#search-box-wrapper {
display: flex;
flex-direction: row;
padding-left: 1em;
padding-right: 1em;
}
#MSearchBox {
flex: 1;
display: flex;
padding-left: 1em;
padding-right: 1em;
}
#MSearchBox .left {
display: flex;
flex: 1;
position: static;
align-items: center;
justify-content: flex-start;
width: auto;
height: auto;
}
#MSearchBox .right {
display: none;
}
#MSearchSelect {
padding-left: 0.75em;
left: auto;
background-repeat: no-repeat;
}
#MSearchField {
flex: 1;
position: static;
width: auto;
height: auto;
}
#nav-tree {
height: auto !important;
}
#nav-sync {
display: none;
}
#top {
display: block;
border-bottom: none;
max-width: var(--side-nav-fixed-width);
background: var(--side-nav-background);
}
.ui-resizable-handle {
cursor: default;
width: 1px !important;
}
#MSearchResultsWindow {
left: var(--spacing-medium) !important;
right: auto;
}
}
@media (max-width: 768px) {
#sidebar-wrapper {
display: none;
}
}

doc/doxygen-awesome.css Normal file

File diff suppressed because it is too large

doc/footer.html Normal file

@@ -0,0 +1,19 @@
<!-- HTML footer for doxygen 1.9.1-->
<!-- start footer part -->
</div> <!-- close #content-wrapper -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">$generatedby <a href="https://www.doxygen.org/index.html"><img class="footer" src="$relpath^doxygen.svg" width="104" height="31" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby&#160;<a href="https://www.doxygen.org/index.html"><img class="footer" src="$relpath^doxygen.svg" width="104" height="31" alt="doxygen"/></a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
</div> <!-- #page-wrapper -->
</body>
</html>

doc/header.html Normal file

@@ -0,0 +1,61 @@
<!-- HTML header for doxygen 1.9.1-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<div id="page-wrapper">
<div id="content-wrapper">
<div id="sidebar-wrapper">
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
</div>
<!--BEGIN SEARCHENGINE-->
<div id="search-box-wrapper">
$searchbox
</div>
<!--END SEARCHENGINE-->
<!--END TITLEAREA-->
<!-- end header part -->


@@ -0,0 +1,671 @@
# On the costs of asynchronous abstractions
The biggest force behind the evolution of
[Boost.Redis](https://github.com/boostorg/redis) was my struggle to
come up with a high-level connection abstraction that was capable of
multiplexing Redis commands from independent sources while
concurrently handling server pushes. This journey taught me many
important lessons, many of which are related to the design and
performance of asynchronous programs based on Boost.Asio.
In this article I will share some of the lessons learned, especially
those related to the performance costs of _abstractions_ such as
`async_read_until` that tend to overschedule work onto the event-loop. In
this context I will also briefly comment on how the topics discussed
here influenced my views on the proposed
[P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
(a.k.a. Senders and Receivers), which is likely to become the basis of
networking in upcoming C++ standards.
Although the analysis presented in this article uses the Redis communication
protocol for illustration I expect it to be useful in general since
[RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) shares
many similarities with other widely used protocols such as HTTP.
## Parsing `\r\n`-delimited messages
The Redis server communicates with its clients by exchanging data
serialized in
[RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) format.
Among the data types supported by this specification, the
`\r\n`-delimited messages are some of the most frequent in a typical
session. The table below shows some examples
Command | Response | Wire format | RESP3 name
---------|----------|---------------|---------------------
PING | PONG | `+PONG\r\n` | simple-string
INCR | 42 | `:42\r\n` | number
GET | null | `_\r\n` | null
Redis also supports command pipelines, which provide a way of
optimizing round-trip times by batching commands. A pipeline composed
of the commands shown in the previous table looks like this
```
| Sent in a |
| single write |
+--------+ | | +-------+
| | --------> PING + INCR + GET --------> | |
| | | |
| Client | | Redis |
| | | |
| | <-------- "+PONG\r\n:42\r\n_\r\n" <-------- | |
+--------+ |<------>|<---->|<-->| +-------+
| |
| Responses |
```
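For reference, this is the kind of payload Boost.Redis itself builds when it
coalesces requests; a minimal sketch with the `request` class described in
the README (key name assumed)

```cpp
// All three commands are written to the socket in a single payload,
// exactly as in the diagram above.
boost::redis::request req;
req.push("PING");
req.push("INCR", "mykey");
req.push("GET", "mykey");
```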
Messages that use delimiters are so common in networking that a
facility called `async_read_until` for reading them incrementally from
a socket is already part of Boost.Asio. The coroutine below uses it to
print message contents to the screen
```cpp
awaitable<void> parse_resp3_simple_msgs(tcp::socket socket)
{
for (std::string buffer;;) {
auto n = co_await async_read_until(socket, dynamic_buffer(buffer), "\r\n");
std::cout << buffer.substr(1, n - 3) << std::endl;
// Consume the buffer.
buffer.erase(0, n);
}
}
```
If we pay attention to the buffer content as it is parsed by the code
above we can see it is rotated fairly often, for example
```
"+PONG\r\n:100\r\n+OK\r\n_\r\n"
":100\r\n+OK\r\n_\r\n"
"+OK\r\n_\r\n"
"_\r\n"
""
```
When I first noticed these apparently excessive buffer rotations I
was concerned they would impact the performance of Boost.Redis in a
severe way. To measure the magnitude of this impact I came up with an
experimental implementation of Asio's `dynamic_buffer` that consumed
the buffer less eagerly than the `std::string::erase` function used
above. For that, the implementation increased a buffer offset up
to a certain threshold and only then triggered a (larger) rotation.
This is illustrated in the diagram below
```
|<---- offset threshold ---->|
| |
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
| # Initial offset
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<------>| # After 1st message
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<-------------->| # After 2nd message
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<--------------------->| # After 3rd message
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<-------------------------->| # Threshold crossed after the 4th message
"+PONG\r\n"
| # After rotation
```
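The following is a minimal sketch of that experiment, stripped of the
`dynamic_buffer` interface for brevity; the threshold value is arbitrary

```cpp
#include <cstddef>
#include <string>
#include <string_view>

// Tracks consumed bytes with an offset and only rotates the string
// once the offset crosses a threshold, as in the diagram above.
struct lazy_buffer {
   std::string data;
   std::size_t offset = 0;
   static constexpr std::size_t threshold = 1024; // arbitrary

   // The bytes that have not been consumed yet.
   std::string_view unread() const noexcept
   {
      return std::string_view{data}.substr(offset);
   }

   // Marks n bytes as consumed without moving any data around; a
   // single, larger rotation happens only when the threshold is crossed.
   void consume(std::size_t n)
   {
      offset += n;
      if (offset >= threshold) {
         data.erase(0, offset);
         offset = 0;
      }
   }
};
```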
After comparing the performance of the two versions I
was surprised to find no difference at all! But that was also very suspicious
since some RESP3 aggregate types contain a considerable number of
separators. For example, a map with two pairs `[(key1, value1),
(key2, value2)]` encoded in RESP3 requires ten rotations in total
```
"%2\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"key1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
...
```
It was evident that something more costly was overshadowing the buffer
rotations. But it couldn't be the search for the separator since it
performs equivalently to the rotations. It is also easy to show that the
overhead is not related to any IO operation since the problem persists
if the buffer is never consumed (which causes the function to be
called with the same string repeatedly). Once these two factors
are removed from the table, we are driven to the conclusion that
calling `async_read_until` has an intrinsic cost; let us see what
that is.
### Async operations that complete synchronously considered harmful
Assume the scenario described earlier where `async_read_until` is used
to parse multiple `\r\n`-delimited messages. The following is a
detailed description of what happens behind the scenes
1. `async_read_until` calls `socket.async_read_some` repeatedly
until the separator `\r\n` shows up in the buffer
```
"<read1>" # Read 1: needs more data.
"<read1><read2>" # Read 2: needs more data.
"<read1><read2>" # Read 3: needs more data.
"<read1><read2><read3>" # Read 4: needs more data.
"<read1><read2><read3>\r\n<bonus bytes>" # separator found, done.
```
2. The last call to `socket.async_read_some` happens to read past
the separator `\r\n` (depicted as `<bonus bytes>` above),
resulting in bonus (maybe incomplete) messages in the buffer
```
| 1st async_read_some | 2nd async_read_some |
| | |
"+message content here \r\n:100\r\n+OK\r\n_\r\n+incomplete respo"
| | | |
| Message wanted |<-- bonus msgs --->|<--incomplete-->|
| | msg |
| | |
| |<---------- bonus bytes ----------->|
```
3. The buffer is consumed and `async_read_until` is called again.
However, since the buffer already contains the next message this
is an IO-less call
```
":100\r\n+OK\r\n_\r\n+not enough byt"
| | |
| No IO required | Need more |
| to parse these | data |
| messages. | |
```
The fact that step 3. doesn't perform any IO implies the operation can
complete synchronously, but because this is an asynchronous function
Boost.Asio by default won't call the continuation before the
function returns. The implementation must therefore enqueue it for
execution, as depicted below
```
OP5 ---> OP4 ---> OP3 ---> OP2 ---> OP1 # Reschedules the continuation
|
OP1 schedules its continuation |
+-----------------------------------+
|
|
OP6 ---> OP5 ---> OP4 ---> OP3 ---> OP2 # Reschedules the continuation
|
OP2 schedules its continuation |
+-----------------------------------+
|
|
OP7 ---> OP6 ---> OP5 ---> OP4 ---> OP3
```
When summed up, the excessive rescheduling of continuations leads to
performance degradation at scale. But since this is an event-loop
there is no way around rescheduling as doing otherwise would mean
allowing a task to monopolize the event-loop, preventing other tasks
from making progress. The best that can be done is to avoid
_overscheduling_, so let us determine how much rescheduling is too
much.
## The intrinsic latency of an event-loop
An event-loop is a design pattern originally used to handle events
external to the application, such as GUIs, networking and other forms
of IO. If we take this literally, it becomes evident that the way
`async_read_until` works is incompatible with an event-loop since
_searching for the separator_ is not an external event and as such
should not have to be enqueued for execution.
Once we constrain ourselves to events that have an external origin,
such as anything related to IO, including any form of IPC, the
scheduling overhead is reduced considerably since the latency
of the transport layer eclipses whatever time it takes to schedule the
continuation. For example, according to
[these](https://www.boost.org/doc/libs/develop/libs/cobalt/doc/html/index.html#posting_to_an_executor)
benchmarks, the time it takes to schedule a task in the
`asio::io_context` is approximately `50ns`.
To give the reader an idea of the magnitude of this number: if
rescheduling alone were to account for 1% of the runtime of an app
that uses asynchronous IO to move data around in chunks of size 128kb,
then this app would have a throughput of approximately 24Gbs. At such
a high throughput multiple other factors kick in before any scheduling
overhead even starts to manifest.
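As a rough back-of-the-envelope check of where a number of that order
comes from (an assumption about the calculation: one reschedule per
chunk, chunk size read as 128KiB)

```
50ns at 1% of the per-chunk budget  =>  ~5us of total work per 128KiB chunk
128KiB every 5us                    =>  ~26GB/s, the order of magnitude quoted above
```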
It is therefore safe to say that only asynchronous operations that
don't perform, or are not bound to, any IO are ever likely to
overschedule in the sense described above. Those cases can usually be
avoided; this is what worked for Boost.Redis:
1. `async_read_until` was replaced with calls to
   `socket.async_read_some` and an incremental parser that does not
   do any IO (see the first sketch after this list).
2. Channel `try_` functions are used to check whether send and receive
   operations can complete without suspension: for example,
   `try_send` before `async_send` and `try_receive` before
   `async_receive` ([see also](https://github.com/chriskohlhoff/asio/commit/fe4fd7acf145335eeefdd19708483c46caeb45e5)
   `try_send_via_dispatch` for a more aggressive optimization). A
   sketch of this pattern also follows the list.
3. Coalescing of individual requests into a single payload to reduce
   the number of necessary writes on the socket; this is only
   possible because Redis supports pipelining (good protocols
   help!).
4. Increased the socket read size to 4kb to reduce the number of
   reads (which outweighs the cost of rotating data in the
   buffer).
5. Dropped the `resp3::async_read` abstraction. When I started
   developing Boost.Redis there was convincing precedent for having
   a `resp3::async_read` function that reads complete RESP3 messages
   from a socket:

   Name | Description
   ---------------------------------------|-------------------
   `asio::async_read` | Reads `n` bytes from a stream.
   `beast::http::async_read` | Reads a complete HTTP message.
   `beast::websocket::stream::async_read` | Reads a complete Websocket message.
   `redis::async_read` | Reads a complete RESP3 message.
   It turns out however that this function is also vulnerable to
   immediate completions, since with command pipelines multiple
   responses show up in the buffer after a single call to
   `socket.async_read_some`. When that happens, each call to
   `resp3::async_read` is IO-less.
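To illustrate item 1, here is a minimal sketch (not the actual
Boost.Redis implementation; `parse_messages` and `handle` are
hypothetical placeholders) of a read loop that only touches the
event-loop for real IO

```cpp
asio::awaitable<void> read_loop(tcp::socket& socket, std::string& buffer)
{
   std::array<char, 4096> chunk;
   for (;;) {
      // Drain every complete message already sitting in the buffer.
      // This is plain synchronous code: no posting, no rescheduling.
      parse_messages(buffer, handle); // Hypothetical incremental parser.

      // Only now is the socket awaited, i.e. only real IO suspends.
      auto n = co_await socket.async_read_some(asio::buffer(chunk), asio::use_awaitable);
      buffer.append(chunk.data(), n);
   }
}
```

And a sketch of the `try_`-before-`async_` pattern from item 2, using
`asio::experimental::channel` (again illustrative rather than the
library's internals)

```cpp
using channel_t =
   asio::experimental::channel<void(boost::system::error_code, std::string)>;

asio::awaitable<std::string> receive_message(channel_t& ch)
{
   std::string msg;

   // Fast path: a message is already buffered in the channel, so the
   // coroutine neither suspends nor goes through the scheduler.
   bool const ready = ch.try_receive([&](boost::system::error_code, std::string m) {
      msg = std::move(m);
   });

   if (!ready)
      msg = co_await ch.async_receive(asio::use_awaitable); // Slow path: suspend.

   co_return msg;
}
```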
Sometimes it is not possible to avoid asynchronous operations that
complete synchronously; in the following sections we will see how to
favor throughput over fairness in Boost.Asio.
### Calling the continuation inline
In Boost.Asio it is possible to customize how an algorithm executes
the continuation when an immediate completion occurs. This includes
the ability to call it inline, thereby avoiding the costs of
excessive rescheduling. Here is how it works
```cpp
// (default) The continuation is enqueued for execution, regardless of
// whether the completion is immediate or not.
async_read_until(socket, buffer, "\r\n", continuation);

// Immediate completions are executed in exec2 (otherwise equal to the
// version above). The continuation is called inline if exec2 is the
// same executor that is running the operation.
async_read_until(socket, buffer, "\r\n", bind_immediate_executor(exec2, continuation));
```
To compare the performance of both cases I have written a small
function that calls `async_read_until` in a loop with a buffer that is
never consumed, so that all completions are immediate. The version
below uses the default behaviour
```cpp
void read_safe(tcp::socket& s, std::string& buffer)
{
auto continuation = [&s, &buffer](auto ec, auto n)
{
read_safe(s, buffer); // Recursive call
};
// This won't cause stack exhaustion because the continuation is
// not called inline but posted in the event loop.
async_read_until(s, dynamic_buffer(buffer), "\r\n", continuation);
}
```
To optimize away some of the rescheduling the version below uses the
`bind_immediate_executor` customization to call the continuation
reentrantly and then breaks the stack from time to time to avoid
exhausting it
```cpp
void read_reentrant(tcp::socket& s, std::string& buffer)
{
   static int counter = 0; // Counts calls so the stack can be broken periodically.

   auto cont = [&](error_code, std::size_t)
   {
      read_reentrant(s, buffer); // Recursive call
   };

   // Breaks the callstack after 16 inline calls.
   if (++counter % 16 == 0) {
      post(s.get_executor(), [cont](){ cont({}, 0); });
      return;
   }

   // Continuation called reentrantly.
   async_read_until(s, dynamic_buffer(buffer), "\r\n",
      bind_immediate_executor(s.get_executor(), cont));
}
```
The diagram below shows what the reentrant chain of calls in the code
above looks like from the event-loop's point of view
```
OP5 ---> OP4 ---> OP3 ---> OP2 ---> OP1a # Completes immediately
|
|
... |
OP1b # Completes immediately
|
Waiting for OP5 to |
reschedule its |
continuation OP1c # Completes immediately
|
|
... |
OP1d # Break the call-stack
|
+-----------------------------------+
|
OP6 ---> OP5 ---> OP4 ---> OP3 ---> OP2
```
Unsurprisingly, the reentrant code is 3x faster than the one that
relies on the default behaviour (don't forget that this is a best-case
scenario; in the general case not all completions are immediate).
Although faster, this strategy has some downsides:
- The overall operation is not as fast as possible since it still
  has to reschedule from time to time to break the call stack. The
  less often it reschedules, the higher the risk of exhausting the stack.
- It is too easy to forget to break the stack. For example, the
programmer might decide to branch somewhere into another chain of
asynchronous calls that also use this strategy. To avoid
exhaustion all such branches would have to be safeguarded with a
manual rescheduling i.e. `post`.
- Requires additional layers of complexity such as
`bind_immediate_executor` in addition to `bind_executor`.
- Non-compliant with stricter
  [guidelines](https://en.wikipedia.org/wiki/The_Power_of_10:_Rules_for_Developing_Safety-Critical_Code)
  that prohibit reentrant code.
- There is no simple way of choosing the maximum allowed number of
  reentrant calls for each function in a way that covers different
  use cases and users. Library writers and users would be tempted
  into using a small value, reducing the performance advantage.
- If the socket is always ready for reading, the task will
  monopolize IO for up to `16` iterations, which might cause
  stutter in unrelated tasks, as depicted below
```
Unfairness
+----+----+----+ +----+----+----+ +----+----+----+
Socket-1 | | | | | | | | | | | |
+----+----+----+----+----+----+----+----+----+----+----+----+
Socket-2 | | | | | |
+----+ +----+ +----+
```
From an aesthetic point of view the code above is also unpleasant, as
it breaks the function's asynchronous contract by injecting reentrant
behaviour. It gives me the same kind of feeling I have about
[recursive
mutexes](http://www.zaval.org/resources/library/butenhof1.html).
Note: It is worth mentioning here that a similar
[strategy](https://github.com/NVIDIA/stdexec/blob/6f23dd5b1d523541ce28af32fc2603403ebd36ed/include/exec/trampoline_scheduler.hpp#L52)
is used to break the call stack of repeating algorithms in
[stdexec](https://github.com/NVIDIA/stdexec), but this time based
on
[P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
and not on Boost.Asio.
### Coroutine tail-calls
In the previous section we saw how to avoid overscheduling by
instructing the asynchronous operation to call the completion inline
when it completes immediately. It turns out, however, that coroutine
support for _tail-calls_ provides a way to completely sidestep this
problem. This feature is described by
[Lewis Baker](https://lewissbaker.github.io/2020/05/11/understanding_symmetric_transfer)
as follows
> A tail-call is one where the current stack-frame is popped before
> the call and the current functions return address becomes the
> return-address for the callee. ie. the callee will return directly
> the the [sic] caller of this function.
This means (at least in principle) that a library capable of using
tail-calls when an immediate completion occurs neither has to
reschedule the continuation nor call it inline. To test how this
feature compares to the other styles I have used Boost.Cobalt. The
code looks as follows
```cpp
// Warning: risks unfairness and starvation of other tasks.
task<void> read_until_unfair()
{
for (int i = 0; i != repeat; ++i) {
co_await async_read_until(s, dynamic_buffer(buffer), "\r\n", cobalt::use_op);
}
}
```
The results of this comparison are listed in the table below

Time/s | Style | Configuration | Library
-------|-----------|-----------------------------|-------------
1.0 | Coroutine | `await_ready` optimization | Boost.Cobalt
4.8 | Callback | Reentrant | Boost.Asio
10.3 | Coroutine | `use_op` | Boost.Cobalt
14.9 | Callback | Regular | Boost.Asio
15.6 | Coroutine | `asio::deferred` | Boost.Asio
As the reader can see, `cobalt::use_op` ranks 3rd and is considerably
faster (10.3 vs 15.6) than the Asio equivalent that uses
default rescheduling. However, by trading rescheduling for tail-calls
the code above can now monopolize the event-loop, resulting in
unfairness if the socket happens to receive data at a higher rate
than other tasks. If by chance data is received continuously
on a socket that is always ready for reading, other tasks will starve
```
Starvation
+----+----+----+----+----+----+----+----+----+----+----+----+
Socket-1 | | | | | | | | | | | | |
+----+----+----+----+----+----+----+----+----+----+----+----+
Socket-2 Starving ...
```
To avoid this problem the programmer is forced to reschedule from time
to time, in the same way we did for the reentrant calls
```cpp
task<void> read_until_fair()
{
for (int i = 0; i != repeat; ++i) {
if (i % 16 == 0) {
// Reschedules to address unfairness and starvation of
// other tasks.
co_await post(cobalt::use_op);
continue;
}
co_await async_read_until(s, dynamic_buffer(buffer), "\r\n", cobalt::use_op);
}
}
```
Delegating fairness-safety to applications is a dangerous game.
This is a
[problem](https://tokio.rs/blog/2020-04-preemption) the Tokio
community had to deal with before the Tokio runtime started enforcing
rescheduling (after 256 successful operations)
> If data is received faster than it can be processed, it is possible
> that more data will have already been received by the time the
> processing of a data chunk completes. In this case, .await will
> never yield control back to the scheduler, other tasks will not be
> scheduled, resulting in starvation and large latency variance.
> Currently, the answer to this problem is that the user of Tokio is
> responsible for adding yield points in both the application and
> libraries. In practice, very few actually do this and end up being
> vulnerable to this sort of problem.
### Safety in P2300 (Senders and Receivers)
As of this writing, the C++ standards committee (WG21) has been
pursuing the standardization of a networking library for almost 20
years. One of the biggest obstacles that prevented it from happening
was a disagreement on what the _asynchronous model_ that underlies
networking should look like. Until 2021 that model was basically
Boost.Asio _executors_, but in this
[poll](https://www.reddit.com/r/cpp/comments/q6tgod/c_committee_polling_results_for_asynchronous/)
the committee decided to abandon that front and concentrate efforts on
the new [P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
proposal, also known as _senders and receivers_. The decision was
quite [abrupt](https://isocpp.org/files/papers/P2464R0.html)
> The original plan about a week earlier than the actual writing of
> this paper was to write a paper that makes a case for standardizing
> the Networking TS.
and opinions turned out to be very strong against Boost.Asio (see
[this](https://api.csswg.org/bikeshed/?force=1&url=https://raw.githubusercontent.com/brycelelbach/wg21_p2459_2022_january_library_evolution_poll_outcomes/main/2022_january_library_evolution_poll_outcomes.bs)
for how each voter backed their vote)
> The whole concept is completely useless, there's no composed code
> you can write with it.
The part of that debate that interests us most here is stated in
[P2471](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p2471r1.pdf),
which compares Boost.Asio with P2300
> Yes, default rescheduling each operation and default not
> rescheduling each operation, is a poor trade off. IMO both options
> are poor. The one good option that I know of that can prevent stack
> exhaustion is first-class tail-recursion in library or language
> ASIO has chosen to require that every async operation must schedule
> the completion on a scheduler (every read, every write, etc..).
> sender/receiver has not decided to
> require that the completion be scheduled.
> This is why I consider tail-call the only good solution. Scheduling
> solutions are all inferior (give thanks to Lewis for this shift in
> my understanding :) ).
Although tail-calls solve the problem of stack exhaustion, as we have
seen above they make the code vulnerable to unfairness and starvation,
and are therefore not an alternative to default-rescheduling as the
quotation above implies. To deal with the lack of
default-rescheduling, libraries and applications built on top of P2300
have to address the aforementioned problems, layer after layer. For
example,
[stdexec](https://github.com/NVIDIA/stdexec) has invented something
called a
_[trampoline-scheduler](https://github.com/NVIDIA/stdexec/blob/e7cd275273525dbc693f4bf5f6dc4d4181b639e4/include/exec/trampoline_scheduler.hpp)_
to protect repeating algorithms such as `repeat_effect_until` from
exhausting the stack. This construct however is built around
reentrancy, allowing
[sixteen](https://github.com/NVIDIA/stdexec/blob/83cdb92d316e8b3bca1357e2cf49fc39e9bed403/include/exec/trampoline_scheduler.hpp#L52)
levels of inline calls by default. While in Boost.Asio it is possible to use
reentrancy as an optimization for corner cases, here it is made the
_modus operandi_. The downsides of this approach have already been stated in a
previous section, so I won't repeat them here.
Also, the fact that a special scheduler is needed by specific
algorithms is a problem on its own, since it contradicts one of the
main selling points of P2300, namely that of being _generic_. For
example, [P2464R0](https://isocpp.org/files/papers/P2464R0.html) uses
the code below as an example
```cpp
void
run_that_io_operation(
scheduler auto sched,
sender_of<network_buffer> auto wrapping_continuation)
{
// snip
}
```
and states
> I have no idea what the sched's concrete type is. I have no idea
> what the wrapping_continuation's concrete type is. They're none of
> my business, ...
Hence, by being generic, the algorithms built on top of P2300 are also
unsafe (against stack exhaustion, unfairness and starvation). Otherwise,
if library writers require a specific scheduler to ensure safety, then
the algorithms automatically become non-generic; pick your poison!
The proposers of P2300 claim that it does not address safety because it
should be seen as the low-level building blocks of asynchronous
programming, and that it is the role of higher-level libraries to deal
with that. This claim however does not hold since, as we have just
seen, Boost.Asio also provides those building blocks but does so in a
safe way. In fact, during the whole development of Boost.Redis I never
had to think about these kinds of problems because safety is built in
from the ground up.
### Avoiding coroutine suspension with `await_ready`
Now let us get back to the first place in the table above, which uses
the `await_ready` optimization from Boost.Cobalt. This API provides
users with the ability to avoid coroutine suspension altogether in
case the separator is already present in the buffer. It works by
defining a `struct` with the following interface
```cpp
struct read_until : cobalt::op<error_code, std::size_t> {
...
void ready(cobalt::handler<error_code, std::size_t> handler) override
{
// Search for the separator in buffer and call the handler if found
}
void initiate(cobalt::completion_handler<error_code, std::size_t> complete) override
{
// Regular call to async_read_until.
async_read_until(socket, buffer, delim, std::move(complete));
}
};
```
and the code that uses it
```cpp
for (int i = 0; i != repeat; ++i) {
co_await read_until(socket, dynamic_buffer(buffer));
}
```
In essence, what the code above does is skip a call to
`async_read_until` by first checking with the `ready` function whether
the forthcoming operation is going to complete immediately. The
nice thing about it is that the programmer can use this optimization
only when a performance bottleneck is detected, without planning for it
in advance. The drawback however is that it requires reimplementing
the search for the separator in the body of the `ready` function,
defeating the purpose of using `async_read_until` in the first place, as
(again) it would have been simpler to reformulate the operation in
terms of `socket.async_read_some` directly.
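For illustration, the `ready` body of such a `struct` could look like
the sketch below (an assumption about member names such as `buffer` and
`delim`; this is neither Boost.Cobalt nor Boost.Redis code), which makes
that duplicated separator search explicit

```cpp
void ready(cobalt::handler<error_code, std::size_t> handler) override
{
   // Duplicates the separator search that async_read_until would
   // otherwise perform for us.
   auto const pos = buffer.find(delim);
   if (pos != std::string::npos)
      handler(error_code{}, pos + delim.size()); // Complete without suspending.
   // If the handler is not called, initiate() runs as usual.
}
```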
## Acknowledgements
Thanks to Klemens Morgenstern for answering questions about
Boost.Cobalt.

example/CMakeLists.txt Normal file

@@ -0,0 +1,52 @@
add_library(examples_main STATIC main.cpp)
target_compile_features(examples_main PRIVATE cxx_std_20)
target_link_libraries(examples_main PRIVATE boost_redis_project_options)
macro(make_example EXAMPLE_NAME STANDARD)
add_executable(${EXAMPLE_NAME} ${EXAMPLE_NAME}.cpp)
target_link_libraries(${EXAMPLE_NAME} PRIVATE boost_redis_src)
target_link_libraries(${EXAMPLE_NAME} PRIVATE boost_redis_project_options)
target_compile_features(${EXAMPLE_NAME} PRIVATE cxx_std_${STANDARD})
if (${STANDARD} STREQUAL "20")
target_link_libraries(${EXAMPLE_NAME} PRIVATE examples_main)
endif()
if (${EXAMPLE_NAME} STREQUAL "cpp20_json")
target_link_libraries(${EXAMPLE_NAME} PRIVATE Boost::json Boost::container_hash)
endif()
endmacro()
macro(make_testable_example EXAMPLE_NAME STANDARD)
make_example(${EXAMPLE_NAME} ${STANDARD})
if (BOOST_REDIS_INTEGRATION_TESTS)
add_test(${EXAMPLE_NAME} ${EXAMPLE_NAME})
endif()
endmacro()
make_testable_example(cpp17_intro 17)
make_testable_example(cpp17_intro_sync 17)
make_testable_example(cpp20_intro 20)
make_testable_example(cpp20_containers 20)
make_testable_example(cpp20_json 20)
make_testable_example(cpp20_intro_tls 20)
make_example(cpp20_subscriber 20)
make_example(cpp20_streams 20)
make_example(cpp20_echo_server 20)
make_example(cpp20_resolve_with_sentinel 20)
# We test the protobuf example only on gcc.
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
find_package(Protobuf)
if (Protobuf_FOUND)
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS person.proto)
make_testable_example(cpp20_protobuf 20)
target_sources(cpp20_protobuf PUBLIC ${PROTO_SRCS} ${PROTO_HDRS})
target_link_libraries(cpp20_protobuf PRIVATE ${Protobuf_LIBRARIES})
target_include_directories(cpp20_protobuf PUBLIC ${Protobuf_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
endif()
endif()
if (NOT MSVC)
make_example(cpp20_chat_room 20)
endif()

example/cpp17_intro.cpp Normal file

@@ -0,0 +1,50 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/detached.hpp>
#include <iostream>
namespace asio = boost::asio;
using boost::redis::connection;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
auto main(int argc, char * argv[]) -> int
{
try {
config cfg;
if (argc == 3) {
cfg.addr.host = argv[1];
cfg.addr.port = argv[2];
}
request req;
req.push("PING", "Hello world");
response<std::string> resp;
asio::io_context ioc;
connection conn{ioc};
conn.async_run(cfg, {}, asio::detached);
conn.async_exec(req, resp, [&](auto ec, auto) {
if (!ec)
std::cout << "PING: " << std::get<0>(resp).value() << std::endl;
conn.cancel();
});
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
return 1;
}
}


@@ -0,0 +1,43 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include "sync_connection.hpp"
#include <string>
#include <iostream>
using boost::redis::sync_connection;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
auto main(int argc, char * argv[]) -> int
{
try {
config cfg;
if (argc == 3) {
cfg.addr.host = argv[1];
cfg.addr.port = argv[2];
}
sync_connection conn;
conn.run(cfg);
request req;
req.push("PING");
response<std::string> resp;
conn.exec(req, resp);
conn.stop();
std::cout << "Response: " << std::get<0>(resp).value() << std::endl;
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}

example/cpp20_chat_room.cpp Normal file

@@ -0,0 +1,108 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/posix/stream_descriptor.hpp>
#include <unistd.h>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
namespace asio = boost::asio;
using stream_descriptor = asio::deferred_t::as_default_on_t<asio::posix::stream_descriptor>;
using signal_set = asio::deferred_t::as_default_on_t<asio::signal_set>;
using boost::asio::async_read_until;
using boost::asio::awaitable;
using boost::asio::co_spawn;
using boost::asio::consign;
using boost::asio::deferred;
using boost::asio::detached;
using boost::asio::dynamic_buffer;
using boost::asio::redirect_error;
using boost::asio::use_awaitable;
using boost::redis::config;
using boost::redis::connection;
using boost::redis::generic_response;
using boost::redis::ignore;
using boost::redis::request;
using boost::system::error_code;
using namespace std::chrono_literals;
// Chat over Redis pubsub. To test, run this program from multiple
// terminals and type messages to stdin.
auto
receiver(std::shared_ptr<connection> conn) -> awaitable<void>
{
request req;
req.push("SUBSCRIBE", "channel");
generic_response resp;
conn->set_receive_response(resp);
while (conn->will_reconnect()) {
// Subscribe to channels.
co_await conn->async_exec(req, ignore, deferred);
// Loop reading Redis push messages.
for (error_code ec;;) {
co_await conn->async_receive(redirect_error(use_awaitable, ec));
if (ec)
break; // Connection lost, break so we can reconnect to channels.
std::cout
<< resp.value().at(1).value
<< " " << resp.value().at(2).value
<< " " << resp.value().at(3).value
<< std::endl;
resp.value().clear();
}
}
}
// Publishes stdin messages to a Redis channel.
auto publisher(std::shared_ptr<stream_descriptor> in, std::shared_ptr<connection> conn) -> awaitable<void>
{
for (std::string msg;;) {
auto n = co_await async_read_until(*in, dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "channel", msg);
co_await conn->async_exec(req, ignore, deferred);
msg.erase(0, n);
}
}
// Called from the main function (see main.cpp)
auto co_main(config cfg) -> awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto stream = std::make_shared<stream_descriptor>(ex, ::dup(STDIN_FILENO));
co_spawn(ex, receiver(conn), detached);
co_spawn(ex, publisher(stream, conn), detached);
conn->async_run(cfg, {}, consign(detached, conn));
signal_set sig_set{ex, SIGINT, SIGTERM};
co_await sig_set.async_wait();
conn->cancel();
stream->cancel();
}
#else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto co_main(config const&) -> awaitable<void>
{
std::cout << "Requires support for posix streams." << std::endl;
co_return;
}
#endif // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,106 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <map>
#include <vector>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::ignore_t;
using boost::redis::ignore;
using boost::redis::config;
using boost::redis::connection;
using boost::asio::awaitable;
using boost::asio::deferred;
using boost::asio::detached;
using boost::asio::consign;
void print(std::map<std::string, std::string> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
void print(std::vector<int> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
// Stores the content of some STL containers in Redis.
auto store(std::shared_ptr<connection> conn) -> awaitable<void>
{
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
std::map<std::string, std::string> map
{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}};
request req;
req.push_range("RPUSH", "rpush-key", vec);
req.push_range("HSET", "hset-key", map);
co_await conn->async_exec(req, ignore, deferred);
}
auto hgetall(std::shared_ptr<connection> conn) -> awaitable<void>
{
// A request contains multiple commands.
request req;
req.push("HGETALL", "hset-key");
// Responses as tuple elements.
response<std::map<std::string, std::string>> resp;
// Executes the request and reads the response.
co_await conn->async_exec(req, resp, deferred);
print(std::get<0>(resp).value());
}
// Retrieves in a transaction.
auto transaction(std::shared_ptr<connection> conn) -> awaitable<void>
{
request req;
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
response<
ignore_t, // multi
ignore_t, // lrange
ignore_t, // hgetall
response<std::optional<std::vector<int>>, std::optional<std::map<std::string, std::string>>> // exec
> resp;
co_await conn->async_exec(req, resp, deferred);
print(std::get<0>(std::get<3>(resp).value()).value().value());
print(std::get<1>(std::get<3>(resp).value()).value().value());
}
// Called from the main function (see main.cpp)
awaitable<void> co_main(config cfg)
{
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
conn->async_run(cfg, {}, consign(detached, conn));
co_await store(conn);
co_await transaction(conn);
co_await hgetall(conn);
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,70 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/co_spawn.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using tcp_socket = asio::deferred_t::as_default_on_t<asio::ip::tcp::socket>;
using tcp_acceptor = asio::deferred_t::as_default_on_t<asio::ip::tcp::acceptor>;
using signal_set = asio::deferred_t::as_default_on_t<asio::signal_set>;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
using boost::system::error_code;
using boost::redis::connection;
using namespace std::chrono_literals;
auto echo_server_session(tcp_socket socket, std::shared_ptr<connection> conn) -> asio::awaitable<void>
{
request req;
response<std::string> resp;
for (std::string buffer;;) {
auto n = co_await asio::async_read_until(socket, asio::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await conn->async_exec(req, resp, asio::deferred);
co_await asio::async_write(socket, asio::buffer(std::get<0>(resp).value()));
std::get<0>(resp).value().clear();
req.clear();
buffer.erase(0, n);
}
}
// Listens for tcp connections.
auto listener(std::shared_ptr<connection> conn) -> asio::awaitable<void>
{
try {
auto ex = co_await asio::this_coro::executor;
tcp_acceptor acc(ex, {asio::ip::tcp::v4(), 55555});
for (;;)
asio::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), asio::detached);
} catch (std::exception const& e) {
std::clog << "Listener: " << e.what() << std::endl;
}
}
// Called from the main function (see main.cpp)
auto co_main(config cfg) -> asio::awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
asio::co_spawn(ex, listener(conn), asio::detached);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
signal_set sig_set(ex, SIGINT, SIGTERM);
co_await sig_set.async_wait();
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

example/cpp20_intro.cpp Normal file

@@ -0,0 +1,42 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
using boost::redis::connection;
// Called from the main function (see main.cpp)
auto co_main(config cfg) -> asio::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
// A request containing only a ping command.
request req;
req.push("PING", "Hello world");
// Response where the PONG response will be stored.
response<std::string> resp;
// Executes the request.
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
std::cout << "PING: " << std::get<0>(resp).value() << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,54 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
using boost::redis::logger;
using boost::redis::connection;
auto verify_certificate(bool, asio::ssl::verify_context&) -> bool
{
std::cout << "set_verify_callback" << std::endl;
return true;
}
auto co_main(config cfg) -> asio::awaitable<void>
{
cfg.use_ssl = true;
cfg.username = "aedis";
cfg.password = "aedis";
cfg.addr.host = "db.occase.de";
cfg.addr.port = "6380";
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
request req;
req.push("PING");
response<std::string> resp;
conn->next_layer().set_verify_mode(asio::ssl::verify_peer);
conn->next_layer().set_verify_callback(verify_certificate);
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
std::cout << "Response: " << std::get<0>(resp).value() << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

example/cpp20_json.cpp Normal file

@@ -0,0 +1,75 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/detached.hpp>
#include <boost/describe.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <string>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/json/serialize.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/redis/resp3/serialization.hpp>
namespace asio = boost::asio;
using namespace boost::describe;
using boost::redis::request;
using boost::redis::response;
using boost::redis::ignore_t;
using boost::redis::config;
using boost::redis::connection;
// Struct that will be stored in Redis using json serialization.
struct user {
std::string name;
std::string age;
std::string country;
};
// The type must be described for serialization to work.
BOOST_DESCRIBE_STRUCT(user, (), (name, age, country))
// Boost.Redis customization points (example/json.hpp)
void boost_redis_to_bulk(std::string& to, user const& u)
{ boost::redis::resp3::boost_redis_to_bulk(to, boost::json::serialize(boost::json::value_from(u))); }
void boost_redis_from_bulk(user& u, std::string_view sv, boost::system::error_code&)
{ u = boost::json::value_to<user>(boost::json::parse(sv)); }
auto co_main(config cfg) -> asio::awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
// user object that will be stored in Redis in json format.
user const u{"Joao", "58", "Brazil"};
// Stores and retrieves in the same request.
request req;
req.push("SET", "json-key", u); // Stores in Redis.
req.push("GET", "json-key"); // Retrieves from Redis.
response<ignore_t, user> resp;
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
// Prints the user object retrieved from Redis.
std::cout
<< "Name: " << std::get<1>(resp).value().name << "\n"
<< "Age: " << std::get<1>(resp).value().age << "\n"
<< "Country: " << std::get<1>(resp).value().country << "\n";
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,88 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/resp3/serialization.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <boost/system/errc.hpp>
#include <iostream>
// See the definition in person.proto. This header is automatically
// generated by CMakeLists.txt.
#include "person.pb.h"
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::operation;
using boost::redis::ignore_t;
using boost::redis::config;
using boost::redis::connection;
// The protobuf type described in example/person.proto
using tutorial::person;
// Boost.Redis customization points (example/protobuf.hpp)
namespace tutorial
{
// Below I am using a Boost.Redis error code to indicate a protobuf error;
// this is ok for an example, but users might want to define their own
// error codes.
void boost_redis_to_bulk(std::string& to, person const& u)
{
std::string tmp;
if (!u.SerializeToString(&tmp))
throw boost::system::system_error(boost::redis::error::invalid_data_type);
boost::redis::resp3::boost_redis_to_bulk(to, tmp);
}
void boost_redis_from_bulk(person& u, std::string_view sv, boost::system::error_code& ec)
{
std::string const tmp {sv};
if (!u.ParseFromString(tmp))
ec = boost::redis::error::invalid_data_type;
}
} // tutorial
using tutorial::boost_redis_to_bulk;
using tutorial::boost_redis_from_bulk;
asio::awaitable<void> co_main(config cfg)
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
person p;
p.set_name("Louis");
p.set_id(3);
p.set_email("No email yet.");
request req;
req.push("SET", "protobuf-key", p);
req.push("GET", "protobuf-key");
response<ignore_t, person> resp;
// Sends the request and receives the response.
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
std::cout
<< "Name: " << std::get<1>(resp).value().name() << "\n"
<< "Age: " << std::get<1>(resp).value().id() << "\n"
<< "Email: " << std::get<1>(resp).value().email() << "\n";
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,75 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/detached.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using endpoints = asio::ip::tcp::resolver::results_type;
using boost::redis::request;
using boost::redis::response;
using boost::redis::ignore_t;
using boost::redis::config;
using boost::redis::address;
using boost::redis::connection;
auto redir(boost::system::error_code& ec)
{ return asio::redirect_error(asio::use_awaitable, ec); }
// For more info see
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
auto resolve_master_address(std::vector<address> const& addresses) -> asio::awaitable<address>
{
request req;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
response<std::optional<std::array<std::string, 2>>, ignore_t> resp;
for (auto addr : addresses) {
boost::system::error_code ec;
config cfg;
cfg.addr = addr;
// TODO: async_run and async_exec should be launched in
// parallel here so we can wait for async_run completion
// before eventually calling it again.
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
co_await conn->async_exec(req, resp, redir(ec));
conn->cancel();
conn->reset_stream();
if (!ec && std::get<0>(resp))
co_return address{std::get<0>(resp).value().value().at(0), std::get<0>(resp).value().value().at(1)};
}
co_return address{};
}
auto co_main(config cfg) -> asio::awaitable<void>
{
// A list of sentinel addresses from which only one is responsive.
// This simulates sentinels that are down.
std::vector<address> const addresses
{ address{"foo", "26379"}
, address{"bar", "26379"}
, cfg.addr
};
auto const ep = co_await resolve_master_address(addresses);
std::clog
<< "Host: " << ep.host << "\n"
<< "Port: " << ep.port << "\n"
<< std::flush;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

example/cpp20_streams.cpp Normal file

@@ -0,0 +1,98 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/awaitable.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <memory>
#include <string>
#include <thread>
#include <vector>
namespace net = boost::asio;
using boost::redis::config;
using boost::redis::generic_response;
using boost::redis::operation;
using boost::redis::request;
using boost::redis::connection;
using signal_set = net::deferred_t::as_default_on_t<net::signal_set>;
auto stream_reader(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
std::string redisStreamKey_;
request req;
generic_response resp;
std::string stream_id{"$"};
std::string const field = "myfield";
for (;;) {
req.push("XREAD", "BLOCK", "0", "STREAMS", "test-topic", stream_id);
co_await conn->async_exec(req, resp, net::deferred);
//std::cout << "Response: ";
//for (auto i = 0UL; i < resp->size(); ++i) {
// std::cout << resp->at(i).value << ", ";
//}
//std::cout << std::endl;
// The following approach was taken in order to be able to
// deal with the responses, as generated by redis in the case
// that there are multiple stream 'records' within a single
// generic_response. The nesting and number of values in
// resp.value() are different, depending on the contents
// of the stream in redis. Uncomment the above commented-out
// code for examples while running the XADD command.
std::size_t item_index = 0;
while (item_index < std::size(resp.value())) {
auto const& val = resp.value().at(item_index).value;
if (field.compare(val) == 0) {
// We've hit a myfield field.
// The streamId is located at item_index - 2
// The payload is located at item_index + 1
stream_id = resp.value().at(item_index - 2).value;
std::cout
<< "StreamId: " << stream_id << ", "
<< "MyField: " << resp.value().at(item_index + 1).value
<< std::endl;
++item_index; // We can increase so we don't read this again
}
++item_index;
}
req.clear();
resp.value().clear();
}
}
// Run this in another terminal:
// redis-cli -r 100000 -i 0.0001 XADD "test-topic" "*" "myfield" "myfieldvalue1"
auto co_main(config cfg) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
net::co_spawn(ex, stream_reader(conn), net::detached);
// Disable health checks.
cfg.health_check_interval = std::chrono::seconds::zero();
conn->async_run(cfg, {}, net::consign(net::detached, conn));
signal_set sig_set(ex, SIGINT, SIGTERM);
co_await sig_set.async_wait();
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -0,0 +1,102 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/logger.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/signal_set.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using namespace std::chrono_literals;
using boost::redis::request;
using boost::redis::generic_response;
using boost::redis::consume_one;
using boost::redis::logger;
using boost::redis::config;
using boost::redis::ignore;
using boost::redis::error;
using boost::system::error_code;
using boost::redis::connection;
using signal_set = asio::deferred_t::as_default_on_t<asio::signal_set>;
/* This example will subscribe and read pushes indefinitely.
*
* To test send messages with redis-cli
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel some-message
* (integer) 3
* 127.0.0.1:6379>
*
* To test reconnection try, for example, to close all clients currently
* connected to the Redis instance
*
* $ redis-cli
* > CLIENT kill TYPE pubsub
*/
// Receives server pushes.
auto
receiver(std::shared_ptr<connection> conn) -> asio::awaitable<void>
{
request req;
req.push("SUBSCRIBE", "channel");
generic_response resp;
conn->set_receive_response(resp);
// Loop while reconnection is enabled
while (conn->will_reconnect()) {
// Reconnect to the channels.
co_await conn->async_exec(req, ignore, asio::deferred);
// Loop reading Redis push messages.
for (error_code ec;;) {
// First tries to read any buffered pushes.
conn->receive(ec);
if (ec == error::sync_receive_push_failed) {
ec = {};
co_await conn->async_receive(asio::redirect_error(asio::use_awaitable, ec));
}
if (ec)
break; // Connection lost, break so we can reconnect to channels.
std::cout
<< resp.value().at(1).value
<< " " << resp.value().at(2).value
<< " " << resp.value().at(3).value
<< std::endl;
consume_one(resp);
}
}
}
auto co_main(config cfg) -> asio::awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
asio::co_spawn(ex, receiver(conn), asio::detached);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
signal_set sig_set(ex, SIGINT, SIGTERM);
co_await sig_set.async_wait();
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

example/main.cpp Normal file

@@ -0,0 +1,53 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/config.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/io_context.hpp>
#include <iostream>
namespace asio = boost::asio;
using boost::redis::config;
using boost::redis::logger;
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
extern asio::awaitable<void> co_main(config);
auto main(int argc, char * argv[]) -> int
{
try {
config cfg;
if (argc == 3) {
cfg.addr.host = argv[1];
cfg.addr.port = argv[2];
}
asio::io_context ioc;
asio::co_spawn(ioc, co_main(cfg), [](std::exception_ptr p) {
if (p)
std::rethrow_exception(p);
});
ioc.run();
} catch (std::exception const& e) {
std::cerr << "(main) " << e.what() << std::endl;
return 1;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int
{
std::cout << "Requires coroutine support." << std::endl;
return 0;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

example/person.proto Normal file

@@ -0,0 +1,9 @@
syntax = "proto2";
package tutorial;
message person {
optional string name = 1;
optional int32 id = 2;
optional string email = 3;
}


@@ -0,0 +1,63 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/request.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/use_future.hpp>
#include <thread>
#include <chrono>
using namespace std::chrono_literals;
namespace boost::redis
{
class sync_connection {
public:
sync_connection()
: ioc_{1}
, conn_{std::make_shared<connection>(ioc_)}
{ }
~sync_connection()
{
thread_.join();
}
void run(config cfg)
{
// Starts a thread that calls io_context::run, on which the
// connection will run.
thread_ = std::thread{[this, cfg]() {
conn_->async_run(cfg, {}, asio::detached);
ioc_.run();
}};
}
void stop()
{
asio::dispatch(ioc_, [this]() { conn_->cancel(); });
}
template <class Response>
auto exec(request const& req, Response& resp)
{
asio::dispatch(
conn_->get_executor(),
asio::deferred([this, &req, &resp]() { return conn_->async_exec(req, resp, asio::deferred); }))
(asio::use_future).get();
}
private:
asio::io_context ioc_{1};
std::shared_ptr<connection> conn_;
std::thread thread_;
};
}


@@ -1,73 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <iostream>
namespace net = boost::asio;
#if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include <unistd.h>
#include "common/common.hpp"
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using signal_set = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using aedis::adapt;
// Chat over Redis pubsub. To test, run this program from multiple
// terminals and type messages to stdin.
// Receives Redis pushes.
auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::vector<resp3::node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
std::cout << resp.at(1).value << " " << resp.at(2).value << " " << resp.at(3).value << std::endl;
resp.clear();
}
}
// Publishes stdin messages to a Redis channel.
auto publisher(std::shared_ptr<stream_descriptor> in, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(*in, net::dynamic_buffer(msg, 1024), "\n");
resp3::request req;
req.push("PUBLISH", "chat-channel", msg);
co_await conn->async_exec(req);
msg.erase(0, n);
}
}
// Called from the main function (see main.cpp)
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto stream = std::make_shared<stream_descriptor>(ex, ::dup(STDIN_FILENO));
signal_set sig{ex, SIGINT, SIGTERM};
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "chat-channel");
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || publisher(stream, conn) || receiver(conn) ||
healthy_checker(conn) || sig.async_wait()) && conn->async_exec(req));
}
#else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto async_main() -> net::awaitable<void>
{
std::cout << "Requires support for posix streams." << std::endl;
co_return;
}
#endif // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,93 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include "common.hpp"
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <iostream>
namespace net = boost::asio;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using aedis::resp3::request;
using aedis::adapt;
using aedis::operation;
namespace
{
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
}
auto healthy_checker(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
try {
request req;
req.push("PING");
timer_type timer{co_await net::this_coro::executor};
for (boost::system::error_code ec;;) {
timer.expires_after(std::chrono::seconds{1});
co_await (conn->async_exec(req, adapt()) || timer.async_wait(redir(ec)));
if (!ec) {
co_return;
}
// Waits some time before trying the next ping.
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
} catch (...) {
}
}
auto
connect(
std::shared_ptr<connection> conn,
std::string const& host,
std::string const& port) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
timer_type timer{ex};
boost::system::error_code ec;
timer.expires_after(std::chrono::seconds{5});
auto const addrs = co_await (resv.async_resolve(host, port) || timer.async_wait(redir(ec)));
if (!ec)
throw std::runtime_error("Resolve timeout");
timer.expires_after(std::chrono::seconds{5});
co_await (net::async_connect(conn->next_layer(), std::get<0>(addrs)) || timer.async_wait(redir(ec)));
if (!ec)
throw std::runtime_error("Connect timeout");
}
auto run(net::awaitable<void> op) -> int
{
try {
net::io_context ioc;
net::co_spawn(ioc, std::move(op), [](std::exception_ptr p) {
if (p)
std::rethrow_exception(p);
});
ioc.run();
return 0;
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
return 1;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,34 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_EXAMPLES_COMMON_HPP
#define AEDIS_EXAMPLES_COMMON_HPP
#include <boost/asio.hpp>
#include <aedis.hpp>
#include <memory>
#include <iostream>
#include <vector>
#include <map>
#include <set>
#include <string>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
using connection = boost::asio::use_awaitable_t<>::as_default_on_t<aedis::connection>;
auto
connect(
std::shared_ptr<connection> conn,
std::string const& host,
std::string const& port) -> boost::asio::awaitable<void>;
auto healthy_checker(std::shared_ptr<connection> conn) -> boost::asio::awaitable<void>;
auto run(boost::asio::awaitable<void> op) -> int;
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)
#endif // AEDIS_EXAMPLES_COMMON_HPP


@@ -1,30 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include "common.hpp"
extern boost::asio::awaitable<void> async_main();
auto main() -> int
{
return run(async_main());
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <iostream>
auto main() -> int
{
std::cout << "Requires coroutine support." << std::endl;
return 0;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,117 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include <map>
#include <vector>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
void print(std::map<std::string, std::string> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
void print(std::vector<int> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
// Stores the content of some STL containers in Redis.
auto store() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// Resolves and connects (from examples/common.hpp to avoid vebosity)
co_await connect(conn, "127.0.0.1", "6379");
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
std::map<std::string, std::string> map
{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}};
resp3::request req;
req.push("HELLO", 3);
req.push_range("RPUSH", "rpush-key", vec);
req.push_range("HSET", "hset-key", map);
req.push("QUIT");
co_await (conn->async_run() || conn->async_exec(req));
}
auto hgetall() -> net::awaitable<std::map<std::string, std::string>>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// From examples/common.hpp to avoid vebosity
co_await connect(conn, "127.0.0.1", "6379");
// A request contains multiple commands.
resp3::request req;
req.push("HELLO", 3);
req.push("HGETALL", "hset-key");
req.push("QUIT");
// Responses as tuple elements.
std::tuple<aedis::ignore, std::map<std::string, std::string>, aedis::ignore> resp;
// Executes the request and reads the response.
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
co_return std::get<1>(resp);
}
// Retrieves in a transaction.
auto transaction() -> net::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
// Resolves and connects (from examples/common.hpp to avoid vebosity)
co_await connect(conn, "127.0.0.1", "6379");
resp3::request req;
req.push("HELLO", 3);
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
req.push("QUIT");
std::tuple<
aedis::ignore, // hello
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
std::tuple<std::optional<std::vector<int>>, std::optional<std::map<std::string, std::string>>>, // exec
aedis::ignore // quit
> resp;
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
print(std::get<0>(std::get<4>(resp)).value());
print(std::get<1>(std::get<4>(resp)).value());
}
// Called from the main function (see main.cpp)
net::awaitable<void> async_main()
{
co_await store();
co_await transaction();
auto const map = co_await hgetall();
print(map);
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,62 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using signal_set = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using aedis::adapt;
auto echo_server_session(tcp_socket socket, std::shared_ptr<connection> conn) -> net::awaitable<void>
{
resp3::request req;
std::string resp;
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
auto tmp = std::tie(resp);
co_await conn->async_exec(req, adapt(tmp));
co_await net::async_write(socket, net::buffer(resp));
resp.clear();
req.clear();
buffer.erase(0, n);
}
}
// Listens for tcp connections.
auto listener(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), net::detached);
}
// Called from the main function (see main.cpp)
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set sig{ex, SIGINT, SIGTERM};
resp3::request req;
req.push("HELLO", 3);
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || listener(conn) || healthy_checker(conn) ||
sig.async_wait()) && conn->async_exec(req));
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,35 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using aedis::adapt;
// Called from the main function (see main.cpp)
auto async_main() -> net::awaitable<void>
{
resp3::request req;
req.push("HELLO", 3);
req.push("PING", "Hello world");
req.push("QUIT");
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
std::cout << "PING: " << std::get<1>(resp) << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,62 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <thread>
#include <iostream>
#include <boost/asio.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::adapt;
using connection = aedis::connection;
template <class Adapter>
auto exec(std::shared_ptr<connection> conn, resp3::request const& req, Adapter adapter)
{
net::dispatch(
conn->get_executor(),
net::deferred([&]() { return conn->async_exec(req, adapter, net::deferred); }))
(net::use_future).get();
}
auto logger = [](auto const& ec)
{ std::clog << "Run: " << ec.message() << std::endl; };
int main()
{
try {
net::io_context ioc{1};
auto conn = std::make_shared<connection>(ioc);
net::ip::tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
net::connect(conn->next_layer(), res);
std::thread t{[conn, &ioc]() {
conn->async_run(logger);
ioc.run();
}};
resp3::request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
exec(conn, req, adapt(resp));
std::cout << "Response: " << std::get<1>(resp) << std::endl;
t.join();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}


@@ -1,58 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <boost/asio/ssl.hpp>
#include <aedis.hpp>
#include <aedis/ssl/connection.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using aedis::adapt;
using connection = net::use_awaitable_t<>::as_default_on_t<aedis::ssl::connection>;
auto verify_certificate(bool, net::ssl::verify_context&) -> bool
{
std::cout << "set_verify_callback" << std::endl;
return true;
}
net::awaitable<void> async_main()
{
resp3::request req;
req.push("HELLO", 3, "AUTH", "aedis", "aedis");
req.push("PING");
req.push("QUIT");
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
// Resolve
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
auto const endpoints = co_await resv.async_resolve("db.occase.de", "6380");
net::ssl::context ctx{net::ssl::context::sslv23};
connection conn{ex, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
co_await net::async_connect(conn.lowest_layer(), endpoints);
co_await conn.next_layer().async_handshake(net::ssl::stream_base::client);
co_await (conn.async_run() || conn.async_exec(req, adapt(resp)));
std::cout << "Response: " << std::get<1>(resp) << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,48 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <aedis.hpp>
#include <string>
#include <iostream>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using resolver = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::resolver>;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using aedis::adapter::adapt2;
using net::ip::tcp;
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
resolver resv{ex};
auto const addrs = co_await resv.async_resolve("127.0.0.1", "6379");
tcp_socket socket{ex};
co_await net::async_connect(socket, addrs);
// Creates the request and writes to the socket.
resp3::request req;
req.push("HELLO", 3);
req.push("PING", "Hello world");
req.push("QUIT");
co_await resp3::async_write(socket, req);
// Responses
std::string buffer, resp;
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
co_await resp3::async_read(socket, dbuffer);
co_await resp3::async_read(socket, dbuffer, adapt2(resp));
co_await resp3::async_read(socket, dbuffer);
std::cout << "Ping: " << resp << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,50 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio/connect.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::adapter::adapt2;
int main()
{
try {
net::io_context ioc;
net::ip::tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
net::ip::tcp::socket socket{ioc};
net::connect(socket, res);
// Creates the request and writes to the socket.
resp3::request req;
req.push("HELLO", 3);
req.push("PING", "Hello world");
req.push("QUIT");
resp3::write(socket, req);
// Responses
std::string buffer, resp;
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
resp3::read(socket, dbuffer);
resp3::read(socket, dbuffer, adapt2(resp));
resp3::read(socket, dbuffer);
std::cout << "Ping: " << resp << std::endl;
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
exit(EXIT_FAILURE);
}
}


@@ -1,70 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using endpoints = net::ip::tcp::resolver::results_type;
using aedis::adapt;
auto redir(boost::system::error_code& ec)
{ return net::redirect_error(net::use_awaitable, ec); }
struct address {
std::string host;
std::string port;
};
// For more info see
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
auto resolve_master_address(std::vector<address> const& endpoints) -> net::awaitable<address>
{
resp3::request req;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec;
co_await connect(conn, ep.host, ep.port);
co_await (conn->async_run() && conn->async_exec(req, adapt(addr), redir(ec)));
conn->reset_stream();
if (std::get<0>(addr))
co_return address{std::get<0>(addr).value().at(0), std::get<0>(addr).value().at(1)};
}
co_return address{};
}
auto async_main() -> net::awaitable<void>
{
// A list of sentinel addresses from which only one is responsive
// to simulate sentinels that are down.
std::vector<address> const endpoints
{ {"foo", "26379"}
, {"bar", "26379"}
, {"127.0.0.1", "26379"}
};
auto const ep = co_await resolve_master_address(endpoints);
std::clog
<< "Host: " << ep.host << "\n"
<< "Port: " << ep.port << "\n"
<< std::flush;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,111 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#define BOOST_JSON_NO_LIB
#define BOOST_CONTAINER_NO_LIB
#include <boost/json.hpp>
#include <aedis.hpp>
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>
#include <iterator>
#include <string>
#include "common/common.hpp"
// Include this in no more than one .cpp file.
#include <boost/json/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using namespace boost::json;
using aedis::adapt;
struct user {
std::string name;
std::string age;
std::string country;
friend auto operator<(user const& a, user const& b)
{
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
friend auto operator<<(std::ostream& os, user const& u) -> std::ostream&
{
os << "Name: " << u.name << "\n"
<< "Age: " << u.age << "\n"
<< "Country: " << u.country;
return os;
}
};
// Boost.Json serialization.
void tag_invoke(value_from_tag, value& jv, user const& u)
{
jv =
{ {"name", u.name}
, {"age", u.age}
, {"country", u.country}
};
}
template<class T>
void extract(object const& obj, T& t, std::string_view key)
{
t = value_to<T>(obj.at(key));
}
auto tag_invoke(value_to_tag<user>, value const& jv)
{
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
}
// Aedis serialization
void to_bulk(std::pmr::string& to, user const& u)
{
aedis::resp3::to_bulk(to, serialize(value_from(u)));
}
void from_bulk(user& u, std::string_view sv, boost::system::error_code&)
{
value jv = parse(sv);
u = value_to<user>(jv);
}
net::awaitable<void> async_main()
{
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
resp3::request req;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
auto conn = std::make_shared<connection>(co_await net::this_coro::executor);
co_await connect(conn, "127.0.0.1", "6379");
co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
for (auto const& e: std::get<2>(resp))
std::cout << e << "\n";
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,70 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <aedis.hpp>
#include "common/common.hpp"
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using namespace net::experimental::awaitable_operators;
using signal_set = net::use_awaitable_t<>::as_default_on_t<net::signal_set>;
using steady_timer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using aedis::adapt;
/* This example will subscribe and read pushes indefinitely.
*
* To test, send messages with redis-cli
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel some-message
* (integer) 3
* 127.0.0.1:6379>
*
* To test reconnection, try for example closing all clients currently
* connected to the Redis instance
*
* $ redis-cli
* > CLIENT kill TYPE pubsub
*/
// Receives pushes.
auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
for (std::vector<resp3::node<std::string>> resp;;) {
co_await conn->async_receive(adapt(resp));
std::cout << resp.at(1).value << " " << resp.at(2).value << " " << resp.at(3).value << std::endl;
resp.clear();
}
}
auto async_main() -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
signal_set sig{ex, SIGINT, SIGTERM};
steady_timer timer{ex};
resp3::request req;
req.push("HELLO", 3);
req.push("SUBSCRIBE", "channel");
// The loop will reconnect when the connection is lost. To exit, type Ctrl-C twice.
for (;;) {
co_await connect(conn, "127.0.0.1", "6379");
co_await ((conn->async_run() || healthy_checker(conn) || sig.async_wait() ||
receiver(conn)) && conn->async_exec(req));
conn->reset_stream();
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)


@@ -1,25 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_HPP
#define AEDIS_HPP
#include <aedis/error.hpp>
#include <aedis/adapt.hpp>
#include <aedis/connection.hpp>
#include <aedis/resp3/request.hpp>
/** @defgroup high-level-api Reference
*
* This page contains the documentation of the Aedis high-level API.
*/
/** @defgroup low-level-api Reference
*
* This page contains the documentation of the Aedis low-level API.
*/
#endif // AEDIS_HPP


@@ -1,227 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPT_HPP
#define AEDIS_ADAPT_HPP
#include <tuple>
#include <limits>
#include <string_view>
#include <variant>
#include <boost/mp11.hpp>
#include <boost/system.hpp>
#include <aedis/resp3/node.hpp>
#include <aedis/adapter/adapt.hpp>
#include <aedis/adapter/detail/response_traits.hpp>
namespace aedis {
/** @brief Tag used to ignore responses.
* @ingroup high-level-api
*
* For example
*
* @code
* std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
* @endcode
*
* will cause only the second tuple type to be parsed, the others
* will be ignored.
*/
using ignore = adapter::detail::ignore;
namespace detail
{
class ignore_adapter {
public:
explicit ignore_adapter(std::size_t max_read_size) : max_read_size_{max_read_size} {}
void
operator()(
std::size_t, resp3::node<std::string_view> const&, boost::system::error_code&) { }
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return max_read_size_;}
private:
std::size_t max_read_size_;
};
template <class Tuple>
class static_adapter {
private:
static constexpr auto size = std::tuple_size<Tuple>::value;
using adapter_tuple = boost::mp11::mp_transform<adapter::adapter_t, Tuple>;
using variant_type = boost::mp11::mp_rename<adapter_tuple, std::variant>;
using adapters_array_type = std::array<variant_type, size>;
adapters_array_type adapters_;
std::size_t max_read_size_;
public:
explicit static_adapter(Tuple& r, std::size_t max_read_size)
: max_read_size_{max_read_size}
{
adapter::detail::assigner<size - 1>::assign(adapters_, r);
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return size;}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return max_read_size_;}
void
operator()(
std::size_t i,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
using std::visit;
// I am unsure whether this should be an error or an assertion.
BOOST_ASSERT(i < adapters_.size());
visit([&](auto& arg){arg(nd, ec);}, adapters_.at(i));
}
};
template <class Vector>
class vector_adapter {
private:
using adapter_type = typename adapter::detail::response_traits<Vector>::adapter_type;
adapter_type adapter_;
std::size_t max_read_size_;
public:
explicit vector_adapter(Vector& v, std::size_t max_read_size)
: adapter_{adapter::adapt2(v)}
, max_read_size_{max_read_size}
{ }
[[nodiscard]]
auto
get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return max_read_size_;}
void
operator()(
std::size_t,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
{
adapter_(nd, ec);
}
};
template <class>
struct response_traits;
template <>
struct response_traits<void> {
using response_type = void;
using adapter_type = detail::ignore_adapter;
static auto adapt(std::size_t max_read_size) noexcept
{ return detail::ignore_adapter{max_read_size}; }
};
template <class String, class Allocator>
struct response_traits<std::vector<resp3::node<String>, Allocator>> {
using response_type = std::vector<resp3::node<String>, Allocator>;
using adapter_type = vector_adapter<response_type>;
static auto adapt(response_type& v, std::size_t max_read_size) noexcept
{ return adapter_type{v, max_read_size}; }
};
template <class ...Ts>
struct response_traits<std::tuple<Ts...>> {
using response_type = std::tuple<Ts...>;
using adapter_type = static_adapter<response_type>;
static auto adapt(response_type& r, std::size_t max_read_size) noexcept
{ return adapter_type{r, max_read_size}; }
};
template <class Adapter>
class wrapper {
public:
explicit wrapper(Adapter adapter) : adapter_{adapter} {}
void operator()(resp3::node<std::string_view> const& node, boost::system::error_code& ec)
{ return adapter_(0, node, ec); }
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return adapter_.get_supported_response_size();}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return adapter_.get_max_read_size(0); }
private:
Adapter adapter_;
};
template <class Adapter>
auto make_adapter_wrapper(Adapter adapter)
{
return wrapper{adapter};
}
} // detail
/** @brief Creates an adapter that ignores responses.
* @ingroup high-level-api
*
* This function can be used to create adapters that ignore
* responses.
*
* @param max_read_size Specifies the maximum size of the read
* buffer.
*/
inline auto adapt(std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)()) noexcept
{
return detail::response_traits<void>::adapt(max_read_size);
}
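/* A hedged usage sketch (not part of the original documentation): adapt()
 * with no arguments is also the default adapter of connection::async_exec,
 * so a request whose responses are not needed can be executed as shown
 * below. Assumes a connected connection named conn.
 *
 * @code
 * resp3::request req;
 * req.push("SET", "key", "value");
 * co_await conn->async_exec(req); // Same as async_exec(req, adapt()).
 * @endcode
 */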
/** @brief Adapts a type to be used as a response.
* @ingroup high-level-api
*
* The type T must be either
*
* 1. a std::tuple<T1, T2, T3, ...> or
* 2. std::vector<node<String>>
*
* The types T1, T2, etc can be any STL container, any integer type
* and `std::string`.
*
* @param t Tuple containing the responses.
* @param max_read_size Specifies the maximum size of the read
* buffer.
*/
template<class T>
auto adapt(T& t, std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)()) noexcept
{
return detail::response_traits<T>::adapt(t, max_read_size);
}
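/* A hedged usage sketch (assumption, mirroring the examples in this
 * repository): adapting a tuple where each element receives the response to
 * the corresponding command and aedis::ignore skips the ones that are not
 * needed. Assumes a connected connection named conn.
 *
 * @code
 * resp3::request req;
 * req.push("HELLO", 3);
 * req.push("GET", "some-key");
 * req.push("QUIT");
 *
 * std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
 * co_await conn->async_exec(req, adapt(resp));
 * // std::get<1>(resp) now holds the GET reply.
 * @endcode
 */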
} // aedis
#endif // AEDIS_ADAPT_HPP


@@ -1,80 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_ADAPT_HPP
#define AEDIS_ADAPTER_ADAPT_HPP
#include <aedis/adapter/detail/response_traits.hpp>
namespace aedis::adapter {
template <class T>
using adapter_t = typename detail::adapter_t<T>;
/** \brief Creates a dummy response adapter.
\ingroup low-level-api
The adapter returned by this function ignores responses. It is
useful to avoid wasting time with responses which are not needed.
Example:
@code
// Pushes and writes some commands to the server.
sr.push(command::hello, 3);
sr.push(command::ping);
sr.push(command::quit);
net::write(socket, net::buffer(request));
// Ignores all responses except for the response to ping.
std::string buffer;
resp3::read(socket, dynamic_buffer(buffer), adapt()); // hello
resp3::read(socket, dynamic_buffer(buffer), adapt(resp)); // ping
resp3::read(socket, dynamic_buffer(buffer), adapt()); // quit
@endcode
*/
inline
auto adapt2() noexcept
{ return detail::response_traits<void>::adapt(); }
/** \brief Adapts user data to read operations.
* \ingroup low-level-api
*
* STL containers, \c std::tuple and built-in types are supported and
* can be used in conjunction with \c std::optional<T>.
*
* Example usage:
*
* @code
* std::unordered_map<std::string, std::string> cont;
* co_await async_read(socket, buffer, adapt(cont));
* @endcode
*
* For a transaction
*
* @code
* sr.push(command::multi);
* sr.push(command::ping, ...);
* sr.push(command::incr, ...);
* sr.push_range(command::rpush, ...);
* sr.push(command::lrange, ...);
* sr.push(command::incr, ...);
* sr.push(command::exec);
*
* co_await async_write(socket, buffer(request));
*
* // Reads the response to a transaction
* std::tuple<std::string, int, int, std::vector<std::string>, int> execs;
* co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(execs));
* @endcode
*/
template<class T>
auto adapt2(T& t) noexcept
{ return detail::response_traits<T>::adapt(t); }
} // aedis::adapter
#endif // AEDIS_ADAPTER_ADAPT_HPP


@@ -1,213 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_HPP
#define AEDIS_CONNECTION_HPP
#include <chrono>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <aedis/detail/connection_base.hpp>
namespace aedis {
/** @brief A connection to the Redis server.
* @ingroup high-level-api
*
* For more details, please see the documentation of each individual
* function.
*
* @tparam AsyncReadWriteStream A stream that supports reading and
* writing.
*/
template <class AsyncReadWriteStream>
class basic_connection :
private detail::connection_base<
typename AsyncReadWriteStream::executor_type,
basic_connection<AsyncReadWriteStream>> {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The socket type when rebound to the specified executor.
using other = basic_connection<typename next_layer_type::template rebind_executor<Executor1>::other>;
};
using base_type = detail::connection_base<executor_type, basic_connection<AsyncReadWriteStream>>;
/// Constructs from an executor.
explicit
basic_connection(
executor_type ex,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex}
{}
/// Constructs from a context.
explicit
basic_connection(
boost::asio::io_context& ioc,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: basic_connection(ioc.get_executor(), resource)
{ }
/// Returns the associated executor.
auto get_executor() {return stream_.get_executor();}
/// Resets the underlying stream.
void reset_stream()
{
if (stream_.is_open()) {
boost::system::error_code ignore;
stream_.shutdown(boost::asio::ip::tcp::socket::shutdown_both, ignore);
stream_.close(ignore);
}
}
/// Returns a reference to the next layer.
auto next_layer() noexcept -> auto& { return stream_; }
/// Returns a const reference to the next layer.
auto next_layer() const noexcept -> auto const& { return stream_; }
/** @brief Starts read and write operations
*
* This function starts read and write operations with the Redis
* server. More specifically, it will trigger the write of all
* requests, i.e. calls to `async_exec`, that happened prior to this
* call.
*
* @param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code);
* @endcode
*
* This function will complete when the connection is lost. If the
* error is boost::asio::error::eof this function will complete
* without error.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(CompletionToken token = CompletionToken{})
{
return base_type::async_run(std::move(token));
}
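/* Hedged usage sketch (assumption, following the examples in this
 * repository): async_run is usually composed with async_exec by means of
 * Asio's awaitable operators so that both operations share the same
 * lifetime.
 *
 * @code
 * co_await (conn->async_run() || conn->async_exec(req, adapt(resp)));
 * @endcode
 */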
/** @brief Executes a command on the Redis server asynchronously.
*
* This function sends a request to the Redis server and
* completes after the response has been processed. If the request
* contains only commands that don't expect a response, the
* completion occurs after it has been written to the underlying
* stream. Multiple concurrent calls to this function will be
* automatically queued by the implementation.
*
* @param req Request object.
* @param adapter Response adapter.
* @param token Asio completion token.
*
* For an example see echo_server.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in
* bytes.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_exec(req, adapter, std::move(token));
}
/** @brief Receives server side pushes asynchronously.
*
* Users that expect server pushes should call this function in a
* loop. If a push arrives and there is no reader, the connection
* will hang.
*
* @param adapter The response adapter.
* @param token The Asio completion token.
*
* For an example see subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the push in
* bytes.
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_receive(adapter, std::move(token));
}
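/* Hedged sketch (assumption) of the receive loop mentioned above; see also
 * subscriber.cpp in this repository.
 *
 * @code
 * for (std::vector<resp3::node<std::string>> resp;;) {
 *    co_await conn->async_receive(adapt(resp));
 *    // ... consume the push stored in resp ...
 *    resp.clear();
 * }
 * @endcode
 */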
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with
* `async_exec`. Affects only requests that haven't been written
* yet.
* @li `operation::run`: Cancels the `async_run` operation. Notice
* that the preferred way to close a connection is to send a
* [QUIT](https://redis.io/commands/quit/) command to the server.
* @li `operation::receive`: Cancels any ongoing calls to `async_receive`.
*
* @param op The operation to be cancelled.
* @returns The number of operations that have been canceled.
*/
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
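/* Hedged usage sketch (assumption): cancelling pending operations when a
 * termination signal arrives. Assumes a boost::asio::signal_set named sig
 * and a std::shared_ptr<connection> named conn.
 *
 * @code
 * sig.async_wait([conn](auto const&, int) {
 *    conn->cancel(operation::receive);
 *    conn->cancel(operation::run);
 * });
 * @endcode
 */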
private:
using this_type = basic_connection<next_layer_type>;
template <class, class> friend class detail::connection_base;
template <class, class> friend struct detail::exec_read_op;
template <class, class> friend struct detail::exec_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class> friend struct detail::run_op;
void close() { stream_.close(); }
auto is_open() const noexcept { return stream_.is_open(); }
auto lowest_layer() noexcept -> auto& { return stream_.lowest_layer(); }
AsyncReadWriteStream stream_;
};
/** \brief A connection that uses a boost::asio::ip::tcp::socket.
* \ingroup high-level-api
*/
using connection = basic_connection<boost::asio::ip::tcp::socket>;
} // aedis
#endif // AEDIS_CONNECTION_HPP


@@ -1,387 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_BASE_HPP
#define AEDIS_CONNECTION_BASE_HPP
#include <vector>
#include <queue>
#include <limits>
#include <chrono>
#include <memory>
#include <type_traits>
#include <memory_resource>
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <aedis/adapt.hpp>
#include <aedis/operation.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
namespace aedis::detail {
/** Base class for high level Redis asynchronous connections.
*
* This class is not meant to be instantiated directly but to be used as a
* base class in the CRTP.
*
* @tparam Executor The executor type.
* @tparam Derived The derived class type.
*
*/
template <class Executor, class Derived>
class connection_base {
public:
using executor_type = Executor;
using this_type = connection_base<Executor, Derived>;
explicit
connection_base(executor_type ex, std::pmr::memory_resource* resource)
: writer_timer_{ex}
, read_timer_{ex}
, guarded_op_{ex}
, read_buffer_{resource}
, write_buffer_{resource}
, reqs_{resource}
{
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
auto get_executor() {return writer_timer_.get_executor();}
auto cancel(operation op) -> std::size_t
{
switch (op) {
case operation::exec:
{
return cancel_unwritten_requests();
}
case operation::run:
{
derived().close();
read_timer_.cancel();
writer_timer_.cancel();
cancel_on_conn_lost();
return 1U;
}
case operation::receive:
{
guarded_op_.cancel();
return 1U;
}
default: BOOST_ASSERT(false); return 0;
}
}
auto cancel_unwritten_requests() -> std::size_t
{
auto f = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
return ptr->is_written();
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
return ret;
}
// Removes unwritten requests that have cancel_on_connection_lost set and
// written requests that don't have retry_on_connection_lost set.
auto cancel_on_conn_lost() -> std::size_t
{
// Must return false if the request should be removed.
auto cond = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
if (ptr->is_written()) {
return ptr->get_request().get_config().retry_on_connection_lost;
} else {
return !ptr->get_request().get_config().cancel_on_connection_lost;
}
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return ptr->reset_status();
});
return ret;
}
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
BOOST_ASSERT_MSG(req.size() <= adapter.get_supported_response_size(), "Request and adapter have incompatible sizes.");
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<Derived, Adapter>{&derived(), &req, adapter}, token, writer_timer_);
}
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
auto f = detail::make_adapter_wrapper(adapter);
return guarded_op_.async_wait(
resp3::async_read(derived().next_layer(), make_dynamic_buffer(adapter.get_max_read_size(0)), f, boost::asio::deferred),
std::move(token));
}
template <class CompletionToken>
auto async_run(CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<Derived>{&derived()}, token, writer_timer_);
}
private:
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
auto derived() -> Derived& { return static_cast<Derived&>(*this); }
void on_write()
{
// We have to clear the payload right after writing it to use it
// as a flag that indicates there is no ongoing write.
write_buffer_.clear();
// Notice this must come before the for-each below.
cancel_push_requests();
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
if (ptr->is_staged())
ptr->mark_written();
});
}
struct req_info {
public:
enum class action
{
stop,
proceed,
none,
};
explicit req_info(resp3::request const& req, executor_type ex)
: timer_{ex}
, action_{action::none}
, req_{&req}
, cmds_{std::size(req)}
, status_{status::none}
{
timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
auto proceed()
{
timer_.cancel();
action_ = action::proceed;
}
void stop()
{
timer_.cancel();
action_ = action::stop;
}
[[nodiscard]] auto is_written() const noexcept
{ return status_ == status::written; }
[[nodiscard]] auto is_staged() const noexcept
{ return status_ == status::staged; }
void mark_written() noexcept
{ status_ = status::written; }
void mark_staged() noexcept
{ status_ = status::staged; }
void reset_status() noexcept
{ status_ = status::none; }
[[nodiscard]] auto get_number_of_commands() const noexcept
{ return cmds_; }
[[nodiscard]] auto get_request() const noexcept -> auto const&
{ return *req_; }
[[nodiscard]] auto get_action() const noexcept
{ return action_;}
template <class CompletionToken>
auto async_wait(CompletionToken token)
{
return timer_.async_wait(std::move(token));
}
private:
enum class status
{ none
, staged
, written
};
timer_type timer_;
action action_;
resp3::request const* req_;
std::size_t cmds_;
status status_;
};
void remove_request(std::shared_ptr<req_info> const& info)
{
reqs_.erase(std::remove(std::begin(reqs_), std::end(reqs_), info));
}
using reqs_type = std::pmr::deque<std::shared_ptr<req_info>>;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class> friend struct detail::run_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::exec_read_op;
template <class> friend struct detail::send_receive_op;
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->is_staged() && ptr->get_request().size() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->proceed();
});
reqs_.erase(point, std::end(reqs_));
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (info->get_request().has_hello_priority()) {
auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
return !e->is_written() && !e->is_staged();
});
std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
}
if (derived().is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
auto make_dynamic_buffer(std::size_t max_read_size = 512)
{ return boost::asio::dynamic_buffer(read_buffer_, max_read_size); }
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<Derived>{&derived()}, token, writer_timer_);
}
template <class CompletionToken>
auto writer(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<Derived>{&derived()}, token, writer_timer_);
}
template <class Adapter, class CompletionToken>
auto async_exec_read(Adapter adapter, std::size_t cmds, CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<Derived, Adapter>{&derived(), adapter, cmds}, token, writer_timer_);
}
void stage_request(req_info& ri)
{
write_buffer_ += ri.get_request().payload();
cmds_ += ri.get_request().size();
ri.mark_staged();
}
void coalesce_requests()
{
// Coalesces the requests and marks them as staged. After a
// successful write, staged requests will be marked as written.
BOOST_ASSERT(write_buffer_.empty());
BOOST_ASSERT(!reqs_.empty());
stage_request(*reqs_.at(0));
for (std::size_t i = 1; i < std::size(reqs_); ++i) {
if (!reqs_.at(i - 1)->get_request().get_config().coalesce ||
!reqs_.at(i - 0)->get_request().get_config().coalesce) {
break;
}
stage_request(*reqs_.at(i));
}
}
// Notice we use a timer to simulate a condition variable. It is
// more suitable than a channel here because the notify operation
// does not suspend.
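// The pattern (illustrative): the timer is armed with
// expires_at(steady_clock::time_point::max()) so that async_wait never
// completes on its own; calling cancel() completes the pending wait with
// operation_aborted, which acts as the notification.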
timer_type writer_timer_;
timer_type read_timer_;
detail::guarded_operation<executor_type> guarded_op_;
std::pmr::string read_buffer_;
std::pmr::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
};
} // aedis::detail
#endif // AEDIS_CONNECTION_BASE_HPP


@@ -1,336 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_OPS_HPP
#define AEDIS_CONNECTION_OPS_HPP
#include <array>
#include <algorithm>
#include <string_view>
#include <boost/assert.hpp>
#include <boost/system.hpp>
#include <boost/asio/write.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/adapt.hpp>
#include <aedis/error.hpp>
#include <aedis/detail/guarded_operation.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/request.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail {
template <class Conn, class Adapter>
struct exec_read_op {
Conn* conn;
Adapter adapter;
std::size_t cmds = 0;
std::size_t read_size = 0;
std::size_t index = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
// Loop reading the responses to this request.
BOOST_ASSERT(!conn->reqs_.empty());
while (cmds != 0) {
BOOST_ASSERT(conn->cmds_ != 0);
//-----------------------------------
// If we detect a push in the middle of a request we have
// to hand it to the push consumer. To do that we need
// some data in the read buffer.
if (conn->read_buffer_.empty()) {
yield
boost::asio::async_read_until(
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
AEDIS_CHECK_OP1(conn->cancel(operation::run););
}
// If the next message is a push we have to hand it to
// the receive_op, wait for it to be done and continue.
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) {
yield conn->guarded_op_.async_run(std::move(self));
AEDIS_CHECK_OP1(conn->cancel(operation::run););
continue;
}
//-----------------------------------
yield
resp3::async_read(
conn->next_layer(),
conn->make_dynamic_buffer(adapter.get_max_read_size(index)),
[i = index, adpt = adapter] (resp3::node<std::string_view> const& nd, boost::system::error_code& ec) mutable { adpt(i, nd, ec); },
std::move(self));
++index;
AEDIS_CHECK_OP1(conn->cancel(operation::run););
read_size += n;
BOOST_ASSERT(cmds != 0);
--cmds;
BOOST_ASSERT(conn->cmds_ != 0);
--conn->cmds_;
}
self.complete({}, read_size);
}
}
};
template <class Conn, class Adapter>
struct exec_op {
using req_info_type = typename Conn::req_info;
Conn* conn = nullptr;
resp3::request const* req = nullptr;
Adapter adapter{};
std::shared_ptr<req_info_type> info = nullptr;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
// Check whether the user wants to wait for the connection to
// be established.
if (req->get_config().cancel_if_not_connected && !conn->is_open()) {
return self.complete(error::not_connected, 0);
}
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), *req, conn->get_executor());
conn->add_request_info(info);
EXEC_OP_WAIT:
yield info->async_wait(std::move(self));
BOOST_ASSERT(ec == boost::asio::error::operation_aborted);
if (info->get_action() == Conn::req_info::action::stop) {
// Don't have to call remove_request as it has already
// been removed by cancel(exec).
return self.complete(ec, 0);
}
if (is_cancelled(self)) {
if (info->is_written()) {
self.get_cancellation_state().clear();
goto EXEC_OP_WAIT; // Too late, can't cancel.
} else {
conn->remove_request(info);
self.complete(ec, 0);
return;
}
}
BOOST_ASSERT(conn->is_open());
if (req->size() == 0) {
// Don't have to call remove_request as it has already
// been removed.
return self.complete({}, 0);
}
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->get_number_of_commands(), std::move(self));
AEDIS_CHECK_OP1(;);
read_size = n;
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.pop_front();
if (conn->cmds_ == 0) {
conn->read_timer_.cancel_one();
if (!conn->reqs_.empty())
conn->writer_timer_.cancel_one();
} else {
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.front()->proceed();
}
self.complete({}, read_size);
}
}
};
template <class Conn>
struct run_op {
Conn* conn = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec0 = {}
, boost::system::error_code ec1 = {})
{
reenter (coro)
{
conn->write_buffer_.clear();
conn->cmds_ = 0;
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(token);},
[this](auto token) { return conn->writer(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(boost::asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Conn>
struct writer_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
while (!conn->reqs_.empty() && conn->cmds_ == 0 && conn->write_buffer_.empty()) {
conn->coalesce_requests();
yield
boost::asio::async_write(conn->next_layer(), boost::asio::buffer(conn->write_buffer_), std::move(self));
AEDIS_CHECK_OP0(conn->cancel(operation::run););
conn->on_write();
// A socket.close() may have been called while a
// successful write might have already been queued, so we
// have to check here before proceeding.
if (!conn->is_open()) {
self.complete({});
return;
}
}
yield conn->writer_timer_.async_wait(std::move(self));
if (!conn->is_open() || is_cancelled(self)) {
// Notice this is not an error of the op; stopping was
// requested from the outside, so we complete with
// success.
self.complete({});
return;
}
}
}
};
template <class Conn>
struct reader_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
yield
boost::asio::async_read_until(
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
if (ec == boost::asio::error::eof) {
conn->cancel(operation::run);
return self.complete({}); // EOFINAE: EOF is not an error.
}
AEDIS_CHECK_OP0(conn->cancel(operation::run););
// We handle unsolicited events in the following way
//
// 1. Its resp3 type is a push.
//
// 2. A non-push type is received while the request queue is
// empty. I have noticed this is possible (e.g. -MISCONF).
// I would expect such messages to have the push type so we
// could distinguish them from responses to commands, but
// they arrive as simple-errors. If we are lucky enough to
// receive them when the command queue is empty we can treat
// them as server pushes, otherwise it is impossible to
// handle them properly.
//
// 3. The request does not expect any response but we got
// one. This may happen if, for example, SUBSCRIBE is used with
// the wrong syntax.
//
BOOST_ASSERT(!conn->read_buffer_.empty());
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push
|| conn->reqs_.empty()
|| (!conn->reqs_.empty() && conn->reqs_.front()->get_number_of_commands() == 0)) {
yield conn->guarded_op_.async_run(std::move(self));
} else {
BOOST_ASSERT(conn->cmds_ != 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front()->get_number_of_commands() != 0);
conn->reqs_.front()->proceed();
yield conn->read_timer_.async_wait(std::move(self));
ec = {};
}
if (!conn->is_open() || ec || is_cancelled(self)) {
conn->cancel(operation::run);
self.complete(boost::asio::error::basic_errors::operation_aborted);
return;
}
}
}
};
} // aedis::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_CONNECTION_OPS_HPP


@@ -1,108 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_DETAIL_GUARDED_OPERATION_HPP
#define AEDIS_DETAIL_GUARDED_OPERATION_HPP
#include <boost/asio/experimental/channel.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail {
template <class Executor>
struct send_receive_op {
using channel_type = boost::asio::experimental::channel<Executor, void(boost::system::error_code, std::size_t)>;
channel_type* channel;
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro)
{
yield channel->async_send(boost::system::error_code{}, 0, std::move(self));
AEDIS_CHECK_OP0(;);
yield channel->async_send(boost::system::error_code{}, 0, std::move(self));
AEDIS_CHECK_OP0(;);
self.complete({});
}
}
};
template <class Executor, class Op>
struct wait_op {
using channel_type = boost::asio::experimental::channel<Executor, void(boost::system::error_code, std::size_t)>;
channel_type* channel;
Op op;
std::size_t res = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield channel->async_receive(std::move(self));
AEDIS_CHECK_OP1(;);
yield std::move(op)(std::move(self));
AEDIS_CHECK_OP1(channel->cancel(););
res = n;
yield channel->async_receive(std::move(self));
AEDIS_CHECK_OP1(;);
self.complete({}, res);
return;
}
}
};
template <class Executor = boost::asio::any_io_executor>
class guarded_operation {
public:
using executor_type = Executor;
guarded_operation(executor_type ex) : channel_{ex} {}
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(send_receive_op<executor_type>{&channel_}, token, channel_);
}
template <class Op, class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_wait(Op&& op, CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(wait_op<executor_type, Op>{&channel_, std::move(op)}, token, channel_);
}
void cancel() {channel_.cancel();}
private:
using channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
template <class> friend struct send_receive_op;
template <class, class> friend struct wait_op;
channel_type channel_;
};
} // aedis::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_DETAIL_GUARDED_OPERATION_HPP


@@ -1,29 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_OPERATION_HPP
#define AEDIS_OPERATION_HPP
namespace aedis {
/** \brief Connection operations that can be cancelled.
* \ingroup high-level-api
*
* The operations listed below can be passed to the
* `aedis::connection::cancel` member function.
*/
enum class operation {
/// Refers to `connection::async_exec` operations.
exec,
/// Refers to `connection::async_run` operations.
run,
/// Refers to `connection::async_receive` operations.
receive,
};
} // aedis
#endif // AEDIS_OPERATION_HPP


@@ -1,234 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_PARSER_HPP
#define AEDIS_RESP3_PARSER_HPP
#include <array>
#include <limits>
#include <system_error>
#include <charconv>
#include <string_view>
#include <cstdint>
#include <boost/assert.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/node.hpp>
namespace aedis::resp3::detail {
using int_type = std::uint64_t;
inline
void to_int(int_type& i, std::string_view sv, boost::system::error_code& ec)
{
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
if (res.ec != std::errc())
ec = error::not_a_number;
}
template <class ResponseAdapter>
class parser {
private:
using node_type = node<std::string_view>;
static constexpr std::size_t max_embedded_depth = 5;
ResponseAdapter adapter_;
// The current depth. Simple data types will have depth 0, whereas
// the elements of aggregates will have depth 1. Embedded types
// will have increasing depth.
std::size_t depth_ = 0;
// The parser supports up to 5 levels of nested structures. The
// first element in the sizes stack is a sentinel and must be
// different from 1.
std::array<std::size_t, max_embedded_depth + 1> sizes_ = {{1}};
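// Illustrative example (assumption): while parsing a map header %1\r\n with
// one key-value pair, the header pushes sizes_[1] = 1 * element_multiplicity(map)
// = 2; each field read decrements it, and when it reaches zero the parser
// pops back to depth 0.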
// Contains the length expected in the next bulk read.
int_type bulk_length_ = (std::numeric_limits<int_type>::max)();
// The type of the next bulk. Contains type::invalid if no bulk is
// expected.
type bulk_ = type::invalid;
public:
explicit parser(ResponseAdapter adapter)
: adapter_{adapter}
{
sizes_[0] = 2; // The sentinel must be more than 1.
}
// Returns the number of bytes that have been consumed.
auto
consume(char const* data, std::size_t n, boost::system::error_code& ec) -> std::size_t
{
if (bulk_ != type::invalid) {
n = bulk_length_ + 2;
switch (bulk_) {
case type::streamed_string_part:
{
BOOST_ASSERT(bulk_length_ != 0);
adapter_({bulk_, 1, depth_, {data, bulk_length_}}, ec);
if (ec)
return 0;
} break;
default:
{
adapter_({bulk_, 1, depth_, {data, bulk_length_}}, ec);
if (ec)
return 0;
}
}
bulk_ = type::invalid;
--sizes_[depth_];
} else if (sizes_[depth_] != 0) {
auto const t = to_type(*data);
switch (t) {
case type::streamed_string_part:
{
to_int(bulk_length_ , std::string_view{data + 1, n - 3}, ec);
if (ec)
return 0;
if (bulk_length_ == 0) {
adapter_({type::streamed_string_part, 1, depth_, {}}, ec);
sizes_[depth_] = 0; // We are done.
} else {
bulk_ = type::streamed_string_part;
}
} break;
case type::blob_error:
case type::verbatim_string:
case type::blob_string:
{
if (data[1] == '?') {
// NOTE: This can only be triggered with blob_string.
// Trick: A streamed string is read as an aggregate
// of infinite length. When the streaming is done
// the server is supposed to send a part with length
// 0.
sizes_[++depth_] = (std::numeric_limits<std::size_t>::max)();
} else {
to_int(bulk_length_ , std::string_view{data + 1, n - 3} , ec);
if (ec)
return 0;
bulk_ = t;
}
} break;
case type::boolean:
{
if (n == 3) {
ec = error::empty_field;
return 0;
}
if (data[1] != 'f' && data[1] != 't') {
ec = error::unexpected_bool_value;
return 0;
}
adapter_({t, 1, depth_, {data + 1, n - 3}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::doublean:
case type::big_number:
case type::number:
{
if (n == 3) {
ec = error::empty_field;
return 0;
}
adapter_({t, 1, depth_, {data + 1, n - 3}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::simple_error:
case type::simple_string:
{
adapter_({t, 1, depth_, {&data[1], n - 3}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::null:
{
adapter_({type::null, 1, depth_, {}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::push:
case type::set:
case type::array:
case type::attribute:
case type::map:
{
int_type l = -1;
to_int(l, std::string_view{data + 1, n - 3}, ec);
if (ec)
return 0;
adapter_({t, l, depth_, {}}, ec);
if (ec)
return 0;
if (l == 0) {
--sizes_[depth_];
} else {
if (depth_ == max_embedded_depth) {
ec = error::exceeeds_max_nested_depth;
return 0;
}
++depth_;
sizes_[depth_] = l * element_multiplicity(t);
}
} break;
default:
{
ec = error::invalid_data_type;
return 0;
}
}
}
while (sizes_[depth_] == 0) {
--depth_;
--sizes_[depth_];
}
return n;
}
// Returns true when the parser is done with the current message.
[[nodiscard]] auto done() const noexcept
{ return depth_ == 0 && bulk_ == type::invalid; }
// The bulk type expected in the next read. If none is expected returns
// type::invalid.
[[nodiscard]] auto bulk() const noexcept { return bulk_; }
// The length expected in the next bulk.
[[nodiscard]] auto bulk_length() const noexcept { return bulk_length_; }
};
} // aedis::resp3::detail
#endif // AEDIS_RESP3_PARSER_HPP


@@ -1,130 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_READ_OPS_HPP
#define AEDIS_RESP3_READ_OPS_HPP
#include <string_view>
#include <boost/assert.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/core/ignore_unused.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail
{
template <class T>
auto is_cancelled(T const& self)
{
return self.get_cancellation_state().cancelled() != boost::asio::cancellation_type_t::none;
}
}
#define AEDIS_CHECK_OP0(X)\
if (ec || aedis::detail::is_cancelled(self)) {\
X\
self.complete(!!ec ? ec : boost::asio::error::operation_aborted);\
return;\
}
#define AEDIS_CHECK_OP1(X)\
if (ec || aedis::detail::is_cancelled(self)) {\
X\
self.complete(!!ec ? ec : boost::asio::error::operation_aborted, {});\
return;\
}
namespace aedis::resp3::detail {
struct ignore_response {
void operator()(node<std::string_view> nd, boost::system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = error::resp3_simple_error; return;
case resp3::type::blob_error: ec = error::resp3_blob_error; return;
default: return;
}
}
};
template <
class AsyncReadStream,
class DynamicBuffer,
class ResponseAdapter>
class parse_op {
private:
AsyncReadStream& stream_;
DynamicBuffer buf_;
parser<ResponseAdapter> parser_;
std::size_t consumed_ = 0;
std::size_t buffer_size_ = 0;
boost::asio::coroutine coro_{};
public:
parse_op(AsyncReadStream& stream, DynamicBuffer buf, ResponseAdapter adapter)
: stream_ {stream}
, buf_ {std::move(buf)}
, parser_ {std::move(adapter)}
{ }
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro_) for (;;) {
if (parser_.bulk() == type::invalid) {
yield
boost::asio::async_read_until(stream_, buf_, "\r\n", std::move(self));
AEDIS_CHECK_OP1(;);
} else {
// On a bulk read we can't read until delimiter since the
// payload may contain the delimiter itself so we have to
// read the whole chunk. However if the bulk blob is small
// enough it may already be in the buffer (from the last
// read), in which case there is no need to initiate
// another async op; otherwise we have to read the missing
// bytes.
if (buf_.size() < (parser_.bulk_length() + 2)) {
buffer_size_ = buf_.size();
buf_.grow(parser_.bulk_length() + 2 - buffer_size_);
yield
boost::asio::async_read(
stream_,
buf_.data(buffer_size_, parser_.bulk_length() + 2 - buffer_size_),
boost::asio::transfer_all(),
std::move(self));
AEDIS_CHECK_OP1(;);
}
n = parser_.bulk_length() + 2;
BOOST_ASSERT(buf_.size() >= n);
}
n = parser_.consume(static_cast<char const*>(buf_.data(0, n).data()), n, ec);
if (ec) {
self.complete(ec, 0);
return;
}
buf_.consume(n);
consumed_ += n;
if (parser_.done()) {
self.complete({}, consumed_);
return;
}
}
}
};
} // aedis::resp3::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_RESP3_READ_OPS_HPP


@@ -1,55 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_NODE_HPP
#define AEDIS_RESP3_NODE_HPP
#include <aedis/resp3/type.hpp>
namespace aedis::resp3 {
/** \brief A node in the response tree.
* \ingroup high-level-api
*
* Redis responses are the pre-order view of the response tree (see
* https://en.wikipedia.org/wiki/Tree_traversal#Pre-order,_NLR).
*
* \remark Any Redis response can be received in an array of nodes,
* for example \c std::vector<node<std::string>>.
*/
template <class String>
struct node {
/// The RESP3 type of the data in this node.
type data_type = type::invalid;
/// The number of elements of an aggregate.
std::size_t aggregate_size{};
/// The depth of this node in the response tree.
std::size_t depth{};
/// The actual data. For aggregate types this is usually empty.
String value{};
};
/** @brief Compares a node for equality.
* @relates node
*
* @param a Left hand side node object.
* @param b Right hand side node object.
*/
template <class String>
auto operator==(node<String> const& a, node<String> const& b)
{
return a.aggregate_size == b.aggregate_size
&& a.depth == b.depth
&& a.data_type == b.data_type
&& a.value == b.value;
};
} // aedis::resp3
#endif // AEDIS_RESP3_NODE_HPP
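Editor's note: to make the pre-order layout described above concrete, here is a hedged illustration (not taken from the diff) of how a single-entry RESP3 map could appear as a flat node sequence; the exact values are assumptions for exposition only.

#include <string>
#include <vector>
#include <aedis/resp3/node.hpp>

using aedis::resp3::node;
using aedis::resp3::type;

// A map with one key/value pair, e.g. {"field" => "value"}, arrives as the
// aggregate header followed by its children at depth 1.
std::vector<node<std::string>> const example
{ {type::map,         1, 0, ""}        // Header: one key/value pair.
, {type::blob_string, 1, 1, "field"}   // Key.
, {type::blob_string, 1, 1, "value"}   // Value.
};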


@@ -1,180 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_READ_HPP
#define AEDIS_RESP3_READ_HPP
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/detail/read_ops.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/async_result.hpp>
namespace aedis::resp3 {
/** \brief Reads a complete response to a command synchronously.
* \ingroup low-level-api
*
* This function reads a complete response to a command or a
* server push synchronously. For example
*
* @code
* int resp;
* std::string buffer;
* resp3::read(socket, dynamic_buffer(buffer), adapt(resp));
* @endcode
*
* For a complete example see examples/intro_sync.cpp. This function
* is implemented in terms of one or more calls to @c
* asio::read_until and @c asio::read functions, and is known as a @a
* composed @a operation. Furthermore, the implementation may read
* additional bytes from the stream that lie past the end of the
* message being read. These additional bytes are stored in the
* dynamic buffer, which must be preserved for subsequent reads.
*
* \param stream The stream from which to read e.g. a tcp socket.
* \param buf Dynamic buffer (version 2).
* \param adapter The response adapter.
* \param ec If an error occurs, it will be assigned to this parameter.
* \returns The number of bytes that have been consumed from the dynamic buffer.
*
* \remark This function calls buf.consume() in each chunk of data
* after it has been passed to the adapter. Users must not consume
* the bytes after it returns.
*/
template <
class SyncReadStream,
class DynamicBuffer,
class ResponseAdapter
>
auto
read(
SyncReadStream& stream,
DynamicBuffer buf,
ResponseAdapter adapter,
boost::system::error_code& ec) -> std::size_t
{
detail::parser<ResponseAdapter> p {adapter};
std::size_t n = 0;
std::size_t consumed = 0;
do {
if (p.bulk() == type::invalid) {
n = boost::asio::read_until(stream, buf, "\r\n", ec);
if (ec)
return 0;
} else {
auto const s = buf.size();
auto const l = p.bulk_length();
if (s < (l + 2)) {
auto const to_read = l + 2 - s;
buf.grow(to_read);
n = boost::asio::read(stream, buf.data(s, to_read), ec);
if (ec)
return 0;
}
}
auto const* data = static_cast<char const*>(buf.data(0, n).data());
n = p.consume(data, n, ec);
if (ec)
return 0;
buf.consume(n);
consumed += n;
} while (!p.done());
return consumed;
}
/** \brief Reads a complete response to a command synchronously.
* \ingroup low-level-api
*
* Same as the error_code overload but throws on error.
*/
template<
class SyncReadStream,
class DynamicBuffer,
class ResponseAdapter = detail::ignore_response>
auto
read(
SyncReadStream& stream,
DynamicBuffer buf,
ResponseAdapter adapter = ResponseAdapter{})
{
boost::system::error_code ec;
auto const n = resp3::read(stream, buf, adapter, ec);
if (ec)
BOOST_THROW_EXCEPTION(boost::system::system_error{ec});
return n;
}
/** \brief Reads a complete response to a Redis command asynchronously.
* \ingroup low-level-api
*
* This function reads a complete response to a command or a
* server push asynchronously. For example
*
* @code
* std::string buffer;
* std::set<std::string> resp;
* co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
* @endcode
*
* For a complete example see examples/transaction.cpp. This function
* is implemented in terms of one or more calls to @c
* asio::async_read_until and @c asio::async_read functions, and is
* known as a @a composed @a operation. Furthermore, the
* implementation may read additional bytes from the stream that lie
* past the end of the message being read. These additional bytes are
* stored in the dynamic buffer, which must be preserved for
* subsequent reads.
*
* \param stream The stream from which to read e.g. a tcp socket.
* \param buffer Dynamic buffer (version 2).
* \param adapter The response adapter.
* \param token The completion token.
*
* The completion handler will receive as a parameter the total
* number of bytes transferred from the stream and must have the
* following signature
*
* @code
* void(boost::system::error_code, std::size_t);
* @endcode
*
* \remark This function calls buf.consume() in each chunk of data
* after it has been passed to the adapter. Users must not consume
* the bytes after it returns.
*/
template <
class AsyncReadStream,
class DynamicBuffer,
class ResponseAdapter = detail::ignore_response,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncReadStream::executor_type>
>
auto async_read(
AsyncReadStream& stream,
DynamicBuffer buffer,
ResponseAdapter adapter = ResponseAdapter{},
CompletionToken&& token =
boost::asio::default_completion_token_t<typename AsyncReadStream::executor_type>{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::parse_op<AsyncReadStream, DynamicBuffer, ResponseAdapter> {stream, buffer, adapter},
token,
stream);
}
} // aedis::resp3
#endif // AEDIS_RESP3_READ_HPP


@@ -1,402 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_REQUEST_HPP
#define AEDIS_RESP3_REQUEST_HPP
#include <string>
#include <tuple>
#include <memory_resource>
#include <aedis/resp3/type.hpp>
// NOTE: Consider detecting tuples in the type in the parameter pack
// to calculate the header size correctly.
//
// NOTE: For some commands like hset it would be a good idea to assert
// the value type is a pair.
namespace aedis::resp3 {
constexpr char const* separator = "\r\n";
/** @brief Adds a bulk to the request.
* @relates request
*
* This function is useful in serialization of your own data
* structures in a request. For example
*
* @code
* void to_bulk(std::string& to, mystruct const& obj)
* {
* auto const str = // Convert obj to a string.
* resp3::to_bulk(to, str);
* }
* @endcode
*
* @param to Storage on which data will be copied into.
* @param data Data that will be serialized and stored in @c to.
*
* See more in @ref serialization.
*/
template <class Request>
void to_bulk(Request& to, std::string_view data)
{
auto const str = std::to_string(data.size());
to += to_code(type::blob_string);
to.append(std::cbegin(str), std::cend(str));
to += separator;
to.append(std::cbegin(data), std::cend(data));
to += separator;
}
template <class Request, class T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, std::string_view{s});
}
namespace detail {
auto has_push_response(std::string_view cmd) -> bool;
template <class T>
struct add_bulk_impl {
template <class Request>
static void add(Request& to, T const& from)
{
using namespace aedis::resp3;
to_bulk(to, from);
}
};
template <class ...Ts>
struct add_bulk_impl<std::tuple<Ts...>> {
template <class Request>
static void add(Request& to, std::tuple<Ts...> const& t)
{
auto f = [&](auto const&... vs)
{
using namespace aedis::resp3;
(to_bulk(to, vs), ...);
};
std::apply(f, t);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
static void add(Request& to, std::pair<U, V> const& from)
{
using namespace aedis::resp3;
to_bulk(to, from.first);
to_bulk(to, from.second);
}
};
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
auto const str = std::to_string(size);
to += to_code(t);
to.append(std::cbegin(str), std::cend(str));
to += separator;
}
template <class Request, class T>
void add_bulk(Request& to, T const& data)
{
detail::add_bulk_impl<T>::add(to, data);
}
template <class>
struct bulk_counter;
template <class>
struct bulk_counter {
static constexpr auto size = 1U;
};
template <class T, class U>
struct bulk_counter<std::pair<T, U>> {
static constexpr auto size = 2U;
};
template <class Request>
void add_blob(Request& to, std::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
}
template <class Request>
void add_separator(Request& to)
{
to += separator;
}
} // detail
/** \brief Creates Redis requests.
* \ingroup high-level-api
*
* A request is composed of one or more Redis commands and is
* referred to in the redis documentation as a pipeline, see
* https://redis.io/topics/pipelining. For example
*
* @code
* request r;
* r.push("HELLO", 3);
* r.push("FLUSHALL");
* r.push("PING");
* r.push("PING", "key");
* r.push("QUIT");
* @endcode
*
* \remarks
*
* \li Non-string types will be converted to string by using \c
* to_bulk, which must be made available over ADL.
* \li Uses a std::pmr::string for internal storage.
*/
class request {
public:
/// Request configuration options.
struct config {
/** \brief Setting it to true will cause
* `aedis::connection::async_exec` to complete with error if the
* connection is lost. Affects only requests that haven't been
* sent yet.
*/
bool cancel_on_connection_lost = true;
/** \brief If true the request will be coalesced with other
* requests, see https://redis.io/topics/pipelining. Otherwise
* the request is sent individually.
*/
bool coalesce = true;
/** \brief If true, the request will complete with error if the
* call happens before the connection with Redis was established.
*/
bool cancel_if_not_connected = false;
/** \brief If true `aedis::connection::async_exec` will not
* cancel this request if the connection is lost. Affects only
* requests that have already been written to the socket but were
* still unanswered when `aedis::connection::async_run` completed.
*/
bool retry_on_connection_lost = false;
/** \brief If this request has a HELLO command and this flag is
* true, the `aedis::connection` will move it to the front of
* the queue of awaiting requests. This makes it possible to
* send HELLO and authenticate before other commands are sent.
*/
bool hello_with_priority = true;
};
/** \brief Constructor
*
* \param cfg Configuration options.
* \param resource Memory resource.
*/
explicit
request(config cfg = config{true, true, false, false, true},
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: cfg_{cfg}, payload_(resource) {}
/// Returns the number of commands contained in this request.
[[nodiscard]] auto size() const noexcept -> std::size_t
{ return commands_; }
[[nodiscard]] auto payload() const noexcept -> auto const&
{ return payload_;}
[[nodiscard]] auto has_hello_priority() const noexcept -> auto const&
{ return has_hello_priority_;}
/// Clears the request preserving allocated memory.
void clear()
{
payload_.clear();
commands_ = 0;
}
/// Calls std::pmr::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
/** @brief Appends a new command to the end of the request.
*
* For example
*
* \code
* request req;
* req.push("SET", "key", "some string", "EX", "2");
* \endcode
*
* will add the \c set command with value "some string" and an
* expiration of 2 seconds.
*
* \param cmd The command, e.g. a Redis or Sentinel command.
* \param args Command arguments.
*/
template <class... Ts>
void push(std::string_view cmd, Ts const&... args)
{
using resp3::type;
auto constexpr pack_size = sizeof...(Ts);
detail::add_header(payload_, type::array, 1 + pack_size);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, std::tie(std::forward<Ts const&>(args)...));
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a key and have a
* dynamic range of arguments. For example
*
* @code
* std::map<std::string, std::string> map
* { {"key1", "value1"}
* , {"key2", "value2"}
* , {"key3", "value3"}
* };
*
* request req;
* req.push_range("HSET", "key", std::cbegin(map), std::cend(map));
* @endcode
*
* \param cmd The command, e.g. a Redis or Sentinel command.
* \param key The command key.
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class Key, class ForwardIterator>
void push_range(std::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
detail::add_header(payload_, type::array, 2 + size * distance);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, key);
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a dynamic number
* of arguments and don't have a key. For example
*
* \code
* std::set<std::string> channels
* { "channel1" , "channel2" , "channel3" }
*
* request req;
* req.push("SUBSCRIBE", std::cbegin(channels), std::cend(channels));
* \endcode
*
* \param cmd The Redis command
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class ForwardIterator>
void push_range(std::string_view cmd, ForwardIterator begin, ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
detail::add_header(payload_, type::array, 1 + size * distance);
detail::add_bulk(payload_, cmd);
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking an iterator pair (begin/end).
*
* \param cmd Redis command.
* \param key Redis key.
* \param range Range to send, e.g. a \c std::map.
*/
template <class Key, class Range>
void push_range(std::string_view cmd, Key const& key, Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
using std::end;
push_range(cmd, key, begin(range), end(range));
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking an iterator pair (begin/end).
*
* \param cmd Redis command.
* \param range Range to send, e.g. a \c std::map.
*/
template <class Range>
void push_range(std::string_view cmd, Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
using std::end;
push_range(cmd, begin(range), end(range));
}
private:
void check_cmd(std::string_view cmd)
{
if (!detail::has_push_response(cmd))
++commands_;
if (cmd == "HELLO")
has_hello_priority_ = cfg_.hello_with_priority;
}
config cfg_;
std::pmr::string payload_;
std::size_t commands_ = 0;
bool has_hello_priority_ = false;
};
} // aedis::resp3
#endif // AEDIS_RESP3_REQUEST_HPP
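Editor's note: a short usage sketch for the config block documented above (not part of the diff): a request that is never coalesced with others and fails fast when issued before the connection is established.

#include <aedis/resp3/request.hpp>

aedis::resp3::request make_fail_fast_ping()
{
   aedis::resp3::request::config cfg;   // Starts from the defaults.
   cfg.coalesce = false;                // Send this request on its own.
   cfg.cancel_if_not_connected = true;  // Fail instead of queueing.

   aedis::resp3::request req{cfg};
   req.push("PING", "fail-fast");
   return req;
}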


@@ -1,87 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_TYPE_HPP
#define AEDIS_RESP3_TYPE_HPP
#include <ostream>
#include <vector>
#include <string>
namespace aedis::resp3 {
/** \brief RESP3 data types.
\ingroup high-level-api
The RESP3 specification can be found at https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md.
*/
enum class type
{ /// Aggregate
array,
/// Aggregate
push,
/// Aggregate
set,
/// Aggregate
map,
/// Aggregate
attribute,
/// Simple
simple_string,
/// Simple
simple_error,
/// Simple
number,
/// Simple
doublean,
/// Simple
boolean,
/// Simple
big_number,
/// Simple
null,
/// Simple
blob_error,
/// Simple
verbatim_string,
/// Simple
blob_string,
/// Simple
streamed_string_part,
/// Invalid
invalid
};
/** \brief Converts the data type to a string.
* \ingroup high-level-api
* \param t RESP3 type.
*/
auto to_string(type t) -> char const*;
/** \brief Writes the type to the output stream.
* \ingroup high-level-api
* \param os Output stream.
* \param t RESP3 type.
*/
auto operator<<(std::ostream& os, type t) -> std::ostream&;
/* Checks whether the data type is an aggregate.
*/
auto is_aggregate(type t) -> bool;
// For map and attribute data types this function returns 2. All
// other types have value 1.
auto element_multiplicity(type t) -> std::size_t;
// Returns the wire code of a given type.
auto to_code(type t) -> char;
// Converts a wire-format RESP3 type (char) to a resp3 type.
auto to_type(char c) -> type;
} // aedis::resp3
#endif // AEDIS_RESP3_TYPE_HPP
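Editor's note: the helpers declared above are easiest to grasp through a few assertions; this is a small sketch assuming the definitions in the corresponding .ipp file.

#include <cassert>
#include <aedis/resp3/type.hpp>

void resp3_type_helpers()
{
   using aedis::resp3::type;

   assert(aedis::resp3::is_aggregate(type::map));
   assert(!aedis::resp3::is_aggregate(type::blob_string));

   // Maps and attributes carry two wire elements (key and value) per entry.
   assert(aedis::resp3::element_multiplicity(type::map) == 2);
   assert(aedis::resp3::element_multiplicity(type::array) == 1);

   // '%' introduces a map on the wire in RESP3.
   assert(aedis::resp3::to_type('%') == type::map);
}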


@@ -1,64 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_WRITE_HPP
#define AEDIS_RESP3_WRITE_HPP
#include <boost/asio/write.hpp>
namespace aedis::resp3 {
/** \brief Writes a request synchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
*/
template<
class SyncWriteStream,
class Request
>
auto write(SyncWriteStream& stream, Request const& req)
{
return boost::asio::write(stream, boost::asio::buffer(req.payload()));
}
template<
class SyncWriteStream,
class Request
>
auto write(
SyncWriteStream& stream,
Request const& req,
boost::system::error_code& ec)
{
return boost::asio::write(stream, boost::asio::buffer(req.payload()), ec);
}
/** \brief Writes a request asynchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
* \param token Asio completion token.
*/
template<
class AsyncWriteStream,
class Request,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncWriteStream::executor_type>
>
auto async_write(
AsyncWriteStream& stream,
Request const& req,
CompletionToken&& token =
boost::asio::default_completion_token_t<typename AsyncWriteStream::executor_type>{})
{
return boost::asio::async_write(stream, boost::asio::buffer(req.payload()), token);
}
} // aedis::resp3
#endif // AEDIS_RESP3_WRITE_HPP
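Editor's note: putting the synchronous write above together with the synchronous read shown earlier in this diff, a hedged sketch of a blocking PING; socket setup and error handling are omitted and the default adapter simply discards the reply.

#include <string>
#include <boost/asio/buffer.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/resp3/write.hpp>

void blocking_ping(boost::asio::ip::tcp::socket& socket)
{
   aedis::resp3::request req;
   req.push("PING");

   // Write the serialized request to the socket.
   aedis::resp3::write(socket, req);

   // Read the reply with the default ignore_response adapter: it is parsed,
   // validated and then discarded.
   std::string buffer;
   aedis::resp3::read(socket, boost::asio::dynamic_buffer(buffer));
}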


@@ -1,9 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/impl/error.ipp>
#include <aedis/resp3/impl/request.ipp>
#include <aedis/resp3/impl/type.ipp>


@@ -1,159 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SSL_CONNECTION_HPP
#define AEDIS_SSL_CONNECTION_HPP
#include <chrono>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <aedis/detail/connection_base.hpp>
namespace aedis::ssl {
template <class>
class basic_connection;
/** \brief An SSL connection to the Redis server.
* \ingroup high-level-api
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @tparam AsyncReadWriteStream A stream that supports reading and
* writing.
*
*/
template <class AsyncReadWriteStream>
class basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>> :
private aedis::detail::connection_base<
typename boost::asio::ssl::stream<AsyncReadWriteStream>::executor_type,
basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>>> {
public:
/// Type of the next layer
using next_layer_type = boost::asio::ssl::stream<AsyncReadWriteStream>;
/// Executor type.
using executor_type = typename next_layer_type::executor_type;
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The socket type when rebound to the specified executor.
using other = basic_connection<boost::asio::ssl::stream<typename AsyncReadWriteStream::template rebind_executor<Executor1>::other>>;
};
using base_type = aedis::detail::connection_base<executor_type, basic_connection<boost::asio::ssl::stream<AsyncReadWriteStream>>>;
/// Constructor
explicit
basic_connection(
executor_type ex,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: base_type{ex, resource}
, stream_{ex, ctx}
{ }
/// Constructor
explicit
basic_connection(
boost::asio::io_context& ioc,
boost::asio::ssl::context& ctx,
std::pmr::memory_resource* resource = std::pmr::get_default_resource())
: basic_connection(ioc.get_executor(), ctx, resource)
{ }
/// Returns the associated executor.
auto get_executor() {return stream_.get_executor();}
/// Reset the underlying stream.
void reset_stream(boost::asio::ssl::context& ctx)
{
stream_ = next_layer_type{stream_.get_executor(), ctx};
}
/// Returns a reference to the next layer.
auto& next_layer() noexcept { return stream_; }
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept { return stream_; }
/** @brief Establishes a connection with the Redis server asynchronously.
*
* See aedis::connection::async_run for more information.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(CompletionToken token = CompletionToken{})
{
return base_type::async_run(std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
*
* See aedis::connection::async_exec for more information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_exec(req, adapter, std::move(token));
}
/** @brief Receives server side pushes asynchronously.
*
* See aedis::connection::async_receive for detailed information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_receive(adapter, std::move(token));
}
/** @brief Cancel operations.
*
* See aedis::connection::cancel for more information.
*/
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
private:
using this_type = basic_connection<next_layer_type>;
template <class, class> friend class aedis::detail::connection_base;
template <class, class> friend struct aedis::detail::exec_op;
template <class, class> friend struct aedis::detail::exec_read_op;
template <class> friend struct aedis::detail::run_op;
template <class> friend struct aedis::detail::writer_op;
template <class> friend struct aedis::detail::reader_op;
auto is_open() const noexcept { return stream_.next_layer().is_open(); }
void close() { stream_.next_layer().close(); }
next_layer_type stream_;
};
/** \brief A connection that uses a boost::asio::ssl::stream<boost::asio::ip::tcp::socket>.
* \ingroup high-level-api
*/
using connection = basic_connection<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>>;
} // aedis::ssl
#endif // AEDIS_SSL_CONNECTION_HPP

include/boost/redis.hpp

@@ -0,0 +1,28 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_HPP
#define BOOST_REDIS_HPP
#include <boost/redis/config.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/connection.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/ignore.hpp>
#include <boost/redis/logger.hpp>
/** @defgroup high-level-api Reference
*
* This page contains the documentation of the Aedis high-level API.
*/
/** @defgroup low-level-api Reference
*
* This page contains the documentation of the Aedis low-level API.
*/
#endif // BOOST_REDIS_HPP


@@ -0,0 +1,80 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_ADAPT_HPP
#define BOOST_REDIS_ADAPTER_ADAPT_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/adapter/detail/result_traits.hpp>
#include <boost/redis/adapter/detail/response_traits.hpp>
#include <boost/mp11.hpp>
#include <boost/system.hpp>
#include <tuple>
#include <limits>
#include <string_view>
#include <variant>
namespace boost::redis::adapter
{
/** @brief Adapts a type to be used as a response.
*
* The type T must be either
*
* 1. a response<T1, T2, T3, ...> or
* 2. std::vector<node<String>>
*
* The types T1, T2, etc can be any STL container, any integer type
* and `std::string`.
*
* @param t Tuple containing the responses.
*/
template<class T>
auto boost_redis_adapt(T& t) noexcept
{
return detail::response_traits<T>::adapt(t);
}
/** @brief Adapts user data to read operations.
* @ingroup low-level-api
*
* STL containers, \c resp3::response and built-in types are supported and
* can be used in conjunction with \c std::optional<T>.
*
* Example usage:
*
* @code
* std::unordered_map<std::string, std::string> cont;
* co_await async_read(socket, buffer, adapt(cont));
* @endcode
*
* For a transaction
*
* @code
* sr.push(command::multi);
* sr.push(command::ping, ...);
* sr.push(command::incr, ...);
* sr.push_range(command::rpush, ...);
* sr.push(command::lrange, ...);
* sr.push(command::incr, ...);
* sr.push(command::exec);
*
* co_await async_write(socket, buffer(request));
*
* // Reads the response to a transaction
* resp3::response<std::string, int, int, std::vector<std::string>, int> execs;
* co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(execs));
* @endcode
*/
template<class T>
auto adapt2(T& t = redis::ignore) noexcept
{ return detail::result_traits<T>::adapt(t); }
} // boost::redis::adapter
#endif // BOOST_REDIS_ADAPTER_ADAPT_HPP
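Editor's note: a minimal sketch of the contract described above (not part of the diff): adapt a response<> so it can be handed to the read/exec machinery.

#include <string>
#include <boost/redis/adapter/adapt.hpp>
#include <boost/redis/response.hpp>

void build_adapter()
{
   // One slot per command in the pipeline, e.g. a PING followed by an INCR.
   boost::redis::response<std::string, int> resp;

   auto adapter = boost::redis::adapter::boost_redis_adapt(resp);
   (void)adapter; // Normally forwarded to the connection internals.
}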


@@ -1,11 +1,18 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_ADAPTERS_HPP
#define AEDIS_ADAPTER_ADAPTERS_HPP
#ifndef BOOST_REDIS_ADAPTER_ADAPTERS_HPP
#define BOOST_REDIS_ADAPTER_ADAPTERS_HPP
#include <boost/redis/error.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/resp3/serialization.hpp>
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/adapter/result.hpp>
#include <boost/assert.hpp>
#include <set>
#include <optional>
@@ -21,63 +28,63 @@
#include <string_view>
#include <charconv>
#include <boost/assert.hpp>
// See https://stackoverflow.com/a/31658120/1077832
#include<ciso646>
#ifdef _LIBCPP_VERSION
#else
#include <cstdlib>
#endif
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/node.hpp>
namespace aedis::adapter::detail {
namespace boost::redis::adapter::detail
{
// Serialization.
template <class T>
auto from_bulk(T& i, std::string_view sv, boost::system::error_code& ec) -> typename std::enable_if<std::is_integral<T>::value, void>::type
auto boost_redis_from_bulk(T& i, std::string_view sv, system::error_code& ec) -> typename std::enable_if<std::is_integral<T>::value, void>::type
{
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
if (res.ec != std::errc())
ec = error::not_a_number;
ec = redis::error::not_a_number;
}
inline
void from_bulk(bool& t, std::string_view sv, boost::system::error_code&)
void boost_redis_from_bulk(bool& t, std::string_view sv, system::error_code&)
{
t = *sv.data() == 't';
}
inline
void from_bulk(double& d, std::string_view sv, boost::system::error_code& ec)
void boost_redis_from_bulk(double& d, std::string_view sv, system::error_code& ec)
{
#ifdef _LIBCPP_VERSION
// The string in sv is not null terminated and we also don't know
// if there is enough space at the end for a null char. The easiest
// thing to do is to create a temporary.
std::string const tmp{sv.data(), sv.data() + std::size(sv)};
char* end{};
d = std::strtod(tmp.data(), &end);
if (d == HUGE_VAL || d == 0)
ec = redis::error::not_a_double;
#else
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), d);
if (res.ec != std::errc())
ec = error::not_a_double;
ec = redis::error::not_a_double;
#endif // _LIBCPP_VERSION
}
template <class CharT, class Traits, class Allocator>
void
from_bulk(
boost_redis_from_bulk(
std::basic_string<CharT, Traits, Allocator>& s,
std::string_view sv,
boost::system::error_code&)
system::error_code&)
{
s.append(sv.data(), sv.size());
}
//================================================
inline
void set_on_resp3_error(resp3::type t, boost::system::error_code& ec)
{
switch (t) {
case resp3::type::simple_error: ec = error::resp3_simple_error; return;
case resp3::type::blob_error: ec = error::resp3_blob_error; return;
case resp3::type::null: ec = error::resp3_null; return;
default: return;
}
}
template <class Result>
class general_aggregate {
private:
@@ -85,9 +92,18 @@ private:
public:
explicit general_aggregate(Result* c = nullptr): result_(c) {}
void operator()(resp3::node<std::string_view> const& n, boost::system::error_code&)
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code&)
{
result_->push_back({n.data_type, n.aggregate_size, n.depth, std::string{std::cbegin(n.value), std::cend(n.value)}});
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
switch (nd.data_type) {
case resp3::type::blob_error:
case resp3::type::simple_error:
*result_ = error{nd.data_type, std::string{std::cbegin(nd.value), std::cend(nd.value)}};
break;
default:
result_->value().push_back({nd.data_type, nd.aggregate_size, nd.depth, std::string{std::cbegin(nd.value), std::cend(nd.value)}});
}
}
};
@@ -99,13 +115,21 @@ private:
public:
explicit general_simple(Node* t = nullptr) : result_(t) {}
void operator()(resp3::node<std::string_view> const& n, boost::system::error_code& ec)
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code&)
{
result_->data_type = n.data_type;
result_->aggregate_size = n.aggregate_size;
result_->depth = n.depth;
result_->value.assign(n.value.data(), n.value.size());
set_on_resp3_error(n.data_type, ec);
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
switch (nd.data_type) {
case resp3::type::blob_error:
case resp3::type::simple_error:
*result_ = error{nd.data_type, std::string{std::cbegin(nd.value), std::cend(nd.value)}};
break;
default:
result_->value().data_type = nd.data_type;
result_->value().aggregate_size = nd.aggregate_size;
result_->value().depth = nd.depth;
result_->value().value.assign(nd.value.data(), nd.value.size());
}
}
};
@@ -114,22 +138,15 @@ class simple_impl {
public:
void on_value_available(Result&) {}
void
operator()(
Result& result,
resp3::node<std::string_view> const& n,
boost::system::error_code& ec)
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& n, system::error_code& ec)
{
set_on_resp3_error(n.data_type, ec);
if (ec)
return;
if (is_aggregate(n.data_type)) {
ec = error::expects_resp3_simple_type;
ec = redis::error::expects_resp3_simple_type;
return;
}
from_bulk(result, n.value, ec);
boost_redis_from_bulk(result, n.value, ec);
}
};
@@ -142,31 +159,24 @@ public:
void on_value_available(Result& result)
{ hint_ = std::end(result); }
void
operator()(
Result& result,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
if (nd.data_type != resp3::type::set)
ec = error::expects_resp3_set;
ec = redis::error::expects_resp3_set;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_resp3_set;
ec = redis::error::expects_resp3_set;
return;
}
typename Result::key_type obj;
from_bulk(obj, nd.value, ec);
boost_redis_from_bulk(obj, nd.value, ec);
hint_ = result.insert(hint_, std::move(obj));
}
};
@@ -181,36 +191,29 @@ public:
void on_value_available(Result& result)
{ current_ = std::end(result); }
void
operator()(
Result& result,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
if (element_multiplicity(nd.data_type) != 2)
ec = error::expects_resp3_map;
ec = redis::error::expects_resp3_map;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_resp3_map;
ec = redis::error::expects_resp3_map;
return;
}
if (on_key_) {
typename Result::key_type obj;
from_bulk(obj, nd.value, ec);
boost_redis_from_bulk(obj, nd.value, ec);
current_ = result.insert(current_, {std::move(obj), {}});
} else {
typename Result::mapped_type obj;
from_bulk(obj, nd.value, ec);
boost_redis_from_bulk(obj, nd.value, ec);
current_->second = std::move(obj);
}
@@ -223,22 +226,15 @@ class vector_impl {
public:
void on_value_available(Result& ) { }
void
operator()(
Result& result,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
auto const m = element_multiplicity(nd.data_type);
result.reserve(result.size() + m * nd.aggregate_size);
} else {
result.push_back({});
from_bulk(result.back(), nd.value, ec);
boost_redis_from_bulk(result.back(), nd.value, ec);
}
}
};
@@ -251,34 +247,27 @@ private:
public:
void on_value_available(Result& ) { }
void
operator()(
Result& result,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
if (i_ != -1) {
ec = error::nested_aggregate_not_supported;
ec = redis::error::nested_aggregate_not_supported;
return;
}
if (result.size() != nd.aggregate_size * element_multiplicity(nd.data_type)) {
ec = error::incompatible_size;
ec = redis::error::incompatible_size;
return;
}
} else {
if (i_ == -1) {
ec = error::expects_resp3_aggregate;
ec = redis::error::expects_resp3_aggregate;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
from_bulk(result.at(i_), nd.value, ec);
boost_redis_from_bulk(result.at(i_), nd.value, ec);
}
++i_;
@@ -290,25 +279,18 @@ struct list_impl {
void on_value_available(Result& ) { }
void
operator()(
Result& result,
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (!is_aggregate(nd.data_type)) {
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_resp3_aggregate;
ec = redis::error::expects_resp3_aggregate;
return;
}
result.push_back({});
from_bulk(result.back(), nd.value, ec);
boost_redis_from_bulk(result.back(), nd.value, ec);
}
}
};
@@ -356,52 +338,107 @@ struct impl_map<std::deque<T, Allocator>> { using type = list_impl<std::deque<T,
//---------------------------------------------------
template <class>
class wrapper;
template <class Result>
class wrapper {
class wrapper<result<Result>> {
public:
using response_type = result<Result>;
private:
Result* result_;
response_type* result_;
typename impl_map<Result>::type impl_;
public:
explicit wrapper(Result* t = nullptr) : result_(t)
{ impl_.on_value_available(*result_); }
void
operator()(
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
bool set_if_resp3_error(resp3::basic_node<String> const& nd) noexcept
{
BOOST_ASSERT(result_);
impl_(*result_, nd, ec);
switch (nd.data_type) {
case resp3::type::null:
case resp3::type::simple_error:
case resp3::type::blob_error:
*result_ = error{nd.data_type, {std::cbegin(nd.value), std::cend(nd.value)}};
return true;
default:
return false;
}
}
};
template <class T>
class wrapper<std::optional<T>> {
private:
std::optional<T>* result_;
typename impl_map<T>::type impl_{};
public:
explicit wrapper(std::optional<T>* o = nullptr) : result_(o) {}
void
operator()(
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
explicit wrapper(response_type* t = nullptr) : result_(t)
{
if (nd.data_type == resp3::type::null)
if (result_) {
result_->value() = Result{};
impl_.on_value_available(result_->value());
}
}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code& ec)
{
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
if (result_->has_error())
return;
if (!result_->has_value()) {
*result_ = T{};
impl_.on_value_available(result_->value());
}
if (set_if_resp3_error(nd))
return;
BOOST_ASSERT(result_);
impl_(result_->value(), nd, ec);
}
};
} // aedis::adapter::detail
template <class T>
class wrapper<result<std::optional<T>>> {
public:
using response_type = result<std::optional<T>>;
#endif // AEDIS_ADAPTER_ADAPTERS_HPP
private:
response_type* result_;
typename impl_map<T>::type impl_{};
template <class String>
bool set_if_resp3_error(resp3::basic_node<String> const& nd) noexcept
{
switch (nd.data_type) {
case resp3::type::blob_error:
case resp3::type::simple_error:
*result_ = error{nd.data_type, {std::cbegin(nd.value), std::cend(nd.value)}};
return true;
default:
return false;
}
}
public:
explicit wrapper(response_type* o = nullptr) : result_(o) {}
template <class String>
void
operator()(
resp3::basic_node<String> const& nd,
system::error_code& ec)
{
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
if (result_->has_error())
return;
if (set_if_resp3_error(nd))
return;
if (nd.data_type == resp3::type::null)
return;
if (!result_->value().has_value()) {
result_->value() = T{};
impl_.on_value_available(result_->value().value());
}
impl_(result_->value().value(), nd, ec);
}
};
} // boost::redis::adapter::detail
#endif // BOOST_REDIS_ADAPTER_ADAPTERS_HPP
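Editor's note: the unqualified calls to boost_redis_from_bulk above suggest an ADL extension point for user-defined response elements. This is a hedged sketch of how a user type could opt in; the namespace and type below are illustrative assumptions, not part of the diff.

#include <charconv>
#include <string_view>
#include <boost/redis/error.hpp>
#include <boost/system/error_code.hpp>

namespace myapp {

struct timestamp { long long ms = 0; };

// Found via ADL from the adapter internals shown above; mirrors the integer
// overload defined in this header.
inline void boost_redis_from_bulk(timestamp& t, std::string_view sv,
                                  boost::system::error_code& ec)
{
   auto const res = std::from_chars(sv.data(), sv.data() + sv.size(), t.ms);
   if (res.ec != std::errc())
      ec = boost::redis::error::not_a_number;
}

} // namespace myapp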


@@ -0,0 +1,159 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_DETAIL_RESPONSE_TRAITS_HPP
#define BOOST_REDIS_ADAPTER_DETAIL_RESPONSE_TRAITS_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/adapter/detail/result_traits.hpp>
#include <boost/mp11.hpp>
#include <boost/system.hpp>
#include <tuple>
#include <limits>
#include <string_view>
#include <variant>
namespace boost::redis::adapter::detail
{
class ignore_adapter {
public:
template <class String>
void operator()(std::size_t, resp3::basic_node<String> const& nd, system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = redis::error::resp3_simple_error; break;
case resp3::type::blob_error: ec = redis::error::resp3_blob_error; break;
case resp3::type::null: ec = redis::error::resp3_null; break;
default:;
}
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
};
template <class Response>
class static_adapter {
private:
static constexpr auto size = std::tuple_size<Response>::value;
using adapter_tuple = mp11::mp_transform<adapter_t, Response>;
using variant_type = mp11::mp_rename<adapter_tuple, std::variant>;
using adapters_array_type = std::array<variant_type, size>;
adapters_array_type adapters_;
public:
explicit static_adapter(Response& r)
{
assigner<size - 1>::assign(adapters_, r);
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return size;}
template <class String>
void operator()(std::size_t i, resp3::basic_node<String> const& nd, system::error_code& ec)
{
using std::visit;
// I am unsure whether this should be an error or an assertion.
BOOST_ASSERT(i < adapters_.size());
visit([&](auto& arg){arg(nd, ec);}, adapters_.at(i));
}
};
template <class Vector>
class vector_adapter {
private:
using adapter_type = typename result_traits<Vector>::adapter_type;
adapter_type adapter_;
public:
explicit vector_adapter(Vector& v)
: adapter_{internal_adapt(v)}
{ }
[[nodiscard]]
auto
get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
template <class String>
void operator()(std::size_t, resp3::basic_node<String> const& nd, system::error_code& ec)
{
adapter_(nd, ec);
}
};
template <class>
struct response_traits;
template <>
struct response_traits<ignore_t> {
using response_type = ignore_t;
using adapter_type = detail::ignore_adapter;
static auto adapt(response_type&) noexcept
{ return detail::ignore_adapter{}; }
};
template <>
struct response_traits<result<ignore_t>> {
using response_type = result<ignore_t>;
using adapter_type = detail::ignore_adapter;
static auto adapt(response_type&) noexcept
{ return detail::ignore_adapter{}; }
};
template <class String, class Allocator>
struct response_traits<result<std::vector<resp3::basic_node<String>, Allocator>>> {
using response_type = result<std::vector<resp3::basic_node<String>, Allocator>>;
using adapter_type = vector_adapter<response_type>;
static auto adapt(response_type& v) noexcept
{ return adapter_type{v}; }
};
template <class ...Ts>
struct response_traits<response<Ts...>> {
using response_type = response<Ts...>;
using adapter_type = static_adapter<response_type>;
static auto adapt(response_type& r) noexcept
{ return adapter_type{r}; }
};
template <class Adapter>
class wrapper {
public:
explicit wrapper(Adapter adapter) : adapter_{adapter} {}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code& ec)
{ return adapter_(0, nd, ec); }
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return adapter_.get_supported_response_size();}
private:
Adapter adapter_;
};
template <class Adapter>
auto make_adapter_wrapper(Adapter adapter)
{
return wrapper{adapter};
}
} // boost::redis::adapter::detail
#endif // BOOST_REDIS_ADAPTER_DETAIL_RESPONSE_TRAITS_HPP


@@ -1,74 +1,73 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_RESPONSE_TRAITS_HPP
#define AEDIS_ADAPTER_RESPONSE_TRAITS_HPP
#ifndef BOOST_REDIS_ADAPTER_RESPONSE_TRAITS_HPP
#define BOOST_REDIS_ADAPTER_RESPONSE_TRAITS_HPP
#include <boost/redis/error.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/ignore.hpp>
#include <boost/redis/adapter/detail/adapters.hpp>
#include <boost/redis/adapter/result.hpp>
#include <boost/redis/adapter/ignore.hpp>
#include <boost/mp11.hpp>
#include <vector>
#include <tuple>
#include <string_view>
#include <variant>
#include <boost/mp11.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/adapter/detail/adapters.hpp>
namespace aedis::adapter::detail {
using ignore = std::decay_t<decltype(std::ignore)>;
namespace boost::redis::adapter::detail
{
/* Traits class for response objects.
*
* Provides traits for all supported response types, i.e. all STL
* containers and C++ built-in types.
*/
template <class ResponseType>
struct response_traits {
using adapter_type = adapter::detail::wrapper<typename std::decay<ResponseType>::type>;
static auto adapt(ResponseType& r) noexcept { return adapter_type{&r}; }
template <class Result>
struct result_traits {
using adapter_type = adapter::detail::wrapper<typename std::decay<Result>::type>;
static auto adapt(Result& r) noexcept { return adapter_type{&r}; }
};
template <>
struct response_traits<ignore> {
using response_type = ignore;
using adapter_type = resp3::detail::ignore_response;
struct result_traits<result<ignore_t>> {
using response_type = result<ignore_t>;
using adapter_type = ignore;
static auto adapt(response_type) noexcept { return adapter_type{}; }
};
template <>
struct result_traits<ignore_t> {
using response_type = ignore_t;
using adapter_type = ignore;
static auto adapt(response_type) noexcept { return adapter_type{}; }
};
template <class T>
struct response_traits<resp3::node<T>> {
using response_type = resp3::node<T>;
struct result_traits<result<resp3::basic_node<T>>> {
using response_type = result<resp3::basic_node<T>>;
using adapter_type = adapter::detail::general_simple<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <class String, class Allocator>
struct response_traits<std::vector<resp3::node<String>, Allocator>> {
using response_type = std::vector<resp3::node<String>, Allocator>;
struct result_traits<result<std::vector<resp3::basic_node<String>, Allocator>>> {
using response_type = result<std::vector<resp3::basic_node<String>, Allocator>>;
using adapter_type = adapter::detail::general_aggregate<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <>
struct response_traits<void> {
using response_type = void;
using adapter_type = resp3::detail::ignore_response;
static auto adapt() noexcept { return adapter_type{}; }
};
template <class T>
using adapter_t = typename response_traits<std::decay_t<T>>::adapter_type;
using adapter_t = typename result_traits<std::decay_t<T>>::adapter_type;
// Duplicated here to avoid circular include dependency.
template<class T>
auto internal_adapt(T& t) noexcept
{ return response_traits<std::decay_t<T>>::adapt(t); }
{ return result_traits<std::decay_t<T>>::adapt(t); }
template <std::size_t N>
struct assigner {
@@ -90,12 +89,15 @@ struct assigner<0> {
};
template <class Tuple>
class static_aggregate_adapter {
class static_aggregate_adapter;
template <class Tuple>
class static_aggregate_adapter<result<Tuple>> {
private:
using adapters_array_type =
std::array<
boost::mp11::mp_rename<
boost::mp11::mp_transform<
mp11::mp_rename<
mp11::mp_transform<
adapter_t, Tuple>,
std::variant>,
std::tuple_size<Tuple>::value>;
@@ -103,14 +105,19 @@ private:
std::size_t i_ = 0;
std::size_t aggregate_size_ = 0;
adapters_array_type adapters_;
result<Tuple>* res_ = nullptr;
public:
explicit static_aggregate_adapter(Tuple* r = nullptr)
explicit static_aggregate_adapter(result<Tuple>* r = nullptr)
{
detail::assigner<std::tuple_size<Tuple>::value - 1>::assign(adapters_, *r);
if (r) {
res_ = r;
detail::assigner<std::tuple_size<Tuple>::value - 1>::assign(adapters_, r->value());
}
}
void count(resp3::node<std::string_view> const& nd)
template <class String>
void count(resp3::basic_node<String> const& nd)
{
if (nd.depth == 1) {
if (is_aggregate(nd.data_type))
@@ -125,17 +132,15 @@ public:
++i_;
}
void
operator()(
resp3::node<std::string_view> const& nd,
boost::system::error_code& ec)
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code& ec)
{
using std::visit;
if (nd.depth == 0) {
auto const real_aggr_size = nd.aggregate_size * element_multiplicity(nd.data_type);
if (real_aggr_size != std::tuple_size<Tuple>::value)
ec = error::incompatible_size;
ec = redis::error::incompatible_size;
return;
}
@@ -146,13 +151,13 @@ public:
};
template <class... Ts>
struct response_traits<std::tuple<Ts...>>
struct result_traits<result<std::tuple<Ts...>>>
{
using response_type = std::tuple<Ts...>;
using response_type = result<std::tuple<Ts...>>;
using adapter_type = static_aggregate_adapter<response_type>;
static auto adapt(response_type& r) noexcept { return adapter_type{&r}; }
};
} // aedis::adapter::detail
} // boost::redis::adapter::detail
#endif // AEDIS_ADAPTER_RESPONSE_TRAITS_HPP
#endif // BOOST_REDIS_ADAPTER_RESPONSE_TRAITS_HPP


@@ -0,0 +1,37 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_IGNORE_HPP
#define BOOST_REDIS_ADAPTER_IGNORE_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/error.hpp>
#include <boost/system/error_code.hpp>
#include <string>
namespace boost::redis::adapter
{
/** @brief An adapter that ignores responses
* @ingroup high-level-api
*
* RESP3 errors won't be ignored.
*/
struct ignore {
void operator()(resp3::basic_node<std::string_view> const& nd, system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = redis::error::resp3_simple_error; break;
case resp3::type::blob_error: ec = redis::error::resp3_blob_error; break;
case resp3::type::null: ec = redis::error::resp3_null; break;
default:;
}
}
};
} // boost::redis::adapter
#endif // BOOST_REDIS_ADAPTER_IGNORE_HPP
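Editor's note: a tiny sketch exercising the adapter above directly, showing that RESP3 errors are surfaced as error codes rather than swallowed; the diagnostic string is made up for illustration.

#include <cassert>
#include <boost/redis/adapter/ignore.hpp>
#include <boost/redis/error.hpp>

void ignore_still_reports_errors()
{
   boost::redis::adapter::ignore ig;
   boost::system::error_code ec;

   // Feed a simple error node by hand; a real read operation would do this.
   ig({boost::redis::resp3::type::simple_error, 1, 0, "ERR oops"}, ec);
   assert(ec == boost::redis::error::resp3_simple_error);
}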


@@ -0,0 +1,81 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_RESULT_HPP
#define BOOST_REDIS_ADAPTER_RESULT_HPP
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/error.hpp>
#include <boost/system/result.hpp>
#include <string>
namespace boost::redis::adapter
{
/** @brief Stores any resp3 error
* @ingroup high-level-api
*/
struct error {
/// RESP3 error data type.
resp3::type data_type = resp3::type::invalid;
/// Diagnostic error message sent by Redis.
std::string diagnostic;
};
/** @brief Compares two error objects for equality
* @relates error
*
* @param a Left hand side error object.
* @param b Right hand side error object.
*/
inline bool operator==(error const& a, error const& b)
{
return a.data_type == b.data_type && a.diagnostic == b.diagnostic;
}
/** @brief Compares two error objects for inequality
* @relates error
*
* @param a Left hand side error object.
* @param b Right hand side error object.
*/
inline bool operator!=(error const& a, error const& b)
{
return !(a == b);
}
/** @brief Stores response to individual Redis commands
* @ingroup high-level-api
*/
template <class Value>
using result = system::result<Value, error>;
BOOST_NORETURN inline void
throw_exception_from_error(error const & e, boost::source_location const &)
{
system::error_code ec;
switch (e.data_type) {
case resp3::type::simple_error:
ec = redis::error::resp3_simple_error;
break;
case resp3::type::blob_error:
ec = redis::error::resp3_blob_error;
break;
case resp3::type::null:
ec = redis::error::resp3_null;
break;
default:
BOOST_ASSERT_MSG(false, "Unexpected data type.");
}
throw system::system_error(ec, e.diagnostic);
}
} // boost::redis::adapter
#endif // BOOST_REDIS_ADAPTER_RESULT_HPP
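Editor's note: a hedged sketch of consuming a result<T> as defined above; calling value() on an error throws via throw_exception_from_error, so checking first keeps this exception-free.

#include <iostream>
#include <string>
#include <boost/redis/adapter/result.hpp>

void print(boost::redis::adapter::result<std::string> const& res)
{
   if (res.has_value())
      std::cout << "value: " << res.value() << "\n";
   else
      std::cout << "error: " << res.error().diagnostic << "\n";
}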


@@ -0,0 +1,85 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONFIG_HPP
#define BOOST_REDIS_CONFIG_HPP
#include <string>
#include <chrono>
#include <optional>
namespace boost::redis
{
/** @brief Address of a Redis server
* @ingroup high-level-api
*/
struct address {
/// Redis host.
std::string host = "127.0.0.1";
/// Redis port.
std::string port = "6379";
};
/** @brief Configure parameters used by the connection classes
* @ingroup high-level-api
*/
struct config {
/// Uses SSL instead of a plain connection.
bool use_ssl = false;
/// Address of the Redis server.
address addr = address{"127.0.0.1", "6379"};
/** @brief Username passed to the
* [HELLO](https://redis.io/commands/hello/) command. If left
* empty `HELLO` will be sent without authentication parameters.
*/
std::string username = "default";
/** @brief Password passed to the
* [HELLO](https://redis.io/commands/hello/) command. If left
* empty `HELLO` will be sent without authentication parameters.
*/
std::string password;
/// Client name parameter of the [HELLO](https://redis.io/commands/hello/) command.
std::string clientname = "Boost.Redis";
/// Database index that will be passed to the [SELECT](https://redis.io/commands/select/) command.
std::optional<int> database_index = 0;
/// Message used by the health-checker in `boost::redis::connection::async_run`.
std::string health_check_id = "Boost.Redis";
/// Logger prefix, see `boost::redis::logger`.
std::string log_prefix = "(Boost.Redis) ";
/// Time the resolve operation is allowed to last.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Time the connect operation is allowed to last.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Time the SSL handshake operation is allowed to last.
std::chrono::steady_clock::duration ssl_handshake_timeout = std::chrono::seconds{10};
/** @brief Health check interval.
*
* To disable health-checks pass zero as duration.
*/
std::chrono::steady_clock::duration health_check_interval = std::chrono::seconds{2};
/** @brief Time waited before trying a reconnection.
*
* To disable reconnection pass zero as duration.
*/
std::chrono::steady_clock::duration reconnect_wait_interval = std::chrono::seconds{1};
};
} // boost::redis
#endif // BOOST_REDIS_CONFIG_HPP
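Editor's note: a short sketch filling in a few of the fields documented above; the host, credentials and intervals are placeholders, not recommended values.

#include <chrono>
#include <boost/redis/config.hpp>

boost::redis::config make_config()
{
   boost::redis::config cfg;
   cfg.addr = {"redis.example.org", "6380"};
   cfg.username = "app";
   cfg.password = "secret";
   cfg.health_check_interval = std::chrono::seconds{5};
   cfg.reconnect_wait_interval = std::chrono::seconds{2};
   return cfg;
}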


@@ -0,0 +1,444 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONNECTION_HPP
#define BOOST_REDIS_CONNECTION_HPP
#include <boost/redis/detail/connection_base.hpp>
#include <boost/redis/logger.hpp>
#include <boost/redis/config.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/any_completion_handler.hpp>
#include <chrono>
#include <memory>
#include <limits>
namespace boost::redis {
namespace detail
{
template <class Connection, class Logger>
struct reconnection_op {
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {})
{
BOOST_ASIO_CORO_REENTER (coro_) for (;;)
{
BOOST_ASIO_CORO_YIELD
conn_->impl_.async_run(conn_->cfg_, logger_, std::move(self));
conn_->cancel(operation::receive);
logger_.on_connection_lost(ec);
if (!conn_->will_reconnect() || is_cancelled(self)) {
conn_->cancel(operation::reconnection);
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
conn_->timer_.expires_after(conn_->cfg_.reconnect_wait_interval);
BOOST_ASIO_CORO_YIELD
conn_->timer_.async_wait(std::move(self));
BOOST_REDIS_CHECK_OP0(;)
if (!conn_->will_reconnect()) {
self.complete(asio::error::operation_aborted);
return;
}
conn_->reset_stream();
}
}
};
} // detail
/** @brief A connection to the Redis server.
* @ingroup high-level-api
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @tparam Executor The executor type.
*
*/
template <class Executor>
class basic_connection {
public:
/// Executor type.
using executor_type = Executor;
/// Returns the underlying executor.
executor_type get_executor() noexcept
{ return impl_.get_executor(); }
/// Rebinds the connection type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The connection type when rebound to the specified executor.
using other = basic_connection<Executor1>;
};
/** @brief Constructor
*
* @param ex Executor on which the connection operations will run.
* @param ctx SSL context.
* @param max_read_size Maximum read size that is passed to
* the internal `asio::dynamic_buffer` constructor.
*/
explicit
basic_connection(
executor_type ex,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)())
: impl_{ex, std::move(ctx), max_read_size}
, timer_{ex}
{ }
/// Constructs from an execution context.
explicit
basic_connection(
asio::io_context& ioc,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)())
: basic_connection(ioc.get_executor(), std::move(ctx), max_read_size)
{ }
/** @brief Starts underlying connection operations.
*
* This member function provides the following functionality
*
* 1. Resolve the address passed on `boost::redis::config::addr`.
* 2. Connect to one of the results obtained in the resolve operation.
* 3. Send a [HELLO](https://redis.io/commands/hello/) command whose parameters are read from `cfg`.
* 4. Start a health-check operation where ping commands are sent
* at intervals specified in
* `boost::redis::config::health_check_interval`. The message passed to
* `PING` will be `boost::redis::config::health_check_id`. Passing a
* zero duration disables health checks. If the Redis server does not
* respond to a health check within twice the value specified here, it
* will be considered unresponsive and the connection will be closed
* and a new one established.
* 5. Start read and write operations with the Redis server. More
* specifically, it will trigger the write of all requests, i.e. calls
* to `async_exec`, that happened prior to this call.
*
* When a connection is lost for any reason, a new one is
* established automatically. To disable reconnection call
* `boost::redis::connection::cancel(operation::reconnection)`.
*
* @param cfg Configuration parameters.
* @param l Logger object. The interface expected is specified in the class `boost::redis::logger`.
* @param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(system::error_code);
* @endcode
*
* For an example of how to call this function, refer to
* cpp20_intro.cpp or any other example.
*/
template <
class Logger = logger,
class CompletionToken = asio::default_completion_token_t<executor_type>>
auto
async_run(
config const& cfg = {},
Logger l = Logger{},
CompletionToken token = CompletionToken{})
{
using this_type = basic_connection<executor_type>;
cfg_ = cfg;
l.set_prefix(cfg_.log_prefix);
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(detail::reconnection_op<this_type, Logger>{this, l}, token, timer_);
}
/** @brief Receives server side pushes asynchronously.
*
* When pushes arrive and there is no `async_receive` operation in
* progress, pushed data, requests, and responses will be paused
* until `async_receive` is called again. Apps will usually want
* to call `async_receive` in a loop.
*
* To cancel an ongoing receive operation apps should call
* `connection::cancel(operation::receive)`.
*
* @param token Completion token.
*
* For an example see cpp20_subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the push received in
* bytes.
*/
template <class CompletionToken = asio::default_completion_token_t<executor_type>>
auto async_receive(CompletionToken token = CompletionToken{})
{ return impl_.async_receive(std::move(token)); }
/** @brief Receives server pushes synchronously without blocking.
*
* Receives a server push synchronously by calling `try_receive` on
* the underlying channel. If the operation fails because
* `try_receive` returns `false`, `ec` will be set to
* `boost::redis::error::sync_receive_push_failed`.
*
* @param ec Contains the error if any occurred.
*
* @returns The number of bytes read from the socket.
*/
std::size_t receive(system::error_code& ec)
{
return impl_.receive(ec);
}
template <
class Response = ignore_t,
class CompletionToken = asio::default_completion_token_t<executor_type>
>
[[deprecated("Set the response with set_receive_response and use the other overload.")]]
auto
async_receive(
Response& response,
CompletionToken token = CompletionToken{})
{
return impl_.async_receive(response, token);
}
/** @brief Executes commands on the Redis server asynchronously.
*
* This function sends a request to the Redis server and waits for
* the responses to each individual command in the request. If the
* request contains only commands that don't expect a response,
* the completion occurs after it has been written to the
* underlying stream. Multiple concurrent calls to this function
* will be automatically queued by the implementation.
*
* @param req Request.
* @param resp Response.
* @param token Completion token.
*
* For an example see cpp20_echo_server.cpp. The completion token must
* have the following signature
*
* @code
* void f(system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response received
* in bytes.
*/
template <
class Response = ignore_t,
class CompletionToken = asio::default_completion_token_t<executor_type>
>
auto
async_exec(
request const& req,
Response& resp = ignore,
CompletionToken&& token = CompletionToken{})
{
return impl_.async_exec(req, resp, std::forward<CompletionToken>(token));
}
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with
* `async_exec`. Affects only requests that haven't been written
* yet.
* @li `operation::run`: Cancels the `async_run` operation.
* @li `operation::receive`: Cancels any ongoing calls to `async_receive`.
* @li `operation::all`: Cancels all operations listed above.
*
* @param op The operation to be cancelled.
*/
void cancel(operation op = operation::all)
{
switch (op) {
case operation::reconnection:
case operation::all:
cfg_.reconnect_wait_interval = std::chrono::seconds::zero();
timer_.cancel();
break;
default: /* ignore */;
}
impl_.cancel(op);
}
/// Returns true if a reconnection will be attempted after a disconnection.
bool will_reconnect() const noexcept
{ return cfg_.reconnect_wait_interval != std::chrono::seconds::zero();}
/// Returns the ssl context.
auto const& get_ssl_context() const noexcept
{ return impl_.get_ssl_context();}
/// Resets the underlying stream.
void reset_stream()
{ impl_.reset_stream(); }
/// Returns a reference to the next layer.
auto& next_layer() noexcept
{ return impl_.next_layer(); }
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept
{ return impl_.next_layer(); }
/// Sets the response object of `async_receive` operations.
template <class Response>
void set_receive_response(Response& response)
{ impl_.set_receive_response(response); }
/// Returns connection usage information.
usage get_usage() const noexcept
{ return impl_.get_usage(); }
private:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
template <class, class> friend struct detail::reconnection_op;
config cfg_;
detail::connection_base<executor_type> impl_;
timer_type timer_;
};
/** \brief A basic_connection that type erases the executor.
* \ingroup high-level-api
*
* This connection type uses the asio::any_io_executor and
* asio::any_completion_token to reduce compilation times.
*
* For documentaiton of each member function see
* `boost::redis::basic_connection`.
*/
class connection {
public:
/// Executor type.
using executor_type = asio::any_io_executor;
/// Constructs from an executor.
explicit
connection(
executor_type ex,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)());
/// Constructs from an execution context.
explicit
connection(
asio::io_context& ioc,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)());
/// Returns the underlying executor.
executor_type get_executor() noexcept
{ return impl_.get_executor(); }
/// Calls `boost::redis::basic_connection::async_run`.
template <class CompletionToken>
auto async_run(config const& cfg, logger l, CompletionToken token)
{
return asio::async_initiate<
CompletionToken, void(boost::system::error_code)>(
[](auto handler, connection* self, config const* cfg, logger l)
{
self->async_run_impl(*cfg, l, std::move(handler));
}, token, this, &cfg, l);
}
/// Calls `boost::redis::basic_connection::async_receive`.
template <class Response, class CompletionToken>
[[deprecated("Set the response with set_receive_response and use the other overload.")]]
auto async_receive(Response& response, CompletionToken token)
{
return impl_.async_receive(response, std::move(token));
}
/// Calls `boost::redis::basic_connection::async_receive`.
template <class CompletionToken>
auto async_receive(CompletionToken token)
{ return impl_.async_receive(std::move(token)); }
/// Calls `boost::redis::basic_connection::receive`.
std::size_t receive(system::error_code& ec)
{
return impl_.receive(ec);
}
/// Calls `boost::redis::basic_connection::async_exec`.
template <class Response, class CompletionToken>
auto async_exec(request const& req, Response& resp, CompletionToken token)
{
return impl_.async_exec(req, resp, std::move(token));
}
/// Calls `boost::redis::basic_connection::cancel`.
void cancel(operation op = operation::all);
/// Calls `boost::redis::basic_connection::will_reconnect`.
bool will_reconnect() const noexcept
{ return impl_.will_reconnect();}
/// Calls `boost::redis::basic_connection::next_layer`.
auto& next_layer() noexcept
{ return impl_.next_layer(); }
/// Calls `boost::redis::basic_connection::next_layer`.
auto const& next_layer() const noexcept
{ return impl_.next_layer(); }
/// Calls `boost::redis::basic_connection::reset_stream`.
void reset_stream()
{ impl_.reset_stream();}
/// Sets the response object of `async_receive` operations.
template <class Response>
void set_receive_response(Response& response)
{ impl_.set_receive_response(response); }
/// Returns connection usage information.
usage get_usage() const noexcept
{ return impl_.get_usage(); }
/// Returns the ssl context.
auto const& get_ssl_context() const noexcept
{ return impl_.get_ssl_context();}
private:
void
async_run_impl(
config const& cfg,
logger l,
asio::any_completion_handler<void(boost::system::error_code)> token);
basic_connection<executor_type> impl_;
};
} // boost::redis
#endif // BOOST_REDIS_CONNECTION_HPP
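// Illustrative usage sketch (not part of the library sources): running
// a connection and executing a request with C++20 coroutines. It
// assumes request and generic_response from <boost/redis/request.hpp>
// and <boost/redis/response.hpp>, as used elsewhere in this library.
#include <boost/redis/connection.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/response.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/this_coro.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <memory>

namespace asio = boost::asio;

auto ping_once(boost::redis::config cfg) -> asio::awaitable<void>
{
   auto conn = std::make_shared<boost::redis::connection>(
      co_await asio::this_coro::executor);

   // async_run keeps the connection healthy (resolve, connect, HELLO,
   // health checks, reconnection) until it is cancelled.
   conn->async_run(cfg, boost::redis::logger{}, asio::consign(asio::detached, conn));

   boost::redis::request req;
   req.push("PING", "hello");

   boost::redis::generic_response resp;
   co_await conn->async_exec(req, resp, asio::use_awaitable);

   conn->cancel();  // stop reconnection and close the connection
}

// Typically launched with
//   asio::co_spawn(ioc, ping_once(cfg), asio::detached);
//   ioc.run();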


@@ -0,0 +1,964 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONNECTION_BASE_HPP
#define BOOST_REDIS_CONNECTION_BASE_HPP
#include <boost/redis/adapter/adapt.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/config.hpp>
#include <boost/redis/detail/runner.hpp>
#include <boost/redis/usage.hpp>
#include <boost/system.hpp>
#include <boost/asio/basic_stream_socket.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/write.hpp>
#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <algorithm>
#include <array>
#include <chrono>
#include <deque>
#include <memory>
#include <string_view>
#include <type_traits>
#include <functional>
namespace boost::redis::detail
{
template <class DynamicBuffer>
std::string_view buffer_view(DynamicBuffer buf) noexcept
{
char const* start = static_cast<char const*>(buf.data(0, buf.size()).data());
return std::string_view{start, std::size(buf)};
}
template <class AsyncReadStream, class DynamicBuffer>
class append_some_op {
private:
AsyncReadStream& stream_;
DynamicBuffer buf_;
std::size_t size_ = 0;
std::size_t tmp_ = 0;
asio::coroutine coro_{};
public:
append_some_op(AsyncReadStream& stream, DynamicBuffer buf, std::size_t size)
: stream_ {stream}
, buf_ {std::move(buf)}
, size_{size}
{ }
template <class Self>
void operator()( Self& self
, system::error_code ec = {}
, std::size_t n = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
tmp_ = buf_.size();
buf_.grow(size_);
BOOST_ASIO_CORO_YIELD
stream_.async_read_some(buf_.data(tmp_, size_), std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
buf_.shrink(buf_.size() - tmp_ - n);
self.complete({}, n);
}
}
};
template <class AsyncReadStream, class DynamicBuffer, class CompletionToken>
auto
async_append_some(
AsyncReadStream& stream,
DynamicBuffer buffer,
std::size_t size,
CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code, std::size_t)
>(append_some_op<AsyncReadStream, DynamicBuffer> {stream, buffer, size}, token, stream);
}
template <class Conn>
struct exec_op {
using req_info_type = typename Conn::req_info;
using adapter_type = typename Conn::adapter_type;
Conn* conn_ = nullptr;
std::shared_ptr<req_info_type> info_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()(Self& self , system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro)
{
// Check whether the user wants to wait for the connection to
// be established.
if (info_->req_->get_config().cancel_if_not_connected && !conn_->is_open()) {
BOOST_ASIO_CORO_YIELD
asio::post(std::move(self));
return self.complete(error::not_connected, 0);
}
conn_->add_request_info(info_);
EXEC_OP_WAIT:
BOOST_ASIO_CORO_YIELD
info_->async_wait(std::move(self));
if (info_->ec_) {
self.complete(info_->ec_, 0);
return;
}
if (info_->stop_requested()) {
// Don't have to call remove_request as it has already
// been done by cancel(exec).
return self.complete(asio::error::operation_aborted, 0);
}
if (is_cancelled(self)) {
if (!info_->is_waiting()) {
using c_t = asio::cancellation_type;
auto const c = self.get_cancellation_state().cancelled();
if ((c & c_t::terminal) != c_t::none) {
// Cancellation requires closing the connection
// otherwise it stays in an inconsistent state.
conn_->cancel(operation::run);
return self.complete(asio::error::operation_aborted, 0);
} else {
// Can't implement other cancellation types, ignoring.
self.get_cancellation_state().clear();
// TODO: Find a better way to ignore
// cancellation.
goto EXEC_OP_WAIT;
}
} else {
// Cancellation can be honored.
conn_->remove_request(info_);
self.complete(asio::error::operation_aborted, 0);
return;
}
}
self.complete(info_->ec_, info_->read_size_);
}
}
};
template <class Conn, class Logger>
struct run_op {
Conn* conn = nullptr;
Logger logger_;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, system::error_code ec0 = {}
, system::error_code ec1 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
conn->reset();
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(logger_, token);},
[this](auto token) { return conn->writer(logger_, token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
logger_.trace("run-op: canceled. Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
logger_.on_run(ec0, ec1);
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Conn, class Logger>
struct writer_op {
Conn* conn_;
Logger logger_;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, system::error_code ec = {}
, std::size_t n = 0)
{
ignore_unused(n);
BOOST_ASIO_CORO_REENTER (coro) for (;;)
{
while (conn_->coalesce_requests()) {
if (conn_->use_ssl())
BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));
else
BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer().next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));
logger_.on_write(ec, conn_->write_buffer_);
if (ec) {
logger_.trace("writer-op: error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
if (is_cancelled(self)) {
logger_.trace("writer-op: canceled. Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
conn_->on_write();
// A socket.close() may have been called while a successful
// write completion might have already been queued, so we
// have to check here before proceeding.
if (!conn_->is_open()) {
logger_.trace("writer-op: canceled (2). Exiting ...");
self.complete({});
return;
}
}
BOOST_ASIO_CORO_YIELD
conn_->writer_timer_.async_wait(std::move(self));
if (!conn_->is_open() || is_cancelled(self)) {
logger_.trace("writer-op: canceled (3). Exiting ...");
// Notice this is not an error of the op, stopping was
// requested from the outside, so we complete with
// success.
self.complete({});
return;
}
}
}
};
template <class Conn, class Logger>
struct reader_op {
using parse_result = typename Conn::parse_result;
using parse_ret_type = typename Conn::parse_ret_type;
Conn* conn_;
Logger logger_;
parse_ret_type res_{parse_result::resp, 0};
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, system::error_code ec = {}
, std::size_t n = 0)
{
ignore_unused(n);
BOOST_ASIO_CORO_REENTER (coro) for (;;)
{
// Appends some data to the buffer if necessary.
if ((res_.first == parse_result::needs_more) || std::empty(conn_->read_buffer_)) {
if (conn_->use_ssl()) {
BOOST_ASIO_CORO_YIELD
async_append_some(
conn_->next_layer(),
conn_->dbuf_,
conn_->get_suggested_buffer_growth(),
std::move(self));
} else {
BOOST_ASIO_CORO_YIELD
async_append_some(
conn_->next_layer().next_layer(),
conn_->dbuf_,
conn_->get_suggested_buffer_growth(),
std::move(self));
}
logger_.on_read(ec, n);
// EOF is not treated as an error.
if (ec == asio::error::eof) {
logger_.trace("reader-op: EOF received. Exiting ...");
conn_->cancel(operation::run);
return self.complete({}); // EOFINAE: EOF is not an error.
}
// The connection is not viable after an error.
if (ec) {
logger_.trace("reader-op: error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
// The operation might have been canceled, implicitly or
// explicitly, while we were suspended or after the completion
// was queued, so we have to check.
if (!conn_->is_open() || is_cancelled(self)) {
logger_.trace("reader-op: canceled. Exiting ...");
self.complete(ec);
return;
}
}
res_ = conn_->on_read(buffer_view(conn_->dbuf_), ec);
if (ec) {
logger_.trace("reader-op: parse error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
if (res_.first == parse_result::push) {
if (!conn_->receive_channel_.try_send(ec, res_.second)) {
BOOST_ASIO_CORO_YIELD
conn_->receive_channel_.async_send(ec, res_.second, std::move(self));
}
if (ec) {
logger_.trace("reader-op: error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
if (!conn_->is_open() || is_cancelled(self)) {
logger_.trace("reader-op: canceled (2). Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
}
}
}
};
/** @brief Base class for high level Redis asynchronous connections.
* @ingroup high-level-api
*
* @tparam Executor The executor type.
*
*/
template <class Executor>
class connection_base {
public:
/// Executor type
using executor_type = Executor;
/// Type of the next layer
using next_layer_type = asio::ssl::stream<asio::basic_stream_socket<asio::ip::tcp, Executor>>;
using clock_type = std::chrono::steady_clock;
using clock_traits_type = asio::wait_traits<clock_type>;
using timer_type = asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using this_type = connection_base<Executor>;
/// Constructs from an executor.
connection_base(
executor_type ex,
asio::ssl::context ctx,
std::size_t max_read_size)
: ctx_{std::move(ctx)}
, stream_{std::make_unique<next_layer_type>(ex, ctx_)}
, writer_timer_{ex}
, receive_channel_{ex, 256}
, runner_{ex, {}}
, dbuf_{read_buffer_, max_read_size}
{
set_receive_response(ignore);
writer_timer_.expires_at((std::chrono::steady_clock::time_point::max)());
}
/// Returns the ssl context.
auto const& get_ssl_context() const noexcept
{ return ctx_;}
/// Resets the underlying stream.
void reset_stream()
{
stream_ = std::make_unique<next_layer_type>(writer_timer_.get_executor(), ctx_);
}
/// Returns a reference to the next layer.
auto& next_layer() noexcept { return *stream_; }
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept { return *stream_; }
/// Returns the associated executor.
auto get_executor() {return writer_timer_.get_executor();}
/// Cancels specific operations.
void cancel(operation op)
{
runner_.cancel(op);
if (op == operation::all) {
cancel_impl(operation::run);
cancel_impl(operation::receive);
cancel_impl(operation::exec);
return;
}
cancel_impl(op);
}
template <class Response, class CompletionToken>
auto async_exec(request const& req, Response& resp, CompletionToken token)
{
using namespace boost::redis::adapter;
auto f = boost_redis_adapt(resp);
BOOST_ASSERT_MSG(req.get_expected_responses() <= f.get_supported_response_size(), "Request and response have incompatible sizes.");
auto info = std::make_shared<req_info>(req, f, get_executor());
return asio::async_compose
< CompletionToken
, void(system::error_code, std::size_t)
>(exec_op<this_type>{this, info}, token, writer_timer_);
}
template <class Response, class CompletionToken>
[[deprecated("Set the response with set_receive_response and use the other overload.")]]
auto async_receive(Response& response, CompletionToken token)
{
set_receive_response(response);
return receive_channel_.async_receive(std::move(token));
}
template <class CompletionToken>
auto async_receive(CompletionToken token)
{ return receive_channel_.async_receive(std::move(token)); }
std::size_t receive(system::error_code& ec)
{
std::size_t size = 0;
auto f = [&](system::error_code const& ec2, std::size_t n)
{
ec = ec2;
size = n;
};
auto const res = receive_channel_.try_receive(f);
if (ec)
return 0;
if (!res)
ec = error::sync_receive_push_failed;
return size;
}
template <class Logger, class CompletionToken>
auto async_run(config const& cfg, Logger l, CompletionToken token)
{
runner_.set_config(cfg);
l.set_prefix(runner_.get_config().log_prefix);
return runner_.async_run(*this, l, std::move(token));
}
template <class Response>
void set_receive_response(Response& response)
{
using namespace boost::redis::adapter;
auto g = boost_redis_adapt(response);
receive_adapter_ = adapter::detail::make_adapter_wrapper(g);
}
usage get_usage() const noexcept
{ return usage_; }
auto run_is_canceled() const noexcept
{ return cancel_run_called_; }
private:
using receive_channel_type = asio::experimental::channel<executor_type, void(system::error_code, std::size_t)>;
using runner_type = runner<executor_type>;
using adapter_type = std::function<void(std::size_t, resp3::basic_node<std::string_view> const&, system::error_code&)>;
using receiver_adapter_type = std::function<void(resp3::basic_node<std::string_view> const&, system::error_code&)>;
using exec_notifier_type = receive_channel_type;
auto use_ssl() const noexcept
{ return runner_.get_config().use_ssl;}
auto cancel_on_conn_lost() -> std::size_t
{
// Must return false if the request should be removed.
auto cond = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
if (ptr->is_waiting()) {
return !ptr->req_->get_config().cancel_on_connection_lost;
} else {
return !ptr->req_->get_config().cancel_if_unresponded;
}
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return ptr->mark_waiting();
});
return ret;
}
auto cancel_unwritten_requests() -> std::size_t
{
auto f = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
return !ptr->is_waiting();
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
return ret;
}
void cancel_impl(operation op)
{
switch (op) {
case operation::exec:
{
cancel_unwritten_requests();
} break;
case operation::run:
{
// Protects the code below from being called more than
// once, see https://github.com/boostorg/redis/issues/181
if (std::exchange(cancel_run_called_, true)) {
return;
}
close();
writer_timer_.cancel();
receive_channel_.cancel();
cancel_on_conn_lost();
} break;
case operation::receive:
{
receive_channel_.cancel();
} break;
default: /* ignore */;
}
}
void on_write()
{
// We have to clear the payload right after writing it so it can
// be used as a flag that there is no ongoing write.
write_buffer_.clear();
// Notice this must come before the for-each below.
cancel_push_requests();
// There is a small optimization possible here: traverse only the
// partition of unwritten requests instead of all of them.
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
BOOST_ASSERT_MSG(ptr != nullptr, "Expects non-null pointer.");
if (ptr->is_staged()) {
ptr->mark_written();
}
});
}
struct req_info {
public:
using node_type = resp3::basic_node<std::string_view>;
using wrapped_adapter_type = std::function<void(node_type const&, system::error_code&)>;
explicit req_info(request const& req, adapter_type adapter, executor_type ex)
: notifier_{ex, 1}
, req_{&req}
, adapter_{}
, expected_responses_{req.get_expected_responses()}
, status_{status::waiting}
, ec_{{}}
, read_size_{0}
{
adapter_ = [this, adapter](node_type const& nd, system::error_code& ec)
{
auto const i = req_->get_expected_responses() - expected_responses_;
adapter(i, nd, ec);
};
}
auto proceed()
{
notifier_.try_send(std::error_code{}, 0);
}
void stop()
{
notifier_.close();
}
[[nodiscard]] auto is_waiting() const noexcept
{ return status_ == status::waiting; }
[[nodiscard]] auto is_written() const noexcept
{ return status_ == status::written; }
[[nodiscard]] auto is_staged() const noexcept
{ return status_ == status::staged; }
void mark_written() noexcept
{ status_ = status::written; }
void mark_staged() noexcept
{ status_ = status::staged; }
void mark_waiting() noexcept
{ status_ = status::waiting; }
[[nodiscard]] auto stop_requested() const noexcept
{ return !notifier_.is_open();}
template <class CompletionToken>
auto async_wait(CompletionToken token)
{
return notifier_.async_receive(std::move(token));
}
//private:
enum class status
{ waiting
, staged
, written
};
exec_notifier_type notifier_;
request const* req_;
wrapped_adapter_type adapter_;
// Contains the number of commands that haven't been read yet.
std::size_t expected_responses_;
status status_;
system::error_code ec_;
std::size_t read_size_;
};
void remove_request(std::shared_ptr<req_info> const& info)
{
reqs_.erase(std::remove(std::begin(reqs_), std::end(reqs_), info));
}
using reqs_type = std::deque<std::shared_ptr<req_info>>;
template <class, class> friend struct reader_op;
template <class, class> friend struct writer_op;
template <class, class> friend struct run_op;
template <class> friend struct exec_op;
template <class, class, class> friend struct run_all_op;
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->proceed();
});
reqs_.erase(point, std::end(reqs_));
}
[[nodiscard]] bool is_writing() const noexcept
{
return !write_buffer_.empty();
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (info->req_->has_hello_priority()) {
auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
return e->is_waiting();
});
std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
}
if (is_open() && !is_writing())
writer_timer_.cancel();
}
template <class CompletionToken, class Logger>
auto reader(Logger l, CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(reader_op<this_type, Logger>{this, l}, token, writer_timer_);
}
template <class CompletionToken, class Logger>
auto writer(Logger l, CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(writer_op<this_type, Logger>{this, l}, token, writer_timer_);
}
template <class Logger, class CompletionToken>
auto async_run_lean(config const& cfg, Logger l, CompletionToken token)
{
runner_.set_config(cfg);
l.set_prefix(runner_.get_config().log_prefix);
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(run_op<this_type, Logger>{this, l}, token, writer_timer_);
}
[[nodiscard]] bool coalesce_requests()
{
// Coalesces the requests and marks them staged. After a
// successful write staged requests will be marked as written.
auto const point = std::partition_point(std::cbegin(reqs_), std::cend(reqs_), [](auto const& ri) {
return !ri->is_waiting();
});
std::for_each(point, std::cend(reqs_), [this](auto const& ri) {
// Stage the request.
write_buffer_ += ri->req_->payload();
ri->mark_staged();
usage_.commands_sent += ri->expected_responses_;
});
usage_.bytes_sent += std::size(write_buffer_);
return point != std::cend(reqs_);
}
bool is_waiting_response() const noexcept
{
if (std::empty(reqs_))
return false;
// Under load and on low-latency networks we might start
// receiving responses before the write operation has completed,
// while the request is still marked as staged and not written. See
// https://github.com/boostorg/redis/issues/170
return !reqs_.front()->is_waiting();
}
void close()
{
if (stream_->next_layer().is_open()) {
system::error_code ec;
stream_->next_layer().close(ec);
}
}
auto is_open() const noexcept { return stream_->next_layer().is_open(); }
auto& lowest_layer() noexcept { return stream_->lowest_layer(); }
auto is_next_push()
{
BOOST_ASSERT(!read_buffer_.empty());
// Useful links to understand the heuristics below.
//
// - https://github.com/redis/redis/issues/11784
// - https://github.com/redis/redis/issues/6426
// - https://github.com/boostorg/redis/issues/170
// The message's resp3 type is a push.
if (resp3::to_type(read_buffer_.front()) == resp3::type::push)
return true;
// This is a non-push type and the request queue is empty. I have
// noticed this is possible, for example with -MISCONF. I don't
// know why such messages are not sent with a push type, which
// would let us distinguish them from responses to commands. If we
// are lucky enough to receive them while the command queue is
// empty they can be treated as server pushes, otherwise it is
// impossible to handle them properly.
if (reqs_.empty())
return true;
// The request does not expect any response but we got one. This
// may happen, for example, when a subscribe is sent with the
// wrong syntax.
if (reqs_.front()->expected_responses_ == 0)
return true;
// Added to deal with MONITOR and also to fix issue 170, which
// happens under load and on low-latency networks, where we
// might start receiving responses before the write operation
// has completed and the request is still marked as staged and
// not written.
return reqs_.front()->is_waiting();
}
auto get_suggested_buffer_growth() const noexcept
{
return parser_.get_suggested_buffer_growth(4096);
}
enum class parse_result { needs_more, push, resp };
using parse_ret_type = std::pair<parse_result, std::size_t>;
parse_ret_type on_finish_parsing(parse_result t)
{
if (t == parse_result::push) {
usage_.pushes_received += 1;
usage_.push_bytes_received += parser_.get_consumed();
} else {
usage_.responses_received += 1;
usage_.response_bytes_received += parser_.get_consumed();
}
on_push_ = false;
dbuf_.consume(parser_.get_consumed());
auto const res = std::make_pair(t, parser_.get_consumed());
parser_.reset();
return res;
}
parse_ret_type on_read(std::string_view data, system::error_code& ec)
{
// We arrive here in two states:
//
// 1. While we are parsing a message. In this case we
// don't want to determine the type of the message in the
// buffer (i.e. response vs push) but leave it untouched
// until the parsing of a complete message ends.
//
// 2. On a new message, in which case we have to determine
// whether the next message is a push or a response.
//
if (!on_push_) // Prepare for new message.
on_push_ = is_next_push();
if (on_push_) {
if (!resp3::parse(parser_, data, receive_adapter_, ec))
return std::make_pair(parse_result::needs_more, 0);
if (ec)
return std::make_pair(parse_result::push, 0);
return on_finish_parsing(parse_result::push);
}
BOOST_ASSERT_MSG(is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)");
BOOST_ASSERT(!reqs_.empty());
BOOST_ASSERT(reqs_.front() != nullptr);
BOOST_ASSERT(reqs_.front()->expected_responses_ != 0);
if (!resp3::parse(parser_, data, reqs_.front()->adapter_, ec))
return std::make_pair(parse_result::needs_more, 0);
if (ec) {
reqs_.front()->ec_ = ec;
reqs_.front()->proceed();
return std::make_pair(parse_result::resp, 0);
}
reqs_.front()->read_size_ += parser_.get_consumed();
if (--reqs_.front()->expected_responses_ == 0) {
// Done with this request.
reqs_.front()->proceed();
reqs_.pop_front();
}
return on_finish_parsing(parse_result::resp);
}
void reset()
{
write_buffer_.clear();
read_buffer_.clear();
parser_.reset();
on_push_ = false;
cancel_run_called_ = false;
}
asio::ssl::context ctx_;
std::unique_ptr<next_layer_type> stream_;
// Notice we use a timer to simulate a condition variable. It is
// more suitable than a channel here because the notify operation
// does not suspend.
timer_type writer_timer_;
receive_channel_type receive_channel_;
runner_type runner_;
receiver_adapter_type receive_adapter_;
using dyn_buffer_type = asio::dynamic_string_buffer<char, std::char_traits<char>, std::allocator<char>>;
std::string read_buffer_;
dyn_buffer_type dbuf_;
std::string write_buffer_;
reqs_type reqs_;
resp3::parser parser_{};
bool on_push_ = false;
bool cancel_run_called_ = false;
usage usage_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_CONNECTION_BASE_HPP
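// Simplified, standalone model (not library code) of the request
// staging scheme used by connection_base above: waiting requests are
// coalesced into a single write buffer and marked staged; once the
// write completes they are marked written. All names are illustrative.
#include <algorithm>
#include <deque>
#include <string>

namespace staging_example {

enum class status { waiting, staged, written };

struct req {
   std::string payload;
   status st = status::waiting;
};

inline bool coalesce(std::deque<req>& reqs, std::string& write_buffer)
{
   // Non-waiting requests sit at the front of the queue, exactly as in
   // coalesce_requests() above.
   auto point = std::partition_point(reqs.begin(), reqs.end(),
      [](req const& r) { return r.st != status::waiting; });

   std::for_each(point, reqs.end(), [&](req& r) {
      write_buffer += r.payload;
      r.st = status::staged;
   });

   return point != reqs.end();  // true if there is something to write
}

inline void on_write_completed(std::deque<req>& reqs)
{
   for (auto& r : reqs)
      if (r.st == status::staged)
         r.st = status::written;
}

} // namespace staging_example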


@@ -0,0 +1,133 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONNECTOR_HPP
#define BOOST_REDIS_CONNECTOR_HPP
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <string>
#include <chrono>
namespace boost::redis::detail
{
template <class Connector, class Stream>
struct connect_op {
Connector* ctor_ = nullptr;
Stream* stream = nullptr;
asio::ip::tcp::resolver::results_type const* res_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> const& order = {}
, system::error_code const& ec1 = {}
, asio::ip::tcp::endpoint const& ep= {}
, system::error_code const& ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
ctor_->timer_.expires_after(ctor_->timeout_);
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token)
{
auto f = [](system::error_code const&, auto const&) { return true; };
return asio::async_connect(*stream, *res_, f, token);
},
[this](auto token) { return ctor_->timer_.async_wait(token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: {
ctor_->endpoint_ = ep;
self.complete(ec1);
} break;
case 1:
{
if (ec2) {
self.complete(ec2);
} else {
self.complete(error::connect_timeout);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class connector {
public:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
connector(Executor ex)
: timer_{ex}
{}
void set_config(config const& cfg)
{ timeout_ = cfg.connect_timeout; }
template <class Stream, class CompletionToken>
auto
async_connect(
Stream& stream,
asio::ip::tcp::resolver::results_type const& res,
CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(connect_op<connector, Stream>{this, &stream, &res}, token, timer_);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::connect:
case operation::all:
timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
auto const& endpoint() const noexcept { return endpoint_;}
private:
template <class, class> friend struct connect_op;
timer_type timer_;
std::chrono::steady_clock::duration timeout_ = std::chrono::seconds{2};
asio::ip::tcp::endpoint endpoint_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_CONNECTOR_HPP
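// Illustrative sketch (not library code) of the pattern connect_op uses
// above: race an asynchronous operation against a timer with
// asio::experimental::parallel_group and act on whichever completes
// first. The timeout value is a placeholder.
#include <boost/asio/connect.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <array>
#include <chrono>
#include <iostream>

namespace asio = boost::asio;

void connect_with_timeout(
   asio::io_context& ioc,
   asio::ip::tcp::resolver::results_type const& endpoints)
{
   asio::ip::tcp::socket socket{ioc};
   asio::steady_timer timer{ioc};
   timer.expires_after(std::chrono::seconds{10});

   asio::experimental::make_parallel_group(
      asio::async_connect(socket, endpoints, asio::deferred),
      timer.async_wait(asio::deferred)
   ).async_wait(
      asio::experimental::wait_for_one(),
      [](std::array<std::size_t, 2> order,
         boost::system::error_code ec1, asio::ip::tcp::endpoint,
         boost::system::error_code)
      {
         if (order[0] == 0)  // the connect operation finished first
            std::cout << "connect: " << ec1.message() << "\n";
         else                // the timer fired first: timeout
            std::cout << "connect timed out\n";
      });

   ioc.run();
}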


@@ -0,0 +1,124 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_SSL_CONNECTOR_HPP
#define BOOST_REDIS_SSL_CONNECTOR_HPP
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/ssl.hpp>
#include <string>
#include <chrono>
namespace boost::redis::detail
{
template <class Handshaker, class Stream>
struct handshake_op {
Handshaker* hsher_ = nullptr;
Stream* stream_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> const& order = {}
, system::error_code const& ec1 = {}
, system::error_code const& ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
hsher_->timer_.expires_after(hsher_->timeout_);
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return stream_->async_handshake(asio::ssl::stream_base::client, token); },
[this](auto token) { return hsher_->timer_.async_wait(token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: {
self.complete(ec1);
} break;
case 1:
{
if (ec2) {
self.complete(ec2);
} else {
self.complete(error::ssl_handshake_timeout);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class handshaker {
public:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
handshaker(Executor ex)
: timer_{ex}
{}
template <class Stream, class CompletionToken>
auto
async_handshake(Stream& stream, CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(handshake_op<handshaker, Stream>{this, &stream}, token, timer_);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::ssl_handshake:
case operation::all:
timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
constexpr bool is_dummy() const noexcept
{return false;}
void set_config(config const& cfg)
{ timeout_ = cfg.ssl_handshake_timeout; }
private:
template <class, class> friend struct handshake_op;
timer_type timer_;
std::chrono::steady_clock::duration timeout_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_SSL_CONNECTOR_HPP
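// Illustrative sketch (not library code): supplying a custom SSL
// context so the handshake performed above verifies the server
// certificate. The config values are placeholders; ioc.run() is left
// out of this sketch.
#include <boost/redis/connection.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ssl/context.hpp>

inline void tls_connection_sketch()
{
   boost::asio::io_context ioc;

   boost::asio::ssl::context ctx{boost::asio::ssl::context::tlsv12_client};
   ctx.set_verify_mode(boost::asio::ssl::verify_peer);
   ctx.set_default_verify_paths();  // or load a specific CA file

   // The connection takes ownership of the context.
   boost::redis::connection conn{ioc.get_executor(), std::move(ctx)};

   boost::redis::config cfg;
   cfg.use_ssl = true;  // required so the handshaker above is actually used
   conn.async_run(cfg, boost::redis::logger{}, boost::asio::detached);
}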


@@ -0,0 +1,252 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_HEALTH_CHECKER_HPP
#define BOOST_REDIS_HEALTH_CHECKER_HPP
// Has to be included before promise.hpp to build on MSVC.
#include <boost/redis/request.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/config.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <memory>
#include <chrono>
namespace boost::redis::detail {
template <class HealthChecker, class Connection, class Logger>
class ping_op {
public:
HealthChecker* checker_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_) for (;;)
{
if (checker_->checker_has_exited_) {
logger_.trace("ping_op: checker has exited. Exiting ...");
self.complete({});
return;
}
BOOST_ASIO_CORO_YIELD
conn_->async_exec(checker_->req_, checker_->resp_, std::move(self));
if (ec || is_cancelled(self)) {
logger_.trace("ping_op: error/cancelled (1).");
checker_->wait_timer_.cancel();
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
// Wait before pinging again.
checker_->ping_timer_.expires_after(checker_->ping_interval_);
BOOST_ASIO_CORO_YIELD
checker_->ping_timer_.async_wait(std::move(self));
if (ec || is_cancelled(self)) {
logger_.trace("ping_op: error/cancelled (2).");
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
}
}
};
template <class HealthChecker, class Connection, class Logger>
class check_timeout_op {
public:
HealthChecker* checker_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {})
{
BOOST_ASIO_CORO_REENTER (coro_) for (;;)
{
checker_->wait_timer_.expires_after(2 * checker_->ping_interval_);
BOOST_ASIO_CORO_YIELD
checker_->wait_timer_.async_wait(std::move(self));
if (ec || is_cancelled(self)) {
logger_.trace("check-timeout-op: error/canceled. Exiting ...");
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
if (checker_->resp_.has_error()) {
logger_.trace("check-timeout-op: Response error. Exiting ...");
self.complete({});
return;
}
if (checker_->resp_.value().empty()) {
logger_.trace("check-timeout-op: Response has no value. Exiting ...");
checker_->ping_timer_.cancel();
conn_->cancel(operation::run);
checker_->checker_has_exited_ = true;
self.complete(error::pong_timeout);
return;
}
if (checker_->resp_.has_value()) {
checker_->resp_.value().clear();
}
}
}
};
template <class HealthChecker, class Connection, class Logger>
class check_health_op {
public:
HealthChecker* checker_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void
operator()(
Self& self,
std::array<std::size_t, 2> order = {},
system::error_code ec1 = {},
system::error_code ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro_)
{
if (checker_->ping_interval_ == std::chrono::seconds::zero()) {
logger_.trace("check-health-op: timeout disabled.");
BOOST_ASIO_CORO_YIELD
asio::post(std::move(self));
self.complete({});
return;
}
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return checker_->async_ping(*conn_, logger_, token); },
[this](auto token) { return checker_->async_check_timeout(*conn_, logger_, token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
logger_.on_check_health(ec1, ec2);
if (is_cancelled(self)) {
logger_.trace("check-health-op: canceled. Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec1); return;
case 1: self.complete(ec2); return;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class health_checker {
private:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
public:
health_checker(Executor ex)
: ping_timer_{ex}
, wait_timer_{ex}
{
req_.push("PING", "Boost.Redis");
}
void set_config(config const& cfg)
{
req_.clear();
req_.push("PING", cfg.health_check_id);
ping_interval_ = cfg.health_check_interval;
}
template <
class Connection,
class Logger,
class CompletionToken = asio::default_completion_token_t<Executor>
>
auto
async_check_health(
Connection& conn,
Logger l,
CompletionToken token = CompletionToken{})
{
checker_has_exited_ = false;
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(check_health_op<health_checker, Connection, Logger>{this, &conn, l}, token, conn);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::health_check:
case operation::all:
ping_timer_.cancel();
wait_timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
private:
template <class Connection, class Logger, class CompletionToken>
auto async_ping(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(ping_op<health_checker, Connection, Logger>{this, &conn, l}, token, conn, ping_timer_);
}
template <class Connection, class Logger, class CompletionToken>
auto async_check_timeout(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(check_timeout_op<health_checker, Connection, Logger>{this, &conn, l}, token, conn, wait_timer_);
}
template <class, class, class> friend class ping_op;
template <class, class, class> friend class check_timeout_op;
template <class, class, class> friend class check_health_op;
timer_type ping_timer_;
timer_type wait_timer_;
redis::request req_;
redis::generic_response resp_;
std::chrono::steady_clock::duration ping_interval_ = std::chrono::seconds{5};
bool checker_has_exited_ = false;
};
} // boost::redis::detail
#endif // BOOST_REDIS_HEALTH_CHECKER_HPP
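// Illustrative sketch (not library code): tuning the health checker
// through the config fields read by set_config() above. A PING with
// health_check_id as payload is sent every health_check_interval; if no
// response arrives within twice that interval the connection is
// considered unresponsive. The values below are placeholders.
#include <boost/redis/config.hpp>
#include <chrono>

inline boost::redis::config watchdog_config()
{
   boost::redis::config cfg;
   cfg.health_check_id = "my-app";                       // PING payload
   cfg.health_check_interval = std::chrono::seconds{5};  // ping every 5s, give up after 10s
   // cfg.health_check_interval = std::chrono::seconds{0};  // disables health checks
   return cfg;
}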


@@ -0,0 +1,37 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_HELPER_HPP
#define BOOST_REDIS_HELPER_HPP
#include <boost/asio/cancellation_type.hpp>
namespace boost::redis::detail
{
template <class T>
auto is_cancelled(T const& self)
{
return self.get_cancellation_state().cancelled() != asio::cancellation_type_t::none;
}
#define BOOST_REDIS_CHECK_OP0(X)\
if (ec || redis::detail::is_cancelled(self)) {\
X\
self.complete(!!ec ? ec : asio::error::operation_aborted);\
return;\
}
#define BOOST_REDIS_CHECK_OP1(X)\
if (ec || redis::detail::is_cancelled(self)) {\
X\
self.complete(!!ec ? ec : asio::error::operation_aborted, {});\
return;\
}
} // boost::redis::detail
#endif // BOOST_REDIS_HELPER_HPP


@@ -0,0 +1,137 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RESOLVER_HPP
#define BOOST_REDIS_RESOLVER_HPP
#include <boost/redis/config.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <string>
#include <chrono>
namespace boost::redis::detail
{
template <class Resolver>
struct resolve_op {
Resolver* resv_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, system::error_code ec1 = {}
, asio::ip::tcp::resolver::results_type res = {}
, system::error_code ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
resv_->timer_.expires_after(resv_->timeout_);
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token)
{
return resv_->resv_.async_resolve(resv_->addr_.host, resv_->addr_.port, token);
},
[this](auto token) { return resv_->timer_.async_wait(token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: {
// Resolver completed first.
resv_->results_ = res;
self.complete(ec1);
} break;
case 1: {
if (ec2) {
// Timer completed first with an error, perhaps a
// cancellation is going on.
self.complete(ec2);
} else {
// Timer completed first without an error, this is a
// resolve timeout.
self.complete(error::resolve_timeout);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class resolver {
public:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
resolver(Executor ex) : resv_{ex} , timer_{ex} {}
template <class CompletionToken>
auto async_resolve(CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(resolve_op<resolver>{this}, token, resv_);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::resolve:
case operation::all:
resv_.cancel();
timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
auto const& results() const noexcept
{ return results_;}
void set_config(config const& cfg)
{
addr_ = cfg.addr;
timeout_ = cfg.resolve_timeout;
}
private:
using resolver_type = asio::ip::basic_resolver<asio::ip::tcp, Executor>;
template <class> friend struct resolve_op;
resolver_type resv_;
timer_type timer_;
address addr_;
std::chrono::steady_clock::duration timeout_;
asio::ip::tcp::resolver::results_type results_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_RESOLVER_HPP


@@ -0,0 +1,268 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RUNNER_HPP
#define BOOST_REDIS_RUNNER_HPP
#include <boost/redis/detail/health_checker.hpp>
#include <boost/redis/config.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/logger.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/detail/connector.hpp>
#include <boost/redis/detail/resolver.hpp>
#include <boost/redis/detail/handshaker.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <string>
#include <memory>
#include <chrono>
namespace boost::redis::detail
{
void push_hello(config const& cfg, request& req);
template <class Runner, class Connection, class Logger>
struct hello_op {
Runner* runner_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
runner_->add_hello();
BOOST_ASIO_CORO_YIELD
conn_->async_exec(runner_->hello_req_, runner_->hello_resp_, std::move(self));
logger_.on_hello(ec, runner_->hello_resp_);
if (ec || runner_->has_error_in_response() || is_cancelled(self)) {
logger_.trace("hello-op: error/canceled. Exiting ...");
conn_->cancel(operation::run);
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
self.complete({});
}
}
};
template <class Runner, class Connection, class Logger>
class runner_op {
private:
Runner* runner_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
public:
runner_op(Runner* runner, Connection* conn, Logger l)
: runner_{runner}
, conn_{conn}
, logger_{l}
{}
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 3> order = {}
, system::error_code ec0 = {}
, system::error_code ec1 = {}
, system::error_code ec2 = {}
, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return runner_->async_run_all(*conn_, logger_, token); },
[this](auto token) { return runner_->health_checker_.async_check_health(*conn_, logger_, token); },
[this](auto token) { return runner_->async_hello(*conn_, logger_, token); }
).async_wait(
asio::experimental::wait_for_all(),
std::move(self));
logger_.on_runner(ec0, ec1, ec2);
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
if (ec0 == error::connect_timeout || ec0 == error::resolve_timeout) {
self.complete(ec0);
return;
}
if (order[0] == 2 && !!ec2) {
self.complete(ec2);
return;
}
if (order[0] == 1 && ec1 == error::pong_timeout) {
self.complete(ec1);
return;
}
self.complete(ec0);
}
}
};
template <class Runner, class Connection, class Logger>
struct run_all_op {
Runner* runner_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
BOOST_ASIO_CORO_YIELD
runner_->resv_.async_resolve(std::move(self));
logger_.on_resolve(ec, runner_->resv_.results());
BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);)
BOOST_ASIO_CORO_YIELD
runner_->ctor_.async_connect(conn_->next_layer().next_layer(), runner_->resv_.results(), std::move(self));
logger_.on_connect(ec, runner_->ctor_.endpoint());
BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);)
if (conn_->use_ssl()) {
BOOST_ASIO_CORO_YIELD
runner_->hsher_.async_handshake(conn_->next_layer(), std::move(self));
logger_.on_ssl_handshake(ec);
BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);)
}
BOOST_ASIO_CORO_YIELD
conn_->async_run_lean(runner_->cfg_, logger_, std::move(self));
BOOST_REDIS_CHECK_OP0(;)
self.complete(ec);
}
}
};
template <class Executor>
class runner {
public:
runner(Executor ex, config cfg)
: resv_{ex}
, ctor_{ex}
, hsher_{ex}
, health_checker_{ex}
, cfg_{cfg}
{ }
std::size_t cancel(operation op)
{
resv_.cancel(op);
ctor_.cancel(op);
hsher_.cancel(op);
health_checker_.cancel(op);
return 0U;
}
void set_config(config const& cfg)
{
cfg_ = cfg;
resv_.set_config(cfg);
ctor_.set_config(cfg);
hsher_.set_config(cfg);
health_checker_.set_config(cfg);
}
template <class Connection, class Logger, class CompletionToken>
auto async_run(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(runner_op<runner, Connection, Logger>{this, &conn, l}, token, conn);
}
config const& get_config() const noexcept {return cfg_;}
private:
using resolver_type = resolver<Executor>;
using connector_type = connector<Executor>;
using handshaker_type = detail::handshaker<Executor>;
using health_checker_type = health_checker<Executor>;
using timer_type = typename connector_type::timer_type;
template <class, class, class> friend struct run_all_op;
template <class, class, class> friend class runner_op;
template <class, class, class> friend struct hello_op;
template <class Connection, class Logger, class CompletionToken>
auto async_run_all(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(run_all_op<runner, Connection, Logger>{this, &conn, l}, token, conn);
}
template <class Connection, class Logger, class CompletionToken>
auto async_hello(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(hello_op<runner, Connection, Logger>{this, &conn, l}, token, conn);
}
void add_hello()
{
hello_req_.clear();
if (hello_resp_.has_value())
hello_resp_.value().clear();
push_hello(cfg_, hello_req_);
}
bool has_error_in_response() const noexcept
{
if (!hello_resp_.has_value())
return true;
auto f = [](auto const& e)
{
switch (e.data_type) {
case resp3::type::simple_error:
case resp3::type::blob_error: return true;
default: return false;
}
};
return std::any_of(std::cbegin(hello_resp_.value()), std::cend(hello_resp_.value()), f);
}
resolver_type resv_;
connector_type ctor_;
handshaker_type hsher_;
health_checker_type health_checker_;
request hello_req_;
generic_response hello_resp_;
config cfg_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_RUNNER_HPP

@@ -0,0 +1,55 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_WRITE_HPP
#define BOOST_REDIS_WRITE_HPP
#include <boost/asio/write.hpp>
#include <boost/redis/request.hpp>
namespace boost::redis::detail {
/** \brief Writes a request synchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
*/
template<class SyncWriteStream>
auto write(SyncWriteStream& stream, request const& req)
{
return asio::write(stream, asio::buffer(req.payload()));
}
template<class SyncWriteStream>
auto write(SyncWriteStream& stream, request const& req, system::error_code& ec)
{
return asio::write(stream, asio::buffer(req.payload()), ec);
}
/** \brief Writes a request asynchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
* \param token Asio completion token.
*/
template<
class AsyncWriteStream,
class CompletionToken = asio::default_completion_token_t<typename AsyncWriteStream::executor_type>
>
auto async_write(
AsyncWriteStream& stream,
request const& req,
CompletionToken&& token =
asio::default_completion_token_t<typename AsyncWriteStream::executor_type>{})
{
return asio::async_write(stream, asio::buffer(req.payload()), token);
}
} // boost::redis::detail
#endif // BOOST_REDIS_WRITE_HPP
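A minimal sketch of the synchronous helper above (the header path boost/redis/detail/write.hpp is assumed, as is a Redis server listening on localhost):

#include <boost/redis/detail/write.hpp>   // assumed header path
#include <boost/redis/request.hpp>
#include <boost/asio.hpp>

int main()
{
   boost::asio::io_context ioc;
   boost::asio::ip::tcp::socket sock{ioc};
   sock.connect({boost::asio::ip::make_address("127.0.0.1"), 6379}); // assumes a local server

   boost::redis::request req;
   req.push("PING");

   // Blocks until the whole payload is on the wire; equivalent to
   // asio::write(sock, asio::buffer(req.payload())).
   boost::redis::detail::write(sock, req);
}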

@@ -1,15 +1,15 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ERROR_HPP
#define AEDIS_ERROR_HPP
#ifndef BOOST_REDIS_ERROR_HPP
#define BOOST_REDIS_ERROR_HPP
#include <boost/system/error_code.hpp>
namespace aedis {
namespace boost::redis {
/** \brief Generic errors.
* \ingroup high-level-api
@@ -63,6 +63,24 @@ enum class error
/// There is no established connection.
not_connected,
/// Resolve timeout
resolve_timeout,
/// Connect timeout
connect_timeout,
/// Pong timeout
pong_timeout,
/// SSL handshake timeout
ssl_handshake_timeout,
/// Can't receive push synchronously without blocking
sync_receive_push_failed,
/// Incompatible node depth.
incompatible_node_depth,
};
/** \internal
@@ -70,15 +88,15 @@ enum class error
* \param e Error code.
* \ingroup any
*/
auto make_error_code(error e) -> boost::system::error_code;
auto make_error_code(error e) -> system::error_code;
} // aedis
} // boost::redis
namespace std {
template<>
struct is_error_code_enum<::aedis::error> : std::true_type {};
struct is_error_code_enum<::boost::redis::error> : std::true_type {};
} // std
#endif // AEDIS_ERROR_HPP
#endif // BOOST_REDIS_ERROR_HPP

@@ -0,0 +1,49 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_IGNORE_HPP
#define BOOST_REDIS_IGNORE_HPP
#include <boost/system/result.hpp>
#include <tuple>
#include <type_traits>
namespace boost::redis
{
/** @brief Type used to ignore responses.
* @ingroup high-level-api
*
* For example
*
* @code
* response<ignore_t, std::string, ignore_t> resp;
* @endcode
*
* will ignore the first and third responses. RESP3 errors won't be
* ignored but will cause `async_exec` to complete with an error.
*/
using ignore_t = std::decay_t<decltype(std::ignore)>;
/** @brief Global ignore object.
* @ingroup high-level-api
*
* Can be used to ignore responses to a request
*
* @code
* conn->async_exec(req, ignore, ...);
* @endcode
*
* RESP3 errors won't be ignored but will cause `async_exec` to
* complete with an error.
*/
extern ignore_t ignore;
} // boost::redis
#endif // BOOST_REDIS_IGNORE_HPP

@@ -0,0 +1,39 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
namespace boost::redis {
connection::connection(
executor_type ex,
asio::ssl::context ctx,
std::size_t max_read_size)
: impl_{ex, std::move(ctx), max_read_size}
{ }
connection::connection(
asio::io_context& ioc,
asio::ssl::context ctx,
std::size_t max_read_size)
: impl_{ioc.get_executor(), std::move(ctx), max_read_size}
{ }
void
connection::async_run_impl(
config const& cfg,
logger l,
asio::any_completion_handler<void(boost::system::error_code)> token)
{
impl_.async_run(cfg, l, std::move(token));
}
void connection::cancel(operation op)
{
impl_.cancel(op);
}
} // namespace boost::redis
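The constructors above accept a caller-supplied asio::ssl::context, so TLS options such as peer verification can be set before the connection runs. A minimal sketch, assuming config::use_ssl enables TLS and using a placeholder certificate path:

#include <boost/redis/connection.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/ssl.hpp>

int main()
{
   boost::asio::io_context ioc;

   // Caller-supplied TLS context; the verify file is a placeholder.
   boost::asio::ssl::context ctx{boost::asio::ssl::context::tls_client};
   ctx.set_verify_mode(boost::asio::ssl::verify_peer);
   ctx.load_verify_file("/path/to/ca.pem");

   boost::redis::connection conn{ioc, std::move(ctx)};

   boost::redis::config cfg;
   cfg.use_ssl = true;   // assumed config flag enabling TLS
   conn.async_run(cfg, {}, boost::asio::detached);

   ioc.run();
}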

@@ -1,28 +1,29 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/error.hpp>
#include <boost/redis/error.hpp>
#include <boost/assert.hpp>
namespace aedis {
namespace boost::redis {
namespace detail {
struct error_category_impl : boost::system::error_category {
struct error_category_impl : system::error_category {
virtual ~error_category_impl() = default;
auto name() const noexcept -> char const* override
{
return "aedis";
return "boost.redis";
}
auto message(int ev) const -> std::string override
{
switch(static_cast<error>(ev)) {
case error::invalid_data_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number.";
case error::not_a_number: return "Can't convert string to number (maybe forgot to upgrade to RESP3?).";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
case error::unexpected_bool_value: return "Unexpected bool value.";
case error::empty_field: return "Expected field value is empty.";
@@ -37,12 +38,18 @@ struct error_category_impl : boost::system::error_category {
case error::not_a_double: return "Not a double.";
case error::resp3_null: return "Got RESP3 null.";
case error::not_connected: return "Not connected.";
default: BOOST_ASSERT(false); return "Aedis error.";
case error::resolve_timeout: return "Resolve timeout.";
case error::connect_timeout: return "Connect timeout.";
case error::pong_timeout: return "Pong timeout.";
case error::ssl_handshake_timeout: return "SSL handshake timeout.";
case error::sync_receive_push_failed: return "Can't receive server push synchronously without blocking.";
case error::incompatible_node_depth: return "Incompatible node depth.";
default: BOOST_ASSERT(false); return "Boost.Redis error.";
}
}
};
auto category() -> boost::system::error_category const&
auto category() -> system::error_category const&
{
static error_category_impl instance;
return instance;
@@ -50,9 +57,9 @@ auto category() -> boost::system::error_category const&
} // detail
auto make_error_code(error e) -> boost::system::error_code
auto make_error_code(error e) -> system::error_code
{
return boost::system::error_code{static_cast<int>(e), detail::category()};
return system::error_code{static_cast<int>(e), detail::category()};
}
} // aedis
} // boost::redis

@@ -0,0 +1,12 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/ignore.hpp>
namespace boost::redis
{
ignore_t ignore;
}

@@ -0,0 +1,203 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/logger.hpp>
#include <boost/system/error_code.hpp>
#include <iostream>
#include <iterator>
namespace boost::redis
{
void logger::write_prefix()
{
if (!std::empty(prefix_))
std::clog << prefix_;
}
void logger::on_resolve(system::error_code const& ec, asio::ip::tcp::resolver::results_type const& res)
{
if (level_ < level::info)
return;
write_prefix();
std::clog << "run-all-op: resolve addresses ";
if (ec) {
std::clog << ec.message() << std::endl;
} else {
auto begin = std::cbegin(res);
auto end = std::cend(res);
if (begin == end)
return;
std::clog << begin->endpoint();
for (auto iter = std::next(begin); iter != end; ++iter)
std::clog << ", " << iter->endpoint();
}
std::clog << std::endl;
}
void logger::on_connect(system::error_code const& ec, asio::ip::tcp::endpoint const& ep)
{
if (level_ < level::info)
return;
write_prefix();
std::clog << "run-all-op: connected to endpoint ";
if (ec)
std::clog << ec.message() << std::endl;
else
std::clog << ep;
std::clog << std::endl;
}
void logger::on_ssl_handshake(system::error_code const& ec)
{
if (level_ < level::info)
return;
write_prefix();
std::clog << "Runner: SSL handshake " << ec.message() << std::endl;
}
void logger::on_connection_lost(system::error_code const& ec)
{
if (level_ < level::info)
return;
write_prefix();
if (ec)
std::clog << "Connection lost: " << ec.message();
else
std::clog << "Connection lost.";
std::clog << std::endl;
}
void
logger::on_write(
system::error_code const& ec,
std::string const& payload)
{
if (level_ < level::info)
return;
write_prefix();
if (ec)
std::clog << "writer-op: " << ec.message();
else
std::clog << "writer-op: " << std::size(payload) << " bytes written.";
std::clog << std::endl;
}
void logger::on_read(system::error_code const& ec, std::size_t n)
{
if (level_ < level::info)
return;
write_prefix();
if (ec)
std::clog << "reader-op: " << ec.message();
else
std::clog << "reader-op: " << n << " bytes read.";
std::clog << std::endl;
}
void logger::on_run(system::error_code const& reader_ec, system::error_code const& writer_ec)
{
if (level_ < level::info)
return;
write_prefix();
std::clog << "run-op: "
<< reader_ec.message() << " (reader), "
<< writer_ec.message() << " (writer)";
std::clog << std::endl;
}
void
logger::on_hello(
system::error_code const& ec,
generic_response const& resp)
{
if (level_ < level::info)
return;
write_prefix();
if (ec) {
std::clog << "hello-op: " << ec.message();
if (resp.has_error())
std::clog << " (" << resp.error().diagnostic << ")";
} else {
std::clog << "hello-op: Success";
}
std::clog << std::endl;
}
void
logger::on_runner(
system::error_code const& run_all_ec,
system::error_code const& health_check_ec,
system::error_code const& hello_ec)
{
if (level_ < level::info)
return;
write_prefix();
std::clog << "runner-op: "
<< run_all_ec.message() << " (async_run_all), "
<< health_check_ec.message() << " (async_health_check) "
<< hello_ec.message() << " (async_hello).";
std::clog << std::endl;
}
void
logger::on_check_health(
system::error_code const& ping_ec,
system::error_code const& timeout_ec)
{
if (level_ < level::info)
return;
write_prefix();
std::clog << "check-health-op: "
<< ping_ec.message() << " (async_ping), "
<< timeout_ec.message() << " (async_check_timeout).";
std::clog << std::endl;
}
void logger::trace(std::string_view reason)
{
if (level_ < level::debug)
return;
write_prefix();
std::clog << reason << std::endl;
}
} // boost::redis

@@ -1,15 +1,16 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/request.hpp>
#include <string_view>
#include <aedis/resp3/request.hpp>
namespace aedis::resp3::detail {
namespace boost::redis::detail {
auto has_push_response(std::string_view cmd) -> bool
auto has_response(std::string_view cmd) -> bool
{
if (cmd == "SUBSCRIBE") return true;
if (cmd == "PSUBSCRIBE") return true;
@@ -17,4 +18,4 @@ auto has_push_response(std::string_view cmd) -> bool
return false;
}
} // aedis::resp3::detail
} // boost::redis::detail

@@ -0,0 +1,48 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/response.hpp>
#include <boost/redis/error.hpp>
#include <boost/assert.hpp>
namespace boost::redis
{
void consume_one(generic_response& r, system::error_code& ec)
{
if (r.has_error())
return; // Nothing to consume.
if (std::empty(r.value()))
return; // Nothing to consume.
auto const depth = r.value().front().depth;
// To simplify we will refuse to consume any data-type that is not
// a root node. I think there is no use for that and it is complex
// since it requires updating parent nodes.
if (depth != 0) {
ec = error::incompatible_node_depth;
return;
}
auto f = [depth](auto const& e)
{ return e.depth == depth; };
auto match = std::find_if(std::next(std::cbegin(r.value())), std::cend(r.value()), f);
r.value().erase(std::cbegin(r.value()), match);
}
void consume_one(generic_response& r)
{
system::error_code ec;
consume_one(r, ec);
if (ec)
throw system::system_error(ec);
}
} // boost::redis
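consume_one() erases the front root node together with its children, which is useful when several server pushes accumulate in one generic_response. A minimal sketch, assuming resp was filled by connection::async_receive:

#include <boost/redis/response.hpp>

// Process and drop the oldest push stored in resp, keeping any later ones.
void pop_front_push(boost::redis::generic_response& resp)
{
   if (!resp.has_value() || resp.value().empty())
      return;

   // ... inspect resp.value().front() and the nodes that follow it here ...

   boost::redis::consume_one(resp);  // removes the front root node and its children
}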

@@ -0,0 +1,27 @@
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/detail/runner.hpp>
namespace boost::redis::detail
{
void push_hello(config const& cfg, request& req)
{
if (!cfg.username.empty() && !cfg.password.empty() && !cfg.clientname.empty())
req.push("HELLO", "3", "AUTH", cfg.username, cfg.password, "SETNAME", cfg.clientname);
else if (cfg.password.empty() && cfg.clientname.empty())
req.push("HELLO", "3");
else if (cfg.clientname.empty())
req.push("HELLO", "3", "AUTH", cfg.username, cfg.password);
else
req.push("HELLO", "3", "SETNAME", cfg.clientname);
if (cfg.database_index && cfg.database_index.value() != 0)
req.push("SELECT", cfg.database_index.value());
}
} // boost::redis::detail
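push_hello() composes the initial handshake from the configuration: HELLO 3 with optional AUTH/SETNAME arguments, followed by SELECT when a non-zero database index is set. A minimal sketch of the resulting payload (placeholder credentials; assumes push_hello is declared in boost/redis/detail/runner.hpp):

#include <boost/redis/detail/runner.hpp>
#include <iostream>

int main()
{
   boost::redis::config cfg;
   cfg.username = "alice";
   cfg.password = "s3cr3t";
   cfg.clientname = "my-app";
   cfg.database_index = 2;

   boost::redis::request req;
   boost::redis::detail::push_hello(cfg, req);

   // Prints the RESP3 serialization of
   //   HELLO 3 AUTH alice s3cr3t SETNAME my-app
   //   SELECT 2
   std::cout << req.payload();
}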

@@ -0,0 +1,171 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_LOGGER_HPP
#define BOOST_REDIS_LOGGER_HPP
#include <boost/redis/response.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <string>
namespace boost::system {class error_code;}
namespace boost::redis {
/** @brief Logger class
* @ingroup high-level-api
*
* The class can be passed to the connection objects to log to `std::clog`
*
* Notice that currently this class has no stable interface. Users
* that don't want any logging can disable it by passing a logger
* constructed with logger::level::emerg to the connection.
*/
class logger {
public:
/** @brief Syslog-like log levels
* @ingroup high-level-api
*/
enum class level
{
/// Disabled
disabled,
/// Emergency
emerg,
/// Alert
alert,
/// Critical
crit,
/// Error
err,
/// Warning
warning,
/// Notice
notice,
/// Info
info,
/// Debug
debug
};
/** @brief Constructor
* @ingroup high-level-api
*
* @param l Log level.
*/
logger(level l = level::disabled)
: level_{l}
{}
/** @brief Called when the resolve operation completes.
* @ingroup high-level-api
*
* @param ec Error returned by the resolve operation.
* @param res Resolve results.
*/
void on_resolve(system::error_code const& ec, asio::ip::tcp::resolver::results_type const& res);
/** @brief Called when the connect operation completes.
* @ingroup high-level-api
*
* @param ec Error returned by the connect operation.
* @param ep Endpoint to which the connection connected.
*/
void on_connect(system::error_code const& ec, asio::ip::tcp::endpoint const& ep);
/** @brief Called when the ssl handshake operation completes.
* @ingroup high-level-api
*
* @param ec Error returned by the handshake operation.
*/
void on_ssl_handshake(system::error_code const& ec);
/** @brief Called when the connection is lost.
* @ingroup high-level-api
*
* @param ec Error returned when the connection is lost.
*/
void on_connection_lost(system::error_code const& ec);
/** @brief Called when the write operation completes.
* @ingroup high-level-api
*
* @param ec Error code returned by the write operation.
* @param payload The payload written to the socket.
*/
void on_write(system::error_code const& ec, std::string const& payload);
/** @brief Called when the read operation completes.
* @ingroup high-level-api
*
* @param ec Error code returned by the read operation.
* @param n Number of bytes read.
*/
void on_read(system::error_code const& ec, std::size_t n);
/** @brief Called when the run operation completes.
* @ingroup high-level-api
*
* @param reader_ec Error code returned by the read operation.
* @param writer_ec Error code returned by the write operation.
*/
void on_run(system::error_code const& reader_ec, system::error_code const& writer_ec);
/** @brief Called when the `HELLO` request completes.
* @ingroup high-level-api
*
* @param ec Error code returned by the async_exec operation.
* @param resp Response sent by the Redis server.
*/
void on_hello(system::error_code const& ec, generic_response const& resp);
/** @brief Sets a prefix to every log message
* @ingroup high-level-api
*
* @param prefix The prefix.
*/
void set_prefix(std::string_view prefix)
{
prefix_ = prefix;
}
/** @brief Called when the runner operation completes.
* @ingroup high-level-api
*
* @param run_all_ec Error code returned by the run_all operation.
* @param health_check_ec Error code returned by the health checker operation.
* @param hello_ec Error code returned by the hello operation.
*/
void
on_runner(
system::error_code const& run_all_ec,
system::error_code const& health_check_ec,
system::error_code const& hello_ec);
void
on_check_health(
system::error_code const& ping_ec,
system::error_code const& check_timeout_ec);
void trace(std::string_view reason);
private:
void write_prefix();
level level_;
std::string_view prefix_;
};
} // boost::redis
#endif // BOOST_REDIS_LOGGER_HPP
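A minimal sketch of wiring a logger into a connection, following the class documentation above (constructing the logger with level::emerg silences output):

#include <boost/redis/connection.hpp>
#include <boost/asio/detached.hpp>

int main()
{
   boost::asio::io_context ioc;
   boost::redis::connection conn{ioc};

   boost::redis::config cfg;  // defaults target a local server

   // Pass logger{logger::level::emerg} instead to silence logging, as noted above.
   boost::redis::logger lgr{boost::redis::logger::level::info};
   conn.async_run(cfg, lgr, boost::asio::detached);

   ioc.run();
}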

@@ -0,0 +1,41 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_OPERATION_HPP
#define BOOST_REDIS_OPERATION_HPP
namespace boost::redis {
/** @brief Connection operations that can be cancelled.
* @ingroup high-level-api
*
* The operations listed below can be passed to the
* `boost::redis::connection::cancel` member function.
*/
enum class operation {
/// Resolve operation.
resolve,
/// Connect operation.
connect,
/// SSL handshake operation.
ssl_handshake,
/// Refers to `connection::async_exec` operations.
exec,
/// Refers to `connection::async_run` operations.
run,
/// Refers to `connection::async_receive` operations.
receive,
/// Cancels reconnection.
reconnection,
/// Health check operation.
health_check,
/// Refers to all operations.
all,
};
} // boost::redis
#endif // BOOST_REDIS_OPERATION_HPP
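A short sketch of how these values are passed to connection::cancel, e.g. to shut a connection down at the end of a session:

#include <boost/redis/connection.hpp>

// Cancel everything that keeps the connection alive.
void shutdown(boost::redis::connection& conn)
{
   using boost::redis::operation;
   conn.cancel(operation::receive);       // pending async_receive calls
   conn.cancel(operation::exec);          // pending async_exec calls
   conn.cancel(operation::reconnection);  // stop automatic reconnection
   conn.cancel(operation::run);           // stop async_run
   // operation::all covers all of the above in one call.
}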

@@ -0,0 +1,329 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_REQUEST_HPP
#define BOOST_REDIS_REQUEST_HPP
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/resp3/serialization.hpp>
#include <string>
#include <tuple>
#include <algorithm>
// NOTE: For some commands like hset it would be a good idea to assert
// the value type is a pair.
namespace boost::redis {
namespace detail{
auto has_response(std::string_view cmd) -> bool;
}
/** \brief Creates Redis requests.
* \ingroup high-level-api
*
* A request is composed of one or more Redis commands and is
* referred to in the redis documentation as a pipeline, see
* https://redis.io/topics/pipelining. For example
*
* @code
* request r;
* r.push("HELLO", 3);
* r.push("FLUSHALL");
* r.push("PING");
* r.push("PING", "key");
* r.push("QUIT");
* @endcode
*
* \remarks
*
* Uses a std::string for internal storage.
*/
class request {
public:
/// Request configuration options.
struct config {
/** \brief If `true` calls to `connection::async_exec` will
* complete with error if the connection is lost while the
* request hasn't been sent yet.
*/
bool cancel_on_connection_lost = true;
/** \brief If `true` `connection::async_exec` will complete with
* `boost::redis::error::not_connected` if the call happens
* before the connection with Redis was established.
*/
bool cancel_if_not_connected = false;
/** \brief If `false` `connection::async_exec` will not
* automatically cancel this request if the connection is lost.
* Affects only requests that have been written to the socket
* but remained unresponded when
* `boost::redis::connection::async_run` completed.
*/
bool cancel_if_unresponded = true;
/** \brief If this request has a `HELLO` command and this flag
* is `true`, the `boost::redis::connection` will move it to the
* front of the queue of awaiting requests. This makes it
* possible to send `HELLO` and authenticate before other
* commands are sent.
*/
bool hello_with_priority = true;
};
/** \brief Constructor
*
* \param cfg Configuration options.
*/
explicit
request(config cfg = config{true, false, true, true})
: cfg_{cfg} {}
/// Returns the number of responses expected for this request.
[[nodiscard]] auto get_expected_responses() const noexcept -> std::size_t
{ return expected_responses_; }
/// Returns the number of commands contained in this request.
[[nodiscard]] auto get_commands() const noexcept -> std::size_t
{ return commands_; }
[[nodiscard]] auto payload() const noexcept -> std::string_view
{ return payload_;}
[[nodiscard]] auto has_hello_priority() const noexcept -> auto const&
{ return has_hello_priority_;}
/// Clears the request preserving allocated memory.
void clear()
{
payload_.clear();
commands_ = 0;
expected_responses_ = 0;
has_hello_priority_ = false;
}
/// Calls std::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
/// Returns a const reference to the config object.
[[nodiscard]] auto get_config() const noexcept -> auto const& {return cfg_; }
/// Returns a reference to the config object.
[[nodiscard]] auto get_config() noexcept -> auto& {return cfg_; }
/** @brief Appends a new command to the end of the request.
*
* For example
*
* \code
* request req;
* req.push("SET", "key", "some string", "EX", "2");
* \endcode
*
* will add the `set` command with value "some string" and an
* expiration of 2 seconds.
*
* \param cmd The command e.g redis or sentinel command.
* \param args Command arguments.
* \tparam Ts Non-string types will be converted to string by calling `boost_redis_to_bulk` on each argument. This function must be made available over ADL and must have the following signature
*
* @code
* void boost_redis_to_bulk(std::string& to, T const& t)
* {
* boost::redis::resp3::boost_redis_to_bulk(to, serialize(t));
* }
* @endcode
*
* See cpp20_serialization.cpp
*/
template <class... Ts>
void push(std::string_view cmd, Ts const&... args)
{
auto constexpr pack_size = sizeof...(Ts);
resp3::add_header(payload_, resp3::type::array, 1 + pack_size);
resp3::add_bulk(payload_, cmd);
resp3::add_bulk(payload_, std::tie(std::forward<Ts const&>(args)...));
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a key and have a
* dynamic range of arguments. For example
*
* @code
* std::map<std::string, std::string> map
* { {"key1", "value1"}
* , {"key2", "value2"}
* , {"key3", "value3"}
* };
*
* request req;
* req.push_range("HSET", "key", std::cbegin(map), std::cend(map));
* @endcode
*
* \param cmd The command e.g. Redis or Sentinel command.
* \param key The command key.
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
* \tparam Ts Non-string types will be converted to string by calling `boost_redis_to_bulk` on each argument. This function must be made available over ADL and must have the following signature
*
* @code
* void boost_redis_to_bulk(std::string& to, T const& t)
* {
* boost::redis::resp3::boost_redis_to_bulk(to, serialize(t));
* }
* @endcode
*
* See cpp20_serialization.cpp
*/
template <class ForwardIterator>
void
push_range(
std::string_view const& cmd,
std::string_view const& key,
ForwardIterator begin,
ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
if (begin == end)
return;
auto constexpr size = resp3::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
resp3::add_header(payload_, resp3::type::array, 2 + size * distance);
resp3::add_bulk(payload_, cmd);
resp3::add_bulk(payload_, key);
for (; begin != end; ++begin)
resp3::add_bulk(payload_, *begin);
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a dynamic number
* of arguments and don't have a key. For example
*
* \code
* std::set<std::string> channels
* { "channel1" , "channel2" , "channel3" }
*
* request req;
* req.push("SUBSCRIBE", std::cbegin(channels), std::cend(channels));
* \endcode
*
* \param cmd The Redis command
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
* \tparam ForwardIterator If the value type is not a std::string it will be converted to a string by calling `boost_redis_to_bulk`. This function must be made available over ADL and must have the following signature
*
* @code
* void boost_redis_to_bulk(std::string& to, T const& t)
* {
* boost::redis::resp3::boost_redis_to_bulk(to, serialize(t));
* }
* @endcode
*
* See cpp20_serialization.cpp
*/
template <class ForwardIterator>
void
push_range(
std::string_view const& cmd,
ForwardIterator begin,
ForwardIterator end,
typename std::iterator_traits<ForwardIterator>::value_type * = nullptr)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
if (begin == end)
return;
auto constexpr size = resp3::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
resp3::add_header(payload_, resp3::type::array, 1 + size * distance);
resp3::add_bulk(payload_, cmd);
for (; begin != end; ++begin)
resp3::add_bulk(payload_, *begin);
check_cmd(cmd);
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking a range of begin and end
* iterators.
*
* \param cmd Redis command.
* \param key Redis key.
* \param range Range to send e.g. `std::map`.
* \tparam Range A type that can be passed to `std::cbegin()` and `std::cend()`.
*/
template <class Range>
void
push_range(
std::string_view const& cmd,
std::string_view const& key,
Range const& range,
decltype(std::begin(range)) * = nullptr)
{
using std::begin;
using std::end;
push_range(cmd, key, begin(range), end(range));
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking a range of begin and end
* iterators.
*
* \param cmd Redis command.
* \param range Range to send e.g. `std::map`.
* \tparam Range A type that can be passed to `std::cbegin()` and `std::cend()`.
*/
template <class Range>
void
push_range(
std::string_view cmd,
Range const& range,
decltype(std::cbegin(range)) * = nullptr)
{
using std::cbegin;
using std::cend;
push_range(cmd, cbegin(range), cend(range));
}
private:
void check_cmd(std::string_view cmd)
{
++commands_;
if (!detail::has_response(cmd))
++expected_responses_;
if (cmd == "HELLO")
has_hello_priority_ = cfg_.hello_with_priority;
}
config cfg_;
std::string payload_;
std::size_t commands_ = 0;
std::size_t expected_responses_ = 0;
bool has_hello_priority_ = false;
};
} // boost::redis
#endif // BOOST_REDIS_REQUEST_HPP
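A minimal sketch of the config flags documented above: a request that fails fast instead of waiting for a reconnection (command and channel names are placeholders):

#include <boost/redis/request.hpp>

boost::redis::request make_fire_and_forget()
{
   boost::redis::request::config cfg;
   cfg.cancel_if_not_connected = true;    // complete with error::not_connected when offline
   cfg.cancel_on_connection_lost = true;  // drop it if the connection dies before it is written

   boost::redis::request req{cfg};
   req.push("PUBLISH", "events", "payload");
   return req;
}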

@@ -0,0 +1,220 @@
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/resp3/parser.hpp>
#include <boost/redis/error.hpp>
#include <boost/assert.hpp>
#include <charconv>
#include <limits>
namespace boost::redis::resp3 {
void to_int(std::size_t& i, std::string_view sv, system::error_code& ec)
{
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
if (res.ec != std::errc())
ec = error::not_a_number;
}
parser::parser()
{
reset();
}
void parser::reset()
{
depth_ = 0;
sizes_ = {{1}};
bulk_length_ = (std::numeric_limits<std::size_t>::max)();
bulk_ = type::invalid;
consumed_ = 0;
sizes_[0] = 2; // The sentinel must be more than 1.
}
std::size_t
parser::get_suggested_buffer_growth(std::size_t hint) const noexcept
{
if (!bulk_expected())
return hint;
if (hint < bulk_length_ + 2)
return bulk_length_ + 2;
return hint;
}
std::size_t
parser::get_consumed() const noexcept
{
return consumed_;
}
bool
parser::done() const noexcept
{
return depth_ == 0 && bulk_ == type::invalid && consumed_ != 0;
}
void
parser::commit_elem() noexcept
{
--sizes_[depth_];
while (sizes_[depth_] == 0) {
--depth_;
--sizes_[depth_];
}
}
auto
parser::consume(std::string_view view, system::error_code& ec) noexcept -> parser::result
{
switch (bulk_) {
case type::invalid:
{
auto const pos = view.find(sep, consumed_);
if (pos == std::string::npos)
return {}; // Needs more data to proceed.
auto const t = to_type(view.at(consumed_));
auto const content = view.substr(consumed_ + 1, pos - 1 - consumed_);
auto const ret = consume_impl(t, content, ec);
if (ec)
return {};
consumed_ = pos + 2;
if (!bulk_expected())
return ret;
} [[fallthrough]];
default: // Handles bulk.
{
auto const span = bulk_length_ + 2;
if ((std::size(view) - consumed_) < span)
return {}; // Needs more data to proceed.
auto const bulk_view = view.substr(consumed_, bulk_length_);
node_type const ret = {bulk_, 1, depth_, bulk_view};
bulk_ = type::invalid;
commit_elem();
consumed_ += span;
return ret;
}
}
}
auto
parser::consume_impl(
type t,
std::string_view elem,
system::error_code& ec) -> parser::node_type
{
BOOST_ASSERT(!bulk_expected());
node_type ret;
switch (t) {
case type::streamed_string_part:
{
to_int(bulk_length_ , elem, ec);
if (ec)
return {};
if (bulk_length_ == 0) {
ret = {type::streamed_string_part, 1, depth_, {}};
sizes_[depth_] = 1; // We are done.
bulk_ = type::invalid;
commit_elem();
} else {
bulk_ = type::streamed_string_part;
}
} break;
case type::blob_error:
case type::verbatim_string:
case type::blob_string:
{
if (elem.at(0) == '?') {
// NOTE: This can only be triggered with blob_string.
// Trick: A streamed string is read as an aggregate of
// infinite length. When the streaming is done the server
// is supposed to send a part with length 0.
sizes_[++depth_] = (std::numeric_limits<std::size_t>::max)();
ret = {type::streamed_string, 0, depth_, {}};
} else {
to_int(bulk_length_ , elem , ec);
if (ec)
return {};
bulk_ = t;
}
} break;
case type::boolean:
{
if (std::empty(elem)) {
ec = error::empty_field;
return {};
}
if (elem.at(0) != 'f' && elem.at(0) != 't') {
ec = error::unexpected_bool_value;
return {};
}
ret = {t, 1, depth_, elem};
commit_elem();
} break;
case type::doublean:
case type::big_number:
case type::number:
{
if (std::empty(elem)) {
ec = error::empty_field;
return {};
}
} [[fallthrough]];
case type::simple_error:
case type::simple_string:
case type::null:
{
ret = {t, 1, depth_, elem};
commit_elem();
} break;
case type::push:
case type::set:
case type::array:
case type::attribute:
case type::map:
{
std::size_t l = -1;
to_int(l, elem, ec);
if (ec)
return {};
ret = {t, l, depth_, {}};
if (l == 0) {
commit_elem();
} else {
if (depth_ == max_embedded_depth) {
ec = error::exceeeds_max_nested_depth;
return {};
}
++depth_;
sizes_[depth_] = l * element_multiplicity(t);
}
} break;
default:
{
ec = error::invalid_data_type;
return {};
}
}
return ret;
}
} // boost::redis::resp3

@@ -0,0 +1,42 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/resp3/serialization.hpp>
#include <boost/redis/resp3/parser.hpp>
namespace boost::redis::resp3 {
void boost_redis_to_bulk(std::string& payload, std::string_view data)
{
auto const str = std::to_string(data.size());
payload += to_code(type::blob_string);
payload.append(std::cbegin(str), std::cend(str));
payload += parser::sep;
payload.append(std::cbegin(data), std::cend(data));
payload += parser::sep;
}
void add_header(std::string& payload, type t, std::size_t size)
{
auto const str = std::to_string(size);
payload += to_code(t);
payload.append(std::cbegin(str), std::cend(str));
payload += parser::sep;
}
void add_blob(std::string& payload, std::string_view blob)
{
payload.append(std::cbegin(blob), std::cend(blob));
payload += parser::sep;
}
void add_separator(std::string& payload)
{
payload += parser::sep;
}
} // boost::redis::resp3
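The helpers above emit the raw RESP3 wire format. A small sketch of the bytes produced for a three-element command:

#include <boost/redis/resp3/serialization.hpp>
#include <cassert>
#include <string>
#include <string_view>

int main()
{
   using namespace boost::redis::resp3;

   std::string payload;
   add_header(payload, type::array, 3);                      // "*3\r\n"
   boost_redis_to_bulk(payload, std::string_view{"SET"});    // "$3\r\nSET\r\n"
   boost_redis_to_bulk(payload, std::string_view{"key"});    // "$3\r\nkey\r\n"
   boost_redis_to_bulk(payload, std::string_view{"value"});  // "$5\r\nvalue\r\n"

   assert(payload == "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n");
}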

@@ -0,0 +1,42 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/resp3/type.hpp>
#include <boost/assert.hpp>
namespace boost::redis::resp3 {
auto to_string(type t) noexcept -> char const*
{
switch (t) {
case type::array: return "array";
case type::push: return "push";
case type::set: return "set";
case type::map: return "map";
case type::attribute: return "attribute";
case type::simple_string: return "simple_string";
case type::simple_error: return "simple_error";
case type::number: return "number";
case type::doublean: return "doublean";
case type::boolean: return "boolean";
case type::big_number: return "big_number";
case type::null: return "null";
case type::blob_error: return "blob_error";
case type::verbatim_string: return "verbatim_string";
case type::blob_string: return "blob_string";
case type::streamed_string: return "streamed_string";
case type::streamed_string_part: return "streamed_string_part";
default: return "invalid";
}
}
auto operator<<(std::ostream& os, type t) -> std::ostream&
{
os << to_string(t);
return os;
}
} // boost::redis::resp3

@@ -0,0 +1,64 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RESP3_NODE_HPP
#define BOOST_REDIS_RESP3_NODE_HPP
#include <boost/redis/resp3/type.hpp>
namespace boost::redis::resp3 {
/** \brief A node in the response tree.
* \ingroup high-level-api
*
* RESP3 can contain recursive data structures: a map of sets of
* vectors, etc. As the response is parsed each element is passed to a
* user callback (push parser). The signature of this callback is
* `f(resp3::node<std::string_view>)`. This class is called a node
* because it can be seen as an element of the response tree. It is a
* template so that users can use it with owning strings e.g.
* `std::string` or `boost::static_string`.
*
* @tparam String A `std::string`-like type.
*/
template <class String>
struct basic_node {
/// The RESP3 type of the data in this node.
type data_type = type::invalid;
/// The number of elements of an aggregate.
std::size_t aggregate_size{};
/// The depth of this node in the response tree.
std::size_t depth{};
/// The actual data. For aggregate types this is usually empty.
String value{};
};
/** @brief Compares a node for equality.
* @relates basic_node
*
* @param a Left hand side node object.
* @param b Right hand side node object.
*/
template <class String>
auto operator==(basic_node<String> const& a, basic_node<String> const& b)
{
return a.aggregate_size == b.aggregate_size
&& a.depth == b.depth
&& a.data_type == b.data_type
&& a.value == b.value;
};
/** @brief A node in the response tree.
* @ingroup high-level-api
*/
using node = basic_node<std::string>;
} // boost::redis::resp3
#endif // BOOST_REDIS_RESP3_NODE_HPP
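Because aggregates are flattened into a depth-annotated sequence, a single-entry map reply such as `%1\r\n$3\r\nkey\r\n$5\r\nvalue\r\n` is delivered as three nodes. A small sketch of that expected sequence:

#include <boost/redis/resp3/node.hpp>
#include <vector>

using boost::redis::resp3::node;
using boost::redis::resp3::type;

// Flattened tree for a map holding one key/value pair.
std::vector<node> const expected = {
   {type::map,         1, 0, ""},       // aggregate root, one pair
   {type::blob_string, 1, 1, "key"},    // child at depth 1
   {type::blob_string, 1, 1, "value"},  // child at depth 1
};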

@@ -0,0 +1,104 @@
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RESP3_PARSER_HPP
#define BOOST_REDIS_RESP3_PARSER_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/system/error_code.hpp>
#include <array>
#include <string_view>
#include <cstdint>
#include <optional>
namespace boost::redis::resp3 {
class parser {
public:
using node_type = basic_node<std::string_view>;
using result = std::optional<node_type>;
static constexpr std::size_t max_embedded_depth = 5;
static constexpr std::string_view sep = "\r\n";
private:
// The current depth. Simple data types will have depth 0, whereas
// the elements of aggregates will have depth 1. Embedded types
// will have increasing depth.
std::size_t depth_;
// The parser supports up to 5 levels of nested structures. The
// first element in the sizes stack is a sentinel and must be
// different from 1.
std::array<std::size_t, max_embedded_depth + 1> sizes_;
// Contains the length expected in the next bulk read.
std::size_t bulk_length_;
// The type of the next bulk. Contains type::invalid if no bulk is
// expected.
type bulk_;
// The number of bytes consumed from the buffer.
std::size_t consumed_;
// Returns the number of bytes that have been consumed.
auto consume_impl(type t, std::string_view elem, system::error_code& ec) -> node_type;
void commit_elem() noexcept;
// The bulk type expected in the next read. If none is expected
// returns type::invalid.
[[nodiscard]]
auto bulk_expected() const noexcept -> bool
{ return bulk_ != type::invalid; }
public:
parser();
// Returns true when the parser is done with the current message.
[[nodiscard]]
auto done() const noexcept -> bool;
auto get_suggested_buffer_growth(std::size_t hint) const noexcept -> std::size_t;
auto get_consumed() const noexcept -> std::size_t;
auto consume(std::string_view view, system::error_code& ec) noexcept -> result;
void reset();
};
// Returns false if more data is needed. If true is returned the
// parser is either done or an error occurred, which can be checked
// in ec.
template <class Adapter>
bool
parse(
resp3::parser& p,
std::string_view const& msg,
Adapter& adapter,
system::error_code& ec)
{
while (!p.done()) {
auto const res = p.consume(msg, ec);
if (ec)
return true;
if (!res)
return false;
adapter(res.value(), ec);
if (ec)
return true;
}
return true;
}
} // boost::redis::resp3
#endif // BOOST_REDIS_RESP3_PARSER_HPP
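A minimal sketch of driving parse() with a lambda adapter that just collects nodes (all names below are local to the example):

#include <boost/redis/resp3/parser.hpp>
#include <boost/system/error_code.hpp>
#include <string>
#include <vector>

std::vector<boost::redis::resp3::node>
collect_nodes(std::string_view msg, boost::system::error_code& ec)
{
   using namespace boost::redis::resp3;

   std::vector<node> out;
   parser p;
   auto adapter = [&out](parser::node_type const& nd, boost::system::error_code&) {
      // Copy the string_view node into an owning node.
      out.push_back({nd.data_type, nd.aggregate_size, nd.depth, std::string{nd.value}});
   };

   // parse() returns false when msg does not yet contain a complete message.
   parse(p, msg, adapter, ec);
   return out;
}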

Some files were not shown because too many files have changed in this diff.