mirror of https://github.com/boostorg/redis.git synced 2026-02-02 21:12:16 +00:00

Compare commits


1 Commits

Author SHA1 Message Date
Marcelo Zimbres
3f3061a6de Adds doxygen output to the preset. 2022-10-03 14:00:20 +02:00
156 changed files with 10285 additions and 14719 deletions

View File

@@ -9,7 +9,6 @@ Checks: "*,\
-google-readability-braces-around-statements,\
-hicpp-braces-around-statements,\
-hicpp-named-parameter,\
-hicpp-avoid-goto,\
-google-build-using-namespace,\
-altera-*,\
-fuchsia-*,\
@@ -22,12 +21,6 @@ Checks: "*,\
-bugprone-use-after-move,\
-hicpp-invalid-access-moved,\
-misc-no-recursion,\
-cppcoreguidelines-pro-bounds-pointer-arithmetic,\
-cppcoreguidelines-avoid-magic-numbers,\
-cppcoreguidelines-pro-bounds-constant-array-index,\
-cppcoreguidelines-interfaces-global-init,\
-cppcoreguidelines-macro-usage,\
-cppcoreguidelines-avoid-goto,\
-cppcoreguidelines-non-private-member-variables-in-classes"
WarningsAsErrors: ''
CheckOptions:

View File

@@ -7,7 +7,7 @@ codecov:
ignore:
- "benchmarks/cpp/asio/*"
- "example/*"
- "examples/*"
- "tests/*"
- "/usr/*"
- "**/boost/*"

View File

@@ -1,128 +1,9 @@
# CI script to verify that CMake and B2 builds work.
# B2 builds include only tests that don't require a DB server, to avoid race conditions.
# CMake tests include the actual project tests and all the CMake integration workflows
# recommended by Boost.CI.
# Windows CMake jobs build the code but don't run the tests,
# since we don't have a way to set up a Redis server on Windows (yet).
# Subcommands are implemented by the tools/ci.py script in a platform-independent manner.
name: CI
on: [push, pull_request]
jobs:
windows-cmake:
name: "CMake ${{matrix.toolset}} ${{matrix.build-type}} C++${{matrix.cxxstd}}"
runs-on: ${{matrix.os}}
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- { toolset: msvc-14.2, os: windows-2019, generator: "Visual Studio 16 2019", cxxstd: '17', build-type: 'Debug', build-shared-libs: 1 }
- { toolset: msvc-14.2, os: windows-2019, generator: "Visual Studio 16 2019", cxxstd: '17', build-type: 'Release', build-shared-libs: 0 }
- { toolset: msvc-14.3, os: windows-2022, generator: "Visual Studio 17 2022", cxxstd: '20', build-type: 'Debug', build-shared-libs: 0 }
- { toolset: msvc-14.3, os: windows-2022, generator: "Visual Studio 17 2022", cxxstd: '20', build-type: 'Release', build-shared-libs: 1 }
env:
CMAKE_BUILD_PARALLEL_LEVEL: 4
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Boost
run: python3 tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build a Boost distribution using B2
run: |
python3 tools/ci.py build-b2-distro \
--toolset ${{ matrix.toolset }}
- name: Build a Boost distribution using CMake
run: |
python3 tools/ci.py build-cmake-distro \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
- name: Build the project tests
run: |
python3 tools/ci.py build-cmake-standalone-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
# # TODO: re-enable this when a Redis server is available for this job
# - name: Run the project tests
# run: |
# python3 tools/ci.py run-cmake-standalone-tests \
# --build-type ${{ matrix.build-type }}
- name: Run add_subdirectory tests
run: |
python3 tools/ci.py run-cmake-add-subdirectory-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
- name: Run find_package tests with the built cmake distribution
run: |
python3 tools/ci.py run-cmake-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
- name: Run find_package tests with the built b2 distribution
run: |
python3 tools/ci.py run-cmake-b2-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }} \
--generator "${{ matrix.generator }}" \
--build-shared-libs ${{ matrix.build-shared-libs }}
windows-b2:
name: "B2 ${{matrix.toolset}}"
runs-on: ${{matrix.os}}
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- { toolset: msvc-14.2, os: windows-2019 }
- { toolset: msvc-14.3, os: windows-2022 }
env:
OPENSSL_ROOT: "C:\\Program Files\\OpenSSL"
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup user-config.jam
run: cp tools/user-config.jam "${HOMEDRIVE}${HOMEPATH}/"
- name: Setup Boost
run: python3 tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build and run project tests using B2
run: |
python3 tools/ci.py run-b2-tests \
--toolset ${{ matrix.toolset }} \
--cxxstd 17,20 \
--variant debug,release
posix-cmake:
name: "CMake ${{ matrix.toolset }} ${{ matrix.cxxstd }} ${{ matrix.build-type }} ${{ matrix.cxxflags }}"
posix:
defaults:
run:
shell: bash
@@ -131,190 +12,35 @@ jobs:
fail-fast: false
matrix:
include:
- toolset: gcc-11
install: g++-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Debug'
ldflags: ''
- toolset: gcc-11
install: g++-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Release'
ldflags: ''
- toolset: clang-11
install: clang-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Debug'
ldflags: ''
- toolset: clang-11
install: clang-11
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Debug'
ldflags: ''
- toolset: clang-13
install: clang-13
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Release'
ldflags: ''
- toolset: clang-13
install: clang-13
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Release'
ldflags: ''
- toolset: clang-14
install: 'clang-14 libc++-14-dev libc++abi-14-dev'
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '17'
build-type: 'Debug'
cxxflags: '-stdlib=libc++'
ldflags: '-lc++'
- toolset: clang-14
install: 'clang-14 libc++-14-dev libc++abi-14-dev'
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: '20'
build-type: 'Release'
cxxflags: '-stdlib=libc++'
ldflags: '-lc++'
- { toolset: gcc, compiler: g++-10, install: g++-10, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: gcc, compiler: g++-11, install: g++-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-11, install: clang-11, os: ubuntu-22.04, cxxstd: 'c++20' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++17' }
- { toolset: clang, compiler: clang++-13, install: clang-13, os: ubuntu-22.04, cxxstd: 'c++20' }
runs-on: ${{ matrix.os }}
container: ${{matrix.container}}
env:
CXXFLAGS: ${{matrix.cxxflags}} -Wall -Wextra
LDFLAGS: ${{matrix.ldflags}}
CMAKE_BUILD_PARALLEL_LEVEL: 4
BOOST_REDIS_TEST_SERVER: redis
services:
redis:
image: redis
CXXFLAGS: -g -O0 -std=${{matrix.cxxstd}} -Wall -Wextra
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup container environment
if: matrix.container
- name: Install CMake
run: sudo apt-get -y install cmake
- name: Install compiler
run: sudo apt-get install -y ${{ matrix.install }}
- name: Install Redis
run: sudo apt-get install -y redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
platform_version: 22.04
- name: Run CMake
run: |
apt-get update
apt-get -y install sudo python3 git g++ libssl-dev protobuf-compiler redis-server
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get -y install cmake protobuf-compiler redis-server python3 ${{ matrix.install }}
- name: Setup Boost
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build a Boost distribution using B2
run: |
./tools/ci.py build-b2-distro \
--toolset ${{ matrix.toolset }}
- name: Build a Boost distribution using CMake
run: |
./tools/ci.py build-cmake-distro \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Build the project tests
run: |
./tools/ci.py build-cmake-standalone-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Run the project tests
run: |
./tools/ci.py run-cmake-standalone-tests \
--build-type ${{ matrix.build-type }}
- name: Run add_subdirectory tests
run: |
./tools/ci.py run-cmake-add-subdirectory-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Run find_package tests with the built cmake distribution
run: |
./tools/ci.py run-cmake-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
- name: Run find_package tests with the built b2 distribution
run: |
./tools/ci.py run-cmake-b2-find-package-tests \
--build-type ${{ matrix.build-type }} \
--cxxstd ${{ matrix.cxxstd }} \
--toolset ${{ matrix.toolset }}
posix-b2:
name: "B2 ${{ matrix.toolset }}"
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
include:
- toolset: gcc-11
install: g++-11
cxxstd: "11,17,20" # Having C++11 shouldn't break the build
os: ubuntu-latest
container: ubuntu:22.04
- toolset: clang-14
install: clang-14
os: ubuntu-latest
container: ubuntu:22.04
cxxstd: "17,20"
runs-on: ${{ matrix.os }}
container: ${{matrix.container}}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup container environment
if: matrix.container
run: |
apt-get update
apt-get -y install sudo python3 git g++ libssl-dev
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get -y install python3 ${{ matrix.install }}
- name: Setup Boost
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build and run project tests using B2
run: |
python3 tools/ci.py run-b2-tests \
--toolset ${{ matrix.toolset }} \
--cxxstd ${{ matrix.cxxstd }} \
--variant debug,release
BOOST_ROOT=${{steps.install-boost.outputs.BOOST_ROOT}} cmake -DCMAKE_CXX_COMPILER="${{matrix.compiler}}" -DCMAKE_CXX_FLAGS="${{env.CXXFLAGS}}"
- name: Build
run: make
- name: Check
run: ctest --output-on-failure

View File

@@ -3,8 +3,7 @@ name: Coverage
on:
push:
branches:
- develop
- master
jobs:
posix:
defaults:
@@ -16,36 +15,33 @@ jobs:
CXX: g++-11
CXXFLAGS: -g -O0 -std=c++20 --coverage -fkeep-inline-functions -fkeep-static-functions
LDFLAGS: --coverage
CMAKE_BUILD_PARALLEL_LEVEL: 4
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install dependencies
run: sudo apt-get --no-install-recommends -y install cmake lcov g++-11 redis-server python3 libgd-perl
- name: Setup Boost
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
- name: Build Boost
run: ./tools/ci.py build-b2-distro --toolset=gcc-11
# Having our library there confuses the coverage reports
- name: Remove Boost.Redis from the b2 distro
run: rm -rf ~/boost-b2-distro/include/boost/redis
- name: Install CMake
run: sudo apt-get -y install cmake
- name: Install lcov
run: sudo apt-get -y install lcov
- name: Install compiler
run: sudo apt-get -y install g++-11
- name: Install Redis
run: sudo apt-get -y install redis-server
- name: Install boost
uses: MarkusJx/install-boost@v2.3.0
id: install-boost
with:
boost_version: 1.79.0
platform_version: 22.04
- name: Run CMake
run: cmake -DCMAKE_PREFIX_PATH=$HOME/boost-b2-distro --preset coverage .
run: |
BOOST_ROOT=${{steps.install-boost.outputs.BOOST_ROOT}} cmake --preset coverage .
- name: Build
run: cmake --build --preset coverage
- name: Test
run: ctest --preset coverage
- name: Make the coverage file
run: cmake --build --preset coverage --target coverage
- name: Upload to codecov
run: |
bash <(curl -s https://codecov.io/bash) -f ./build/coverage/coverage.info
bash <(curl -s https://codecov.io/bash) -f ./build/coverage/coverage.info || echo "Codecov did not collect coverage reports"

View File

@@ -1,142 +1,190 @@
cmake_minimum_required(VERSION 3.8...3.20)
# At the moment the official build system is still autotools and this
# file is meant to support Aedis on windows.
# determine whether it's main/root project
# or being built under another project.
if (NOT DEFINED BOOST_REDIS_MAIN_PROJECT)
set(BOOST_REDIS_MAIN_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(BOOST_REDIS_MAIN_PROJECT ON)
endif()
endif()
# BOOST_ROOT=/opt/boost_1_79/ cmake -DCMAKE_CXX_FLAGS="-g -O0
# -std=c++20 -Wall -Wextra --coverage -fkeep-inline-functions
# -fkeep-static-functions" -DCMAKE_EXE_LINKER_FLAGS="--coverage"
# ~/my/aedis
project(boost_redis VERSION "${BOOST_SUPERPROJECT_VERSION}" LANGUAGES CXX)
cmake_minimum_required(VERSION 3.14)
# Library
add_library(boost_redis INTERFACE)
add_library(Boost::redis ALIAS boost_redis)
target_include_directories(boost_redis INTERFACE include)
target_compile_features(boost_redis INTERFACE cxx_std_17)
project(
Aedis
VERSION 1.1.0
DESCRIPTION "A redis client designed for performance and scalability"
HOMEPAGE_URL "https://mzimbres.github.io/aedis"
LANGUAGES CXX
)
# Dependencies
if (BOOST_REDIS_MAIN_PROJECT)
# TODO: Understand why we have to list all dependencies below
# instead of
#set(BOOST_INCLUDE_LIBRARIES redis)
#set(BOOST_EXCLUDE_LIBRARIES redis)
#add_subdirectory(../.. boostorg/boost EXCLUDE_FROM_ALL)
add_library(aedis INTERFACE)
target_include_directories(aedis INTERFACE
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
target_link_libraries(aedis
INTERFACE
Boost::asio
Boost::assert
Boost::config
Boost::core
Boost::mp11
Boost::optional
Boost::system
Boost::utility
Boost::winapi
)
set(deps
system
assert
config
throw_exception
asio
variant2
mp11
winapi
predef
align
context
core
coroutine
static_assert
pool
date_time
smart_ptr
exception
integer
move
type_traits
algorithm
utility
io
lexical_cast
numeric/conversion
mpl
range
tokenizer
tuple
array
bind
concept_check
function
iterator
regex
unordered
preprocessor
container
conversion
container_hash
detail
optional
function_types
fusion
intrusive
describe
typeof
functional
test
json
endian
)
target_compile_features(aedis INTERFACE cxx_std_17)
foreach(dep IN LISTS deps)
add_subdirectory(../${dep} boostorg/${dep})
endforeach()
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${PROJECT_BINARY_DIR}/AedisConfigVersion.cmake"
COMPATIBILITY AnyNewerVersion
)
find_package(Threads REQUIRED)
find_package(OpenSSL REQUIRED)
target_link_libraries(boost_redis
INTERFACE
Boost::system
Boost::asio
Threads::Threads
OpenSSL::Crypto
OpenSSL::SSL
)
else()
# If we're in the superproject or called from add_subdirectory,
# Boost dependencies should be already available.
# If other dependencies are not found, we bail out
find_package(Threads)
if(NOT Threads_FOUND)
message(STATUS "Boost.Redis has been disabled, because the required package Threads hasn't been found")
return()
endif()
find_package(OpenSSL)
if(NOT OpenSSL_FOUND)
message(STATUS "Boost.Redis has been disabled, because the required package OpenSSL hasn't been found")
return()
endif()
find_package(Boost 1.79 REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
# This is generated by boostdep
target_link_libraries(boost_redis
INTERFACE
Boost::asio
Boost::assert
Boost::core
Boost::mp11
Boost::system
Boost::throw_exception
Threads::Threads
OpenSSL::Crypto
OpenSSL::SSL
)
endif()
find_package(OpenSSL REQUIRED)
# Enable testing. If we're being called from the superproject, this has already been done
if (BOOST_REDIS_MAIN_PROJECT)
include(CTest)
endif()
enable_testing()
include_directories(include)
# Most tests require a running Redis server, so we only run them if we're the main project
if(BOOST_REDIS_MAIN_PROJECT AND BUILD_TESTING)
# Tests and common utilities
add_subdirectory(test)
# Executables
#=======================================================================
# Benchmarks. Build them with tests to prevent code rotting
add_subdirectory(benchmarks)
add_executable(chat_room examples/chat_room.cpp)
add_executable(containers examples/containers.cpp)
add_executable(echo_server examples/echo_server.cpp)
add_executable(intro examples/intro.cpp)
add_executable(intro_tls examples/intro_tls.cpp)
#add_executable(intro_sync examples/intro_sync.cpp) // Uncomment after update to Boost 1.80
add_executable(serialization examples/serialization.cpp)
add_executable(subscriber examples/subscriber.cpp)
add_executable(subscriber_sentinel examples/subscriber_sentinel.cpp)
add_executable(test_low_level tests/low_level.cpp)
add_executable(low_level_sync examples/low_level_sync.cpp)
add_executable(test_connection_other tests/connection_other.cpp)
add_executable(test_connection_connect tests/connection_connect.cpp)
add_executable(test_connection_push tests/connection_push.cpp)
add_executable(test_connection_quit tests/connection_quit.cpp)
add_executable(test_connection_quit_coalesce tests/connection_quit_coalesce.cpp)
add_executable(test_connection_reconnect tests/connection_reconnect.cpp)
add_executable(test_connection_tls tests/connection_tls.cpp)
target_compile_features(chat_room PUBLIC cxx_std_20)
target_compile_features(intro PUBLIC cxx_std_17)
target_compile_features(intro_tls PUBLIC cxx_std_17)
target_compile_features(serialization PUBLIC cxx_std_17)
target_compile_features(containers PUBLIC cxx_std_17)
target_compile_features(test_low_level PUBLIC cxx_std_17)
target_compile_features(low_level_sync PUBLIC cxx_std_17)
target_compile_features(echo_server PUBLIC cxx_std_20)
target_compile_features(subscriber PUBLIC cxx_std_20)
target_compile_features(subscriber_sentinel PUBLIC cxx_std_20)
target_compile_features(test_connection_other PUBLIC cxx_std_20)
target_compile_features(test_connection_push PUBLIC cxx_std_20)
target_compile_features(test_connection_connect PUBLIC cxx_std_17)
target_compile_features(test_connection_quit PUBLIC cxx_std_17)
target_compile_features(test_connection_quit_coalesce PUBLIC cxx_std_17)
target_compile_features(test_connection_reconnect PUBLIC cxx_std_20)
target_compile_features(test_connection_tls PUBLIC cxx_std_17)
target_link_libraries(intro_tls OpenSSL::Crypto OpenSSL::SSL)
target_link_libraries(test_connection_tls OpenSSL::Crypto OpenSSL::SSL)
# Tests
#=======================================================================
add_test(containers containers)
add_test(intro intro)
add_test(intro_tls intro_tls)
#add_test(intro_sync intro_sync)
add_test(serialization serialization)
add_test(low_level_sync low_level_sync)
add_test(test_low_level test_low_level)
add_test(test_connection_other test_connection_other)
add_test(test_connection_connect test_connection_connect)
add_test(test_connection_push test_connection_push)
add_test(test_connection_quit test_connection_quit)
add_test(test_connection_quit_coalesce test_connection_quit_coalesce)
add_test(test_connection_reconnect test_connection_reconnect)
add_test(test_connection_tls test_connection_tls)
# Install
#=======================================================================
install(TARGETS aedis
EXPORT aedis
PUBLIC_HEADER DESTINATION include COMPONENT Development
)
include(CMakePackageConfigHelpers)
configure_package_config_file(
"${PROJECT_SOURCE_DIR}/cmake/AedisConfig.cmake.in"
"${PROJECT_BINARY_DIR}/AedisConfig.cmake"
INSTALL_DESTINATION lib/cmake/aedis
)
install(EXPORT aedis DESTINATION lib/cmake/aedis)
install(FILES "${PROJECT_BINARY_DIR}/AedisConfigVersion.cmake"
"${PROJECT_BINARY_DIR}/AedisConfig.cmake"
DESTINATION lib/cmake/aedis)
install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/ DESTINATION include)
# Doxygen
#=======================================================================
configure_file(doc/Doxyfile.in doc/Doxyfile @ONLY)
add_custom_target(
doc
COMMAND doxygen "${PROJECT_BINARY_DIR}/doc/Doxyfile"
COMMENT "Building documentation using Doxygen"
WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}"
VERBATIM
)
# Coverage
#=======================================================================
set(
COVERAGE_TRACE_COMMAND
lcov --capture
-output-file "${PROJECT_BINARY_DIR}/coverage.info"
--directory "${PROJECT_BINARY_DIR}"
--include "${PROJECT_SOURCE_DIR}/include/*"
)
set(
COVERAGE_HTML_COMMAND
genhtml --legend -f -q
"${PROJECT_BINARY_DIR}/coverage.info"
--prefix "${PROJECT_SOURCE_DIR}"
--output-directory "${PROJECT_BINARY_DIR}/coverage_html"
)
add_custom_target(
coverage
COMMAND ${COVERAGE_TRACE_COMMAND}
COMMAND ${COVERAGE_HTML_COMMAND}
COMMENT "Generating coverage report"
VERBATIM
)
# Distribution
#=======================================================================
include(CPack)
# TODO
#=======================================================================
#.PHONY: bench
#bench:
# pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
# pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex
# pdftoppm {input.pdf} {output.file} -png
# Examples
add_subdirectory(example)
endif()

View File

@@ -12,7 +12,7 @@
"warnings": {
"dev": true,
"deprecated": true,
"uninitialized": false,
"uninitialized": true,
"unusedCli": true,
"systemVars": false
},
@@ -40,108 +40,26 @@
}
},
{
"name": "g++-11",
"name": "dev",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/g++-11",
"binaryDir": "${sourceDir}/build/dev",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_CXX_COMPILER": "g++-11",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11"
}
},
{
"name": "g++-11-release",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/g++-11-release",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Release",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra",
"CMAKE_CXX_COMPILER": "g++-11",
"CMAKE_SHARED_LINKER_FLAGS": "",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11-release"
}
},
{
"name": "clang++-13",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/clang++-13",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_CXX_COMPILER": "clang++-13",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-13"
}
},
{
"name": "clang++-14",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/clang++-14",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address",
"CMAKE_CXX_COMPILER": "clang++-14",
"CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-14"
}
},
{
"name": "libc++-14-cpp17",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/libc++-14-cpp17",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -stdlib=libc++ -std=c++17",
"CMAKE_EXE_LINKER_FLAGS": "-lc++",
"CMAKE_CXX_COMPILER": "clang++-14",
"CMAKE_SHARED_LINKER_FLAGS": "",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp17"
}
},
{
"name": "libc++-14-cpp20",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["cmake-pedantic"],
"binaryDir": "${sourceDir}/build/libc++-14-cpp20",
"cacheVariables": {
"CMAKE_BUILD_TYPE": "Debug",
"CMAKE_CXX_EXTENSIONS": "OFF",
"CMAKE_CXX_FLAGS": "-Wall -Wextra -stdlib=libc++ -std=c++17",
"CMAKE_EXE_LINKER_FLAGS": "-lc++",
"CMAKE_CXX_COMPILER": "clang++-14",
"CMAKE_SHARED_LINKER_FLAGS": "",
"CMAKE_CXX_STANDARD_REQUIRED": "ON",
"PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp20"
"PROJECT_BINARY_DIR": "${sourceDir}/build/dev",
"DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/dev/doc/"
}
},
{
"name": "clang-tidy",
"generator": "Unix Makefiles",
"hidden": false,
"inherits": ["g++-11"],
"inherits": ["dev"],
"binaryDir": "${sourceDir}/build/clang-tidy",
"cacheVariables": {
"CMAKE_CXX_CLANG_TIDY": "clang-tidy;--header-filter=${sourceDir}/include/*",
@@ -151,12 +69,7 @@
],
"buildPresets": [
{ "name": "coverage", "configurePreset": "coverage" },
{ "name": "g++-11", "configurePreset": "g++-11" },
{ "name": "g++-11-release", "configurePreset": "g++-11-release" },
{ "name": "clang++-13", "configurePreset": "clang++-13" },
{ "name": "clang++-14", "configurePreset": "clang++-14" },
{ "name": "libc++-14-cpp17", "configurePreset": "libc++-14-cpp17" },
{ "name": "libc++-14-cpp20", "configurePreset": "libc++-14-cpp20" },
{ "name": "dev", "configurePreset": "dev" },
{ "name": "clang-tidy", "configurePreset": "clang-tidy" }
],
"testPresets": [
@@ -166,13 +79,8 @@
"output": {"outputOnFailure": true},
"execution": {"noTestsAction": "error", "stopOnFailure": true}
},
{ "name": "coverage", "configurePreset": "coverage", "inherits": ["test"] },
{ "name": "g++-11", "configurePreset": "g++-11", "inherits": ["test"] },
{ "name": "g++-11-release", "configurePreset": "g++-11-release", "inherits": ["test"] },
{ "name": "clang++-13", "configurePreset": "clang++-13", "inherits": ["test"] },
{ "name": "clang++-14", "configurePreset": "clang++-14", "inherits": ["test"] },
{ "name": "libc++-14-cpp17", "configurePreset": "libc++-14-cpp17", "inherits": ["test"] },
{ "name": "libc++-14-cpp20", "configurePreset": "libc++-14-cpp20", "inherits": ["test"] },
{ "name": "clang-tidy", "configurePreset": "clang-tidy", "inherits": ["test"] }
{ "name": "coverage", "configurePreset": "coverage", "inherits": ["test"] },
{ "name": "dev", "configurePreset": "dev", "inherits": ["test"] },
{ "name": "clang-tidy", "configurePreset": "clang-tidy", "inherits": ["test"] }
]
}

README.md (1070 changed lines)

File diff suppressed because it is too large.

View File

@@ -1,20 +0,0 @@
add_library(benchmarks_options INTERFACE)
target_link_libraries(benchmarks_options INTERFACE boost_redis_src)
target_link_libraries(benchmarks_options INTERFACE boost_redis_project_options)
target_compile_features(benchmarks_options INTERFACE cxx_std_20)
add_executable(echo_server_client cpp/asio/echo_server_client.cpp)
target_link_libraries(echo_server_client PRIVATE benchmarks_options)
add_executable(echo_server_direct cpp/asio/echo_server_direct.cpp)
target_link_libraries(echo_server_direct PRIVATE benchmarks_options)
# TODO
#=======================================================================
#.PHONY: bench
#bench:
# pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
# pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex
# pdftoppm {input.pdf} {output.file} -png

View File

@@ -24,7 +24,7 @@
nodes near coords align={horizontal},
]
\addplot coordinates {
(29.5,Asio)
(31.1,Asio)
(30.7,Tokio)
(35.6,Go)
(43.6,Libuv)

View File

@@ -6,7 +6,6 @@
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace net = boost::asio;
@@ -14,7 +13,8 @@ using net::ip::tcp;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using timer_type = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
auto example(boost::asio::ip::tcp::endpoint ep, std::string msg, int n) -> net::awaitable<void>
net::awaitable<void>
example(boost::asio::ip::tcp::endpoint ep, std::string msg, int n)
{
try {
auto ex = co_await net::this_coro::executor;
@@ -62,6 +62,3 @@ int main(int argc, char* argv[])
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -9,9 +9,7 @@
//
#include <cstdio>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace net = boost::asio;
namespace this_coro = net::this_coro;
@@ -33,7 +31,7 @@ awaitable_type echo(tcp_socket socket)
std::size_t n = co_await socket.async_read_some(net::buffer(data), use_awaitable);
co_await async_write(socket, net::buffer(data, n), use_awaitable);
}
} catch (std::exception const&) {
} catch (std::exception const& e) {
//std::printf("echo Exception: %s\n", e.what());
}
}
@@ -58,6 +56,3 @@ int main()
std::printf("Exception: %s\n", e.what());
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,4 +0,0 @@
$ npm install
$ node echo_server_over_redis.js

View File

@@ -1,7 +1,7 @@
import { createClient } from 'redis';
import * as net from 'net';
const client = createClient({url: 'redis://aedis.occase.de:63799' });
const client = createClient({url: 'redis://db.occase.de:6379' });
client.on('error', (err) => console.log('Redis Client Error', err));
await client.connect();

View File

@@ -0,0 +1,4 @@
@PACKAGE_INIT@
include("${CMAKE_CURRENT_LIST_DIR}/Aedis.cmake")
check_required_components("@PROJECT_NAME@")

doc/Doxyfile.in (new file, 2690 lines)

File diff suppressed because it is too large.

View File

@@ -3,7 +3,7 @@
<!-- Navigation index tabs for HTML output -->
<navindex>
<tab type="mainpage" visible="yes" title="Contents"/>
<tab type="pages" visible="yes" title="" intro=""/>
<tab type="pages" visible="no" title="" intro=""/>
<tab type="modules" visible="no" title="Reference" intro=""/>
<tab type="namespaces" visible="no" title="">
<tab type="namespacelist" visible="yes" title="" intro=""/>

View File

@@ -1,92 +0,0 @@
project redis/doc ;
import doxygen ;
import path ;
import sequence ;
# All paths must be absolute to work well with the Doxygen rules.
path-constant this_dir : . ;
path-constant target_dir : html ;
path-constant redis_root_dir : .. ;
path-constant include_dir : ../include ;
path-constant examples_dir : ../example ;
path-constant readme : ../README.md ;
path-constant layout_file : DoxygenLayout.xml ;
path-constant header : header.html ;
path-constant footer : footer.html ;
local stylesheet_files = [ path.glob $(this_dir) : *.css ] ;
local includes = [ path.glob-tree $(include_dir) : *.hpp *.cpp ] ;
local examples = [ path.glob-tree $(examples_dir) : *.hpp *.cpp ] ;
# If passed directly, several HTML_EXTRA_STYLESHEET tags are generated,
# which is not correct.
local stylesheet_arg = [ sequence.join "\"$(stylesheet_files)\"" : " " ] ;
# The doxygen rule requires the target name to end in .html to generate HTML files
doxygen doc.html
:
$(includes) $(examples) $(readme)
:
<doxygen:param>"PROJECT_NAME=Boost.Redis"
<doxygen:param>PROJECT_NUMBER="1.84.0"
<doxygen:param>PROJECT_BRIEF="A redis client library"
<doxygen:param>"STRIP_FROM_PATH=\"$(redis_root_dir)\""
<doxygen:param>"STRIP_FROM_INC_PATH=\"$(include_dir)\""
<doxygen:param>BUILTIN_STL_SUPPORT=YES
<doxygen:param>INLINE_SIMPLE_STRUCTS=YES
<doxygen:param>HIDE_UNDOC_MEMBERS=YES
<doxygen:param>HIDE_UNDOC_CLASSES=YES
<doxygen:param>SHOW_HEADERFILE=YES
<doxygen:param>SORT_BRIEF_DOCS=YES
<doxygen:param>SORT_MEMBERS_CTORS_1ST=YES
<doxygen:param>SHOW_FILES=NO
<doxygen:param>SHOW_NAMESPACES=NO
<doxygen:param>"LAYOUT_FILE=\"$(layout_file)\""
<doxygen:param>WARN_IF_INCOMPLETE_DOC=YES
<doxygen:param>FILE_PATTERNS="*.hpp *.cpp"
<doxygen:param>EXCLUDE_SYMBOLS=std
<doxygen:param>"USE_MDFILE_AS_MAINPAGE=\"$(readme)\""
<doxygen:param>SOURCE_BROWSER=YES
<doxygen:param>"HTML_HEADER=\"$(header)\""
<doxygen:param>"HTML_FOOTER=\"$(footer)\""
<doxygen:param>"HTML_EXTRA_STYLESHEET=$(stylesheet_arg)"
<doxygen:param>HTML_TIMESTAMP=YES
<doxygen:param>GENERATE_TREEVIEW=YES
<doxygen:param>FULL_SIDEBAR=YES
<doxygen:param>DISABLE_INDEX=YES
<doxygen:param>ENUM_VALUES_PER_LINE=0
<doxygen:param>OBFUSCATE_EMAILS=YES
<doxygen:param>USE_MATHJAX=YES
<doxygen:param>MATHJAX_VERSION=MathJax_2
<doxygen:param>MATHJAX_RELPATH="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/"
<doxygen:param>MACRO_EXPANSION=YES
<doxygen:param>HAVE_DOT=NO
<doxygen:param>CLASS_GRAPH=NO
<doxygen:param>DIRECTORY_GRAPH=NO
;
explicit doc.html ;
# The doxygen rule only informs b2 about the main HTML file, and not about
# all the doc directory that gets generated. Using the install rule copies
# only a single file, which is incorrect. This is a workaround to copy
# the generated docs to the doc/html directory, where they should be.
make copyhtml.tag : doc.html : @copy_html_dir ;
explicit copyhtml.tag ;
actions copy_html_dir
{
rm -rf $(target_dir)
mkdir -p $(target_dir)
cp -r $(<:D)/html/doc/* $(target_dir)/
echo "Stamped" > "$(<)"
}
# These are used to inform the build system of the
# means to build the integrated and stand-alone docs.
alias boostdoc ;
explicit boostdoc ;
alias boostrelease : copyhtml.tag ;
explicit boostrelease ;

doc/aedis.css (new file, 30 lines)
View File

@@ -0,0 +1,30 @@
/* Doxygen HTML_EXTRA_STYLESHEET */
div.contents {
max-width: 100em;
margin-right: 5em;
margin-left: 5em;
}
.ui-resizable-e {
background-image:url("splitbar.png");
background-size:100%;
background-repeat:repeat-y;
background-attachment: scroll;
cursor:ew-resize;
height:100%;
right:0;
top:0;
width:1px;
}
.pyrootbox {
border: 1px solid #879ecb;
background-color: #f9fafc;
padding: 15px;
}
code
{
background-color:#fffbeb;
}

View File

@@ -1,145 +0,0 @@
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
MIT License
Copyright (c) 2021 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
html {
/* side nav width. MUST be = `TREEVIEW_WIDTH`.
* Make sure it is wide enough to contain the page title (logo + title + version)
*/
--side-nav-fixed-width: 335px;
}
#projectname {
white-space: nowrap;
}
#page-wrapper {
height: calc(100vh - 100px);
display: flex;
flex-direction: column;
}
#content-wrapper {
display: flex;
flex-direction: row;
min-height: 0;
}
#doc-content {
overflow-y: scroll;
flex: 1;
height: auto !important;
}
@media (min-width: 768px) {
html {
--searchbar-background: var(--page-background-color);
}
#sidebar-wrapper {
display: flex;
flex-direction: column;
min-width: var(--side-nav-fixed-width);
max-width: var(--side-nav-fixed-width);
background-color: var(--side-nav-background);
border-right: 1px solid rgb(222, 222, 222);
}
#search-box-wrapper {
display: flex;
flex-direction: row;
padding-left: 1em;
padding-right: 1em;
}
#MSearchBox {
flex: 1;
display: flex;
padding-left: 1em;
padding-right: 1em;
}
#MSearchBox .left {
display: flex;
flex: 1;
position: static;
align-items: center;
justify-content: flex-start;
width: auto;
height: auto;
}
#MSearchBox .right {
display: none;
}
#MSearchSelect {
padding-left: 0.75em;
left: auto;
background-repeat: no-repeat;
}
#MSearchField {
flex: 1;
position: static;
width: auto;
height: auto;
}
#nav-tree {
height: auto !important;
}
#nav-sync {
display: none;
}
#top {
display: block;
border-bottom: none;
max-width: var(--side-nav-fixed-width);
background: var(--side-nav-background);
}
.ui-resizable-handle {
cursor: default;
width: 1px !important;
}
#MSearchResultsWindow {
left: var(--spacing-medium) !important;
right: auto;
}
}
@media (max-width: 768px) {
#sidebar-wrapper {
display: none;
}
}

File diff suppressed because it is too large.

View File

@@ -1,19 +0,0 @@
<!-- HTML footer for doxygen 1.9.1-->
<!-- start footer part -->
</div> <!-- close #content-wrapper -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">$generatedby <a href="https://www.doxygen.org/index.html"><img class="footer" src="$relpath^doxygen.svg" width="104" height="31" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby&#160;<a href="https://www.doxygen.org/index.html"><img class="footer" src="$relpath^doxygen.svg" width="104" height="31" alt="doxygen"/></a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
</div> <!-- #page-wrapper -->
</body>
</html>

View File

@@ -1,61 +0,0 @@
<!-- HTML header for doxygen 1.9.1-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<div id="page-wrapper">
<div id="content-wrapper">
<div id="sidebar-wrapper">
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
</div>
<!--BEGIN SEARCHENGINE-->
<div id="search-box-wrapper">
$searchbox
</div>
<!--END SEARCHENGINE-->
<!--END TITLEAREA-->
<!-- end header part -->

doc/htmlfooter.html (new file, 19 lines)
View File

@@ -0,0 +1,19 @@
<!-- HTML footer for doxygen 1.8.14-->
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">
Aedis 1.0.0 - Reference Guide generated on $datetime using Doxygen $doxygenversion &#160;&#160;
<img class="footer" src="rootlogo_s.gif" alt="root"/></li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer">
Author: Marcelo Zimbres Silva.
</address>
<!--END !GENERATE_TREEVIEW-->
</body>
</html>

doc/htmlheader.html (new file, 34 lines)
View File

@@ -0,0 +1,34 @@
<!-- HTML header for doxygen 1.8.14-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$search
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table bgcolor="#346295" cellspacing="0" cellpadding="6">
<tbody>
<tr>
<td valign="middle" style="color: #FFFFFF" nowrap="nowrap"><font size="6">$projectname $projectnumber</font> &#160; <br> $projectbrief </td>
<td style="width:100%"> $searchbox </td>
</tr>
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->

View File

@@ -1,671 +0,0 @@
# On the costs of asynchronous abstractions
The biggest force behind the evolution of
[Boost.Redis](https://github.com/boostorg/redis) was my struggling in
coming up with a high-level connection abstraction that was capable of
multiplexing Redis commands from independent sources while
concurrently handling server pushes. This journey taught me many
important lessons, many of which are related to the design and
performance of asynchronous programs based on Boost.Asio.
In this article I will share some of the lessons learned, especially
those related to the performance costs of _abstractions_ such as
`async_read_until` that tend to overschedule into the event-loop. In
this context I will also briefly comment on how the topics discussed
here influenced my views on the proposed
[P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
(a.k.a. Senders and Receivers), which is likely to become the basis of
networking in upcoming C++ standards.
Although the analysis presented in this article uses the Redis communication
protocol for illustration, I expect it to be useful in general, since
[RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) shares
many similarities with other widely used protocols such as HTTP.
## Parsing `\r\n`-delimited messages
The Redis server communicates with its clients by exchanging data
serialized in
[RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) format.
Among the data types supported by this specification, the
`\r\n`-delimited messages are some of the most frequent in a typical
session. The table below shows some examples
Command | Response | Wire format | RESP3 name
---------|----------|---------------|---------------------
PING | PONG | `+PONG\r\n` | simple-string
INCR | 42 | `:42\r\n` | number
GET | null | `_\r\n` | null
Redis also supports command pipelines, which provide a way of
optimizing round-trip times by batching commands. A pipeline composed
of the commands shown in the previous table looks like this
```
| Sent in a |
| single write |
+--------+ | | +-------+
| | --------> PING + INCR + GET --------> | |
| | | |
| Client | | Redis |
| | | |
| | <-------- "+PONG\r\n:42\r\n_\r\n" <-------- | |
+--------+ |<------>|<---->|<-->| +-------+
| |
| Responses |
```
Messages that use delimiters are so common in networking that a
facility called `async_read_until` for reading them incrementally from
a socket is already part of Boost.Asio. The coroutine below uses it to
print message contents to the screen
```cpp
awaitable<void> parse_resp3_simple_msgs(tcp::socket socket)
{
for (std::string buffer;;) {
auto n = co_await async_read_until(socket, dynamic_buffer(buffer), "\r\n");
std::cout << buffer.substr(1, n - 3) << std::endl;
// Consume the buffer.
buffer.erase(0, n);
}
}
```
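For completeness, here is one way such a coroutine could be driven. This driver is my own addition rather than part of the original article; the host and port are placeholders, and it assumes a recent Boost.Asio in which the bare `co_await` above is well-formed (e.g. with `deferred` as the default completion token), plus the usual `#include <boost/asio.hpp>` and `namespace net = boost::asio;` aliases.

```cpp
// Hypothetical driver: resolve, connect and spawn the parsing
// coroutine above on an io_context.
int main()
{
   net::io_context ioc;
   net::ip::tcp::resolver resolver{ioc};
   net::ip::tcp::socket socket{ioc};

   // Placeholder endpoint; point it at a real Redis instance.
   net::connect(socket, resolver.resolve("127.0.0.1", "6379"));

   net::co_spawn(ioc, parse_resp3_simple_msgs(std::move(socket)), net::detached);
   ioc.run();
}
```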
If we pay attention to the buffer content as it is parsed by
`parse_resp3_simple_msgs` above, we can see that it is rotated fairly often, for example
```
"+PONG\r\n:100\r\n+OK\r\n_\r\n"
":100\r\n+OK\r\n_\r\n"
"+OK\r\n_\r\n"
"_\r\n"
""
```
When I first noticed these apparently excessive buffer rotations I
was concerned they would impact the performance of Boost.Redis in a
severe way. To measure the magnitude of this impact I came up with an
experimental implementation of Asio's `dynamic_buffer` that consumed
the buffer less eagerly than the `std::string::erase` function used
above. For that, the implementation increased a buffer offset up
to a certain threshold and only then triggered a (larger) rotation.
This is illustrated in the diagram below
```
|<---- offset threshold ---->|
| |
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
| # Initial offset
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<------>| # After 1st message
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<-------------->| # After 2nd message
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<--------------------->| # After 3rd message
"+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
|<-------------------------->| # Threshold crossed after the 4th message
"+PONG\r\n"
| # After rotation
```
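The experimental `dynamic_buffer` itself is not shown here, but the gist of the idea can be sketched in a few lines. The function below is my own illustration (not the Boost.Redis or Asio implementation): it advances an offset instead of erasing after every message and only performs a real rotation once the offset crosses a threshold.

```cpp
// Illustration of lazy consumption: logically consume by moving an
// offset, physically erase (rotate) only when the offset gets large.
#include <cstddef>
#include <iostream>
#include <string>
#include <string_view>

void parse_lazily(std::string& buffer, std::size_t threshold = 1024)
{
   std::size_t offset = 0;
   while (true) {
      auto const pos = buffer.find("\r\n", offset);
      if (pos == std::string::npos)
         break; // Incomplete message, wait for more data.

      // Print the message content without the RESP3 type byte.
      std::cout << std::string_view{buffer}.substr(offset + 1, pos - offset - 1) << "\n";

      offset = pos + 2; // Logical consumption, no rotation yet.
      if (offset >= threshold) {
         buffer.erase(0, offset); // One large rotation instead of many small ones.
         offset = 0;
      }
   }
   buffer.erase(0, offset); // Drop what was logically consumed.
}

int main()
{
   std::string buffer = "+PONG\r\n:100\r\n+OK\r\n_\r\n";
   parse_lazily(buffer);
}
```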
After comparing the performance of the two versions I was surprised
to find there was no difference at all! But that was also very suspicious
since some RESP3 aggregate types contain a considerable number of
separators. For example, a map with two pairs `[(key1, value1),
(key2, value2)]` encoded in RESP3 requires ten rotations in total
```
"%2\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"key1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
...
```
It was evident that something more costly was overshadowing the buffer
rotations. But it couldn't be the search for the separator, since that
performs comparably to the rotations. It is also easy to show that the
overhead is not related to any IO operation since the problem persists
if the buffer is never consumed (which causes the function to be
called with the same string repeatedly). Once these two factors
are taken off the table, we are driven to the conclusion that
calling `async_read_until` has an intrinsic cost; let us see what
that is.
### Async operations that complete synchronously considered harmful
Assume the scenario described earlier where `async_read_until` is used
to parse multiple `\r\n`-delimited messages. The following is a
detailed description of what happens behind the scenes
1. `async_read_until` calls `socket.async_read_some` repeatedly
until the separator `\r\n` shows up in the buffer
```
"<read1>" # Read 1: needs more data.
"<read1><read2>" # Read 2: needs more data.
"<read1><read2>" # Read 3: needs more data.
"<read1><read2><read3>" # Read 4: needs more data.
"<read1><read2><read3>\r\n<bonus bytes>" # separator found, done.
```
2. The last call to `socket.async_read_some` happens to read past
the separator `\r\n` (depicted as `<bonus bytes>` above),
resulting in bonus (maybe incomplete) messages in the buffer
```
| 1st async_read_some | 2nd async_read_some |
| | |
"+message content here \r\n:100\r\n+OK\r\n_\r\n+incomplete respo"
| | | |
| Message wanted |<-- bonus msgs --->|<--incomplete-->|
| | msg |
| | |
| |<---------- bonus bytes ----------->|
```
3. The buffer is consumed and `async_read_until` is called again.
However, since the buffer already contains the next message this
is an IO-less call
```
":100\r\n+OK\r\n_\r\n+not enough byt"
| | |
| No IO required | Need more |
| to parse these | data |
| messages. | |
```
The fact that step 3. doesn't perform any IO implies the operation can
complete synchronously, but because this is an asynchronous function
Boost.Asio by default won't call the continuation before the
function returns. The implementation must therefore enqueue it for
execution, as depicted below
```
OP5 ---> OP4 ---> OP3 ---> OP2 ---> OP1 # Reschedules the continuation
|
OP1 schedules its continuation |
+-----------------------------------+
|
|
OP6 ---> OP5 ---> OP4 ---> OP3 ---> OP2 # Reschedules the continuation
|
OP2 schedules its continuation |
+-----------------------------------+
|
|
OP7 ---> OP6 ---> OP5 ---> OP4 ---> OP3
```
When summed up, the excessive rescheduling of continuations leads to
performance degradation at scale. But since this is an event-loop
there is no way around rescheduling as doing otherwise would mean
allowing a task to monopolize the event-loop, preventing other tasks
from making progress. The best that can be done is to avoid
_overscheduling_, so let us determine how much rescheduling is too
much.
## The intrinsic latency of an event-loop
An event-loop is a design pattern originally used to handle events
external to the application, such as GUIs, networking and other forms
of IO. If we take this literally, it becomes evident that the way
`async_read_until` works is incompatible with an event-loop since
_searching for the separator_ is not an external event and as such
should not have to be enqueued for execution.
Once we constrain ourselves to events that have an external origin,
such as anything related to IO and including any form of IPC, the
scheduling overhead is reduced considerably since the latency
of the transport layer eclipses whatever time it takes to schedule the
continuation. For example, according to
[these](https://www.boost.org/doc/libs/develop/libs/cobalt/doc/html/index.html#posting_to_an_executor)
benchmarks, the time it takes to schedule a task in the
`asio::io_context` is approximately `50ns`.
To give the reader an idea about the magnitude of this number, if
rescheduling alone were to account for 1% of the runtime of an app
that uses asynchronous IO to move around data in chunks of size 128kb,
then this app would have a throughput of approximately 24 Gb/s. At such
a high throughput multiple other factors kick in before any scheduling
overhead even starts to manifest.
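As a rough sanity check of that figure (my own arithmetic, not the article's, reading "128kb" as 128 kilobits): if the ~`50ns` reschedule is to account for 1% of the time spent per chunk, the per-chunk budget is 5µs, which corresponds to a throughput of

$$
\frac{128 \times 10^{3}\ \text{bits}}{5\ \mu\text{s}} \approx 26\ \text{Gb/s},
$$

which is in the same ballpark as the 24 Gb/s quoted above.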
It is therefore safe to say that only asynchronous operations that
don't perform any IO, or are not bound by it, are ever likely to
overschedule in the sense described above. Those cases can usually be
avoided; this is what worked for Boost.Redis:
1. `async_read_until` was replaced with calls to
`socket.async_read_some` and an incremental parser that does not
do any IO.
2. Channel `try_` functions are used to check if send and receive
operations can be called without suspension (see the sketch after
this list). For example, `try_send` before `async_send` and
`try_receive` before
`async_receive` ([see also](https://github.com/chriskohlhoff/asio/commit/fe4fd7acf145335eeefdd19708483c46caeb45e5)
`try_send_via_dispatch` for a more aggressive optimization).
3. Coalescing of individual requests into a single payload to reduce
the number of necessary writes on the socket. This is only
possible because Redis supports pipelining (good protocols
help!).
4. Increased the socket read sizes to 4kb to reduce the number of
reads (the savings outweigh the extra cost of rotating more data in
the buffer).
5. Dropped the `resp3::async_read` abstraction. When I started
developing Boost.Redis there was convincing precedent for having
a `resp3::async_read` function to read complete RESP3 messages
from a socket
Name | Description
---------------------------------------|-------------------
`asio::async_read`                      | Reads `n` bytes from a stream.
`beast::http::async_read` | Reads a complete HTTP message.
`beast::websocket::stream::async_read` | Reads a complete Websocket message.
`redis::async_read` | Reads a complete RESP3 message.
It turns out however that this function is also vulnerable to
immediate completions since in command pipelines multiple
responses show up in the buffer after a call to
`socket.async_read_some`. When that happens each call to
`resp3::async_read` is IO-less.
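To illustrate point 2, here is a minimal sketch of the try-before-async pattern with an Asio experimental channel. It is my own example rather than Boost.Redis internals; the names (`receive_msg`, the payload type, the capacity) are illustrative only.

```cpp
// Fast path / slow path receive on an asio::experimental::channel:
// consume inline via try_receive when a message is already buffered,
// otherwise suspend with async_receive.
#include <boost/asio.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <iostream>
#include <string>

namespace net = boost::asio;
using boost::system::error_code;
using channel_type = net::experimental::channel<void(error_code, std::string)>;

net::awaitable<void> receive_msg(channel_type& ch)
{
   std::string msg;

   // Fast path: a buffered message is consumed without suspending,
   // so no continuation has to be rescheduled.
   bool const ready = ch.try_receive([&](error_code, std::string m) { msg = std::move(m); });

   if (!ready) {
      // Slow path: nothing buffered, suspend until a message arrives.
      msg = co_await ch.async_receive(net::use_awaitable);
   }

   std::cout << msg << std::endl;
}

int main()
{
   net::io_context ioc;
   channel_type ch{ioc, 1}; // capacity 1 so the try_send below succeeds
   ch.try_send(error_code{}, std::string{"+PONG\r\n"});
   net::co_spawn(ioc, receive_msg(ch), net::detached);
   ioc.run();
}
```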
Sometimes it is not possible to avoid asynchronous operations that
complete synchronously; in the following sections we will see how to
favor throughput over fairness in Boost.Asio.
### Calling the continuation inline
In Boost.Asio it is possible to customize how an algorithm executes
the continuation when an immediate completion occurs. This includes
the ability to call it inline, thereby avoiding the cost of
excessive rescheduling. Here is how it works
```cpp
// (default) The continuation is enqueued for execution, regardless of
// whether it is immediate or not.
async_read_until(socket, buffer, "\r\n", continuation);
// Immediate completions are executed in exec2 (otherwise equal to the
// version above). The continuation is called inline if exec2 is the
// same executor that is running the operation.
async_read_until(socket, buffer, "\r\n", bind_immediate_executor(exec2, continuation));
```
To compare the performance of both cases I have written a small
function that calls `async_read_until` in a loop with a buffer that is
never consumed so that all completions are immediate. The version
below uses the default behaviour
```cpp
void read_safe(tcp::socket& s, std::string& buffer)
{
auto continuation = [&s, &buffer](auto ec, auto n)
{
read_safe(s, buffer); // Recursive call
};
// This won't cause stack exhaustion because the continuation is
// not called inline but posted in the event loop.
async_read_until(s, dynamic_buffer(buffer), "\r\n", continuation);
}
```
To optimize away some of the rescheduling the version below uses the
`bind_immediate_executor` customization to call the continuation
reentrantly and then breaks the stack from time to time to avoid
exhausting it
```cpp
void read_reentrant(tcp::socket& s, std::string& buffer)
{
auto cont = [&](auto, auto)
{
read_reentrant(s, buffer); // Recursive call
};
// Counts the calls so the stack can be broken periodically; a static
// local keeps the example self-contained.
static int counter = 0;
++counter;
// Breaks the callstack after 16 inline calls.
if (counter % 16 == 0) {
post(s.get_executor(), [cont](){cont({}, 0);});
return;
}
// Continuation called reentrantly.
async_read_until(s, dynamic_buffer(buffer), "\r\n",
bind_immediate_executor(s.get_executor(), cont));
}
```
The diagram below shows what the reentrant chain of calls in the code
above look like from the event-loop point of view
```
OP5 ---> OP4 ---> OP3 ---> OP2 ---> OP1a # Completes immediately
|
|
... |
OP1b # Completes immediately
|
Waiting for OP5 to |
reschedule its |
continuation OP1c # Completes immediately
|
|
... |
OP1d # Break the call-stack
|
+-----------------------------------+
|
OP6 ---> OP5 ---> OP4 ---> OP3 ---> OP2
```
Unsurprisingly, the reentrant code is 3x faster than the one that
relies on the default behaviour (don't forget that this is a best case
scenario; in the general case not all completions are immediate).
Although faster, this strategy has some downsides
- The overall operation is not as fast as possible since it still
has to reschedule from time to time to break the call stack. The
less it reschedules the higher the risk of exhausting it.
- It is too easy to forget to break the stack. For example, the
programmer might decide to branch somewhere into another chain of
asynchronous calls that also use this strategy. To avoid
exhaustion all such branches would have to be safeguarded with a
manual rescheduling, i.e. `post`.
- Requires additional layers of complexity such as
`bind_immediate_executor` in addition to `bind_executor`.
- Non-compliant with stricter
[guidelines](https://en.wikipedia.org/wiki/The_Power_of_10:_Rules_for_Developing_Safety-Critical_Code)
that prohibit reentrant code.
- There is no simple way of choosing the maximum allowed number of
reentrant calls for each function in a way that covers different
use cases and users. Library writers and users would be tempted
into using a small value, reducing the performance advantage.
- If the socket is always ready for reading the task will
monopolize IO for up to `16` iterations, which might cause
stutter in unrelated tasks, as depicted below
```
Unfairness
+----+----+----+ +----+----+----+ +----+----+----+
Socket-1 | | | | | | | | | | | |
+----+----+----+----+----+----+----+----+----+----+----+----+
Socket-2 | | | | | |
+----+ +----+ +----+
```
From the aesthetic point of view the code above is also unpleasant as
it breaks the function's asynchronous contract by injecting reentrant
behaviour. It gives me the same kind of feeling I have about
[recursive
mutexes](http://www.zaval.org/resources/library/butenhof1.html).
Note: It is worth mentioning here that a similar
[strategy](https://github.com/NVIDIA/stdexec/blob/6f23dd5b1d523541ce28af32fc2603403ebd36ed/include/exec/trampoline_scheduler.hpp#L52)
is used to break the call stack of repeating algorithms in
[stdexec](https://github.com/NVIDIA/stdexec), but in this time
based on
[P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
and not on Boost.Asio.
### Coroutine tail-calls
In the previous section we have seen how to avoid overscheduling by
instructing the asynchronous operation to call the completion inline
on immediate completion. It turns out, however, that coroutine support
for _tail-calls_ provides a way to completely sidestep this problem.
This feature is described by
[Lewis Baker](https://lewissbaker.github.io/2020/05/11/understanding_symmetric_transfer)
as follows
> A tail-call is one where the current stack-frame is popped before
> the call and the current functions return address becomes the
> return-address for the callee. ie. the callee will return directly
> the the [sic] caller of this function.
This means (at least in principle) that a library capable of using
tail-calls when an immediate completion occurs neither has to
reschedule the continuation nor call it inline. To test how this
feature compares to the other styles I have used Boost.Cobalt. The
code looks as follows
```cpp
// Warning: risks unfairness and starvation of other tasks.
task<void> read_until_unfair()
{
   for (int i = 0; i != repeat; ++i) {
      co_await async_read_until(s, dynamic_buffer(buffer), "\r\n", cobalt::use_op);
   }
}
```
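The language mechanism behind this is C++20 _symmetric transfer_: when
an awaiter's `await_suspend` returns a `std::coroutine_handle<>`, the
current frame is popped and the returned coroutine is resumed as a
tail-call rather than as a nested call. The hypothetical awaiter below
illustrates only that hook; it is a minimal sketch, not Boost.Cobalt
code.
```cpp
#include <coroutine>

// Minimal sketch: resuming `next` from await_suspend is a tail-call,
// so chaining such resumptions does not grow the call stack.
struct resume_other {
   std::coroutine_handle<> next;

   bool await_ready() const noexcept { return false; }

   // Returning a handle requests symmetric transfer: `next` is resumed
   // directly instead of being called on top of the current frame.
   std::coroutine_handle<> await_suspend(std::coroutine_handle<>) const noexcept
   {
      return next;
   }

   void await_resume() const noexcept {}
};
```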
The results of this comparison are listed in the table below

Time/s | Style     | Configuration               | Library
-------|-----------|-----------------------------|-------------
1.0    | Coroutine | `await_ready` optimization  | Boost.Cobalt
4.8    | Callback  | Reentrant                   | Boost.Asio
10.3   | Coroutine | `use_op`                    | Boost.Cobalt
14.9   | Callback  | Regular                     | Boost.Asio
15.6   | Coroutine | `asio::deferred`            | Boost.Asio
As the reader can see, `cobalt::use_op` ranks 3rd and is considerably
faster (10.3 vs 15.6) than the Asio equivalent that uses
default-rescheduling. However, by trading rescheduling for tail-calls
the code above can now monopolize the event loop, resulting in
unfairness if the socket happens to receive data at a higher rate
than other tasks. If data happens to be received continuously on a
socket that is always ready for reading, other tasks will starve
```
Starvation
+----+----+----+----+----+----+----+----+----+----+----+----+
Socket-1 | | | | | | | | | | | | |
+----+----+----+----+----+----+----+----+----+----+----+----+
Socket-2 Starving ...
```
To avoid this problem the programmer is forced to reschedule from time
to time, in the same way we did for the reentrant calls
```cpp
task<void> read_until_fair()
{
   for (int i = 0; i != repeat; ++i) {
      if (i % 16 == 0) {
         // Reschedules to address unfairness and starvation of
         // other tasks.
         co_await post(cobalt::use_op);
         continue;
      }
      co_await async_read_until(s, dynamic_buffer(buffer), "\r\n", cobalt::use_op);
   }
}
```
Delegating fairness-safety to applications is a dangerous game.
This is a
[problem](https://tokio.rs/blog/2020-04-preemption) the Tokio
community had to deal with before the Tokio runtime started enforcing
rescheduling (after 256 successful operations)
> If data is received faster than it can be processed, it is possible
> that more data will have already been received by the time the
> processing of a data chunk completes. In this case, .await will
> never yield control back to the scheduler, other tasks will not be
> scheduled, resulting in starvation and large latency variance.
> Currently, the answer to this problem is that the user of Tokio is
> responsible for adding yield points in both the application and
> libraries. In practice, very few actually do this and end up being
> vulnerable to this sort of problem.
### Safety in P2300 (Senders and Receivers)
As of this writing, the C++ standards committee (WG21) has been
pursuing the standardization of a networking library for almost 20
years. One of the biggest obstacles that prevented it from happening
was a disagreement on what the _asynchronous model_ that underlies
networking should look like. Until 2021 that model was basically
Boost.Asio _executors_, but in this
[poll](https://www.reddit.com/r/cpp/comments/q6tgod/c_committee_polling_results_for_asynchronous/)
the committee decided to abandon that front and concentrate efforts on
the new [P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
proposal, also known as _senders and receivers_. The decision was
quite [abrupt](https://isocpp.org/files/papers/P2464R0.html)
> The original plan about a week earlier than the actual writing of
> this paper was to write a paper that makes a case for standardizing
> the Networking TS.
and opinions turned out to be very strong against Boost.Asio (see
[this](https://api.csswg.org/bikeshed/?force=1&url=https://raw.githubusercontent.com/brycelelbach/wg21_p2459_2022_january_library_evolution_poll_outcomes/main/2022_january_library_evolution_poll_outcomes.bs)
for how each voter backed their vote)
> The whole concept is completely useless, there's no composed code
> you can write with it.
The part of that debate that interests us most here is stated in
[P2471](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p2471r1.pdf),
which compares Boost.Asio with P2300
> Yes, default rescheduling each operation and default not
> rescheduling each operation, is a poor trade off. IMO both options
> are poor. The one good option that I know of that can prevent stack
> exhaustion is first-class tail-recursion in library or language
> ASIO has chosen to require that every async operation must schedule
> the completion on a scheduler (every read, every write, etc..).
> sender/receiver has not decided to
> require that the completion be scheduled.
> This is why I consider tail-call the only good solution. Scheduling
> solutions are all inferior (give thanks to Lewis for this shift in
> my understanding :) ).
Although tail-calls solve the problem of stack exhaustion, as we have
seen above, they make the code vulnerable to unfairness and starvation
and are therefore not an alternative to default-rescheduling, as the
quotation above implies. To deal with the lack of
default-rescheduling, libraries and applications built on top of P2300
have to address the aforementioned problems, layer after layer. For
example,
[stdexec](https://github.com/NVIDIA/stdexec) has invented something
called
_[trampoline-scheduler](https://github.com/NVIDIA/stdexec/blob/e7cd275273525dbc693f4bf5f6dc4d4181b639e4/include/exec/trampoline_scheduler.hpp)_
to protect repeating algorithms such as `repeat_effect_until` from
exhausting the stack. This construct however is built around
reentrancy, allowing
[sixteen](https://github.com/NVIDIA/stdexec/blob/83cdb92d316e8b3bca1357e2cf49fc39e9bed403/include/exec/trampoline_scheduler.hpp#L52)
levels of inline calls by default. While in Boost.Asio it is possible to use
reentrancy as an optimization for corner cases, here it is made the
_modus operandi_; the downsides of this approach have already been stated in a
previous section so I won't repeat them here.
Also, the fact that a special scheduler is needed by specific
algorithms is a problem on its own since it contradicts one of the
main selling points of P2300, namely that of being _generic_. For
example, [P2464R0](https://isocpp.org/files/papers/P2464R0.html) uses
the code below
```cpp
void
run_that_io_operation(
   scheduler auto sched,
   sender_of<network_buffer> auto wrapping_continuation)
{
   // snip
}
```
and states
> I have no idea what the sched's concrete type is. I have no idea
> what the wrapping_continuation's concrete type is. They're none of
> my business, ...
Hence, by being generic, the algorithms built on top of P2300 are also
unsafe (against stack exhaustion, unfairness and starvation). Otherwise,
if library writers require a specific scheduler to ensure safety, then
the algorithms automatically become non-generic. Pick your poison!
The proposers of P2300 claim that it doesn't address safety because it
should be seen as the low-level building blocks of asynchronous
programming and that it is the role of higher-level libraries to deal
with that. This claim however does not hold since, as we have just
seen, Boost.Asio also provides those building blocks but does so in a
safe way. In fact, during the whole development of Boost.Redis I never
had to think about these kinds of problems because safety is built in
from the ground up.
### Avoiding coroutine suspension with `await_ready`
Now let us get back to the first place in the table above, which uses
the `await_ready` optimization from Boost.Cobalt. This API provides
users with the ability to avoid coroutine suspension altogether in
case the separator is already present in the buffer. It works by
defining a `struct` with the following interface
```cpp
struct read_until : cobalt::op<error_code, std::size_t> {
   ...

   void ready(cobalt::handler<error_code, std::size_t> handler) override
   {
      // Search for the separator in buffer and call the handler if found
   }

   void initiate(cobalt::completion_handler<error_code, std::size_t> complete) override
   {
      // Regular call to async_read_until.
      async_read_until(socket, buffer, delim, std::move(complete));
   }
};
```
and the code that uses it
```cpp
for (int i = 0; i != repeat; ++i) {
   co_await read_until(socket, dynamic_buffer(buffer));
}
```
In essence, what the code above does is skip a call to
`async_read_until` by first checking, with the `ready` function, whether
the forthcoming operation is going to complete immediately. The
nice thing about it is that the programmer can use this optimization
only when a performance bottleneck is detected, without planning for it
in advance. The drawback however is that it requires reimplementing
the search for the separator in the body of the `ready` function,
defeating the purpose of using `async_read_until` in the first place, as
(again) it would have been simpler to reformulate the operation in
terms of `socket.async_read_some` directly.
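For illustration, the `ready` body for this particular benchmark could
look roughly like the sketch below. This is hypothetical code: it
assumes `buffer` is the `std::string` underlying the dynamic buffer,
that the separator is `"\r\n"`, and that the `cobalt::handler` is
invocable with the completion values.
```cpp
void ready(cobalt::handler<error_code, std::size_t> handler) override
{
   // Complete inline if the separator is already buffered; otherwise
   // return without calling the handler so that initiate() runs.
   auto const pos = buffer.find("\r\n");
   if (pos != std::string::npos)
      handler(error_code{}, pos + 2); // Bytes up to and including the separator.
}
```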
## Acknowledgements
Thanks to Klemens Morgenstern for answering questions about
Boost.Cobalt.

View File

@@ -1,52 +0,0 @@
add_library(examples_main STATIC main.cpp)
target_compile_features(examples_main PRIVATE cxx_std_20)
target_link_libraries(examples_main PRIVATE boost_redis_project_options)
macro(make_example EXAMPLE_NAME STANDARD)
add_executable(${EXAMPLE_NAME} ${EXAMPLE_NAME}.cpp)
target_link_libraries(${EXAMPLE_NAME} PRIVATE boost_redis_src)
target_link_libraries(${EXAMPLE_NAME} PRIVATE boost_redis_project_options)
target_compile_features(${EXAMPLE_NAME} PRIVATE cxx_std_${STANDARD})
if (${STANDARD} STREQUAL "20")
target_link_libraries(${EXAMPLE_NAME} PRIVATE examples_main)
endif()
if (${EXAMPLE_NAME} STREQUAL "cpp20_json")
target_link_libraries(${EXAMPLE_NAME} PRIVATE Boost::json Boost::container_hash)
endif()
endmacro()
macro(make_testable_example EXAMPLE_NAME STANDARD)
make_example(${EXAMPLE_NAME} ${STANDARD})
if (BOOST_REDIS_INTEGRATION_TESTS)
add_test(${EXAMPLE_NAME} ${EXAMPLE_NAME})
endif()
endmacro()
make_testable_example(cpp17_intro 17)
make_testable_example(cpp17_intro_sync 17)
make_testable_example(cpp20_intro 20)
make_testable_example(cpp20_containers 20)
make_testable_example(cpp20_json 20)
make_testable_example(cpp20_intro_tls 20)
make_example(cpp20_subscriber 20)
make_example(cpp20_streams 20)
make_example(cpp20_echo_server 20)
make_example(cpp20_resolve_with_sentinel 20)
# We test the protobuf example only on gcc.
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
find_package(Protobuf)
if (Protobuf_FOUND)
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS person.proto)
make_testable_example(cpp20_protobuf 20)
target_sources(cpp20_protobuf PUBLIC ${PROTO_SRCS} ${PROTO_HDRS})
target_link_libraries(cpp20_protobuf PRIVATE ${Protobuf_LIBRARIES})
target_include_directories(cpp20_protobuf PUBLIC ${Protobuf_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
endif()
endif()
if (NOT MSVC)
make_example(cpp20_chat_room 20)
endif()

View File

@@ -1,50 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/detached.hpp>
#include <iostream>
namespace asio = boost::asio;
using boost::redis::connection;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
auto main(int argc, char * argv[]) -> int
{
try {
config cfg;
if (argc == 3) {
cfg.addr.host = argv[1];
cfg.addr.port = argv[2];
}
request req;
req.push("PING", "Hello world");
response<std::string> resp;
asio::io_context ioc;
connection conn{ioc};
conn.async_run(cfg, {}, asio::detached);
conn.async_exec(req, resp, [&](auto ec, auto) {
if (!ec)
std::cout << "PING: " << std::get<0>(resp).value() << std::endl;
conn.cancel();
});
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
return 1;
}
}

View File

@@ -1,43 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include "sync_connection.hpp"
#include <string>
#include <iostream>
using boost::redis::sync_connection;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
auto main(int argc, char * argv[]) -> int
{
try {
config cfg;
if (argc == 3) {
cfg.addr.host = argv[1];
cfg.addr.port = argv[2];
}
sync_connection conn;
conn.run(cfg);
request req;
req.push("PING");
response<std::string> resp;
conn.exec(req, resp);
conn.stop();
std::cout << "Response: " << std::get<0>(resp).value() << std::endl;
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}

View File

@@ -1,108 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/posix/stream_descriptor.hpp>
#include <unistd.h>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
namespace asio = boost::asio;
using stream_descriptor = asio::deferred_t::as_default_on_t<asio::posix::stream_descriptor>;
using signal_set = asio::deferred_t::as_default_on_t<asio::signal_set>;
using boost::asio::async_read_until;
using boost::asio::awaitable;
using boost::asio::co_spawn;
using boost::asio::consign;
using boost::asio::deferred;
using boost::asio::detached;
using boost::asio::dynamic_buffer;
using boost::asio::redirect_error;
using boost::asio::use_awaitable;
using boost::redis::config;
using boost::redis::connection;
using boost::redis::generic_response;
using boost::redis::ignore;
using boost::redis::request;
using boost::system::error_code;
using namespace std::chrono_literals;
// Chat over Redis pubsub. To test, run this program from multiple
// terminals and type messages to stdin.
auto
receiver(std::shared_ptr<connection> conn) -> awaitable<void>
{
request req;
req.push("SUBSCRIBE", "channel");
generic_response resp;
conn->set_receive_response(resp);
while (conn->will_reconnect()) {
// Subscribe to channels.
co_await conn->async_exec(req, ignore, deferred);
// Loop reading Redis push messages.
for (error_code ec;;) {
co_await conn->async_receive(redirect_error(use_awaitable, ec));
if (ec)
break; // Connection lost, break so we can reconnect to channels.
std::cout
<< resp.value().at(1).value
<< " " << resp.value().at(2).value
<< " " << resp.value().at(3).value
<< std::endl;
resp.value().clear();
}
}
}
// Publishes stdin messages to a Redis channel.
auto publisher(std::shared_ptr<stream_descriptor> in, std::shared_ptr<connection> conn) -> awaitable<void>
{
for (std::string msg;;) {
auto n = co_await async_read_until(*in, dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "channel", msg);
co_await conn->async_exec(req, ignore, deferred);
msg.erase(0, n);
}
}
// Called from the main function (see main.cpp)
auto co_main(config cfg) -> awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
auto stream = std::make_shared<stream_descriptor>(ex, ::dup(STDIN_FILENO));
co_spawn(ex, receiver(conn), detached);
co_spawn(ex, publisher(stream, conn), detached);
conn->async_run(cfg, {}, consign(detached, conn));
signal_set sig_set{ex, SIGINT, SIGTERM};
co_await sig_set.async_wait();
conn->cancel();
stream->cancel();
}
#else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto co_main(config const&) -> awaitable<void>
{
std::cout << "Requires support for posix streams." << std::endl;
co_return;
}
#endif // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,106 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/co_spawn.hpp>
#include <map>
#include <vector>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::ignore_t;
using boost::redis::ignore;
using boost::redis::config;
using boost::redis::connection;
using boost::asio::awaitable;
using boost::asio::deferred;
using boost::asio::detached;
using boost::asio::consign;
void print(std::map<std::string, std::string> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
void print(std::vector<int> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
// Stores the content of some STL containers in Redis.
auto store(std::shared_ptr<connection> conn) -> awaitable<void>
{
std::vector<int> vec
{1, 2, 3, 4, 5, 6};
std::map<std::string, std::string> map
{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}};
request req;
req.push_range("RPUSH", "rpush-key", vec);
req.push_range("HSET", "hset-key", map);
co_await conn->async_exec(req, ignore, deferred);
}
auto hgetall(std::shared_ptr<connection> conn) -> awaitable<void>
{
// A request contains multiple commands.
request req;
req.push("HGETALL", "hset-key");
// Responses as tuple elements.
response<std::map<std::string, std::string>> resp;
// Executes the request and reads the response.
co_await conn->async_exec(req, resp, deferred);
print(std::get<0>(resp).value());
}
// Retrieves in a transaction.
auto transaction(std::shared_ptr<connection> conn) -> awaitable<void>
{
request req;
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
response<
ignore_t, // multi
ignore_t, // lrange
ignore_t, // hgetall
response<std::optional<std::vector<int>>, std::optional<std::map<std::string, std::string>>> // exec
> resp;
co_await conn->async_exec(req, resp, deferred);
print(std::get<0>(std::get<3>(resp).value()).value().value());
print(std::get<1>(std::get<3>(resp).value()).value().value());
}
// Called from the main function (see main.cpp)
awaitable<void> co_main(config cfg)
{
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
conn->async_run(cfg, {}, consign(detached, conn));
co_await store(conn);
co_await transaction(conn);
co_await hgetall(conn);
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,70 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/co_spawn.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using tcp_socket = asio::deferred_t::as_default_on_t<asio::ip::tcp::socket>;
using tcp_acceptor = asio::deferred_t::as_default_on_t<asio::ip::tcp::acceptor>;
using signal_set = asio::deferred_t::as_default_on_t<asio::signal_set>;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
using boost::system::error_code;
using boost::redis::connection;
using namespace std::chrono_literals;
auto echo_server_session(tcp_socket socket, std::shared_ptr<connection> conn) -> asio::awaitable<void>
{
request req;
response<std::string> resp;
for (std::string buffer;;) {
auto n = co_await asio::async_read_until(socket, asio::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await conn->async_exec(req, resp, asio::deferred);
co_await asio::async_write(socket, asio::buffer(std::get<0>(resp).value()));
std::get<0>(resp).value().clear();
req.clear();
buffer.erase(0, n);
}
}
// Listens for tcp connections.
auto listener(std::shared_ptr<connection> conn) -> asio::awaitable<void>
{
try {
auto ex = co_await asio::this_coro::executor;
tcp_acceptor acc(ex, {asio::ip::tcp::v4(), 55555});
for (;;)
asio::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), asio::detached);
} catch (std::exception const& e) {
std::clog << "Listener: " << e.what() << std::endl;
}
}
// Called from the main function (see main.cpp)
auto co_main(config cfg) -> asio::awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
asio::co_spawn(ex, listener(conn), asio::detached);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
signal_set sig_set(ex, SIGINT, SIGTERM);
co_await sig_set.async_wait();
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,42 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
using boost::redis::connection;
// Called from the main function (see main.cpp)
auto co_main(config cfg) -> asio::awaitable<void>
{
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
// A request containing only a ping command.
request req;
req.push("PING", "Hello world");
// Response where the PONG response will be stored.
response<std::string> resp;
// Executes the request.
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
std::cout << "PING: " << std::get<0>(resp).value() << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,54 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::config;
using boost::redis::logger;
using boost::redis::connection;
auto verify_certificate(bool, asio::ssl::verify_context&) -> bool
{
std::cout << "set_verify_callback" << std::endl;
return true;
}
auto co_main(config cfg) -> asio::awaitable<void>
{
cfg.use_ssl = true;
cfg.username = "aedis";
cfg.password = "aedis";
cfg.addr.host = "db.occase.de";
cfg.addr.port = "6380";
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
request req;
req.push("PING");
response<std::string> resp;
conn->next_layer().set_verify_mode(asio::ssl::verify_peer);
conn->next_layer().set_verify_callback(verify_certificate);
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
std::cout << "Response: " << std::get<0>(resp).value() << std::endl;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,75 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/detached.hpp>
#include <boost/describe.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <string>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <boost/json/serialize.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/redis/resp3/serialization.hpp>
namespace asio = boost::asio;
using namespace boost::describe;
using boost::redis::request;
using boost::redis::response;
using boost::redis::ignore_t;
using boost::redis::config;
using boost::redis::connection;
// Struct that will be stored in Redis using json serialization.
struct user {
std::string name;
std::string age;
std::string country;
};
// The type must be described for serialization to work.
BOOST_DESCRIBE_STRUCT(user, (), (name, age, country))
// Boost.Redis customization points (example/json.hpp)
void boost_redis_to_bulk(std::string& to, user const& u)
{ boost::redis::resp3::boost_redis_to_bulk(to, boost::json::serialize(boost::json::value_from(u))); }
void boost_redis_from_bulk(user& u, std::string_view sv, boost::system::error_code&)
{ u = boost::json::value_to<user>(boost::json::parse(sv)); }
auto co_main(config cfg) -> asio::awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
// user object that will be stored in Redis in json format.
user const u{"Joao", "58", "Brazil"};
// Stores and retrieves in the same request.
request req;
req.push("SET", "json-key", u); // Stores in Redis.
req.push("GET", "json-key"); // Retrieves from Redis.
response<ignore_t, user> resp;
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
// Prints the retrieved user.
std::cout
<< "Name: " << std::get<1>(resp).value().name << "\n"
<< "Age: " << std::get<1>(resp).value().age << "\n"
<< "Country: " << std::get<1>(resp).value().country << "\n";
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,88 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/resp3/serialization.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <boost/system/errc.hpp>
#include <iostream>
// See the definition in person.proto. This header is automatically
// generated by CMakeLists.txt.
#include "person.pb.h"
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using boost::redis::request;
using boost::redis::response;
using boost::redis::operation;
using boost::redis::ignore_t;
using boost::redis::config;
using boost::redis::connection;
// The protobuf type described in example/person.proto
using tutorial::person;
// Boost.Redis customization points (example/protobuf.hpp)
namespace tutorial
{
// Below I am using a Boost.Redis error code to indicate a protobuf
// error. This is ok for an example; users however might want to
// define their own error codes.
void boost_redis_to_bulk(std::string& to, person const& u)
{
std::string tmp;
if (!u.SerializeToString(&tmp))
throw boost::system::system_error(boost::redis::error::invalid_data_type);
boost::redis::resp3::boost_redis_to_bulk(to, tmp);
}
void boost_redis_from_bulk(person& u, std::string_view sv, boost::system::error_code& ec)
{
std::string const tmp {sv};
if (!u.ParseFromString(tmp))
ec = boost::redis::error::invalid_data_type;
}
} // tutorial
using tutorial::boost_redis_to_bulk;
using tutorial::boost_redis_from_bulk;
asio::awaitable<void> co_main(config cfg)
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
person p;
p.set_name("Louis");
p.set_id(3);
p.set_email("No email yet.");
request req;
req.push("SET", "protobuf-key", p);
req.push("GET", "protobuf-key");
response<ignore_t, person> resp;
// Sends the request and receives the response.
co_await conn->async_exec(req, resp, asio::deferred);
conn->cancel();
std::cout
<< "Name: " << std::get<1>(resp).value().name() << "\n"
<< "Age: " << std::get<1>(resp).value().id() << "\n"
<< "Email: " << std::get<1>(resp).value().email() << "\n";
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,75 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/detached.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using endpoints = asio::ip::tcp::resolver::results_type;
using boost::redis::request;
using boost::redis::response;
using boost::redis::ignore_t;
using boost::redis::config;
using boost::redis::address;
using boost::redis::connection;
auto redir(boost::system::error_code& ec)
{ return asio::redirect_error(asio::use_awaitable, ec); }
// For more info see
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
auto resolve_master_address(std::vector<address> const& addresses) -> asio::awaitable<address>
{
request req;
req.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req.push("QUIT");
auto conn = std::make_shared<connection>(co_await asio::this_coro::executor);
response<std::optional<std::array<std::string, 2>>, ignore_t> resp;
for (auto addr : addresses) {
boost::system::error_code ec;
config cfg;
cfg.addr = addr;
// TODO: async_run and async_exec should be launched in
// parallel here so we can wait for async_run completion
// before eventually calling it again.
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
co_await conn->async_exec(req, resp, redir(ec));
conn->cancel();
conn->reset_stream();
if (!ec && std::get<0>(resp))
co_return address{std::get<0>(resp).value().value().at(0), std::get<0>(resp).value().value().at(1)};
}
co_return address{};
}
auto co_main(config cfg) -> asio::awaitable<void>
{
// A list of sentinel addresses from which only one is responsive.
// This simulates sentinels that are down.
std::vector<address> const addresses
{ address{"foo", "26379"}
, address{"bar", "26379"}
, cfg.addr
};
auto const ep = co_await resolve_master_address(addresses);
std::clog
<< "Host: " << ep.host << "\n"
<< "Port: " << ep.port << "\n"
<< std::flush;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,98 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/awaitable.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <memory>
#include <string>
#include <thread>
#include <vector>
namespace net = boost::asio;
using boost::redis::config;
using boost::redis::generic_response;
using boost::redis::operation;
using boost::redis::request;
using boost::redis::connection;
using signal_set = net::deferred_t::as_default_on_t<net::signal_set>;
auto stream_reader(std::shared_ptr<connection> conn) -> net::awaitable<void>
{
std::string redisStreamKey_;
request req;
generic_response resp;
std::string stream_id{"$"};
std::string const field = "myfield";
for (;;) {
req.push("XREAD", "BLOCK", "0", "STREAMS", "test-topic", stream_id);
co_await conn->async_exec(req, resp, net::deferred);
//std::cout << "Response: ";
//for (auto i = 0UL; i < resp->size(); ++i) {
// std::cout << resp->at(i).value << ", ";
//}
//std::cout << std::endl;
// The following approach was taken in order to be able to
// deal with the responses, as generated by redis in the case
// that there are multiple stream 'records' within a single
// generic_response. The nesting and number of values in
// resp.value() are different, depending on the contents
// of the stream in redis. Uncomment the above commented-out
// code for examples while running the XADD command.
std::size_t item_index = 0;
while (item_index < std::size(resp.value())) {
auto const& val = resp.value().at(item_index).value;
if (field.compare(val) == 0) {
// We've hit a myfield field.
// The streamId is located at item_index - 2
// The payload is located at item_index + 1
stream_id = resp.value().at(item_index - 2).value;
std::cout
<< "StreamId: " << stream_id << ", "
<< "MyField: " << resp.value().at(item_index + 1).value
<< std::endl;
++item_index; // We can increase so we don't read this again
}
++item_index;
}
req.clear();
resp.value().clear();
}
}
// Run this in another terminal:
// redis-cli -r 100000 -i 0.0001 XADD "test-topic" "*" "myfield" "myfieldvalue1"
auto co_main(config cfg) -> net::awaitable<void>
{
auto ex = co_await net::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
net::co_spawn(ex, stream_reader(conn), net::detached);
// Disable health checks.
cfg.health_check_interval = std::chrono::seconds::zero();
conn->async_run(cfg, {}, net::consign(net::detached, conn));
signal_set sig_set(ex, SIGINT, SIGTERM);
co_await sig_set.async_wait();
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,102 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/logger.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/redirect_error.hpp>
#include <boost/asio/signal_set.hpp>
#include <iostream>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
namespace asio = boost::asio;
using namespace std::chrono_literals;
using boost::redis::request;
using boost::redis::generic_response;
using boost::redis::consume_one;
using boost::redis::logger;
using boost::redis::config;
using boost::redis::ignore;
using boost::redis::error;
using boost::system::error_code;
using boost::redis::connection;
using signal_set = asio::deferred_t::as_default_on_t<asio::signal_set>;
/* This example will subscribe and read pushes indefinitely.
*
* To test send messages with redis-cli
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel some-message
* (integer) 3
* 127.0.0.1:6379>
*
* To test reconnection try, for example, to close all clients currently
* connected to the Redis instance
*
* $ redis-cli
* > CLIENT kill TYPE pubsub
*/
// Receives server pushes.
auto
receiver(std::shared_ptr<connection> conn) -> asio::awaitable<void>
{
request req;
req.push("SUBSCRIBE", "channel");
generic_response resp;
conn->set_receive_response(resp);
// Loop while reconnection is enabled
while (conn->will_reconnect()) {
// Reconnect to the channels.
co_await conn->async_exec(req, ignore, asio::deferred);
// Loop reading Redis push messages.
for (error_code ec;;) {
// First tries to read any buffered pushes.
conn->receive(ec);
if (ec == error::sync_receive_push_failed) {
ec = {};
co_await conn->async_receive(asio::redirect_error(asio::use_awaitable, ec));
}
if (ec)
break; // Connection lost, break so we can reconnect to channels.
std::cout
<< resp.value().at(1).value
<< " " << resp.value().at(2).value
<< " " << resp.value().at(3).value
<< std::endl;
consume_one(resp);
}
}
}
auto co_main(config cfg) -> asio::awaitable<void>
{
auto ex = co_await asio::this_coro::executor;
auto conn = std::make_shared<connection>(ex);
asio::co_spawn(ex, receiver(conn), asio::detached);
conn->async_run(cfg, {}, asio::consign(asio::detached, conn));
signal_set sig_set(ex, SIGINT, SIGTERM);
co_await sig_set.async_wait();
conn->cancel();
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,53 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/config.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/io_context.hpp>
#include <iostream>
namespace asio = boost::asio;
using boost::redis::config;
using boost::redis::logger;
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
extern asio::awaitable<void> co_main(config);
auto main(int argc, char * argv[]) -> int
{
try {
config cfg;
if (argc == 3) {
cfg.addr.host = argv[1];
cfg.addr.port = argv[2];
}
asio::io_context ioc;
asio::co_spawn(ioc, co_main(cfg), [](std::exception_ptr p) {
if (p)
std::rethrow_exception(p);
});
ioc.run();
} catch (std::exception const& e) {
std::cerr << "(main) " << e.what() << std::endl;
return 1;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int
{
std::cout << "Requires coroutine support." << std::endl;
return 0;
}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -1,9 +0,0 @@
syntax = "proto2";
package tutorial;
message person {
optional string name = 1;
optional int32 id = 2;
optional string email = 3;
}

View File

@@ -1,63 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
#include <boost/redis/request.hpp>
#include <boost/asio/deferred.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/use_future.hpp>
#include <thread>
#include <chrono>
using namespace std::chrono_literals;
namespace boost::redis
{
class sync_connection {
public:
sync_connection()
: ioc_{1}
, conn_{std::make_shared<connection>(ioc_)}
{ }
~sync_connection()
{
thread_.join();
}
void run(config cfg)
{
// Starts a thread that will call io_context::run, on which the
// connection will run.
thread_ = std::thread{[this, cfg]() {
conn_->async_run(cfg, {}, asio::detached);
ioc_.run();
}};
}
void stop()
{
asio::dispatch(ioc_, [this]() { conn_->cancel(); });
}
template <class Response>
auto exec(request const& req, Response& resp)
{
asio::dispatch(
conn_->get_executor(),
asio::deferred([this, &req, &resp]() { return conn_->async_exec(req, resp, asio::deferred); }))
(asio::use_future).get();
}
private:
asio::io_context ioc_{1};
std::shared_ptr<connection> conn_;
std::thread thread_;
};
}

99
examples/chat_room.cpp Normal file
View File

@@ -0,0 +1,99 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include "unistd.h"
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using tcp_acceptor = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::acceptor>;
using stream_descriptor = net::use_awaitable_t<>::as_default_on_t<net::posix::stream_descriptor>;
using connection = aedis::connection<tcp_socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
// Chat over redis pubsub. To test, run this program from different
// terminals and type messages to stdin. Use
//
// $ redis-cli monitor
//
// to monitor the message traffic.
// Receives messages from other users.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
{
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
// Subscribes to the channels when a new connection is established.
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
{
request req;
req.push("SUBSCRIBE", "chat-channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (;;) {
boost::system::error_code ec;
co_await db->async_run(ep, req, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
db->reset_stream();
std::cout << ec.message() << std::endl;
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
// Publishes messages to other users.
net::awaitable<void> publisher(stream_descriptor& in, std::shared_ptr<connection> db)
{
for (std::string msg;;) {
auto n = co_await net::async_read_until(in, net::dynamic_buffer(msg, 1024), "\n");
request req;
req.push("PUBLISH", "chat-channel", msg);
co_await db->async_exec(req);
msg.erase(0, n);
}
}
auto main() -> int
{
try {
net::io_context ioc{1};
stream_descriptor in{ioc, ::dup(STDIN_FILENO)};
auto db = std::make_shared<connection>(ioc);
co_spawn(ioc, publisher(in, db), net::detached);
co_spawn(ioc, push_receiver(db), net::detached);
co_spawn(ioc, reconnect(db), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT) && defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR)

63
examples/containers.cpp Normal file
View File

@@ -0,0 +1,63 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <map>
#include <vector>
#include <iostream>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
auto main() -> int
{
try {
std::vector<int> vec{1, 2, 3, 4, 5, 6};
std::map<std::string, int> map{{"key1", 10}, {"key2", 20}, {"key3", 30}};
// Sends and retrieves containers in the same request for
// simplification.
request req;
req.push_range("RPUSH", "rpush-key", vec); // Sends
req.push_range("HSET", "hset-key", map); // Sends
req.push("MULTI");
req.push("LRANGE", "rpush-key", 0, -1); // Retrieves
req.push("HGETALL", "hset-key"); // Retrieves
req.push("EXEC");
req.push("QUIT");
std::tuple<
aedis::ignore, // rpush
aedis::ignore, // hset
aedis::ignore, // multi
aedis::ignore, // lrange
aedis::ignore, // hgetall
std::tuple<std::optional<std::vector<int>>, std::optional<std::map<std::string, int>>>, // exec
aedis::ignore // quit
> resp;
net::io_context ioc;
connection db{ioc};
endpoint ep{"127.0.0.1", "6379"};
db.async_run(ep, req, adapt(resp), {}, [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
print(std::get<0>(std::get<5>(resp)).value());
print(std::get<1>(std::get<5>(resp)).value());
} catch (...) {
std::cerr << "Error." << std::endl;
}
}

76
examples/echo_server.cpp Normal file
View File

@@ -0,0 +1,76 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using executor_type = net::io_context::executor_type;
using socket_type = net::basic_stream_socket<net::ip::tcp, executor_type>;
using tcp_socket = net::use_awaitable_t<executor_type>::as_default_on_t<socket_type>;
using acceptor_type = net::basic_socket_acceptor<net::ip::tcp, executor_type>;
using tcp_acceptor = net::use_awaitable_t<executor_type>::as_default_on_t<acceptor_type>;
using awaitable_type = net::awaitable<void, executor_type>;
using connection = aedis::connection<tcp_socket>;
awaitable_type echo_loop(tcp_socket socket, std::shared_ptr<connection> db)
{
request req;
std::tuple<std::string> resp;
for (std::string buffer;;) {
auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n");
req.push("PING", buffer);
co_await db->async_exec(req, adapt(resp));
co_await net::async_write(socket, net::buffer(std::get<0>(resp)));
std::get<0>(resp).clear();
req.clear();
buffer.erase(0, n);
}
}
awaitable_type listener(std::shared_ptr<connection> db)
{
auto ex = co_await net::this_coro::executor;
tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555});
for (;;)
net::co_spawn(ex, echo_loop(co_await acc.async_accept(), db), net::detached);
}
auto main() -> int
{
try {
net::io_context ioc{BOOST_ASIO_CONCURRENCY_HINT_UNSAFE_IO};
auto db = std::make_shared<connection>(ioc);
endpoint ep{"127.0.0.1", "6379"};
db->async_run(ep, {}, [&](auto const& ec) {
std::clog << ec.message() << std::endl;
ioc.stop();
});
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
co_spawn(ioc, listener(db), net::detached);
ioc.run();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
auto main() -> int {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

43
examples/intro.cpp Normal file
View File

@@ -0,0 +1,43 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <boost/asio.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
auto main() -> int
{
try {
net::io_context ioc;
connection db{ioc};
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
db.async_run({"127.0.0.1", "6379"}, req, adapt(resp), {}, [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
std::cout << std::get<0>(resp) << std::endl;
} catch (...) {
std::cerr << "Error" << std::endl;
}
}

61
examples/intro_sync.cpp Normal file
View File

@@ -0,0 +1,61 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <thread>
#include <boost/asio.hpp>
#include <aedis.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::connection<>;
template <class Adapter>
auto exec(connection& conn, request const& req, Adapter adapter, boost::system::error_code& ec)
{
net::dispatch(
conn.get_executor(),
net::deferred([&]() { return conn.async_exec(req, adapter, net::deferred); }))
(net::redirect_error(net::use_future, ec)).get();
}
auto logger = [](auto const& ec)
{ std::clog << "Run: " << ec.message() << std::endl; };
int main()
{
try {
net::io_context ioc{1};
connection conn{ioc};
std::thread t{[&]() {
conn.async_run({"127.0.0.1", "6379"}, {}, logger);
ioc.run();
}};
request req;
req.push("PING");
req.push("QUIT");
boost::system::error_code ec;
std::tuple<std::string, aedis::ignore> resp;
exec(conn, req, adapt(resp), ec);
std::cout
<< "Exec: " << ec.message() << "\n"
<< "Response: " << std::get<0>(resp) << std::endl;
t.join();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}

55
examples/intro_tls.cpp Normal file
View File

@@ -0,0 +1,55 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <tuple>
#include <string>
#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>
#include <aedis.hpp>
#include <aedis/ssl/connection.hpp>
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::endpoint;
using connection = aedis::ssl::connection<net::ssl::stream<net::ip::tcp::socket>>;
bool verify_certificate(bool, net::ssl::verify_context&)
{
std::cout << "set_verify_callback" << std::endl;
return true;
}
auto main() -> int
{
try {
net::io_context ioc;
net::ssl::context ctx{net::ssl::context::sslv23};
connection conn{ioc, ctx};
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
conn.next_layer().set_verify_callback(verify_certificate);
request req;
req.push("PING");
req.push("QUIT");
std::tuple<std::string, aedis::ignore> resp;
conn.async_run({"127.0.0.1", "6379"}, req, adapt(resp), {}, [&](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
std::cout << "Response: " << std::get<0>(resp) << std::endl;
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}

View File

@@ -0,0 +1,53 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <iostream>
#include <boost/asio/connect.hpp>
#include <aedis.hpp>
#include <aedis/src.hpp>
namespace net = boost::asio;
namespace resp3 = aedis::resp3;
using aedis::resp3::request;
using aedis::adapter::adapt2;
using net::ip::tcp;
int main()
{
try {
net::io_context ioc;
tcp::resolver resv{ioc};
auto const res = resv.resolve("127.0.0.1", "6379");
tcp::socket socket{ioc};
net::connect(socket, res);
// Creates the request and writes to the socket.
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
resp3::write(socket, req);
// Responses
std::string buffer, resp;
// Reads the responses to all commands in the request.
auto dbuffer = net::dynamic_buffer(buffer);
resp3::read(socket, dbuffer);
resp3::read(socket, dbuffer, adapt2(resp));
resp3::read(socket, dbuffer);
std::cout << "Ping: " << resp << std::endl;
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
exit(EXIT_FAILURE);
}
}

65
examples/print.hpp Normal file
View File

@@ -0,0 +1,65 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <map>
#include <set>
#include <vector>
#include <string>
#include <iostream>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/node.hpp>
// Some functions to make the examples less repetitive.
namespace net = boost::asio;
using aedis::resp3::node;
void print_aggr(std::vector<aedis::resp3::node<std::string>>& v)
{
if (std::empty(v))
return;
auto const m = aedis::resp3::element_multiplicity(v.front().data_type);
for (auto i = 0lu; i < m * v.front().aggregate_size; ++i)
std::cout << v[i + 1].value << " ";
std::cout << "\n";
v.clear();
}
template <class T>
void print(std::vector<T> const& cont)
{
for (auto const& e: cont) std::cout << e << " ";
std::cout << "\n";
}
template <class T>
void print(std::set<T> const& cont)
{
for (auto const& e: cont) std::cout << e << "\n";
}
template <class T, class U>
void print(std::map<T, U> const& cont)
{
for (auto const& e: cont)
std::cout << e.first << ": " << e.second << "\n";
}
void print(std::string const& e)
{
std::cout << e << std::endl;
}
void print_push(std::vector<aedis::resp3::node<std::string>>& resp)
{
std::cout
<< "Push type: " << resp.at(1).value << "\n"
<< "Channel: " << resp.at(2).value << "\n"
<< "Message: " << resp.at(3).value << "\n"
<< std::endl;
}

111
examples/serialization.cpp Normal file
View File

@@ -0,0 +1,111 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>
#include <iterator>
#include <string>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::resp3::request;
using aedis::adapt;
using aedis::endpoint;
using connection = aedis::connection<>;
using namespace boost::json;
struct user {
std::string name;
std::string age;
std::string country;
};
void tag_invoke(value_from_tag, value& jv, user const& u)
{
jv =
{ {"name", u.name}
, {"age", u.age}
, {"country", u.country}
};
}
template<class T>
void extract(object const& obj, T& t, boost::string_view key)
{
t = value_to<T>(obj.at(key));
}
user tag_invoke(value_to_tag<user>, value const& jv)
{
user u;
object const& obj = jv.as_object();
extract(obj, u.name, "name");
extract(obj, u.age, "age");
extract(obj, u.country, "country");
return u;
}
// Serializes
void to_bulk(std::string& to, user const& u)
{
aedis::resp3::to_bulk(to, serialize(value_from(u)));
}
// Deserializes
void from_bulk(user& u, boost::string_view sv, boost::system::error_code&)
{
value jv = parse(sv);
u = value_to<user>(jv);
}
std::ostream& operator<<(std::ostream& os, user const& u)
{
os << "Name: " << u.name << "\n"
<< "Age: " << u.age << "\n"
<< "Country: " << u.country;
return os;
}
bool operator<(user const& a, user const& b)
{
return std::tie(a.name, a.age, a.country) < std::tie(b.name, b.age, b.country);
}
int main()
{
net::io_context ioc;
connection db{ioc};
std::set<user> users
{{"Joao", "58", "Brazil"} , {"Serge", "60", "France"}};
request req;
req.push("HELLO", 3);
req.push_range("SADD", "sadd-key", users); // Sends
req.push("SMEMBERS", "sadd-key"); // Retrieves
req.push("QUIT");
std::tuple<aedis::ignore, int, std::set<user>, std::string> resp;
endpoint ep{"127.0.0.1", "6379"};
db.async_run(ep, req, adapt(resp), {}, [](auto ec, auto) {
std::cout << ec.message() << std::endl;
});
ioc.run();
// Print
print(std::get<2>(resp));
}

95
examples/subscriber.cpp Normal file
View File

@@ -0,0 +1,95 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
/* This example will subscribe and read pushes indefinitely.
*
* To test send messages with redis-cli
*
* $ redis-cli -3
* 127.0.0.1:6379> PUBLISH channel some-message
* (integer) 3
* 127.0.0.1:6379>
*
* To test reconnection try, for example, to close all clients currently
* connected to the Redis instance
*
* $ redis-cli
* > CLIENT kill TYPE pubsub
*/
// Receives pushes.
net::awaitable<void> push_receiver(std::shared_ptr<connection> db)
{
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
// See
// - https://redis.io/docs/manual/sentinel.
// - https://redis.io/docs/reference/sentinel-clients.
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
{
request req;
req.push("SUBSCRIBE", "channel");
stimer timer{co_await net::this_coro::executor};
endpoint ep{"127.0.0.1", "6379"};
for (;;) {
boost::system::error_code ec;
co_await db->async_run(ep, req, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
db->reset_stream();
std::cout << ec.message() << std::endl;
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
int main()
{
try {
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, push_receiver(db), net::detached);
net::co_spawn(ioc, reconnect(db), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
int main() {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

View File

@@ -0,0 +1,119 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <string>
#include <vector>
#include <iostream>
#include <tuple>
#include <boost/asio.hpp>
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
#include <aedis.hpp>
#include "print.hpp"
// Include this in no more than one .cpp file.
#include <aedis/src.hpp>
namespace net = boost::asio;
using aedis::adapt;
using aedis::resp3::request;
using aedis::resp3::node;
using aedis::endpoint;
using tcp_socket = net::use_awaitable_t<>::as_default_on_t<net::ip::tcp::socket>;
using stimer = net::use_awaitable_t<>::as_default_on_t<net::steady_timer>;
using connection = aedis::connection<tcp_socket>;
// Connects to a Redis instance over sentinel and performs failover in
// case of disconnection, see
// https://redis.io/docs/reference/sentinel-clients. This example
// assumes a sentinel and a redis server running on localhost.
net::awaitable<void> receive_pushes(std::shared_ptr<connection> db)
{
for (std::vector<node<std::string>> resp;;) {
co_await db->async_receive_push(adapt(resp));
print_push(resp);
resp.clear();
}
}
net::awaitable<endpoint> resolve()
{
// A list of sentinel addresses from which only one is responsive
// to simulate sentinels that are down.
std::vector<endpoint> const endpoints
{ {"foo", "26379"}
, {"bar", "26379"}
, {"127.0.0.1", "26379"}
};
request req1;
req1.push("SENTINEL", "get-master-addr-by-name", "mymaster");
req1.push("QUIT");
auto ex = co_await net::this_coro::executor;
connection conn{ex};
std::tuple<std::optional<std::array<std::string, 2>>, aedis::ignore> addr;
for (auto ep : endpoints) {
boost::system::error_code ec;
co_await conn.async_run(ep, req1, adapt(addr), {}, net::redirect_error(net::use_awaitable, ec));
conn.reset_stream();
std::cout << ec.message() << std::endl;
if (std::get<0>(addr))
break;
}
endpoint ep;
if (std::get<0>(addr)) {
ep.host = std::get<0>(addr).value().at(0);
ep.port = std::get<0>(addr).value().at(1);
}
co_return ep;
}
net::awaitable<void> reconnect(std::shared_ptr<connection> db)
{
request req2;
req2.push("SUBSCRIBE", "channel");
auto ex = co_await net::this_coro::executor;
stimer timer{ex};
for (;;) {
auto ep = co_await net::co_spawn(ex, resolve(), net::use_awaitable);
if (!aedis::is_valid(ep)) {
std::clog << "Can't resolve master name" << std::endl;
co_return;
}
boost::system::error_code ec;
co_await db->async_run(ep, req2, adapt(), {}, net::redirect_error(net::use_awaitable, ec));
std::cout << ec.message() << std::endl;
std::cout << "Starting the failover." << std::endl;
timer.expires_after(std::chrono::seconds{1});
co_await timer.async_wait();
}
}
int main()
{
try {
net::io_context ioc;
auto db = std::make_shared<connection>(ioc);
net::co_spawn(ioc, receive_pushes(db), net::detached);
net::co_spawn(ioc, reconnect(db), net::detached);
net::signal_set signals(ioc, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto){ ioc.stop(); });
ioc.run();
} catch (std::exception const& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
}
#else // defined(BOOST_ASIO_HAS_CO_AWAIT)
int main() {std::cout << "Requires coroutine support." << std::endl; return 1;}
#endif // defined(BOOST_ASIO_HAS_CO_AWAIT)

25
include/aedis.hpp Normal file
View File

@@ -0,0 +1,25 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_HPP
#define AEDIS_HPP
#include <aedis/error.hpp>
#include <aedis/adapt.hpp>
#include <aedis/connection.hpp>
#include <aedis/resp3/request.hpp>
/** @defgroup high-level-api Reference
*
* This page contains the documentation of the Aedis high-level API.
*/
/** @defgroup low-level-api Reference
*
* This page contains the documentation of the Aedis low-level API.
*/
#endif // AEDIS_HPP

222
include/aedis/adapt.hpp Normal file
View File

@@ -0,0 +1,222 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPT_HPP
#define AEDIS_ADAPT_HPP
#include <tuple>
#include <limits>
#include <boost/mp11.hpp>
#include <boost/variant2.hpp>
#include <boost/utility/string_view.hpp>
#include <boost/system.hpp>
#include <aedis/resp3/node.hpp>
#include <aedis/adapter/adapt.hpp>
#include <aedis/adapter/detail/response_traits.hpp>
namespace aedis {
/** @brief Tag used to ignore responses.
* @ingroup high-level-api
*
* For example
*
* @code
* std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
* @endcode
*
* will cause only the second tuple type to be parsed, the others
* will be ignored.
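 *
 * A slightly fuller sketch, assuming @c db is a connection object and
 * the command names are only illustrative:
 *
 * @code
 * resp3::request req;
 * req.push("HELLO", 3);
 * req.push("GET", "some-key");
 * req.push("QUIT");
 *
 * std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
 * db.async_exec(req, adapt(resp), [](auto ec, auto) {
 *    std::cout << ec.message() << std::endl;
 * });
 * // Only the GET payload is stored, in std::get<1>(resp).
 * @endcode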
*/
using ignore = adapter::detail::ignore;
namespace detail
{
class ignore_adapter {
public:
explicit ignore_adapter(std::size_t max_read_size) : max_read_size_{max_read_size} {}
void
operator()(
std::size_t, resp3::node<boost::string_view> const&, boost::system::error_code&) { }
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return max_read_size_;}
private:
std::size_t max_read_size_;
};
template <class Tuple>
class static_adapter {
private:
static constexpr auto size = std::tuple_size<Tuple>::value;
using adapter_tuple = boost::mp11::mp_transform<adapter::adapter_t, Tuple>;
using variant_type = boost::mp11::mp_rename<adapter_tuple, boost::variant2::variant>;
using adapters_array_type = std::array<variant_type, size>;
adapters_array_type adapters_;
std::size_t max_read_size_;
public:
explicit static_adapter(Tuple& r, std::size_t max_read_size)
: max_read_size_{max_read_size}
{
adapter::detail::assigner<size - 1>::assign(adapters_, r);
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return size;}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return max_read_size_;}
void
operator()(
std::size_t i,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
using boost::variant2::visit;
// I am unsure whether this should be an error or an assertion.
BOOST_ASSERT(i < adapters_.size());
visit([&](auto& arg){arg(nd, ec);}, adapters_.at(i));
}
};
template <class Vector>
class vector_adapter {
private:
using adapter_type = typename adapter::detail::response_traits<Vector>::adapter_type;
adapter_type adapter_;
std::size_t max_read_size_;
public:
explicit vector_adapter(Vector& v, std::size_t max_read_size)
: adapter_{adapter::adapt2(v)}
, max_read_size_{max_read_size}
{ }
[[nodiscard]]
auto
get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return max_read_size_;}
void
operator()(
std::size_t,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
adapter_(nd, ec);
}
};
template <class>
struct response_traits;
template <>
struct response_traits<void> {
using response_type = void;
using adapter_type = detail::ignore_adapter;
static auto adapt(std::size_t max_read_size) noexcept
{ return detail::ignore_adapter{max_read_size}; }
};
template <class String, class Allocator>
struct response_traits<std::vector<resp3::node<String>, Allocator>> {
using response_type = std::vector<resp3::node<String>, Allocator>;
using adapter_type = vector_adapter<response_type>;
static auto adapt(response_type& v, std::size_t max_read_size) noexcept
{ return adapter_type{v, max_read_size}; }
};
template <class ...Ts>
struct response_traits<std::tuple<Ts...>> {
using response_type = std::tuple<Ts...>;
using adapter_type = static_adapter<response_type>;
static auto adapt(response_type& r, std::size_t max_read_size) noexcept
{ return adapter_type{r, max_read_size}; }
};
template <class Adapter>
class wrapper {
public:
explicit wrapper(Adapter adapter) : adapter_{adapter} {}
void operator()(resp3::node<boost::string_view> const& node, boost::system::error_code& ec)
{ return adapter_(0, node, ec); }
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return adapter_.get_supported_response_size();}
[[nodiscard]]
auto get_max_read_size(std::size_t) const noexcept
{ return adapter_.get_max_read_size(0); }
private:
Adapter adapter_;
};
template <class Adapter>
auto make_adapter_wrapper(Adapter adapter)
{
return wrapper{adapter};
}
} // detail
/** @brief Creates an adapter that ignores responses.
* @ingroup high-level-api
*
 * This function can be used to create adapters that ignore
 * responses.
*
* @param max_read_size Specifies the maximum size of the read
* buffer.
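 *
 * A minimal sketch, assuming @c db is a connection object:
 *
 * @code
 * resp3::request req;
 * req.push("SET", "key", "value");
 *
 * // The response to SET is read and discarded.
 * db.async_exec(req, adapt(), [](auto ec, auto) {
 *    std::cout << ec.message() << std::endl;
 * });
 * @endcode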
*/
inline auto adapt(std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)()) noexcept
{
return detail::response_traits<void>::adapt(max_read_size);
}
/** @brief Adapts a type to be used as a response.
* @ingroup high-level-api
*
* The type T can be any STL container, any integer type and
* \c std::string
*
* @param t Tuple containing the responses.
* @param max_read_size Specifies the maximum size of the read
* buffer.
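 *
 * Illustrative sketch, assuming @c db is a connection that is already
 * running:
 *
 * @code
 * resp3::request req;
 * req.push("PING");
 * req.push("INCR", "counter");
 *
 * std::tuple<std::string, int> resp;
 * db.async_exec(req, adapt(resp), [](auto ec, auto) {
 *    std::cout << ec.message() << std::endl;
 * });
 * @endcode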
*/
template<class T>
auto adapt(T& t, std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)()) noexcept
{
return detail::response_traits<T>::adapt(t, max_read_size);
}
} // aedis
#endif // AEDIS_ADAPT_HPP

View File

@@ -0,0 +1,80 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_ADAPT_HPP
#define AEDIS_ADAPTER_ADAPT_HPP
#include <aedis/adapter/detail/response_traits.hpp>
namespace aedis::adapter {
template <class T>
using adapter_t = typename detail::adapter_t<T>;
/** \brief Creates a dummy response adapter.
\ingroup low-level-api
The adapter returned by this function ignores responses. It is
useful to avoid wasting time with responses which are not needed.
Example:
@code
// Pushes and writes some commands to the server.
sr.push(command::hello, 3);
sr.push(command::ping);
sr.push(command::quit);
net::write(socket, net::buffer(request));
// Ignores all responses except for the response to ping.
std::string buffer;
resp3::read(socket, dynamic_buffer(buffer), adapt()); // hello
resp3::read(socket, dynamic_buffer(buffer), adapt(resp)); // ping
resp3::read(socket, dynamic_buffer(buffer), adapt()); // quit
@endcode
*/
inline
auto adapt2() noexcept
{ return detail::response_traits<void>::adapt(); }
/** \brief Adapts user data to read operations.
* \ingroup low-level-api
*
* STL containers, \c std::tuple and built-in types are supported and
* can be used in conjunction with \c std::optional<T>.
*
* Example usage:
*
* @code
* std::unordered_map<std::string, std::string> cont;
* co_await async_read(socket, buffer, adapt(cont));
* @endcode
*
* For a transaction
*
* @code
* sr.push(command::multi);
* sr.push(command::ping, ...);
* sr.push(command::incr, ...);
* sr.push_range(command::rpush, ...);
* sr.push(command::lrange, ...);
* sr.push(command::incr, ...);
* sr.push(command::exec);
*
* co_await async_write(socket, buffer(request));
*
* // Reads the response to a transaction
* std::tuple<std::string, int, int, std::vector<std::string>, int> execs;
* co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(execs));
* @endcode
*/
template<class T>
auto adapt2(T& t) noexcept
{ return detail::response_traits<T>::adapt(t); }
} // aedis::adapter
#endif // AEDIS_ADAPTER_ADAPT_HPP

View File

@@ -0,0 +1,429 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_ADAPTERS_HPP
#define AEDIS_ADAPTER_ADAPTERS_HPP
#include <set>
#include <optional>
#include <unordered_set>
#include <forward_list>
#include <system_error>
#include <map>
#include <unordered_map>
#include <list>
#include <deque>
#include <vector>
#include <array>
#include <boost/assert.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/home/x3.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/node.hpp>
namespace aedis::adapter::detail {
inline
auto parse_double(
char const* data,
std::size_t size,
boost::system::error_code& ec) -> double
{
static constexpr boost::spirit::x3::real_parser<double> p{};
double ret = 0;
if (!parse(data, data + size, p, ret))
ec = error::not_a_double;
return ret;
}
// Serialization.
template <class T>
typename std::enable_if<std::is_integral<T>::value, void>::type
from_bulk(
T& i,
boost::string_view sv,
boost::system::error_code& ec)
{
i = resp3::detail::parse_uint(sv.data(), sv.size(), ec);
}
inline
void from_bulk(
bool& t,
boost::string_view sv,
boost::system::error_code&)
{
t = *sv.data() == 't';
}
inline
void from_bulk(
double& d,
boost::string_view sv,
boost::system::error_code& ec)
{
d = parse_double(sv.data(), sv.size(), ec);
}
template <class CharT, class Traits, class Allocator>
void
from_bulk(
std::basic_string<CharT, Traits, Allocator>& s,
boost::string_view sv,
boost::system::error_code&)
{
s.append(sv.data(), sv.size());
}
//================================================
inline
void set_on_resp3_error(resp3::type t, boost::system::error_code& ec)
{
switch (t) {
case resp3::type::simple_error: ec = error::resp3_simple_error; return;
case resp3::type::blob_error: ec = error::resp3_blob_error; return;
case resp3::type::null: ec = error::resp3_null; return;
default: return;
}
}
template <class Result>
class general_aggregate {
private:
Result* result_;
public:
explicit general_aggregate(Result* c = nullptr): result_(c) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code& ec)
{
result_->push_back({n.data_type, n.aggregate_size, n.depth, std::string{std::cbegin(n.value), std::cend(n.value)}});
set_on_resp3_error(n.data_type, ec);
}
};
template <class Node>
class general_simple {
private:
Node* result_;
public:
explicit general_simple(Node* t = nullptr) : result_(t) {}
void operator()(resp3::node<boost::string_view> const& n, boost::system::error_code& ec)
{
result_->data_type = n.data_type;
result_->aggregate_size = n.aggregate_size;
result_->depth = n.depth;
result_->value.assign(n.value.data(), n.value.size());
set_on_resp3_error(n.data_type, ec);
}
};
template <class Result>
class simple_impl {
public:
void on_value_available(Result&) {}
void
operator()(
Result& result,
resp3::node<boost::string_view> const& n,
boost::system::error_code& ec)
{
set_on_resp3_error(n.data_type, ec);
if (ec)
return;
if (is_aggregate(n.data_type)) {
ec = error::expects_resp3_simple_type;
return;
}
from_bulk(result, n.value, ec);
}
};
template <class Result>
class set_impl {
private:
typename Result::iterator hint_;
public:
void on_value_available(Result& result)
{ hint_ = std::end(result); }
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
if (nd.data_type != resp3::type::set)
ec = error::expects_resp3_set;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_resp3_set;
return;
}
typename Result::key_type obj;
from_bulk(obj, nd.value, ec);
hint_ = result.insert(hint_, std::move(obj));
}
};
template <class Result>
class map_impl {
private:
typename Result::iterator current_;
bool on_key_ = true;
public:
void on_value_available(Result& result)
{ current_ = std::end(result); }
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
if (element_multiplicity(nd.data_type) != 2)
ec = error::expects_resp3_map;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_resp3_map;
return;
}
if (on_key_) {
typename Result::key_type obj;
from_bulk(obj, nd.value, ec);
current_ = result.insert(current_, {std::move(obj), {}});
} else {
typename Result::mapped_type obj;
from_bulk(obj, nd.value, ec);
current_->second = std::move(obj);
}
on_key_ = !on_key_;
}
};
template <class Result>
class vector_impl {
public:
void on_value_available(Result& ) { }
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
auto const m = element_multiplicity(nd.data_type);
result.reserve(result.size() + m * nd.aggregate_size);
} else {
result.push_back({});
from_bulk(result.back(), nd.value, ec);
}
}
};
template <class Result>
class array_impl {
private:
int i_ = -1;
public:
void on_value_available(Result& ) { }
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (is_aggregate(nd.data_type)) {
if (i_ != -1) {
ec = error::nested_aggregate_not_supported;
return;
}
if (result.size() != nd.aggregate_size * element_multiplicity(nd.data_type)) {
ec = error::incompatible_size;
return;
}
} else {
if (i_ == -1) {
ec = error::expects_resp3_aggregate;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
from_bulk(result.at(i_), nd.value, ec);
}
++i_;
}
};
template <class Result>
struct list_impl {
void on_value_available(Result& ) { }
void
operator()(
Result& result,
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
set_on_resp3_error(nd.data_type, ec);
if (ec)
return;
if (!is_aggregate(nd.data_type)) {
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = error::expects_resp3_aggregate;
return;
}
result.push_back({});
from_bulk(result.back(), nd.value, ec);
}
}
};
//---------------------------------------------------
template <class T>
struct impl_map { using type = simple_impl<T>; };
template <class Key, class Compare, class Allocator>
struct impl_map<std::set<Key, Compare, Allocator>> { using type = set_impl<std::set<Key, Compare, Allocator>>; };
template <class Key, class Compare, class Allocator>
struct impl_map<std::multiset<Key, Compare, Allocator>> { using type = set_impl<std::multiset<Key, Compare, Allocator>>; };
template <class Key, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_set<Key, Hash, KeyEqual, Allocator>> { using type = set_impl<std::unordered_set<Key, Hash, KeyEqual, Allocator>>; };
template <class Key, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_multiset<Key, Hash, KeyEqual, Allocator>> { using type = set_impl<std::unordered_multiset<Key, Hash, KeyEqual, Allocator>>; };
template <class Key, class T, class Compare, class Allocator>
struct impl_map<std::map<Key, T, Compare, Allocator>> { using type = map_impl<std::map<Key, T, Compare, Allocator>>; };
template <class Key, class T, class Compare, class Allocator>
struct impl_map<std::multimap<Key, T, Compare, Allocator>> { using type = map_impl<std::multimap<Key, T, Compare, Allocator>>; };
template <class Key, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_map<Key, Hash, KeyEqual, Allocator>> { using type = map_impl<std::unordered_map<Key, Hash, KeyEqual, Allocator>>; };
template <class Key, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_multimap<Key, Hash, KeyEqual, Allocator>> { using type = map_impl<std::unordered_multimap<Key, Hash, KeyEqual, Allocator>>; };
template <class T, class Allocator>
struct impl_map<std::vector<T, Allocator>> { using type = vector_impl<std::vector<T, Allocator>>; };
template <class T, std::size_t N>
struct impl_map<std::array<T, N>> { using type = array_impl<std::array<T, N>>; };
template <class T, class Allocator>
struct impl_map<std::list<T, Allocator>> { using type = list_impl<std::list<T, Allocator>>; };
template <class T, class Allocator>
struct impl_map<std::deque<T, Allocator>> { using type = list_impl<std::deque<T, Allocator>>; };
//---------------------------------------------------
template <class Result>
class wrapper {
private:
Result* result_;
typename impl_map<Result>::type impl_;
public:
explicit wrapper(Result* t = nullptr) : result_(t)
{ impl_.on_value_available(*result_); }
void
operator()(
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
BOOST_ASSERT(result_);
impl_(*result_, nd, ec);
}
};
template <class T>
class wrapper<std::optional<T>> {
private:
std::optional<T>* result_;
typename impl_map<T>::type impl_{};
public:
explicit wrapper(std::optional<T>* o = nullptr) : result_(o) {}
void
operator()(
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
if (nd.data_type == resp3::type::null)
return;
if (!result_->has_value()) {
*result_ = T{};
impl_.on_value_available(result_->value());
}
impl_(result_->value(), nd, ec);
}
};
} // aedis::adapter::detail
#endif // AEDIS_ADAPTER_ADAPTERS_HPP

View File

@@ -0,0 +1,157 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ADAPTER_RESPONSE_TRAITS_HPP
#define AEDIS_ADAPTER_RESPONSE_TRAITS_HPP
#include <vector>
#include <tuple>
#include <boost/mp11.hpp>
#include <boost/variant2.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/adapter/detail/adapters.hpp>
namespace aedis::adapter::detail {
struct ignore {};
/* Traits class for response objects.
*
 * Provides traits for all supported response types, i.e. all STL
 * containers and C++ built-in types.
*/
template <class ResponseType>
struct response_traits {
using adapter_type = adapter::detail::wrapper<ResponseType>;
static auto adapt(ResponseType& r) noexcept { return adapter_type{&r}; }
};
template <class T>
using adapter_t = typename response_traits<T>::adapter_type;
template <>
struct response_traits<ignore> {
using response_type = ignore;
using adapter_type = resp3::detail::ignore_response;
static auto adapt(response_type&) noexcept { return adapter_type{}; }
};
template <class T>
struct response_traits<resp3::node<T>> {
using response_type = resp3::node<T>;
using adapter_type = adapter::detail::general_simple<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <class String, class Allocator>
struct response_traits<std::vector<resp3::node<String>, Allocator>> {
using response_type = std::vector<resp3::node<String>, Allocator>;
using adapter_type = adapter::detail::general_aggregate<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <>
struct response_traits<void> {
using response_type = void;
using adapter_type = resp3::detail::ignore_response;
static auto adapt() noexcept { return adapter_type{}; }
};
// Duplicated here to avoid circular include dependency.
template<class T>
auto internal_adapt(T& t) noexcept
{ return response_traits<T>::adapt(t); }
template <std::size_t N>
struct assigner {
template <class T1, class T2>
static void assign(T1& dest, T2& from)
{
dest[N] = internal_adapt(std::get<N>(from));
assigner<N - 1>::assign(dest, from);
}
};
template <>
struct assigner<0> {
template <class T1, class T2>
static void assign(T1& dest, T2& from)
{
dest[0] = internal_adapt(std::get<0>(from));
}
};
template <class Tuple>
class static_aggregate_adapter {
private:
using adapters_array_type =
std::array<
boost::mp11::mp_rename<
boost::mp11::mp_transform<
adapter_t, Tuple>,
boost::variant2::variant>,
std::tuple_size<Tuple>::value>;
std::size_t i_ = 0;
std::size_t aggregate_size_ = 0;
adapters_array_type adapters_;
public:
explicit static_aggregate_adapter(Tuple* r = nullptr)
{
detail::assigner<std::tuple_size<Tuple>::value - 1>::assign(adapters_, *r);
}
void count(resp3::node<boost::string_view> const& nd)
{
if (nd.depth == 1) {
if (is_aggregate(nd.data_type))
aggregate_size_ = element_multiplicity(nd.data_type) * nd.aggregate_size;
else
++i_;
return;
}
if (--aggregate_size_ == 0)
++i_;
}
void
operator()(
resp3::node<boost::string_view> const& nd,
boost::system::error_code& ec)
{
using boost::variant2::visit;
if (nd.depth == 0) {
auto const real_aggr_size = nd.aggregate_size * element_multiplicity(nd.data_type);
if (real_aggr_size != std::tuple_size<Tuple>::value)
ec = error::incompatible_size;
return;
}
visit([&](auto& arg){arg(nd, ec);}, adapters_[i_]);
count(nd);
}
};
template <class... Ts>
struct response_traits<std::tuple<Ts...>>
{
using response_type = std::tuple<Ts...>;
using adapter_type = static_aggregate_adapter<response_type>;
static auto adapt(response_type& r) noexcept { return adapter_type{&r}; }
};
} // aedis::adapter::detail
#endif // AEDIS_ADAPTER_RESPONSE_TRAITS_HPP

View File

@@ -0,0 +1,303 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_HPP
#define AEDIS_CONNECTION_HPP
#include <chrono>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <aedis/detail/connection_base.hpp>
namespace aedis {
/** @brief A connection to the Redis server.
* @ingroup high-level-api
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
 * @remarks This class exposes only asynchronous member functions;
 * synchronous communication with the Redis server is provided by
 * the `aedis::sync` class.
*
 * @tparam AsyncReadWriteStream The underlying stream type.
*
*/
template <class AsyncReadWriteStream = boost::asio::ip::tcp::socket>
class connection :
private detail::connection_base<
typename AsyncReadWriteStream::executor_type,
connection<AsyncReadWriteStream>> {
public:
/// Executor type.
using executor_type = typename AsyncReadWriteStream::executor_type;
/// Type of the next layer
using next_layer_type = AsyncReadWriteStream;
using base_type = detail::connection_base<executor_type, connection<AsyncReadWriteStream>>;
/** \brief Connection configuration parameters.
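 *
 * Illustrative override sketch, assuming @c db is a connection and
 * @c ep an `aedis::endpoint` (both placeholders):
 *
 * @code
 * connection<>::timeouts ts;
 * ts.ping_interval = std::chrono::seconds{5};
 * db.async_run(ep, ts, [](auto ec) {
 *    std::cout << ec.message() << std::endl;
 * });
 * @endcode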
*/
struct timeouts {
/// Timeout of the resolve operation.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the resp3 handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval of ping operations.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
};
/// Constructor
explicit connection(executor_type ex)
: base_type{ex}
, stream_{ex}
{}
explicit connection(boost::asio::io_context& ioc)
: connection(ioc.get_executor())
{ }
/// Returns the associated executor.
auto get_executor() {return stream_.get_executor();}
/// Resets the underlying stream.
void reset_stream()
{
if (stream_.is_open()) {
boost::system::error_code ignore;
stream_.shutdown(boost::asio::ip::tcp::socket::shutdown_both, ignore);
stream_.close(ignore);
}
}
/// Returns a reference to the next layer.
auto next_layer() noexcept -> auto& { return stream_; }
/// Returns a const reference to the next layer.
auto next_layer() const noexcept -> auto const& { return stream_; }
/** @brief Starts communication with the Redis server asynchronously.
*
* This function performs the following steps
*
 * @li Resolves the Redis host with `async_resolve`, using the
 * timeout specified in `connection::timeouts::resolve_timeout`.
*
 * @li Connects to one of the endpoints returned by the resolve
 * operation, using the timeout specified in
 * `connection::timeouts::connect_timeout`.
*
* @li Performs a RESP3 handshake by sending a
* [HELLO](https://redis.io/commands/hello/) command with protocol
* version 3 and the credentials contained in the
* `aedis::endpoint` object. The timeout used is the one specified
* in `connection::timeouts::resp3_handshake_timeout`.
*
* @li Erases any password that may be contained in
* `endpoint::password`.
*
* @li Checks whether the server role corresponds to the one
 * specified in the `endpoint`. If `endpoint::role` is left empty,
 * no check is performed. If the role is different from the
 * expected one, `async_run` will complete with
* `error::unexpected_server_role`.
*
 * @li Starts health checks with a timeout twice the value of
* `connection::timeouts::ping_interval`. If no data is received during that
* time interval `connection::async_run` completes with
* `error::idle_timeout`.
*
 * @li Starts the health-check operation that sends
 * [PING](https://redis.io/commands/ping/) commands to Redis with a
* frequency equal to `connection::timeouts::ping_interval`.
*
* @li Starts reading from the socket and executes all requests
* that have been started prior to this function call.
*
* @param ep Redis endpoint.
* @param ts Timeouts used by the operations.
* @param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code);
* @endcode
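 *
 * A minimal usage sketch, assuming a server listening on localhost
 * and a connection object @c db:
 *
 * @code
 * endpoint ep{"127.0.0.1", "6379"};
 * db.async_run(ep, {}, [](auto ec) {
 *    std::cout << ec.message() << std::endl;
 * });
 * @endcode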
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
async_run(
endpoint ep,
timeouts ts = timeouts{},
CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, ts, std::move(token));
}
/** @brief Connects and executes a request asynchronously.
*
* Combines the other `async_run` overload with `async_exec` in a
* single function. This function is useful for users that want to
* send a single request to the server and close it.
*
* @param ep Redis endpoint.
* @param req Request object.
* @param adapter Response adapter.
* @param ts Timeouts used by the operation.
* @param token Asio completion token.
*
* The completion token must have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in bytes.
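 *
 * Usage sketch along the lines of the shipped examples, assuming a
 * connection object @c db:
 *
 * @code
 * resp3::request req;
 * req.push("HELLO", 3);
 * req.push("PING");
 * req.push("QUIT");
 *
 * std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
 * endpoint ep{"127.0.0.1", "6379"};
 * db.async_run(ep, req, adapt(resp), {}, [](auto ec, auto) {
 *    std::cout << ec.message() << std::endl;
 * });
 * @endcode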
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(
endpoint ep,
resp3::request const& req,
Adapter adapter,
timeouts ts,
CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, req, adapter, ts, std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
*
* This function will send a request to the Redis server and
* complete when the response arrives. If the request contains
* only commands that don't expect a response, the completion
* occurs after it has been written to the underlying stream.
* Multiple concurrent calls to this function will be
* automatically queued by the implementation.
*
* @param req Request object.
* @param adapter Response adapter.
* @param token Asio completion token.
*
* For an example see echo_server.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response in
* bytes.
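 *
 * Minimal callback-style sketch, assuming `async_run` has already
 * been started on @c db:
 *
 * @code
 * resp3::request req;
 * req.push("GET", "some-key");
 *
 * std::tuple<std::optional<std::string>> resp;
 * db.async_exec(req, adapt(resp), [](auto ec, auto) {
 *    std::cout << ec.message() << std::endl;
 * });
 * @endcode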
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_exec(req, adapter, std::move(token));
}
/** @brief Receives server side pushes asynchronously.
*
* Users that expect server pushes should call this function in a
* loop. If a push arrives and there is no reader, the connection
 * will hang and eventually time out.
*
* @param adapter The response adapter.
* @param token The Asio completion token.
*
* For an example see subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(boost::system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the push in
* bytes.
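 *
 * Reader-loop sketch following subscriber.cpp, where the connection
 * type uses `asio::use_awaitable` as its default completion token:
 *
 * @code
 * for (std::vector<resp3::node<std::string>> resp;;) {
 *    co_await db->async_receive_push(adapt(resp));
 *    // Use resp here.
 *    resp.clear();
 * }
 * @endcode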
*/
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_receive_push(adapter, std::move(token));
}
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with
* `async_exec`. Has precedence over
 * `request::config::close_on_connection_lost`.
 * @li `operation::run`: Cancels the `async_run` operation. Notice
* that the preferred way to close a connection is to send a
* [QUIT](https://redis.io/commands/quit/) command to the server.
* An unresponsive Redis server will also cause the idle-checks to
 * time out and lead to `connection::async_run` completing with
* `error::idle_timeout`. Calling `cancel(operation::run)`
* directly should be seen as the last option.
 * @li `operation::receive_push`: Cancels any ongoing call to
 * `async_receive_push`.
*
* @param op: The operation to be cancelled.
* @returns The number of operations that have been canceled.
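 *
 * For example, the sketch below cancels all outstanding calls to
 * `async_exec`:
 *
 * @code
 * auto const n = db.cancel(operation::exec);
 * @endcode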
*/
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
private:
using this_type = connection<next_layer_type>;
template <class, class> friend class detail::connection_base;
template <class, class> friend struct detail::exec_read_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::receive_push_op;
template <class> friend struct detail::check_idle_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class, class> friend struct detail::connect_with_timeout_op;
template <class, class> friend struct detail::run_op;
template <class> friend struct detail::ping_op;
template <class Timer, class CompletionToken>
auto
async_connect(
boost::asio::ip::tcp::resolver::results_type const& endpoints,
timeouts ts,
Timer& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::connect_with_timeout_op<this_type, Timer>{this, &endpoints, ts, &timer},
token, stream_);
}
void close() { stream_.close(); }
auto is_open() const noexcept { return stream_.is_open(); }
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
AsyncReadWriteStream stream_;
};
} // aedis
#endif // AEDIS_CONNECTION_HPP

View File

@@ -0,0 +1,374 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_BASE_HPP
#define AEDIS_CONNECTION_BASE_HPP
#include <vector>
#include <queue>
#include <limits>
#include <chrono>
#include <memory>
#include <type_traits>
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <aedis/adapt.hpp>
#include <aedis/operation.hpp>
#include <aedis/endpoint.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/detail/connection_ops.hpp>
namespace aedis::detail {
/** Base class for high level Redis asynchronous connections.
*
 * This class is not meant to be instantiated directly; it is used
 * as the base class in the CRTP.
*
* @tparam Executor The executor type.
* @tparam Derived The derived class type.
*
*/
template <class Executor, class Derived>
class connection_base {
public:
using executor_type = Executor;
using this_type = connection_base<Executor, Derived>;
explicit connection_base(executor_type ex)
: resv_{ex}
, ping_timer_{ex}
, check_idle_timer_{ex}
, writer_timer_{ex}
, read_timer_{ex}
, push_channel_{ex}
, last_data_{std::chrono::time_point<std::chrono::steady_clock>::min()}
, req_{{true}}
{
writer_timer_.expires_at(std::chrono::steady_clock::time_point::max());
read_timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
auto get_executor() {return resv_.get_executor();}
auto cancel(operation op) -> std::size_t
{
switch (op) {
case operation::exec:
{
for (auto& e: reqs_) {
e->stop = true;
e->timer.cancel_one();
}
auto const ret = reqs_.size();
reqs_ = {};
return ret;
}
case operation::run:
{
resv_.cancel();
derived().close();
read_timer_.cancel();
check_idle_timer_.cancel();
writer_timer_.cancel();
ping_timer_.cancel();
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !ptr->req->get_config().close_on_connection_lost;
});
// Cancel own pings if there are any waiting.
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop = true;
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
return 1U;
}
case operation::receive_push:
{
push_channel_.cancel();
return 1U;
}
default: BOOST_ASSERT(false); return 0;
}
}
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
BOOST_ASSERT_MSG(req.size() <= adapter.get_supported_response_size(), "Request and adapter have incompatible sizes.");
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<Derived, Adapter>{&derived(), &req, adapter}, token, resv_);
}
template <
class Adapter = detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
auto f = detail::make_adapter_wrapper(adapter);
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::receive_push_op<Derived, decltype(f)>{&derived(), f}, token, resv_);
}
template <class Timeouts, class CompletionToken>
auto
async_run(endpoint ep, Timeouts ts, CompletionToken token)
{
ep_ = std::move(ep);
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::run_op<Derived, Timeouts>{&derived(), ts}, token, resv_);
}
template <class Adapter, class Timeouts, class CompletionToken>
auto async_run(
endpoint ep,
resp3::request const& req,
Adapter adapter,
Timeouts ts,
CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::runexec_op<Derived, Adapter, Timeouts>
{&derived(), ep, &req, adapter, ts}, token, resv_);
}
private:
using clock_type = std::chrono::steady_clock;
using clock_traits_type = boost::asio::wait_traits<clock_type>;
using timer_type = boost::asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using resolver_type = boost::asio::ip::basic_resolver<boost::asio::ip::tcp, executor_type>;
using push_channel_type = boost::asio::experimental::channel<executor_type, void(boost::system::error_code, std::size_t)>;
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
auto derived() -> Derived& { return static_cast<Derived&>(*this); }
struct req_info {
explicit req_info(executor_type ex) : timer{ex} {}
timer_type timer;
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
bool written = false;
};
using reqs_type = std::deque<std::shared_ptr<req_info>>;
template <class, class> friend struct detail::receive_push_op;
template <class> friend struct detail::reader_op;
template <class> friend struct detail::writer_op;
template <class> friend struct detail::ping_op;
template <class, class> friend struct detail::run_op;
template <class, class> friend struct detail::exec_op;
template <class, class> friend struct detail::exec_read_op;
template <class, class, class> friend struct detail::runexec_op;
template <class> friend struct detail::resolve_with_timeout_op;
template <class> friend struct detail::check_idle_op;
template <class, class> friend struct detail::start_op;
template <class> friend struct detail::send_receive_op;
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->written && ptr->req->size() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->timer.cancel();
});
reqs_.erase(point, std::end(reqs_));
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
if (derived().is_open() && cmds_ == 0 && write_buffer_.empty())
writer_timer_.cancel();
}
auto make_dynamic_buffer(std::size_t max_read_size = 512)
{ return boost::asio::dynamic_buffer(read_buffer_, max_read_size); }
template <class CompletionToken>
auto
async_resolve_with_timeout(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::resolve_with_timeout_op<this_type>{this, d},
token, resv_);
}
template <class CompletionToken>
auto reader(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::reader_op<Derived>{&derived()}, token, resv_.get_executor());
}
template <class CompletionToken>
auto writer(CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::writer_op<Derived>{&derived()}, token, resv_.get_executor());
}
template <
class Timeouts,
class CompletionToken>
auto async_start(Timeouts ts, CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::start_op<this_type, Timeouts>{this, ts}, token, resv_);
}
template <class CompletionToken>
auto
async_ping(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ping_op<Derived>{&derived(), d}, token, resv_);
}
template <class CompletionToken>
auto
async_check_idle(
std::chrono::steady_clock::duration d,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::check_idle_op<Derived>{&derived(), d}, token, check_idle_timer_);
}
template <class Adapter, class CompletionToken>
auto async_exec_read(Adapter adapter, std::size_t cmds, CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<Derived, Adapter>{&derived(), adapter, cmds}, token, resv_);
}
void stage_request(req_info& ri)
{
write_buffer_ += ri.req->payload();
cmds_ += ri.req->size();
ri.written = true;
}
void coalesce_requests()
{
BOOST_ASSERT(write_buffer_.empty());
BOOST_ASSERT(!reqs_.empty());
stage_request(*reqs_.at(0));
for (std::size_t i = 1; i < std::size(reqs_); ++i) {
if (!reqs_.at(i - 1)->req->get_config().coalesce ||
!reqs_.at(i - 0)->req->get_config().coalesce) {
break;
}
stage_request(*reqs_.at(i));
}
}
void prepare_hello(endpoint const& ep)
{
req_.clear();
if (requires_auth(ep)) {
req_.push("HELLO", "3", "AUTH", ep.username, ep.password);
} else {
req_.push("HELLO", "3");
}
}
auto expect_role(std::string const& expected) -> bool
{
if (std::empty(expected))
return true;
resp3::node<std::string> role_node;
role_node.data_type = resp3::type::blob_string;
role_node.aggregate_size = 1;
role_node.depth = 1;
role_node.value = "role";
auto iter = std::find(std::cbegin(response_), std::cend(response_), role_node);
if (iter == std::end(response_))
return false;
++iter;
BOOST_ASSERT(iter != std::cend(response_));
return iter->value == expected;
}
// IO objects
resolver_type resv_;
timer_type ping_timer_;
timer_type check_idle_timer_;
timer_type writer_timer_;
timer_type read_timer_;
push_channel_type push_channel_;
std::string read_buffer_;
std::string write_buffer_;
std::size_t cmds_ = 0;
reqs_type reqs_;
// Last time we received data.
time_point_type last_data_;
resp3::request req_;
std::vector<resp3::node<std::string>> response_;
endpoint ep_;
// The result of async_resolve.
boost::asio::ip::tcp::resolver::results_type endpoints_;
};
} // aedis::detail
#endif // AEDIS_CONNECTION_BASE_HPP

View File

@@ -0,0 +1,649 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_CONNECTION_OPS_HPP
#define AEDIS_CONNECTION_OPS_HPP
#include <array>
#include <algorithm>
#include <boost/assert.hpp>
#include <boost/system.hpp>
#include <boost/asio/write.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/adapt.hpp>
#include <aedis/error.hpp>
#include <aedis/detail/net.hpp>
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/exec.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/request.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail {
template <class Conn, class Timer>
struct connect_with_timeout_op {
Conn* conn = nullptr;
boost::asio::ip::tcp::resolver::results_type const* endpoints = nullptr;
typename Conn::timeouts ts;
Timer* timer = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield
detail::async_connect(
conn->next_layer(), *timer, *endpoints, std::move(self));
self.complete(ec);
}
}
};
template <class Conn>
struct resolve_with_timeout_op {
Conn* conn = nullptr;
std::chrono::steady_clock::duration resolve_timeout;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::resolver::results_type const& res = {})
{
reenter (coro)
{
conn->ping_timer_.expires_after(resolve_timeout);
yield
aedis::detail::async_resolve(
conn->resv_, conn->ping_timer_,
conn->ep_.host, conn->ep_.port, std::move(self));
conn->endpoints_ = res;
self.complete(ec);
}
}
};
template <class Conn, class Adapter>
struct receive_push_op {
Conn* conn = nullptr;
Adapter adapter;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield
conn->push_channel_.async_receive(std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield
resp3::async_read(
conn->next_layer(),
conn->make_dynamic_buffer(adapter.get_max_read_size(0)),
adapter, std::move(self));
if (ec) {
conn->cancel(operation::run);
// Needed to cancel the channel, otherwise the read
// operation will be blocked forever, see
// test_push_adapter.
conn->cancel(operation::receive_push);
self.complete(ec, 0);
return;
}
read_size = n;
yield
conn->push_channel_.async_send({}, 0, std::move(self));
self.complete(ec, read_size);
return;
}
}
};
template <class Conn, class Adapter>
struct exec_read_op {
Conn* conn;
Adapter adapter;
std::size_t cmds = 0;
std::size_t read_size = 0;
std::size_t index = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
// Loop reading the responses to this request.
BOOST_ASSERT(!conn->reqs_.empty());
while (cmds != 0) {
BOOST_ASSERT(conn->cmds_ != 0);
//-----------------------------------
// If we detect a push in the middle of a request we have
// to hand it to the push consumer. To do that we need
// some data in the read buffer.
if (conn->read_buffer_.empty()) {
yield
boost::asio::async_read_until(
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec, 0);
return;
}
}
// If the next message is a push we have to hand it to
// the receive_push_op, wait for it to be done and continue.
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) {
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
// Notice we don't call cancel_run() as that is the
// responsibility of the receive_push_op.
self.complete(ec, 0);
return;
}
continue;
}
//-----------------------------------
yield
resp3::async_read(
conn->next_layer(),
conn->make_dynamic_buffer(adapter.get_max_read_size(index)),
[i = index, adpt = adapter] (resp3::node<boost::string_view> const& nd, boost::system::error_code& ec) mutable { adpt(i, nd, ec); },
std::move(self));
++index;
if (ec) {
conn->cancel(operation::run);
self.complete(ec, 0);
return;
}
read_size += n;
BOOST_ASSERT(cmds != 0);
--cmds;
BOOST_ASSERT(conn->cmds_ != 0);
--conn->cmds_;
}
self.complete({}, read_size);
}
}
};
template <class Conn, class Adapter>
struct exec_op {
using req_info_type = typename Conn::req_info;
Conn* conn = nullptr;
resp3::request const* req = nullptr;
Adapter adapter{};
std::shared_ptr<req_info_type> info = nullptr;
std::size_t read_size = 0;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
if (req->get_config().close_on_connection_lost && !conn->is_open()) {
// The user doesn't want to wait for the connection to be
// established.
self.complete(error::not_connected, 0);
return;
}
info = std::allocate_shared<req_info_type>(boost::asio::get_associated_allocator(self), conn->resv_.get_executor());
info->timer.expires_at(std::chrono::steady_clock::time_point::max());
info->req = req;
info->cmds = req->size();
info->stop = false;
conn->add_request_info(info);
yield
info->timer.async_wait(std::move(self));
BOOST_ASSERT(!!ec);
if (ec != boost::asio::error::operation_aborted) {
self.complete(ec, 0);
return;
}
// This can happen, for example, when the resolve operation fails.
if (!conn->is_open() || info->stop) {
self.complete(ec, 0);
return;
}
BOOST_ASSERT(conn->is_open());
if (req->size() == 0) {
self.complete({}, 0);
return;
}
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
BOOST_ASSERT(conn->cmds_ != 0);
yield
conn->async_exec_read(adapter, conn->reqs_.front()->cmds, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
read_size = n;
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.pop_front();
if (conn->cmds_ == 0) {
conn->read_timer_.cancel_one();
if (!conn->reqs_.empty())
conn->writer_timer_.cancel_one();
} else {
BOOST_ASSERT(!conn->reqs_.empty());
conn->reqs_.front()->timer.cancel_one();
}
self.complete({}, read_size);
}
}
};
template <class Conn>
struct ping_op {
Conn* conn;
std::chrono::steady_clock::duration ping_interval;
boost::asio::coroutine coro{};
template <class Self>
void
operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro) for (;;)
{
conn->ping_timer_.expires_after(ping_interval);
yield
conn->ping_timer_.async_wait(std::move(self));
if (ec || !conn->is_open()) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
conn->req_.clear();
conn->req_.push("PING");
yield
conn->async_exec(conn->req_, adapt(), std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete({});
return;
}
}
}
};
template <class Conn>
struct check_idle_op {
Conn* conn;
std::chrono::steady_clock::duration ping_interval;
boost::asio::coroutine coro{};
template <class Self>
void operator()(Self& self, boost::system::error_code ec = {})
{
reenter (coro) for (;;)
{
conn->check_idle_timer_.expires_after(2 * ping_interval);
yield
conn->check_idle_timer_.async_wait(std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete({});
return;
}
if (!conn->is_open()) {
// Notice this is not an error, it was requested from an
// external op.
self.complete({});
return;
}
auto const now = std::chrono::steady_clock::now();
if (conn->last_data_ + (2 * ping_interval) < now) {
conn->cancel(operation::run);
self.complete(error::idle_timeout);
return;
}
conn->last_data_ = now;
}
}
};
template <class Conn, class Timeouts>
struct start_op {
Conn* conn;
Timeouts ts;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 4> order = {}
, boost::system::error_code ec0 = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, boost::system::error_code ec3 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(token);},
[this](auto token) { return conn->writer(token);},
[this](auto token) { return conn->async_check_idle(ts.ping_interval, token);},
[this](auto token) { return conn->async_ping(ts.ping_interval, token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
case 2: self.complete(ec2); break;
case 3: self.complete(ec3); break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Conn, class Timeouts>
struct run_op {
Conn* conn = nullptr;
Timeouts ts;
boost::asio::coroutine coro{};
template <class Self>
void operator()(
Self& self,
boost::system::error_code ec = {},
std::size_t = 0)
{
reenter (coro)
{
yield
conn->async_resolve_with_timeout(ts.resolve_timeout, std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
yield
conn->derived().async_connect(conn->endpoints_, ts, conn->ping_timer_, std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
conn->prepare_hello(conn->ep_);
conn->ping_timer_.expires_after(ts.resp3_handshake_timeout);
yield
resp3::detail::async_exec(
conn->next_layer(),
conn->ping_timer_,
conn->req_,
adapter::adapt2(conn->response_),
conn->make_dynamic_buffer(),
std::move(self)
);
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
conn->ep_.password.clear();
if (!conn->expect_role(conn->ep_.role)) {
conn->cancel(operation::run);
self.complete(error::unexpected_server_role);
return;
}
conn->write_buffer_.clear();
conn->cmds_ = 0;
std::for_each(std::begin(conn->reqs_), std::end(conn->reqs_), [](auto const& ptr) {
return ptr->written = false;
});
yield conn->async_start(ts, std::move(self));
self.complete(ec);
}
}
};
template <class Conn>
struct writer_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
while (!conn->reqs_.empty() && conn->cmds_ == 0 && conn->write_buffer_.empty()) {
conn->coalesce_requests();
yield
boost::asio::async_write(conn->next_layer(), boost::asio::buffer(conn->write_buffer_), std::move(self));
if (ec) {
self.complete(ec);
return;
}
// We have to clear the payload right after the write op in
// order to use it as a flag that signals there is no
// ongoing write.
conn->write_buffer_.clear();
conn->cancel_push_requests();
}
if (conn->is_open()) {
yield
conn->writer_timer_.async_wait(std::move(self));
if (ec != boost::asio::error::operation_aborted) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
// The timer may be canceled either to stop the write op
// or to proceed with the next write. The difference between
// the two is that in the former case the socket will have been
// closed first; we check for that below.
}
if (!conn->is_open()) {
// Notice this is not an error of the op; stopping was
// requested from the outside, so we complete with
// success.
self.complete({});
return;
}
}
}
};
template <class Conn>
struct reader_op {
Conn* conn;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
boost::ignore_unused(n);
reenter (coro) for (;;)
{
yield
boost::asio::async_read_until(
conn->next_layer(),
conn->make_dynamic_buffer(),
"\r\n", std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
conn->last_data_ = std::chrono::steady_clock::now();
// We handle unsolicited events in the following way
//
// 1. Its resp3 type is a push.
//
// 2. A non-push type is received while the request queue
//    is empty. I have noticed this is possible (e.g.
//    -MISCONF). I would expect such messages to have the
//    push type so they could be distinguished from
//    responses to commands, but they arrive as simple
//    errors. If we are lucky enough to receive them while
//    the command queue is empty we can treat them as
//    server pushes, otherwise it is impossible to handle
//    them properly.
//
// 3. The request does not expect any response but we got
//    one anyway. This may happen when, for example,
//    SUBSCRIBE is sent with the wrong syntax.
//
BOOST_ASSERT(!conn->read_buffer_.empty());
if (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push
|| conn->reqs_.empty()
|| (!conn->reqs_.empty() && conn->reqs_.front()->cmds == 0)) {
yield
async_send_receive(conn->push_channel_, std::move(self));
if (ec) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
} else {
BOOST_ASSERT(conn->cmds_ != 0);
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front()->cmds != 0);
conn->reqs_.front()->timer.cancel_one();
yield
conn->read_timer_.async_wait(std::move(self));
if (ec != boost::asio::error::operation_aborted ||
!conn->is_open()) {
conn->cancel(operation::run);
self.complete(ec);
return;
}
}
}
}
};
template <class Conn, class Adapter, class Timeouts>
struct runexec_op {
Conn* conn = nullptr;
endpoint ep;
resp3::request const* req = nullptr;
Adapter adapter;
Timeouts ts;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this, ep2 = ep](auto token) { return conn->async_run(ep2, ts, token);},
[this](auto token) { return conn->async_exec(*req, adapter, token);}
).async_wait(
boost::asio::experimental::wait_for_one_error(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, n); return;
case 1: {
if (ec2)
self.complete(ec2, n);
else
self.complete(ec1, n);
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
} // aedis::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_CONNECTION_OPS_HPP


@@ -0,0 +1,196 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_NET_HPP
#define AEDIS_NET_HPP
#include <array>
#include <boost/system.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/connect.hpp>
#include <boost/assert.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::detail {
template <class Executor>
using conn_timer_t = boost::asio::basic_waitable_timer<std::chrono::steady_clock, boost::asio::wait_traits<std::chrono::steady_clock>, Executor>;
template <
class Stream,
class EndpointSequence
>
struct connect_op {
Stream* socket;
conn_timer_t<typename Stream::executor_type>* timer;
EndpointSequence* endpoints;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, typename Stream::protocol_type::endpoint const& ep = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
auto f = [](boost::system::error_code const&, auto const&) { return true; };
return boost::asio::async_connect(*socket, *endpoints, f, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, ep); return;
case 1:
{
if (ec2) {
self.complete(ec2, {});
} else {
self.complete(error::connect_timeout, ep);
}
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <class Resolver, class Timer>
struct resolve_op {
Resolver* resv;
Timer* timer;
boost::string_view host;
boost::string_view port;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::asio::ip::tcp::resolver::results_type res = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resv->async_resolve(host.data(), port.data(), token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, res); return;
case 1:
{
if (ec2) {
self.complete(ec2, {});
} else {
self.complete(error::resolve_timeout, {});
}
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <class Channel>
struct send_receive_op {
Channel* channel;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t = 0)
{
reenter (coro)
{
yield
channel->async_send(boost::system::error_code{}, 0, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield
channel->async_receive(std::move(self));
self.complete(ec, 0);
}
}
};
template <
class Stream,
class EndpointSequence,
class CompletionToken
>
auto async_connect(
Stream& socket,
conn_timer_t<typename Stream::executor_type>& timer,
EndpointSequence ep,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, typename Stream::protocol_type::endpoint const&)
>(connect_op<Stream, EndpointSequence>
{&socket, &timer, &ep}, token, socket, timer);
}
template <
class Resolver,
class Timer,
class CompletionToken =
boost::asio::default_completion_token_t<typename Resolver::executor_type>
>
auto async_resolve(
Resolver& resv,
Timer& timer,
boost::string_view host,
boost::string_view port,
CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, boost::asio::ip::tcp::resolver::results_type)
>(resolve_op<Resolver, Timer>{&resv, &timer, host, port}, token, resv, timer);
}
template <
class Channel,
class CompletionToken =
boost::asio::default_completion_token_t<typename Channel::executor_type>
>
auto async_send_receive(Channel& channel, CompletionToken&& token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(send_receive_op<Channel>{&channel}, token, channel);
}
} // aedis::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_NET_HPP


@@ -0,0 +1,40 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_ENDPOINT_HPP
#define AEDIS_ENDPOINT_HPP
#include <string>
#include <ostream>
namespace aedis {
/** \brief A Redis endpoint.
* \ingroup high-level-api
*/
struct endpoint {
/// Redis server address.
std::string host;
/// Redis server port.
std::string port;
/// Expected role if any.
std::string role{};
/// Username if authentication is required.
std::string username{};
/// Password if authentication is required.
std::string password{};
};
auto is_valid(endpoint const& ep) noexcept -> bool;
auto requires_auth(endpoint const& ep) noexcept -> bool;
auto operator<<(std::ostream& os, endpoint const& ep) -> std::ostream&;
} // aedis
#endif // AEDIS_ENDPOINT_HPP
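A minimal sketch of filling in this struct and using the free functions declared above; the address is illustrative and <aedis/src.hpp> is assumed to provide the non-template definitions.

#include <aedis/endpoint.hpp>
#include <aedis/src.hpp> // implementations of is_valid and operator<<
#include <iostream>

int main()
{
   // Illustrative address; username and password are left empty, so
   // requires_auth(ep) returns false.
   aedis::endpoint ep;
   ep.host = "127.0.0.1";
   ep.port = "6379";

   if (aedis::is_valid(ep))
      std::cout << ep << "\n";
}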


@@ -1,23 +1,35 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ERROR_HPP
#define BOOST_REDIS_ERROR_HPP
#ifndef AEDIS_ERROR_HPP
#define AEDIS_ERROR_HPP
#include <boost/system/error_code.hpp>
namespace boost::redis {
namespace aedis {
/** \brief Generic errors.
* \ingroup high-level-api
*/
enum class error
{
/// Resolve timeout.
resolve_timeout = 1,
/// Connect timeout.
connect_timeout,
/// Idle timeout.
idle_timeout,
/// Exec timeout.
exec_timeout,
/// Invalid RESP3 type.
invalid_data_type = 1,
invalid_data_type,
/// Can't parse the string as a number.
not_a_number,
@@ -61,26 +73,14 @@ enum class error
/// Got RESP3 null.
resp3_null,
/// There is no established connection.
not_connected,
/// Unexpected server role.
unexpected_server_role,
/// Resolve timeout
resolve_timeout,
/// Connect timeout
connect_timeout,
/// Pong timeout
pong_timeout,
/// SSL handshake timeout
/// SSL handshake timeout.
ssl_handshake_timeout,
/// Can't receive push synchronously without blocking
sync_receive_push_failed,
/// Incompatible node depth.
incompatible_node_depth,
/// There is no established connection.
not_connected,
};
/** \internal
@@ -88,15 +88,15 @@ enum class error
* \param e Error code.
* \ingroup any
*/
auto make_error_code(error e) -> system::error_code;
auto make_error_code(error e) -> boost::system::error_code;
} // boost::redis
} // aedis
namespace std {
template<>
struct is_error_code_enum<::boost::redis::error> : std::true_type {};
struct is_error_code_enum<::aedis::error> : std::true_type {};
} // std
#endif // BOOST_REDIS_ERROR_HPP
#endif // AEDIS_ERROR_HPP


@@ -0,0 +1,29 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/endpoint.hpp>
#include <string>
namespace aedis {
auto is_valid(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.host) && !std::empty(ep.port);
}
auto requires_auth(endpoint const& ep) noexcept -> bool
{
return !std::empty(ep.username) && !std::empty(ep.password);
}
auto operator<<(std::ostream& os, endpoint const& ep) -> std::ostream&
{
os << ep.host << ":" << ep.port << " (" << ep.username << "," << ep.password << ")";
return os;
}
} // aedis


@@ -1,29 +1,32 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/error.hpp>
#include <boost/assert.hpp>
#include <aedis/error.hpp>
namespace boost::redis {
namespace aedis {
namespace detail {
struct error_category_impl : system::error_category {
struct error_category_impl : boost::system::error_category {
virtual ~error_category_impl() = default;
auto name() const noexcept -> char const* override
{
return "boost.redis";
return "aedis";
}
auto message(int ev) const -> std::string override
{
switch(static_cast<error>(ev)) {
case error::resolve_timeout: return "Resolve operation timeout.";
case error::connect_timeout: return "Connect operation timeout.";
case error::idle_timeout: return "Idle timeout.";
case error::exec_timeout: return "Exec timeout.";
case error::invalid_data_type: return "Invalid resp3 type.";
case error::not_a_number: return "Can't convert string to number (maybe forgot to upgrade to RESP3?).";
case error::not_a_number: return "Can't convert string to number.";
case error::exceeeds_max_nested_depth: return "Exceeds the maximum number of nested responses.";
case error::unexpected_bool_value: return "Unexpected bool value.";
case error::empty_field: return "Expected field value is empty.";
@@ -37,19 +40,15 @@ struct error_category_impl : system::error_category {
case error::incompatible_size: return "Aggregate container has incompatible size.";
case error::not_a_double: return "Not a double.";
case error::resp3_null: return "Got RESP3 null.";
case error::not_connected: return "Not connected.";
case error::resolve_timeout: return "Resolve timeout.";
case error::connect_timeout: return "Connect timeout.";
case error::pong_timeout: return "Pong timeout.";
case error::unexpected_server_role: return "Unexpected server role.";
case error::ssl_handshake_timeout: return "SSL handshake timeout.";
case error::sync_receive_push_failed: return "Can't receive server push synchronously without blocking.";
case error::incompatible_node_depth: return "Incompatible node depth.";
default: BOOST_ASSERT(false); return "Boost.Redis error.";
case error::not_connected: return "Not connected.";
default: BOOST_ASSERT(false); return "Aedis error.";
}
}
};
auto category() -> system::error_category const&
auto category() -> boost::system::error_category const&
{
static error_category_impl instance;
return instance;
@@ -57,9 +56,9 @@ auto category() -> system::error_category const&
} // detail
auto make_error_code(error e) -> system::error_code
auto make_error_code(error e) -> boost::system::error_code
{
return system::error_code{static_cast<int>(e), detail::category()};
return boost::system::error_code{static_cast<int>(e), detail::category()};
}
} // boost::redis::detail
} // aedis


@@ -0,0 +1,29 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_OPERATION_HPP
#define AEDIS_OPERATION_HPP
namespace aedis {
/** \brief Connection operations that can be cancelled.
* \ingroup high-level-api
*
* The operations listed below can be passed to the
* `aedis::connection::cancel` member function.
*/
enum class operation {
/// Refers to `connection::async_exec` operations.
exec,
/// Refers to `connection::async_run` operations.
run,
/// Refers to `connection::async_receive_push` operations.
receive_push,
};
} // aedis
#endif // AEDIS_OPERATION_HPP
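A small sketch of how these values are meant to be used; the Connection type below is an assumption, anything exposing the cancel(operation) member documented above would do.

#include <aedis/operation.hpp>

#include <cstddef>

// Hypothetical helper that tears down all pending work on a
// connection-like object before shutdown.
template <class Connection>
auto cancel_all(Connection& conn) -> std::size_t
{
   auto n = conn.cancel(aedis::operation::receive_push);
   n += conn.cancel(aedis::operation::exec);
   n += conn.cancel(aedis::operation::run);
   return n; // total number of cancelled operations
}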


@@ -0,0 +1,172 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_EXEC_HPP
#define AEDIS_RESP3_EXEC_HPP
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/request.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::resp3::detail {
template <
class AsyncStream,
class Adapter,
class DynamicBuffer
>
struct exec_op {
AsyncStream* socket = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
std::size_t n_cmds = 0;
std::size_t size = 0;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro) for (;;)
{
if (req) {
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
if (ec || n_cmds == 0) {
self.complete(ec, n);
return;
}
req = nullptr;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
size += n;
if (--n_cmds == 0) {
self.complete(ec, size);
return;
}
}
}
};
template <
class AsyncStream,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &req, adapter, dbuf, req.size()}, token, socket);
}
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer
>
struct exec_with_timeout_op {
AsyncStream* socket = nullptr;
Timer* timer = nullptr;
request const* req = nullptr;
Adapter adapter;
DynamicBuffer dbuf{};
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return detail::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1, n); break;
case 1:
{
if (ec2) {
self.complete(ec2, 0);
} else {
self.complete(aedis::error::exec_timeout, 0);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <
class AsyncStream,
class Timer,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
Timer& timer,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_with_timeout_op<AsyncStream, Timer, Adapter, DynamicBuffer>
{&socket, &timer, &req, adapter, dbuf}, token, socket, timer);
}
} // aedis::resp3::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_RESP3_EXEC_HPP


@@ -0,0 +1,25 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/home/x3.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/type.hpp>
namespace aedis::resp3::detail {
auto parse_uint(char const* data, std::size_t size, boost::system::error_code& ec) -> std::size_t
{
static constexpr boost::spirit::x3::uint_parser<std::size_t, 10> p{};
std::size_t ret = 0;
if (!parse(data, data + size, p, ret))
ec = error::not_a_number;
return ret;
}
} // aedis::resp3::detail


@@ -0,0 +1,223 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_PARSER_HPP
#define AEDIS_RESP3_PARSER_HPP
#include <array>
#include <limits>
#include <system_error>
#include <boost/assert.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/node.hpp>
namespace aedis::resp3::detail {
auto parse_uint(char const* data, std::size_t size, boost::system::error_code& ec) -> std::size_t;
template <class ResponseAdapter>
class parser {
private:
using node_type = node<boost::string_view>;
static constexpr std::size_t max_embedded_depth = 5;
ResponseAdapter adapter_;
// The current depth. Simple data types will have depth 0, whereas
// the elements of aggregates will have depth 1. Embedded types
// will have increasing depth.
std::size_t depth_ = 0;
// The parser supports up to 5 levels of nested structures. The
// first element in the sizes stack is a sentinel and must be
// different from 1.
std::array<std::size_t, max_embedded_depth + 1> sizes_ = {{1}};
// Contains the length expected in the next bulk read.
std::size_t bulk_length_ = (std::numeric_limits<std::size_t>::max)();
// The type of the next bulk. Contains type::invalid if no bulk is
// expected.
type bulk_ = type::invalid;
public:
explicit parser(ResponseAdapter adapter)
: adapter_{adapter}
{
sizes_[0] = 2; // The sentinel must be more than 1.
}
// Returns the number of bytes that have been consumed.
auto
consume(char const* data, std::size_t n, boost::system::error_code& ec) -> std::size_t
{
if (bulk_ != type::invalid) {
n = bulk_length_ + 2;
switch (bulk_) {
case type::streamed_string_part:
{
BOOST_ASSERT(bulk_length_ != 0);
adapter_({bulk_, 1, depth_, {data, bulk_length_}}, ec);
if (ec)
return 0;
} break;
default:
{
adapter_({bulk_, 1, depth_, {data, bulk_length_}}, ec);
if (ec)
return 0;
}
}
bulk_ = type::invalid;
--sizes_[depth_];
} else if (sizes_[depth_] != 0) {
auto const t = to_type(*data);
switch (t) {
case type::streamed_string_part:
{
bulk_length_ = parse_uint(data + 1, n - 2, ec);
if (ec)
return 0;
if (bulk_length_ == 0) {
adapter_({type::streamed_string_part, 1, depth_, {}}, ec);
sizes_[depth_] = 0; // We are done.
} else {
bulk_ = type::streamed_string_part;
}
} break;
case type::blob_error:
case type::verbatim_string:
case type::blob_string:
{
if (data[1] == '?') {
// NOTE: This can only be triggered with blob_string.
// Trick: A streamed string is read as an aggregate
// of infinite length. When the streaming is done
// the server is supposed to send a part with length
// 0.
sizes_[++depth_] = (std::numeric_limits<std::size_t>::max)();
} else {
bulk_length_ = parse_uint(data + 1, n - 2, ec);
if (ec)
return 0;
bulk_ = t;
}
} break;
case type::boolean:
{
if (n == 3) {
ec = error::empty_field;
return 0;
}
if (data[1] != 'f' && data[1] != 't') {
ec = error::unexpected_bool_value;
return 0;
}
adapter_({t, 1, depth_, {data + 1, n - 3}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::doublean:
case type::big_number:
case type::number:
{
if (n == 3) {
ec = error::empty_field;
return 0;
}
adapter_({t, 1, depth_, {data + 1, n - 3}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::simple_error:
case type::simple_string:
{
adapter_({t, 1, depth_, {&data[1], n - 3}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::null:
{
adapter_({type::null, 1, depth_, {}}, ec);
if (ec)
return 0;
--sizes_[depth_];
} break;
case type::push:
case type::set:
case type::array:
case type::attribute:
case type::map:
{
auto const l = parse_uint(data + 1, n - 2, ec);
if (ec)
return 0;
adapter_({t, l, depth_, {}}, ec);
if (ec)
return 0;
if (l == 0) {
--sizes_[depth_];
} else {
if (depth_ == max_embedded_depth) {
ec = error::exceeeds_max_nested_depth;
return 0;
}
++depth_;
sizes_[depth_] = l * element_multiplicity(t);
}
} break;
default:
{
ec = error::invalid_data_type;
return 0;
}
}
}
while (sizes_[depth_] == 0) {
--depth_;
--sizes_[depth_];
}
return n;
}
// Returns true when the parser is done with the current message.
[[nodiscard]] auto done() const noexcept
{ return depth_ == 0 && bulk_ == type::invalid; }
// The bulk type expected in the next read. If none is expected returns
// type::invalid.
auto bulk() const noexcept { return bulk_; }
// The length expected in the next bulk.
[[nodiscard]] auto bulk_length() const noexcept { return bulk_length_; }
};
} // aedis::resp3::detail
#endif // AEDIS_RESP3_PARSER_HPP


@@ -0,0 +1,116 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_READ_OPS_HPP
#define AEDIS_RESP3_READ_OPS_HPP
#include <boost/assert.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::resp3::detail {
struct ignore_response {
void operator()(node<boost::string_view> nd, boost::system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = error::resp3_simple_error; return;
case resp3::type::blob_error: ec = error::resp3_blob_error; return;
default: return;
}
}
};
template <
class AsyncReadStream,
class DynamicBuffer,
class ResponseAdapter>
class parse_op {
private:
AsyncReadStream& stream_;
DynamicBuffer buf_;
parser<ResponseAdapter> parser_;
std::size_t consumed_ = 0;
std::size_t buffer_size_ = 0;
boost::asio::coroutine coro_{};
public:
parse_op(AsyncReadStream& stream, DynamicBuffer buf, ResponseAdapter adapter)
: stream_ {stream}
, buf_ {std::move(buf)}
, parser_ {std::move(adapter)}
{ }
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro_) for (;;) {
if (parser_.bulk() == type::invalid) {
yield
boost::asio::async_read_until(stream_, buf_, "\r\n", std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
} else {
// On a bulk read we can't read until the delimiter, since
// the payload may contain the delimiter itself, so we have
// to read the whole chunk. However, if the bulk blob is
// small enough it may already be in the buffer (from the
// last read), in which case there is no need to initiate
// another async op; otherwise we have to read the missing
// bytes.
if (buf_.size() < (parser_.bulk_length() + 2)) {
buffer_size_ = buf_.size();
buf_.grow(parser_.bulk_length() + 2 - buffer_size_);
yield
boost::asio::async_read(
stream_,
buf_.data(buffer_size_, parser_.bulk_length() + 2 - buffer_size_),
boost::asio::transfer_all(),
std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
}
n = parser_.bulk_length() + 2;
BOOST_ASSERT(buf_.size() >= n);
}
n = parser_.consume(static_cast<char const*>(buf_.data(0, n).data()), n, ec);
if (ec) {
self.complete(ec, 0);
return;
}
buf_.consume(n);
consumed_ += n;
if (parser_.done()) {
self.complete({}, consumed_);
return;
}
}
}
};
} // aedis::resp3::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_RESP3_READ_OPS_HPP


@@ -1,16 +1,14 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/request.hpp>
#include <aedis/resp3/request.hpp>
#include <string_view>
namespace aedis::resp3::detail {
namespace boost::redis::detail {
auto has_response(std::string_view cmd) -> bool
auto has_push_response(boost::string_view cmd) -> bool
{
if (cmd == "SUBSCRIBE") return true;
if (cmd == "PSUBSCRIBE") return true;
@@ -18,4 +16,4 @@ auto has_response(std::string_view cmd) -> bool
return false;
}
} // boost:redis::detail
} // aedis::resp3::detail


@@ -1,79 +1,44 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RESP3_TYPE_HPP
#define BOOST_REDIS_RESP3_TYPE_HPP
#include <boost/assert.hpp>
#include <ostream>
#include <vector>
#include <string>
#include <aedis/resp3/type.hpp>
namespace boost::redis::resp3 {
namespace aedis::resp3 {
/** \brief RESP3 data types.
\ingroup high-level-api
The RESP3 specification can be found at https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md.
*/
enum class type
{ /// Aggregate
array,
/// Aggregate
push,
/// Aggregate
set,
/// Aggregate
map,
/// Aggregate
attribute,
/// Simple
simple_string,
/// Simple
simple_error,
/// Simple
number,
/// Simple
doublean,
/// Simple
boolean,
/// Simple
big_number,
/// Simple
null,
/// Simple
blob_error,
/// Simple
verbatim_string,
/// Simple
blob_string,
/// Simple
streamed_string,
/// Simple
streamed_string_part,
/// Invalid
invalid
};
auto to_string(type t) -> char const*
{
switch (t) {
case type::array: return "array";
case type::push: return "push";
case type::set: return "set";
case type::map: return "map";
case type::attribute: return "attribute";
case type::simple_string: return "simple_string";
case type::simple_error: return "simple_error";
case type::number: return "number";
case type::doublean: return "doublean";
case type::boolean: return "boolean";
case type::big_number: return "big_number";
case type::null: return "null";
case type::blob_error: return "blob_error";
case type::verbatim_string: return "verbatim_string";
case type::blob_string: return "blob_string";
case type::streamed_string_part: return "streamed_string_part";
default: return "invalid";
}
}
/** \brief Converts the data type to a string.
* \ingroup high-level-api
* \param t RESP3 type.
*/
auto to_string(type t) noexcept -> char const*;
auto operator<<(std::ostream& os, type t) -> std::ostream&
{
os << to_string(t);
return os;
}
/** \brief Writes the type to the output stream.
* \ingroup high-level-api
* \param os Output stream.
* \param t RESP3 type.
*/
auto operator<<(std::ostream& os, type t) -> std::ostream&;
/* Checks whether the data type is an aggregate.
*/
constexpr auto is_aggregate(type t) noexcept -> bool
auto is_aggregate(type t) -> bool
{
switch (t) {
case type::array:
@@ -85,9 +50,7 @@ constexpr auto is_aggregate(type t) noexcept -> bool
}
}
// For map and attribute data types this function returns 2. All
// other types have value 1.
constexpr auto element_multiplicity(type t) noexcept -> std::size_t
auto element_multiplicity(type t) -> std::size_t
{
switch (t) {
case type::map:
@@ -96,8 +59,7 @@ constexpr auto element_multiplicity(type t) noexcept -> std::size_t
}
}
// Returns the wire code of a given type.
constexpr auto to_code(type t) noexcept -> char
auto to_code(type t) -> char
{
switch (t) {
case type::blob_error: return '!';
@@ -121,8 +83,7 @@ constexpr auto to_code(type t) noexcept -> char
}
}
// Converts a wire-format RESP3 type (char) to a resp3 type.
constexpr auto to_type(char c) noexcept -> type
auto to_type(char c) -> type
{
switch (c) {
case '!': return type::blob_error;
@@ -145,6 +106,4 @@ constexpr auto to_type(char c) noexcept -> type
}
}
} // boost::redis::resp3
#endif // BOOST_REDIS_RESP3_TYPE_HPP
} // aedis::resp3


@@ -0,0 +1,94 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_NODE_HPP
#define AEDIS_RESP3_NODE_HPP
#include <aedis/resp3/type.hpp>
#include <string>
#include <vector>
namespace aedis::resp3 {
/** \brief A node in the response tree.
* \ingroup high-level-api
*
* Redis responses are the pre-order view of the response tree (see
* https://en.wikipedia.org/wiki/Tree_traversal#Pre-order,_NLR).
*
* \remark Any Redis response can be received in an array of nodes,
* for example \c std::vector<node<std::string>>.
*/
template <class String>
struct node {
/// The RESP3 type of the data in this node.
resp3::type data_type;
/// The number of elements of an aggregate.
std::size_t aggregate_size;
/// The depth of this node in the response tree.
std::size_t depth;
/// The actual data. For aggregate types this is usually empty.
String value;
};
/** @brief Converts the node to a string.
* @relates node
*
* @param in The node object.
*/
template <class String>
auto to_string(node<String> const& in)
{
std::string out;
out += std::to_string(in.depth);
out += '\t';
out += to_string(in.data_type);
out += '\t';
out += std::to_string(in.aggregate_size);
out += '\t';
if (!is_aggregate(in.data_type))
out.append(in.value.data(), in.value.size());
return out;
}
/** @brief Compares a node for equality.
* @relates node
*
* @param a Left hand side node object.
* @param b Right hand side node object.
*/
template <class String>
auto operator==(node<String> const& a, node<String> const& b)
{
return a.aggregate_size == b.aggregate_size
&& a.depth == b.depth
&& a.data_type == b.data_type
&& a.value == b.value;
};
/** @brief Writes the node string to the stream.
* @relates node
*
* @param os Output stream.
* @param node Node object.
*
* \remark Binary data is not converted to text.
*/
template <class String>
auto operator<<(std::ostream& os, node<String> const& node) -> std::ostream&
{
os << to_string(node);
return os;
}
} // aedis::resp3
#endif // AEDIS_RESP3_NODE_HPP
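A sketch of the pre-order layout described above, using made-up values for a map holding a single key/value pair; <aedis/src.hpp> is assumed to supply the to_string(type) definition used by operator<<.

#include <aedis/resp3/node.hpp>
#include <aedis/src.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
   using aedis::resp3::node;
   using aedis::resp3::type;

   // The aggregate node comes first, followed by its two children at
   // depth 1 (a map element counts twice: key and value).
   std::vector<node<std::string>> const resp
   { {type::map, 1, 0, {}}
   , {type::blob_string, 1, 1, "key"}
   , {type::blob_string, 1, 1, "value"}
   };

   for (auto const& nd : resp)
      std::cout << nd << "\n";
}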


@@ -0,0 +1,180 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_READ_HPP
#define AEDIS_RESP3_READ_HPP
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/detail/read_ops.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/async_result.hpp>
namespace aedis::resp3 {
/** \brief Reads a complete response to a command synchronously.
* \ingroup low-level-api
*
* This function reads a complete response to a command or a
* server push synchronously. For example
*
* @code
* int resp;
* std::string buffer;
* resp3::read(socket, dynamic_buffer(buffer), adapt(resp));
* @endcode
*
* For a complete example see examples/intro_sync.cpp. This function
* is implemented in terms of one or more calls to @c
* asio::read_until and @c asio::read functions, and is known as a @a
* composed @a operation. Furthermore, the implementation may read
* additional bytes from the stream that lie past the end of the
* message being read. These additional bytes are stored in the
* dynamic buffer, which must be preserved for subsequent reads.
*
* \param stream The stream from which to read e.g. a tcp socket.
* \param buf Dynamic buffer (version 2).
* \param adapter The response adapter.
* \param ec If an error occurs, it will be assigned to this parameter.
* \returns The number of bytes that have been consumed from the dynamic buffer.
*
* \remark This function calls buf.consume() in each chunk of data
* after it has been passed to the adapter. Users must not consume
* the bytes after it returns.
*/
template <
class SyncReadStream,
class DynamicBuffer,
class ResponseAdapter
>
auto
read(
SyncReadStream& stream,
DynamicBuffer buf,
ResponseAdapter adapter,
boost::system::error_code& ec) -> std::size_t
{
detail::parser<ResponseAdapter> p {adapter};
std::size_t n = 0;
std::size_t consumed = 0;
do {
if (p.bulk() == type::invalid) {
n = boost::asio::read_until(stream, buf, "\r\n", ec);
if (ec)
return 0;
} else {
auto const s = buf.size();
auto const l = p.bulk_length();
if (s < (l + 2)) {
auto const to_read = l + 2 - s;
buf.grow(to_read);
n = boost::asio::read(stream, buf.data(s, to_read), ec);
if (ec)
return 0;
}
}
auto const* data = static_cast<char const*>(buf.data(0, n).data());
n = p.consume(data, n, ec);
if (ec)
return 0;
buf.consume(n);
consumed += n;
} while (!p.done());
return consumed;
}
/** \brief Reads a complete response to a command synchronously.
* \ingroup low-level-api
*
* Same as the error_code overload but throws on error.
*/
template<
class SyncReadStream,
class DynamicBuffer,
class ResponseAdapter = detail::ignore_response>
auto
read(
SyncReadStream& stream,
DynamicBuffer buf,
ResponseAdapter adapter = ResponseAdapter{})
{
boost::system::error_code ec;
auto const n = resp3::read(stream, buf, adapter, ec);
if (ec)
BOOST_THROW_EXCEPTION(boost::system::system_error{ec});
return n;
}
/** \brief Reads a complete response to a Redis command asynchronously.
* \ingroup low-level-api
*
* This function reads a complete response to a command or a
* server push asynchronously. For example
*
* @code
* std::string buffer;
* std::set<std::string> resp;
* co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
* @endcode
*
* For a complete example see examples/transaction.cpp. This function
* is implemented in terms of one or more calls to @c
* asio::async_read_until and @c asio::async_read functions, and is
* known as a @a composed @a operation. Furthermore, the
* implementation may read additional bytes from the stream that lie
* past the end of the message being read. These additional bytes are
* stored in the dynamic buffer, which must be preserved for
* subsequent reads.
*
* \param stream The stream from which to read e.g. a tcp socket.
* \param buffer Dynamic buffer (version 2).
* \param adapter The response adapter.
* \param token The completion token.
*
* The completion handler will receive as a parameter the total
* number of bytes transferred from the stream and must have the
* following signature
*
* @code
* void(boost::system::error_code, std::size_t);
* @endcode
*
* \remark This function calls buf.consume() in each chunk of data
* after it has been passed to the adapter. Users must not consume
* the bytes after it returns.
*/
template <
class AsyncReadStream,
class DynamicBuffer,
class ResponseAdapter = detail::ignore_response,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncReadStream::executor_type>
>
auto async_read(
AsyncReadStream& stream,
DynamicBuffer buffer,
ResponseAdapter adapter = ResponseAdapter{},
CompletionToken&& token =
boost::asio::default_completion_token_t<typename AsyncReadStream::executor_type>{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::parse_op<AsyncReadStream, DynamicBuffer, ResponseAdapter> {stream, buffer, adapter},
token,
stream);
}
} // aedis::resp3
#endif // AEDIS_RESP3_READ_HPP


@@ -0,0 +1,373 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_REQUEST_HPP
#define AEDIS_RESP3_REQUEST_HPP
#include <string>
#include <tuple>
#include <boost/hana.hpp>
#include <boost/utility/string_view.hpp>
#include <aedis/resp3/type.hpp>
// NOTE: Consider detecting tuples among the types in the parameter
// pack to calculate the header size correctly.
//
// NOTE: For some commands like hset it would be a good idea to assert
// the value type is a pair.
namespace aedis::resp3 {
constexpr char const* separator = "\r\n";
/** @brief Adds a bulk to the request.
* @relates request
*
* This function is useful when serializing your own data
* structures into a request. For example
*
* @code
* void to_bulk(std::string& to, mystruct const& obj)
* {
* auto const str = // Convert obj to a string.
* resp3::to_bulk(to, str);
* }
* @endcode
*
* @param to Storage on which data will be copied into.
* @param data Data that will be serialized and stored in @c to.
*
* See more in @ref serialization.
*/
template <class Request>
void to_bulk(Request& to, boost::string_view data)
{
auto const str = std::to_string(data.size());
to += to_code(type::blob_string);
to.append(std::cbegin(str), std::cend(str));
to += separator;
to.append(std::cbegin(data), std::cend(data));
to += separator;
}
template <class Request, class T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void to_bulk(Request& to, T n)
{
auto const s = std::to_string(n);
to_bulk(to, boost::string_view{s});
}
namespace detail {
auto has_push_response(boost::string_view cmd) -> bool;
template <class T>
struct add_bulk_impl {
template <class Request>
static void add(Request& to, T const& from)
{
using namespace aedis::resp3;
to_bulk(to, from);
}
};
template <class U, class V>
struct add_bulk_impl<std::pair<U, V>> {
template <class Request>
static void add(Request& to, std::pair<U, V> const& from)
{
using namespace aedis::resp3;
to_bulk(to, from.first);
to_bulk(to, from.second);
}
};
template <class ...Ts>
struct add_bulk_impl<boost::hana::tuple<Ts...>> {
template <class Request>
static void add(Request& to, boost::hana::tuple<Ts...> const& from)
{
using boost::hana::for_each;
// Fold expressions are a C++17 feature, so we use hana.
//(detail::add_bulk(*request_, args), ...);
for_each(from, [&](auto const& e) {
using namespace aedis::resp3;
to_bulk(to, e);
});
}
};
template <class Request>
void add_header(Request& to, type t, std::size_t size)
{
auto const str = std::to_string(size);
to += to_code(t);
to.append(std::cbegin(str), std::cend(str));
to += separator;
}
template <class Request, class T>
void add_bulk(Request& to, T const& data)
{
detail::add_bulk_impl<T>::add(to, data);
}
template <class>
struct bulk_counter;
template <class>
struct bulk_counter {
static constexpr auto size = 1U;
};
template <class T, class U>
struct bulk_counter<std::pair<T, U>> {
static constexpr auto size = 2U;
};
template <class Request>
void add_blob(Request& to, boost::string_view blob)
{
to.append(std::cbegin(blob), std::cend(blob));
to += separator;
}
template <class Request>
void add_separator(Request& to)
{
to += separator;
}
} // detail
/** @brief Creates Redis requests.
* @ingroup high-level-api
*
* A request is composed of one or more Redis commands and is
* referred to in the redis documentation as a pipeline, see
* https://redis.io/topics/pipelining. For example
*
* @code
* request r;
* r.push("HELLO", 3);
* r.push("FLUSHALL");
* r.push("PING");
* r.push("PING", "key");
* r.push("QUIT");
* co_await async_write(socket, buffer(r));
* @endcode
*
* @remarks
*
* @li Non-string types will be converted to string by using \c
* to_bulk, which must be made available over ADL.
* @li Uses std::string as internal storage.
*/
class request {
public:
/// Request configuration options.
struct config {
/** @brief If set to true, requests started with
* `connection::async_exec` will fail either if the connection is
* lost while the request is pending or if `async_exec` is
* called while there is no connection with Redis. The default
* behaviour is not to close requests.
*/
bool close_on_connection_lost = false;
/** @brief Coalesce this with other requests.
*
* If true this request will be coalesced with other requests,
* see https://redis.io/topics/pipelining. If false, this
* request will be sent individually.
*/
bool coalesce = true;
};
/** @brief Constructor
*
* @param cfg Configuration options.
*/
explicit request(config cfg = config{false, true})
: cfg_{cfg}
{}
/// Returns the number of commands contained in this request.
auto size() const noexcept -> std::size_t { return commands_;};
// Returns the request payload.
auto payload() const noexcept -> auto const& { return payload_;}
/// Clears the request preserving allocated memory.
void clear()
{
payload_.clear();
commands_ = 0;
}
/** @brief Appends a new command to the end of the request.
*
* For example
*
* \code
* request req;
* req.push("SET", "key", "some string", "EX", "2");
* \endcode
*
* will add the \c set command with value "some string" and an
* expiration of 2 seconds.
*
* \param cmd The command, e.g. a Redis or Sentinel command.
* \param args Command arguments.
*/
template <class... Ts>
void push(boost::string_view cmd, Ts const&... args)
{
using boost::hana::for_each;
using boost::hana::make_tuple;
using resp3::type;
auto constexpr pack_size = sizeof...(Ts);
detail::add_header(payload_, type::array, 1 + pack_size);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, make_tuple(args...));
if (!detail::has_push_response(cmd))
++commands_;
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a key and have a
* dynamic range of arguments. For example
*
* @code
* std::map<std::string, std::string> map
* { {"key1", "value1"}
* , {"key2", "value2"}
* , {"key3", "value3"}
* };
*
* request req;
* req.push_range2("HSET", "key", std::cbegin(map), std::cend(map));
* @endcode
*
* \param cmd The command e.g. Redis or Sentinel command.
* \param key The command key.
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class Key, class ForwardIterator>
void push_range2(boost::string_view cmd, Key const& key, ForwardIterator begin, ForwardIterator end)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
detail::add_header(payload_, type::array, 2 + size * distance);
detail::add_bulk(payload_, cmd);
detail::add_bulk(payload_, key);
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
}
/** @brief Appends a new command to the end of the request.
*
* This overload is useful for commands that have a dynamic number
* of arguments and don't have a key. For example
*
* \code
* std::set<std::string> channels
* { "channel1" , "channel2" , "channel3" };
*
* request req;
* req.push_range2("SUBSCRIBE", std::cbegin(channels), std::cend(channels));
* \endcode
*
* \param cmd The Redis command
* \param begin Iterator to the begin of the range.
* \param end Iterator to the end of the range.
*/
template <class ForwardIterator>
void push_range2(boost::string_view cmd, ForwardIterator begin, ForwardIterator end)
{
using value_type = typename std::iterator_traits<ForwardIterator>::value_type;
using resp3::type;
if (begin == end)
return;
auto constexpr size = detail::bulk_counter<value_type>::size;
auto const distance = std::distance(begin, end);
detail::add_header(payload_, type::array, 1 + size * distance);
detail::add_bulk(payload_, cmd);
for (; begin != end; ++begin)
detail::add_bulk(payload_, *begin);
if (!detail::has_push_response(cmd))
++commands_;
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking a pair of iterators (i.e. push_range2).
*
* \param cmd Redis command.
* \param key Redis key.
* \param range Range to send, e.g. a \c std::map.
*/
template <class Key, class Range>
void push_range(boost::string_view cmd, Key const& key, Range const& range)
{
using std::begin;
using std::end;
push_range2(cmd, key, begin(range), end(range));
}
/** @brief Appends a new command to the end of the request.
*
* Equivalent to the overload taking a pair of iterators (i.e. push_range2).
*
* \param cmd Redis command.
* \param range Range to send, e.g. a \c std::map.
*/
template <class Range>
void push_range(boost::string_view cmd, Range const& range)
{
using std::begin;
using std::end;
push_range2(cmd, begin(range), end(range));
}
/// Calls std::string::reserve on the internal storage.
void reserve(std::size_t new_cap = 0)
{ payload_.reserve(new_cap); }
auto get_config() const noexcept -> auto const& {return cfg_; }
private:
std::string payload_;
std::size_t commands_ = 0;
config cfg_;
};
} // aedis::resp3
#endif // AEDIS_RESP3_REQUEST_HPP
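A sketch of the config options documented above; whether these particular settings are appropriate depends entirely on the application.

#include <aedis/resp3/request.hpp>
#include <aedis/src.hpp>

int main()
{
   // Fail this request if the connection is lost and send it on its
   // own instead of pipelining it with other requests.
   aedis::resp3::request::config cfg;
   cfg.close_on_connection_lost = true;
   cfg.coalesce = false;

   aedis::resp3::request req{cfg};
   req.push("SUBSCRIBE", "channel");
}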


@@ -0,0 +1,87 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_TYPE_HPP
#define AEDIS_RESP3_TYPE_HPP
#include <ostream>
#include <vector>
#include <string>
namespace aedis::resp3 {
/** \brief RESP3 data types.
\ingroup high-level-api
The RESP3 specification can be found at https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md.
*/
enum class type
{ /// Aggregate
array,
/// Aggregate
push,
/// Aggregate
set,
/// Aggregate
map,
/// Aggregate
attribute,
/// Simple
simple_string,
/// Simple
simple_error,
/// Simple
number,
/// Simple
doublean,
/// Simple
boolean,
/// Simple
big_number,
/// Simple
null,
/// Simple
blob_error,
/// Simple
verbatim_string,
/// Simple
blob_string,
/// Simple
streamed_string_part,
/// Invalid
invalid
};
/** \brief Converts the data type to a string.
* \ingroup high-level-api
* \param t RESP3 type.
*/
auto to_string(type t) -> char const*;
/** \brief Writes the type to the output stream.
* \ingroup high-level-api
* \param os Output stream.
* \param t RESP3 type.
*/
auto operator<<(std::ostream& os, type t) -> std::ostream&;
/* Checks whether the data type is an aggregate.
*/
auto is_aggregate(type t) -> bool;
// For map and attribute data types this function returns 2. All
// other types have value 1.
auto element_multiplicity(type t) -> std::size_t;
// Returns the wire code of a given type.
auto to_code(type t) -> char;
// Converts a wire-format RESP3 type (char) to a resp3 type.
auto to_type(char c) -> type;
} // aedis::resp3
#endif // AEDIS_RESP3_TYPE_HPP
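A tiny sketch of the conversion helpers declared above; '!' (blob_error) is the only wire code visible in this listing, so it is the one used here, and <aedis/src.hpp> is assumed to supply the definitions.

#include <aedis/resp3/type.hpp>
#include <aedis/src.hpp>
#include <iostream>

int main()
{
   using aedis::resp3::to_code;
   using aedis::resp3::to_type;

   auto const t = to_type('!');      // type::blob_error
   std::cout << t << "\n";           // prints "blob_error"
   std::cout << to_code(t) << "\n";  // prints '!'
}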


@@ -0,0 +1,57 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_WRITE_HPP
#define AEDIS_RESP3_WRITE_HPP
#include <boost/asio/write.hpp>
namespace aedis::resp3 {
/** \brief Writes a request synchronously.
* \ingroup low-level-api
*/
template<
class SyncWriteStream,
class Request
>
auto write(SyncWriteStream& stream, Request const& req)
{
return boost::asio::write(stream, boost::asio::buffer(req.payload()));
}
template<
class SyncWriteStream,
class Request
>
auto write(
SyncWriteStream& stream,
Request const& req,
boost::system::error_code& ec)
{
return boost::asio::write(stream, boost::asio::buffer(req.payload()), ec);
}
/** \brief Writes a request asynchronously.
* \ingroup low-level-api
*/
template<
class AsyncWriteStream,
class Request,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncWriteStream::executor_type>
>
auto async_write(
AsyncWriteStream& stream,
Request const& req,
CompletionToken&& token =
boost::asio::default_completion_token_t<typename AsyncWriteStream::executor_type>{})
{
return boost::asio::async_write(stream, boost::asio::buffer(req.payload()), token);
}
} // aedis::resp3
#endif // AEDIS_RESP3_WRITE_HPP
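A sketch of the synchronous overload, assuming a Redis server listening at the illustrative address below; responses are left unread for brevity and <aedis/src.hpp> supplies the non-template parts of request.

#include <aedis/resp3/request.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/src.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>

int main()
{
   boost::asio::io_context ioc;
   boost::asio::ip::tcp::resolver resv{ioc};
   boost::asio::ip::tcp::socket socket{ioc};
   boost::asio::connect(socket, resv.resolve("127.0.0.1", "6379"));

   aedis::resp3::request req;
   req.push("HELLO", 3);
   req.push("PING");

   // Blocks until the whole payload has been written; throws on error.
   aedis::resp3::write(socket, req);
}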

include/aedis/src.hpp

@@ -0,0 +1,11 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <aedis/impl/error.ipp>
#include <aedis/impl/endpoint.ipp>
#include <aedis/resp3/impl/request.ipp>
#include <aedis/resp3/impl/type.ipp>
#include <aedis/resp3/detail/impl/parser.ipp>


@@ -0,0 +1,198 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SSL_CONNECTION_HPP
#define AEDIS_SSL_CONNECTION_HPP
#include <chrono>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <aedis/detail/connection_base.hpp>
#include <aedis/ssl/detail/connection_ops.hpp>
namespace aedis::ssl {
template <class>
class connection;
/** \brief A SSL connection to the Redis server.
* \ingroup high-level-api
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
* @remarks This class exposes only asynchronous member functions;
* synchronous communication with the Redis server is provided by
* the `aedis::sync` class.
*
* @tparam AsyncReadWriteStream The stream type wrapped by \c boost::asio::ssl::stream, e.g. \c boost::asio::ip::tcp::socket.
*
*/
template <class AsyncReadWriteStream>
class connection<boost::asio::ssl::stream<AsyncReadWriteStream>> :
private aedis::detail::connection_base<
typename boost::asio::ssl::stream<AsyncReadWriteStream>::executor_type,
connection<boost::asio::ssl::stream<AsyncReadWriteStream>>> {
public:
/// Type of the next layer
using next_layer_type = boost::asio::ssl::stream<AsyncReadWriteStream>;
/// Executor type.
using executor_type = typename next_layer_type::executor_type;
using base_type = aedis::detail::connection_base<executor_type, connection<boost::asio::ssl::stream<AsyncReadWriteStream>>>;
/** \brief Connection configuration parameters.
*/
struct timeouts {
/// Timeout of the resolve operation.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Timeout of the connect operation.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Timeout of the ssl handshake operation.
std::chrono::steady_clock::duration handshake_timeout = std::chrono::seconds{10};
/// Timeout of the resp3 handshake operation.
std::chrono::steady_clock::duration resp3_handshake_timeout = std::chrono::seconds{2};
/// Time interval of ping operations.
std::chrono::steady_clock::duration ping_interval = std::chrono::seconds{1};
};
/// Constructor
explicit connection(executor_type ex, boost::asio::ssl::context& ctx)
: base_type{ex}
, stream_{ex, ctx}
{
}
/// Constructor
explicit connection(boost::asio::io_context& ioc, boost::asio::ssl::context& ctx)
: connection(ioc.get_executor(), ctx)
{ }
/// Returns the associated executor.
auto get_executor() {return stream_.get_executor();}
/// Reset the underlying stream.
void reset_stream(boost::asio::ssl::context& ctx)
{
stream_ = next_layer_type{stream_.get_executor(), ctx};
}
/// Returns a reference to the next layer.
auto& next_layer() noexcept { return stream_; }
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept { return stream_; }
/** @brief Connects and executes a request asynchronously.
*
* See aedis::connection::async_run for detailed information.
*/
template <class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto
async_run(
endpoint ep,
timeouts ts = timeouts{},
CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, ts, std::move(token));
}
/** @brief Connects and executes a request asynchronously.
*
* See aedis::connection::async_run for detailed information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_run(
endpoint ep,
resp3::request const& req,
Adapter adapter,
timeouts ts,
CompletionToken token = CompletionToken{})
{
return base_type::async_run(ep, req, adapter, ts, std::move(token));
}
/** @brief Executes a command on the Redis server asynchronously.
*
* See aedis::connection::async_exec for detailed information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_exec(
resp3::request const& req,
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_exec(req, adapter, std::move(token));
}
/** @brief Receives server side pushes asynchronously.
*
* See aedis::connection::async_receive_push for detailed information.
*/
template <
class Adapter = aedis::detail::response_traits<void>::adapter_type,
class CompletionToken = boost::asio::default_completion_token_t<executor_type>>
auto async_receive_push(
Adapter adapter = adapt(),
CompletionToken token = CompletionToken{})
{
return base_type::async_receive_push(adapter, std::move(token));
}
/** @brief Cancel operations.
*
* See aedis::connection::cancel for detailed information.
*/
auto cancel(operation op) -> std::size_t
{ return base_type::cancel(op); }
private:
using this_type = connection<next_layer_type>;
template <class, class> friend class aedis::detail::connection_base;
template <class, class> friend struct aedis::detail::exec_op;
template <class, class> friend struct detail::ssl_connect_with_timeout_op;
template <class, class> friend struct aedis::detail::run_op;
template <class> friend struct aedis::detail::writer_op;
template <class> friend struct aedis::detail::check_idle_op;
template <class> friend struct aedis::detail::reader_op;
template <class, class> friend struct aedis::detail::exec_read_op;
template <class> friend struct aedis::detail::ping_op;
auto& lowest_layer() noexcept { return stream_.lowest_layer(); }
auto is_open() const noexcept { return stream_.next_layer().is_open(); }
void close() { stream_.next_layer().close(); }
template <class Timer, class CompletionToken>
auto
async_connect(
boost::asio::ip::tcp::resolver::results_type const& endpoints,
timeouts ts,
Timer& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(detail::ssl_connect_with_timeout_op<this_type, Timer>{this, &endpoints, ts, &timer}, token, stream_);
}
next_layer_type stream_;
};
} // aedis::ssl
#endif // AEDIS_SSL_CONNECTION_HPP
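A usage sketch under stated assumptions: a TLS-enabled Redis server at the illustrative address below, certificate verification omitted for brevity, and a completion signature of void(error_code) for async_run.

#include <aedis/endpoint.hpp>
#include <aedis/ssl/connection.hpp>
#include <aedis/src.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl.hpp>
#include <iostream>

int main()
{
   namespace asio = boost::asio;
   using connection =
      aedis::ssl::connection<asio::ssl::stream<asio::ip::tcp::socket>>;

   asio::io_context ioc;
   asio::ssl::context ctx{asio::ssl::context::tls_client};
   connection conn{ioc, ctx};

   aedis::endpoint ep;
   ep.host = "127.0.0.1"; // illustrative only
   ep.port = "6380";

   conn.async_run(ep, {}, [](auto ec) {
      std::cout << "async_run: " << ec.message() << std::endl;
   });

   ioc.run();
}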


@@ -0,0 +1,113 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_SSL_CONNECTION_OPS_HPP
#define AEDIS_SSL_CONNECTION_OPS_HPP
#include <array>
#include <boost/assert.hpp>
#include <boost/system.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/yield.hpp>
namespace aedis::ssl::detail
{
template <class Stream>
struct handshake_op {
Stream* stream;
aedis::detail::conn_timer_t<typename Stream::executor_type>* timer;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token)
{
return stream->async_handshake(boost::asio::ssl::stream_base::client, token);
},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0: self.complete(ec1); return;
case 1:
{
BOOST_ASSERT_MSG(!ec2, "handshake_op: Incompatible state.");
self.complete(error::ssl_handshake_timeout);
return;
}
default: BOOST_ASSERT(false);
}
}
}
};
template <
class Stream,
class CompletionToken
>
auto async_handshake(
Stream& stream,
aedis::detail::conn_timer_t<typename Stream::executor_type>& timer,
CompletionToken&& token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code)
>(handshake_op<Stream>{&stream, &timer}, token, stream, timer);
}
template <class Conn, class Timer>
struct ssl_connect_with_timeout_op {
Conn* conn = nullptr;
boost::asio::ip::tcp::resolver::results_type const* endpoints = nullptr;
typename Conn::timeouts ts;
Timer* timer = nullptr;
boost::asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, boost::asio::ip::tcp::endpoint const& = {})
{
reenter (coro)
{
timer->expires_after(ts.connect_timeout);
yield
aedis::detail::async_connect(
conn->lowest_layer(), *timer, *endpoints, std::move(self));
if (ec) {
self.complete(ec);
return;
}
timer->expires_after(ts.handshake_timeout);
yield
async_handshake(conn->next_layer(), *timer, std::move(self));
self.complete(ec);
}
}
};
} // aedis::ssl::detail
#include <boost/asio/unyield.hpp>
#endif // AEDIS_SSL_CONNECTION_OPS_HPP

View File

@@ -1,28 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_HPP
#define BOOST_REDIS_HPP
#include <boost/redis/config.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/connection.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/ignore.hpp>
#include <boost/redis/logger.hpp>
/** @defgroup high-level-api Reference
*
* This page contains the documentation of the Aedis high-level API.
*/
/** @defgroup low-level-api Reference
*
* This page contains the documentation of the Aedis low-level API.
*/
#endif // BOOST_REDIS_HPP


@@ -1,80 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_ADAPT_HPP
#define BOOST_REDIS_ADAPTER_ADAPT_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/adapter/detail/result_traits.hpp>
#include <boost/redis/adapter/detail/response_traits.hpp>
#include <boost/mp11.hpp>
#include <boost/system.hpp>
#include <tuple>
#include <limits>
#include <string_view>
#include <variant>
namespace boost::redis::adapter
{
/** @brief Adapts a type to be used as a response.
*
* The type T must be either
*
* 1. a response<T1, T2, T3, ...> or
* 2. std::vector<node<String>>
*
 * The types T1, T2, etc. can be any STL container, any integer type,
 * and `std::string`.

*
* @param t Tuple containing the responses.
*/
template<class T>
auto boost_redis_adapt(T& t) noexcept
{
return detail::response_traits<T>::adapt(t);
}
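/* Usage sketch for boost_redis_adapt (illustrative only; `resp`, `node` and
 * `ec` below are assumed names, not part of this header):
 *
 * @code
 * boost::redis::response<std::string, int> resp;
 * auto adapter = boost::redis::adapter::boost_redis_adapt(resp);
 * // The connection implementation invokes the adapter for each RESP3
 * // node it parses, e.g. adapter(index, node, ec).
 * @endcode
 */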
/** @brief Adapts user data to read operations.
* @ingroup low-level-api
*
* STL containers, \c resp3::response and built-in types are supported and
* can be used in conjunction with \c std::optional<T>.
*
* Example usage:
*
* @code
* std::unordered_map<std::string, std::string> cont;
* co_await async_read(socket, buffer, adapt(cont));
* @endcode
*
* For a transaction
*
* @code
* sr.push(command::multi);
* sr.push(command::ping, ...);
* sr.push(command::incr, ...);
* sr.push_range(command::rpush, ...);
* sr.push(command::lrange, ...);
* sr.push(command::incr, ...);
* sr.push(command::exec);
*
* co_await async_write(socket, buffer(request));
*
* // Reads the response to a transaction
* resp3::response<std::string, int, int, std::vector<std::string>, int> execs;
* co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(execs));
* @endcode
*/
template<class T>
auto adapt2(T& t = redis::ignore) noexcept
{ return detail::result_traits<T>::adapt(t); }
} // boost::redis::adapter
#endif // BOOST_REDIS_ADAPTER_ADAPT_HPP


@@ -1,444 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_ADAPTERS_HPP
#define BOOST_REDIS_ADAPTER_ADAPTERS_HPP
#include <boost/redis/error.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/resp3/serialization.hpp>
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/adapter/result.hpp>
#include <boost/assert.hpp>
#include <set>
#include <optional>
#include <unordered_set>
#include <forward_list>
#include <system_error>
#include <map>
#include <unordered_map>
#include <list>
#include <deque>
#include <vector>
#include <array>
#include <string_view>
#include <charconv>
// See https://stackoverflow.com/a/31658120/1077832
#include<ciso646>
#ifdef _LIBCPP_VERSION
#else
#include <cstdlib>
#endif
namespace boost::redis::adapter::detail
{
// Serialization.
template <class T>
auto boost_redis_from_bulk(T& i, std::string_view sv, system::error_code& ec) -> typename std::enable_if<std::is_integral<T>::value, void>::type
{
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
if (res.ec != std::errc())
ec = redis::error::not_a_number;
}
inline
void boost_redis_from_bulk(bool& t, std::string_view sv, system::error_code&)
{
t = *sv.data() == 't';
}
inline
void boost_redis_from_bulk(double& d, std::string_view sv, system::error_code& ec)
{
#ifdef _LIBCPP_VERSION
// The string in sv is not null terminated and we also don't know
// if there is enough space at the end for a null char. The easiest
// thing to do is to create a temporary.
std::string const tmp{sv.data(), sv.data() + std::size(sv)};
char* end{};
d = std::strtod(tmp.data(), &end);
if (d == HUGE_VAL || d == 0)
ec = redis::error::not_a_double;
#else
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), d);
if (res.ec != std::errc())
ec = redis::error::not_a_double;
#endif // _LIBCPP_VERSION
}
template <class CharT, class Traits, class Allocator>
void
boost_redis_from_bulk(
std::basic_string<CharT, Traits, Allocator>& s,
std::string_view sv,
system::error_code&)
{
s.append(sv.data(), sv.size());
}
//================================================
template <class Result>
class general_aggregate {
private:
Result* result_;
public:
explicit general_aggregate(Result* c = nullptr): result_(c) {}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code&)
{
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
switch (nd.data_type) {
case resp3::type::blob_error:
case resp3::type::simple_error:
*result_ = error{nd.data_type, std::string{std::cbegin(nd.value), std::cend(nd.value)}};
break;
default:
result_->value().push_back({nd.data_type, nd.aggregate_size, nd.depth, std::string{std::cbegin(nd.value), std::cend(nd.value)}});
}
}
};
template <class Node>
class general_simple {
private:
Node* result_;
public:
explicit general_simple(Node* t = nullptr) : result_(t) {}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code&)
{
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
switch (nd.data_type) {
case resp3::type::blob_error:
case resp3::type::simple_error:
*result_ = error{nd.data_type, std::string{std::cbegin(nd.value), std::cend(nd.value)}};
break;
default:
result_->value().data_type = nd.data_type;
result_->value().aggregate_size = nd.aggregate_size;
result_->value().depth = nd.depth;
result_->value().value.assign(nd.value.data(), nd.value.size());
}
}
};
template <class Result>
class simple_impl {
public:
void on_value_available(Result&) {}
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& n, system::error_code& ec)
{
if (is_aggregate(n.data_type)) {
ec = redis::error::expects_resp3_simple_type;
return;
}
boost_redis_from_bulk(result, n.value, ec);
}
};
template <class Result>
class set_impl {
private:
typename Result::iterator hint_;
public:
void on_value_available(Result& result)
{ hint_ = std::end(result); }
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
if (is_aggregate(nd.data_type)) {
if (nd.data_type != resp3::type::set)
ec = redis::error::expects_resp3_set;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = redis::error::expects_resp3_set;
return;
}
typename Result::key_type obj;
boost_redis_from_bulk(obj, nd.value, ec);
hint_ = result.insert(hint_, std::move(obj));
}
};
template <class Result>
class map_impl {
private:
typename Result::iterator current_;
bool on_key_ = true;
public:
void on_value_available(Result& result)
{ current_ = std::end(result); }
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
if (is_aggregate(nd.data_type)) {
if (element_multiplicity(nd.data_type) != 2)
ec = redis::error::expects_resp3_map;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = redis::error::expects_resp3_map;
return;
}
if (on_key_) {
typename Result::key_type obj;
boost_redis_from_bulk(obj, nd.value, ec);
current_ = result.insert(current_, {std::move(obj), {}});
} else {
typename Result::mapped_type obj;
boost_redis_from_bulk(obj, nd.value, ec);
current_->second = std::move(obj);
}
on_key_ = !on_key_;
}
};
template <class Result>
class vector_impl {
public:
void on_value_available(Result& ) { }
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
if (is_aggregate(nd.data_type)) {
auto const m = element_multiplicity(nd.data_type);
result.reserve(result.size() + m * nd.aggregate_size);
} else {
result.push_back({});
boost_redis_from_bulk(result.back(), nd.value, ec);
}
}
};
template <class Result>
class array_impl {
private:
int i_ = -1;
public:
void on_value_available(Result& ) { }
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
if (is_aggregate(nd.data_type)) {
if (i_ != -1) {
ec = redis::error::nested_aggregate_not_supported;
return;
}
if (result.size() != nd.aggregate_size * element_multiplicity(nd.data_type)) {
ec = redis::error::incompatible_size;
return;
}
} else {
if (i_ == -1) {
ec = redis::error::expects_resp3_aggregate;
return;
}
BOOST_ASSERT(nd.aggregate_size == 1);
boost_redis_from_bulk(result.at(i_), nd.value, ec);
}
++i_;
}
};
template <class Result>
struct list_impl {
void on_value_available(Result& ) { }
template <class String>
void operator()(Result& result, resp3::basic_node<String> const& nd, system::error_code& ec)
{
if (!is_aggregate(nd.data_type)) {
BOOST_ASSERT(nd.aggregate_size == 1);
if (nd.depth < 1) {
ec = redis::error::expects_resp3_aggregate;
return;
}
result.push_back({});
boost_redis_from_bulk(result.back(), nd.value, ec);
}
}
};
//---------------------------------------------------
template <class T>
struct impl_map { using type = simple_impl<T>; };
template <class Key, class Compare, class Allocator>
struct impl_map<std::set<Key, Compare, Allocator>> { using type = set_impl<std::set<Key, Compare, Allocator>>; };
template <class Key, class Compare, class Allocator>
struct impl_map<std::multiset<Key, Compare, Allocator>> { using type = set_impl<std::multiset<Key, Compare, Allocator>>; };
template <class Key, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_set<Key, Hash, KeyEqual, Allocator>> { using type = set_impl<std::unordered_set<Key, Hash, KeyEqual, Allocator>>; };
template <class Key, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_multiset<Key, Hash, KeyEqual, Allocator>> { using type = set_impl<std::unordered_multiset<Key, Hash, KeyEqual, Allocator>>; };
template <class Key, class T, class Compare, class Allocator>
struct impl_map<std::map<Key, T, Compare, Allocator>> { using type = map_impl<std::map<Key, T, Compare, Allocator>>; };
template <class Key, class T, class Compare, class Allocator>
struct impl_map<std::multimap<Key, T, Compare, Allocator>> { using type = map_impl<std::multimap<Key, T, Compare, Allocator>>; };
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> { using type = map_impl<std::unordered_map<Key, T, Hash, KeyEqual, Allocator>>; };
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
struct impl_map<std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>> { using type = map_impl<std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>>; };
template <class T, class Allocator>
struct impl_map<std::vector<T, Allocator>> { using type = vector_impl<std::vector<T, Allocator>>; };
template <class T, std::size_t N>
struct impl_map<std::array<T, N>> { using type = array_impl<std::array<T, N>>; };
template <class T, class Allocator>
struct impl_map<std::list<T, Allocator>> { using type = list_impl<std::list<T, Allocator>>; };
template <class T, class Allocator>
struct impl_map<std::deque<T, Allocator>> { using type = list_impl<std::deque<T, Allocator>>; };
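// The specializations above select a parsing strategy from the container
// type, for example (illustrative only):
//
//   impl_map<int>::type                        -> simple_impl<int>
//   impl_map<std::vector<std::string>>::type   -> vector_impl<std::vector<std::string>>
//   impl_map<std::map<std::string, int>>::type -> map_impl<std::map<std::string, int>>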
//---------------------------------------------------
template <class>
class wrapper;
template <class Result>
class wrapper<result<Result>> {
public:
using response_type = result<Result>;
private:
response_type* result_;
typename impl_map<Result>::type impl_;
template <class String>
bool set_if_resp3_error(resp3::basic_node<String> const& nd) noexcept
{
switch (nd.data_type) {
case resp3::type::null:
case resp3::type::simple_error:
case resp3::type::blob_error:
*result_ = error{nd.data_type, {std::cbegin(nd.value), std::cend(nd.value)}};
return true;
default:
return false;
}
}
public:
explicit wrapper(response_type* t = nullptr) : result_(t)
{
if (result_) {
result_->value() = Result{};
impl_.on_value_available(result_->value());
}
}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code& ec)
{
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
if (result_->has_error())
return;
if (set_if_resp3_error(nd))
return;
BOOST_ASSERT(result_);
impl_(result_->value(), nd, ec);
}
};
template <class T>
class wrapper<result<std::optional<T>>> {
public:
using response_type = result<std::optional<T>>;
private:
response_type* result_;
typename impl_map<T>::type impl_{};
template <class String>
bool set_if_resp3_error(resp3::basic_node<String> const& nd) noexcept
{
switch (nd.data_type) {
case resp3::type::blob_error:
case resp3::type::simple_error:
*result_ = error{nd.data_type, {std::cbegin(nd.value), std::cend(nd.value)}};
return true;
default:
return false;
}
}
public:
explicit wrapper(response_type* o = nullptr) : result_(o) {}
template <class String>
void
operator()(
resp3::basic_node<String> const& nd,
system::error_code& ec)
{
BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer");
if (result_->has_error())
return;
if (set_if_resp3_error(nd))
return;
if (nd.data_type == resp3::type::null)
return;
if (!result_->value().has_value()) {
result_->value() = T{};
impl_.on_value_available(result_->value().value());
}
impl_(result_->value().value(), nd, ec);
}
};
} // boost::redis::adapter::detail
#endif // BOOST_REDIS_ADAPTER_ADAPTERS_HPP


@@ -1,159 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_DETAIL_RESPONSE_TRAITS_HPP
#define BOOST_REDIS_ADAPTER_DETAIL_RESPONSE_TRAITS_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/adapter/detail/result_traits.hpp>
#include <boost/mp11.hpp>
#include <boost/system.hpp>
#include <tuple>
#include <limits>
#include <string_view>
#include <variant>
namespace boost::redis::adapter::detail
{
class ignore_adapter {
public:
template <class String>
void operator()(std::size_t, resp3::basic_node<String> const& nd, system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = redis::error::resp3_simple_error; break;
case resp3::type::blob_error: ec = redis::error::resp3_blob_error; break;
case resp3::type::null: ec = redis::error::resp3_null; break;
default:;
}
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
};
template <class Response>
class static_adapter {
private:
static constexpr auto size = std::tuple_size<Response>::value;
using adapter_tuple = mp11::mp_transform<adapter_t, Response>;
using variant_type = mp11::mp_rename<adapter_tuple, std::variant>;
using adapters_array_type = std::array<variant_type, size>;
adapters_array_type adapters_;
public:
explicit static_adapter(Response& r)
{
assigner<size - 1>::assign(adapters_, r);
}
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return size;}
template <class String>
void operator()(std::size_t i, resp3::basic_node<String> const& nd, system::error_code& ec)
{
using std::visit;
// I am unsure whether this should be an error or an assertion.
BOOST_ASSERT(i < adapters_.size());
visit([&](auto& arg){arg(nd, ec);}, adapters_.at(i));
}
};
template <class Vector>
class vector_adapter {
private:
using adapter_type = typename result_traits<Vector>::adapter_type;
adapter_type adapter_;
public:
explicit vector_adapter(Vector& v)
: adapter_{internal_adapt(v)}
{ }
[[nodiscard]]
auto
get_supported_response_size() const noexcept
{ return static_cast<std::size_t>(-1);}
template <class String>
void operator()(std::size_t, resp3::basic_node<String> const& nd, system::error_code& ec)
{
adapter_(nd, ec);
}
};
template <class>
struct response_traits;
template <>
struct response_traits<ignore_t> {
using response_type = ignore_t;
using adapter_type = detail::ignore_adapter;
static auto adapt(response_type&) noexcept
{ return detail::ignore_adapter{}; }
};
template <>
struct response_traits<result<ignore_t>> {
using response_type = result<ignore_t>;
using adapter_type = detail::ignore_adapter;
static auto adapt(response_type&) noexcept
{ return detail::ignore_adapter{}; }
};
template <class String, class Allocator>
struct response_traits<result<std::vector<resp3::basic_node<String>, Allocator>>> {
using response_type = result<std::vector<resp3::basic_node<String>, Allocator>>;
using adapter_type = vector_adapter<response_type>;
static auto adapt(response_type& v) noexcept
{ return adapter_type{v}; }
};
template <class ...Ts>
struct response_traits<response<Ts...>> {
using response_type = response<Ts...>;
using adapter_type = static_adapter<response_type>;
static auto adapt(response_type& r) noexcept
{ return adapter_type{r}; }
};
template <class Adapter>
class wrapper {
public:
explicit wrapper(Adapter adapter) : adapter_{adapter} {}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code& ec)
{ return adapter_(0, nd, ec); }
[[nodiscard]]
auto get_supported_response_size() const noexcept
{ return adapter_.get_supported_response_size();}
private:
Adapter adapter_;
};
template <class Adapter>
auto make_adapter_wrapper(Adapter adapter)
{
return wrapper{adapter};
}
} // boost::redis::adapter::detail
#endif // BOOST_REDIS_ADAPTER_DETAIL_RESPONSE_TRAITS_HPP


@@ -1,163 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_RESPONSE_TRAITS_HPP
#define BOOST_REDIS_ADAPTER_RESPONSE_TRAITS_HPP
#include <boost/redis/error.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/ignore.hpp>
#include <boost/redis/adapter/detail/adapters.hpp>
#include <boost/redis/adapter/result.hpp>
#include <boost/redis/adapter/ignore.hpp>
#include <boost/mp11.hpp>
#include <vector>
#include <tuple>
#include <string_view>
#include <variant>
namespace boost::redis::adapter::detail
{
/* Traits class for response objects.
*
 * Provides traits for all supported response types, i.e. all STL
 * containers and C++ built-in types.
*/
template <class Result>
struct result_traits {
using adapter_type = adapter::detail::wrapper<typename std::decay<Result>::type>;
static auto adapt(Result& r) noexcept { return adapter_type{&r}; }
};
template <>
struct result_traits<result<ignore_t>> {
using response_type = result<ignore_t>;
using adapter_type = ignore;
static auto adapt(response_type) noexcept { return adapter_type{}; }
};
template <>
struct result_traits<ignore_t> {
using response_type = ignore_t;
using adapter_type = ignore;
static auto adapt(response_type) noexcept { return adapter_type{}; }
};
template <class T>
struct result_traits<result<resp3::basic_node<T>>> {
using response_type = result<resp3::basic_node<T>>;
using adapter_type = adapter::detail::general_simple<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <class String, class Allocator>
struct result_traits<result<std::vector<resp3::basic_node<String>, Allocator>>> {
using response_type = result<std::vector<resp3::basic_node<String>, Allocator>>;
using adapter_type = adapter::detail::general_aggregate<response_type>;
static auto adapt(response_type& v) noexcept { return adapter_type{&v}; }
};
template <class T>
using adapter_t = typename result_traits<std::decay_t<T>>::adapter_type;
template<class T>
auto internal_adapt(T& t) noexcept
{ return result_traits<std::decay_t<T>>::adapt(t); }
template <std::size_t N>
struct assigner {
template <class T1, class T2>
static void assign(T1& dest, T2& from)
{
dest[N].template emplace<N>(internal_adapt(std::get<N>(from)));
assigner<N - 1>::assign(dest, from);
}
};
template <>
struct assigner<0> {
template <class T1, class T2>
static void assign(T1& dest, T2& from)
{
dest[0].template emplace<0>(internal_adapt(std::get<0>(from)));
}
};
template <class Tuple>
class static_aggregate_adapter;
template <class Tuple>
class static_aggregate_adapter<result<Tuple>> {
private:
using adapters_array_type =
std::array<
mp11::mp_rename<
mp11::mp_transform<
adapter_t, Tuple>,
std::variant>,
std::tuple_size<Tuple>::value>;
std::size_t i_ = 0;
std::size_t aggregate_size_ = 0;
adapters_array_type adapters_;
result<Tuple>* res_ = nullptr;
public:
explicit static_aggregate_adapter(result<Tuple>* r = nullptr)
{
if (r) {
res_ = r;
detail::assigner<std::tuple_size<Tuple>::value - 1>::assign(adapters_, r->value());
}
}
template <class String>
void count(resp3::basic_node<String> const& nd)
{
if (nd.depth == 1) {
if (is_aggregate(nd.data_type))
aggregate_size_ = element_multiplicity(nd.data_type) * nd.aggregate_size;
else
++i_;
return;
}
if (--aggregate_size_ == 0)
++i_;
}
template <class String>
void operator()(resp3::basic_node<String> const& nd, system::error_code& ec)
{
using std::visit;
if (nd.depth == 0) {
auto const real_aggr_size = nd.aggregate_size * element_multiplicity(nd.data_type);
if (real_aggr_size != std::tuple_size<Tuple>::value)
ec = redis::error::incompatible_size;
return;
}
visit([&](auto& arg){arg(nd, ec);}, adapters_[i_]);
count(nd);
}
};
template <class... Ts>
struct result_traits<result<std::tuple<Ts...>>>
{
using response_type = result<std::tuple<Ts...>>;
using adapter_type = static_aggregate_adapter<response_type>;
static auto adapt(response_type& r) noexcept { return adapter_type{&r}; }
};
} // boost::redis::adapter::detail
#endif // BOOST_REDIS_ADAPTER_RESPONSE_TRAITS_HPP


@@ -1,37 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_IGNORE_HPP
#define BOOST_REDIS_ADAPTER_IGNORE_HPP
#include <boost/redis/resp3/node.hpp>
#include <boost/redis/error.hpp>
#include <boost/system/error_code.hpp>
#include <string>
namespace boost::redis::adapter
{
/** @brief An adapter that ignores responses
* @ingroup high-level-api
*
* RESP3 errors won't be ignored.
*/
struct ignore {
void operator()(resp3::basic_node<std::string_view> const& nd, system::error_code& ec)
{
switch (nd.data_type) {
case resp3::type::simple_error: ec = redis::error::resp3_simple_error; break;
case resp3::type::blob_error: ec = redis::error::resp3_blob_error; break;
case resp3::type::null: ec = redis::error::resp3_null; break;
default:;
}
}
};
} // boost::redis::adapter
#endif // BOOST_REDIS_ADAPTER_IGNORE_HPP


@@ -1,81 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_ADAPTER_RESULT_HPP
#define BOOST_REDIS_ADAPTER_RESULT_HPP
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/error.hpp>
#include <boost/system/result.hpp>
#include <string>
namespace boost::redis::adapter
{
/** @brief Stores any resp3 error
* @ingroup high-level-api
*/
struct error {
/// RESP3 error data type.
resp3::type data_type = resp3::type::invalid;
/// Diagnostic error message sent by Redis.
std::string diagnostic;
};
/** @brief Compares two error objects for equality
* @relates error
*
* @param a Left hand side error object.
* @param b Right hand side error object.
*/
inline bool operator==(error const& a, error const& b)
{
return a.data_type == b.data_type && a.diagnostic == b.diagnostic;
}
/** @brief Compares two error objects for difference
* @relates error
*
* @param a Left hand side error object.
* @param b Right hand side error object.
*/
inline bool operator!=(error const& a, error const& b)
{
return !(a == b);
}
/** @brief Stores the response to an individual Redis command
* @ingroup high-level-api
*/
template <class Value>
using result = system::result<Value, error>;
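/* Typical inspection of a result<T> (a sketch; `r`, `use` and `log` are
 * assumed names):
 *
 * @code
 * boost::redis::adapter::result<int> r = 42;
 * if (r.has_value())
 *    use(r.value());            // Value sent by the server.
 * else
 *    log(r.error().diagnostic); // RESP3 error sent by the server.
 * @endcode
 */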
BOOST_NORETURN inline void
throw_exception_from_error(error const & e, boost::source_location const &)
{
system::error_code ec;
switch (e.data_type) {
case resp3::type::simple_error:
ec = redis::error::resp3_simple_error;
break;
case resp3::type::blob_error:
ec = redis::error::resp3_blob_error;
break;
case resp3::type::null:
ec = redis::error::resp3_null;
break;
default:
BOOST_ASSERT_MSG(false, "Unexpected data type.");
}
throw system::system_error(ec, e.diagnostic);
}
} // boost::redis::adapter
#endif // BOOST_REDIS_ADAPTER_RESULT_HPP


@@ -1,85 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONFIG_HPP
#define BOOST_REDIS_CONFIG_HPP
#include <string>
#include <chrono>
#include <optional>
namespace boost::redis
{
/** @brief Address of a Redis server
* @ingroup high-level-api
*/
struct address {
/// Redis host.
std::string host = "127.0.0.1";
/// Redis port.
std::string port = "6379";
};
/** @brief Configure parameters used by the connection classes
* @ingroup high-level-api
*/
struct config {
/// Uses SSL instead of a plain connection.
bool use_ssl = false;
/// Address of the Redis server.
address addr = address{"127.0.0.1", "6379"};
/** @brief Username passed to the
* [HELLO](https://redis.io/commands/hello/) command. If left
* empty `HELLO` will be sent without authentication parameters.
*/
std::string username = "default";
/** @brief Password passed to the
* [HELLO](https://redis.io/commands/hello/) command. If left
* empty `HELLO` will be sent without authentication parameters.
*/
std::string password;
/// Client name parameter of the [HELLO](https://redis.io/commands/hello/) command.
std::string clientname = "Boost.Redis";
/// Database that will be passed to the [SELECT](https://redis.io/commands/select/) command.
std::optional<int> database_index = 0;
/// Message used by the health-checker in `boost::redis::connection::async_run`.
std::string health_check_id = "Boost.Redis";
/// Logger prefix, see `boost::redis::logger`.
std::string log_prefix = "(Boost.Redis) ";
/// Time the resolve operation is allowed to last.
std::chrono::steady_clock::duration resolve_timeout = std::chrono::seconds{10};
/// Time the connect operation is allowed to last.
std::chrono::steady_clock::duration connect_timeout = std::chrono::seconds{10};
/// Time the SSL handshake operation is allowed to last.
std::chrono::steady_clock::duration ssl_handshake_timeout = std::chrono::seconds{10};
/** @brief Health check interval.
*
* To disable health-checks pass zero as duration.
*/
std::chrono::steady_clock::duration health_check_interval = std::chrono::seconds{2};
/** @brief Time waited before trying a reconnection.
*
* To disable reconnection pass zero as duration.
*/
std::chrono::steady_clock::duration reconnect_wait_interval = std::chrono::seconds{1};
};
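/* A minimal configuration sketch (illustrative only; the host name is an
 * assumption):
 *
 * @code
 * boost::redis::config cfg;
 * cfg.addr = {"redis.example.com", "6379"};
 * cfg.password = "secret";
 * cfg.health_check_interval = std::chrono::seconds{5};
 * @endcode
 */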
} // boost::redis
#endif // BOOST_REDIS_CONFIG_HPP


@@ -1,444 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONNECTION_HPP
#define BOOST_REDIS_CONNECTION_HPP
#include <boost/redis/detail/connection_base.hpp>
#include <boost/redis/logger.hpp>
#include <boost/redis/config.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/any_completion_handler.hpp>
#include <chrono>
#include <memory>
#include <limits>
namespace boost::redis {
namespace detail
{
template <class Connection, class Logger>
struct reconnection_op {
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {})
{
BOOST_ASIO_CORO_REENTER (coro_) for (;;)
{
BOOST_ASIO_CORO_YIELD
conn_->impl_.async_run(conn_->cfg_, logger_, std::move(self));
conn_->cancel(operation::receive);
logger_.on_connection_lost(ec);
if (!conn_->will_reconnect() || is_cancelled(self)) {
conn_->cancel(operation::reconnection);
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
conn_->timer_.expires_after(conn_->cfg_.reconnect_wait_interval);
BOOST_ASIO_CORO_YIELD
conn_->timer_.async_wait(std::move(self));
BOOST_REDIS_CHECK_OP0(;)
if (!conn_->will_reconnect()) {
self.complete(asio::error::operation_aborted);
return;
}
conn_->reset_stream();
}
}
};
} // detail
/** @brief A connection to the Redis server, with optional SSL support.
* @ingroup high-level-api
*
* This class keeps a healthy connection to the Redis instance where
* commands can be sent at any time. For more details, please see the
* documentation of each individual function.
*
 * @tparam Executor The executor type.
*
*/
template <class Executor>
class basic_connection {
public:
/// Executor type.
using executor_type = Executor;
/// Returns the underlying executor.
executor_type get_executor() noexcept
{ return impl_.get_executor(); }
/// Rebinds the socket type to another executor.
template <class Executor1>
struct rebind_executor
{
/// The connection type when rebound to the specified executor.
using other = basic_connection<Executor1>;
};
/** @brief Constructor
*
* @param ex Executor on which connection operation will run.
* @param ctx SSL context.
* @param max_read_size Maximum read size that is passed to
* the internal `asio::dynamic_buffer` constructor.
*/
explicit
basic_connection(
executor_type ex,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)())
: impl_{ex, std::move(ctx), max_read_size}
, timer_{ex}
{ }
/// Constructs from a context.
explicit
basic_connection(
asio::io_context& ioc,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)())
: basic_connection(ioc.get_executor(), std::move(ctx), max_read_size)
{ }
/** @brief Starts underlying connection operations.
*
* This member function provides the following functionality
*
* 1. Resolve the address passed on `boost::redis::config::addr`.
* 2. Connect to one of the results obtained in the resolve operation.
* 3. Send a [HELLO](https://redis.io/commands/hello/) command where each of its parameters are read from `cfg`.
* 4. Start a health-check operation where ping commands are sent
* at intervals specified in
* `boost::redis::config::health_check_interval`. The message passed to
* `PING` will be `boost::redis::config::health_check_id`. Passing a
 * timeout with value zero will disable health-checks. If the Redis
 * server does not respond to a health-check within twice the interval
 * specified here, it will be considered unresponsive; the connection
 * will be closed and a new one will be established.
* 5. Starts read and write operations with the Redis
* server. More specifically it will trigger the write of all
* requests i.e. calls to `async_exec` that happened prior to this
* call.
*
* When a connection is lost for any reason, a new one is
 * established automatically. To disable reconnection call
* `boost::redis::connection::cancel(operation::reconnection)`.
*
 * @param cfg Configuration parameters.
* @param l Logger object. The interface expected is specified in the class `boost::redis::logger`.
* @param token Completion token.
*
* The completion token must have the following signature
*
* @code
* void f(system::error_code);
* @endcode
*
* For example on how to call this function refer to
* cpp20_intro.cpp or any other example.
*/
template <
class Logger = logger,
class CompletionToken = asio::default_completion_token_t<executor_type>>
auto
async_run(
config const& cfg = {},
Logger l = Logger{},
CompletionToken token = CompletionToken{})
{
using this_type = basic_connection<executor_type>;
cfg_ = cfg;
l.set_prefix(cfg_.log_prefix);
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(detail::reconnection_op<this_type, Logger>{this, l}, token, timer_);
}
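/* Call sketch (illustrative only; `conn` is assumed to be a connection
 * object that outlives the operation):
 *
 * @code
 * boost::redis::config cfg;
 * conn.async_run(cfg, boost::redis::logger{}, boost::asio::detached);
 * @endcode
 */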
/** @brief Receives server side pushes asynchronously.
*
 * When a push arrives and there is no `async_receive` operation in
 * progress, the processing of further data (pushes, requests, and
 * responses) is paused until `async_receive` is called again. Apps
 * will usually want to call `async_receive` in a loop.
*
* To cancel an ongoing receive operation apps should call
* `connection::cancel(operation::receive)`.
*
* @param token Completion token.
*
* For an example see cpp20_subscriber.cpp. The completion token must
* have the following signature
*
* @code
* void f(system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the push received in
* bytes.
*/
template <class CompletionToken = asio::default_completion_token_t<executor_type>>
auto async_receive(CompletionToken token = CompletionToken{})
{ return impl_.async_receive(std::move(token)); }
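/* Receive-loop sketch (a coroutine context and the names `conn` and
 * `resp` are assumptions; `generic_response` comes from
 * <boost/redis/response.hpp>):
 *
 * @code
 * boost::redis::generic_response resp;
 * conn.set_receive_response(resp);
 * for (;;) {
 *    co_await conn.async_receive(boost::asio::use_awaitable);
 *    // ... consume resp.value() ...
 *    resp.value().clear();
 * }
 * @endcode
 */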
/** @brief Receives server pushes synchronously without blocking.
*
* Receives a server push synchronously by calling `try_receive` on
* the underlying channel. If the operation fails because
* `try_receive` returns `false`, `ec` will be set to
* `boost::redis::error::sync_receive_push_failed`.
*
* @param ec Contains the error if any occurred.
*
* @returns The number of bytes read from the socket.
*/
std::size_t receive(system::error_code& ec)
{
return impl_.receive(ec);
}
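/* Illustrative non-blocking receive (names are assumptions):
 *
 * @code
 * boost::system::error_code ec;
 * std::size_t n = conn.receive(ec);
 * if (ec == boost::redis::error::sync_receive_push_failed) {
 *    // No push was available at this point.
 * }
 * @endcode
 */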
template <
class Response = ignore_t,
class CompletionToken = asio::default_completion_token_t<executor_type>
>
[[deprecated("Set the response with set_receive_response and use the other overload.")]]
auto
async_receive(
Response& response,
CompletionToken token = CompletionToken{})
{
return impl_.async_receive(response, token);
}
/** @brief Executes commands on the Redis server asynchronously.
*
* This function sends a request to the Redis server and waits for
* the responses to each individual command in the request. If the
* request contains only commands that don't expect a response,
* the completion occurs after it has been written to the
* underlying stream. Multiple concurrent calls to this function
* will be automatically queued by the implementation.
*
* @param req Request.
* @param resp Response.
* @param token Completion token.
*
* For an example see cpp20_echo_server.cpp. The completion token must
* have the following signature
*
* @code
* void f(system::error_code, std::size_t);
* @endcode
*
* Where the second parameter is the size of the response received
* in bytes.
*/
template <
class Response = ignore_t,
class CompletionToken = asio::default_completion_token_t<executor_type>
>
auto
async_exec(
request const& req,
Response& resp = ignore,
CompletionToken&& token = CompletionToken{})
{
return impl_.async_exec(req, resp, std::forward<CompletionToken>(token));
}
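/* Exec sketch (a coroutine context and the name `conn` are assumptions):
 *
 * @code
 * boost::redis::request req;
 * req.push("PING", "Hello");
 * boost::redis::response<std::string> resp;
 * co_await conn.async_exec(req, resp, boost::asio::use_awaitable);
 * // std::get<0>(resp).value() now holds "Hello".
 * @endcode
 */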
/** @brief Cancel operations.
*
* @li `operation::exec`: Cancels operations started with
* `async_exec`. Affects only requests that haven't been written
* yet.
* @li operation::run: Cancels the `async_run` operation.
* @li operation::receive: Cancels any ongoing calls to `async_receive`.
* @li operation::all: Cancels all operations listed above.
*
 * @param op The operation to be cancelled.
*/
void cancel(operation op = operation::all)
{
switch (op) {
case operation::reconnection:
case operation::all:
cfg_.reconnect_wait_interval = std::chrono::seconds::zero();
timer_.cancel();
break;
default: /* ignore */;
}
impl_.cancel(op);
}
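/* For example, to stop automatic reconnection while keeping the current
 * session running (illustrative):
 *
 * @code
 * conn.cancel(boost::redis::operation::reconnection);
 * @endcode
 */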
/// Returns true if the connection will try to reconnect after a disconnection.
bool will_reconnect() const noexcept
{ return cfg_.reconnect_wait_interval != std::chrono::seconds::zero();}
/// Returns the ssl context.
auto const& get_ssl_context() const noexcept
{ return impl_.get_ssl_context();}
/// Resets the underlying stream.
void reset_stream()
{ impl_.reset_stream(); }
/// Returns a reference to the next layer.
auto& next_layer() noexcept
{ return impl_.next_layer(); }
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept
{ return impl_.next_layer(); }
/// Sets the response object of `async_receive` operations.
template <class Response>
void set_receive_response(Response& response)
{ impl_.set_receive_response(response); }
/// Returns connection usage information.
usage get_usage() const noexcept
{ return impl_.get_usage(); }
private:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
template <class, class> friend struct detail::reconnection_op;
config cfg_;
detail::connection_base<executor_type> impl_;
timer_type timer_;
};
/** \brief A basic_connection that type-erases the executor.
* \ingroup high-level-api
*
* This connection type uses the asio::any_io_executor and
* asio::any_completion_token to reduce compilation times.
*
 * For documentation of each member function see
* `boost::redis::basic_connection`.
*/
class connection {
public:
/// Executor type.
using executor_type = asio::any_io_executor;
/// Constructs from an executor.
explicit
connection(
executor_type ex,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)());
/// Constructs from a context.
explicit
connection(
asio::io_context& ioc,
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)());
/// Returns the underlying executor.
executor_type get_executor() noexcept
{ return impl_.get_executor(); }
/// Calls `boost::redis::basic_connection::async_run`.
template <class CompletionToken>
auto async_run(config const& cfg, logger l, CompletionToken token)
{
return asio::async_initiate<
CompletionToken, void(boost::system::error_code)>(
[](auto handler, connection* self, config const* cfg, logger l)
{
self->async_run_impl(*cfg, l, std::move(handler));
}, token, this, &cfg, l);
}
/// Calls `boost::redis::basic_connection::async_receive`.
template <class Response, class CompletionToken>
[[deprecated("Set the response with set_receive_response and use the other overload.")]]
auto async_receive(Response& response, CompletionToken token)
{
return impl_.async_receive(response, std::move(token));
}
/// Calls `boost::redis::basic_connection::async_receive`.
template <class CompletionToken>
auto async_receive(CompletionToken token)
{ return impl_.async_receive(std::move(token)); }
/// Calls `boost::redis::basic_connection::receive`.
std::size_t receive(system::error_code& ec)
{
return impl_.receive(ec);
}
/// Calls `boost::redis::basic_connection::async_exec`.
template <class Response, class CompletionToken>
auto async_exec(request const& req, Response& resp, CompletionToken token)
{
return impl_.async_exec(req, resp, std::move(token));
}
/// Calls `boost::redis::basic_connection::cancel`.
void cancel(operation op = operation::all);
/// Calls `boost::redis::basic_connection::will_reconnect`.
bool will_reconnect() const noexcept
{ return impl_.will_reconnect();}
/// Calls `boost::redis::basic_connection::next_layer`.
auto& next_layer() noexcept
{ return impl_.next_layer(); }
/// Calls `boost::redis::basic_connection::next_layer`.
auto const& next_layer() const noexcept
{ return impl_.next_layer(); }
/// Calls `boost::redis::basic_connection::reset_stream`.
void reset_stream()
{ impl_.reset_stream();}
/// Sets the response object of `async_receive` operations.
template <class Response>
void set_receive_response(Response& response)
{ impl_.set_receive_response(response); }
/// Returns connection usage information.
usage get_usage() const noexcept
{ return impl_.get_usage(); }
/// Returns the ssl context.
auto const& get_ssl_context() const noexcept
{ return impl_.get_ssl_context();}
private:
void
async_run_impl(
config const& cfg,
logger l,
asio::any_completion_handler<void(boost::system::error_code)> token);
basic_connection<executor_type> impl_;
};
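/* Construction sketch for the type-erased connection (illustrative):
 *
 * @code
 * boost::asio::io_context ioc;
 * boost::redis::connection conn{ioc};
 * // conn.async_run(...) and conn.async_exec(...) as shown above.
 * @endcode
 */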
} // boost::redis
#endif // BOOST_REDIS_CONNECTION_HPP


@@ -1,964 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONNECTION_BASE_HPP
#define BOOST_REDIS_CONNECTION_BASE_HPP
#include <boost/redis/adapter/adapt.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/config.hpp>
#include <boost/redis/detail/runner.hpp>
#include <boost/redis/usage.hpp>
#include <boost/system.hpp>
#include <boost/asio/basic_stream_socket.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/write.hpp>
#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <algorithm>
#include <array>
#include <chrono>
#include <deque>
#include <memory>
#include <string_view>
#include <type_traits>
#include <functional>
namespace boost::redis::detail
{
template <class DynamicBuffer>
std::string_view buffer_view(DynamicBuffer buf) noexcept
{
char const* start = static_cast<char const*>(buf.data(0, buf.size()).data());
return std::string_view{start, std::size(buf)};
}
template <class AsyncReadStream, class DynamicBuffer>
class append_some_op {
private:
AsyncReadStream& stream_;
DynamicBuffer buf_;
std::size_t size_ = 0;
std::size_t tmp_ = 0;
asio::coroutine coro_{};
public:
append_some_op(AsyncReadStream& stream, DynamicBuffer buf, std::size_t size)
: stream_ {stream}
, buf_ {std::move(buf)}
, size_{size}
{ }
template <class Self>
void operator()( Self& self
, system::error_code ec = {}
, std::size_t n = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
tmp_ = buf_.size();
buf_.grow(size_);
BOOST_ASIO_CORO_YIELD
stream_.async_read_some(buf_.data(tmp_, size_), std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
buf_.shrink(buf_.size() - tmp_ - n);
self.complete({}, n);
}
}
};
template <class AsyncReadStream, class DynamicBuffer, class CompletionToken>
auto
async_append_some(
AsyncReadStream& stream,
DynamicBuffer buffer,
std::size_t size,
CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code, std::size_t)
>(append_some_op<AsyncReadStream, DynamicBuffer> {stream, buffer, size}, token, stream);
}
template <class Conn>
struct exec_op {
using req_info_type = typename Conn::req_info;
using adapter_type = typename Conn::adapter_type;
Conn* conn_ = nullptr;
std::shared_ptr<req_info_type> info_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()(Self& self , system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro)
{
// Check whether the user wants to wait for the connection to
// be established.
if (info_->req_->get_config().cancel_if_not_connected && !conn_->is_open()) {
BOOST_ASIO_CORO_YIELD
asio::post(std::move(self));
return self.complete(error::not_connected, 0);
}
conn_->add_request_info(info_);
EXEC_OP_WAIT:
BOOST_ASIO_CORO_YIELD
info_->async_wait(std::move(self));
if (info_->ec_) {
self.complete(info_->ec_, 0);
return;
}
if (info_->stop_requested()) {
// Don't have to call remove_request as it has already
// been done by cancel(exec).
return self.complete(asio::error::operation_aborted, 0);
}
if (is_cancelled(self)) {
if (!info_->is_waiting()) {
using c_t = asio::cancellation_type;
auto const c = self.get_cancellation_state().cancelled();
if ((c & c_t::terminal) != c_t::none) {
// Cancellation requires closing the connection
// otherwise it stays in an inconsistent state.
conn_->cancel(operation::run);
return self.complete(asio::error::operation_aborted, 0);
} else {
// Can't implement other cancellation types, ignoring.
self.get_cancellation_state().clear();
// TODO: Find out a better way to ignore
// cancellation.
goto EXEC_OP_WAIT;
}
} else {
// Cancellation can be honored.
conn_->remove_request(info_);
self.complete(asio::error::operation_aborted, 0);
return;
}
}
self.complete(info_->ec_, info_->read_size_);
}
}
};
template <class Conn, class Logger>
struct run_op {
Conn* conn = nullptr;
Logger logger_;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, system::error_code ec0 = {}
, system::error_code ec1 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
conn->reset();
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return conn->reader(logger_, token);},
[this](auto token) { return conn->writer(logger_, token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
logger_.trace("run-op: canceled. Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
logger_.on_run(ec0, ec1);
switch (order[0]) {
case 0: self.complete(ec0); break;
case 1: self.complete(ec1); break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Conn, class Logger>
struct writer_op {
Conn* conn_;
Logger logger_;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, system::error_code ec = {}
, std::size_t n = 0)
{
ignore_unused(n);
BOOST_ASIO_CORO_REENTER (coro) for (;;)
{
while (conn_->coalesce_requests()) {
if (conn_->use_ssl())
BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));
else
BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer().next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));
logger_.on_write(ec, conn_->write_buffer_);
if (ec) {
logger_.trace("writer-op: error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
if (is_cancelled(self)) {
logger_.trace("writer-op: canceled. Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
conn_->on_write();
// A socket.close() may have been called while a
// successful write might have already been queued, so we
// have to check here before proceeding.
if (!conn_->is_open()) {
logger_.trace("writer-op: canceled (2). Exiting ...");
self.complete({});
return;
}
}
BOOST_ASIO_CORO_YIELD
conn_->writer_timer_.async_wait(std::move(self));
if (!conn_->is_open() || is_cancelled(self)) {
logger_.trace("writer-op: canceled (3). Exiting ...");
// Notice this is not an error of the op, stopping was
// requested from the outside, so we complete with
// success.
self.complete({});
return;
}
}
}
};
template <class Conn, class Logger>
struct reader_op {
using parse_result = typename Conn::parse_result;
using parse_ret_type = typename Conn::parse_ret_type;
Conn* conn_;
Logger logger_;
parse_ret_type res_{parse_result::resp, 0};
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, system::error_code ec = {}
, std::size_t n = 0)
{
ignore_unused(n);
BOOST_ASIO_CORO_REENTER (coro) for (;;)
{
// Appends some data to the buffer if necessary.
if ((res_.first == parse_result::needs_more) || std::empty(conn_->read_buffer_)) {
if (conn_->use_ssl()) {
BOOST_ASIO_CORO_YIELD
async_append_some(
conn_->next_layer(),
conn_->dbuf_,
conn_->get_suggested_buffer_growth(),
std::move(self));
} else {
BOOST_ASIO_CORO_YIELD
async_append_some(
conn_->next_layer().next_layer(),
conn_->dbuf_,
conn_->get_suggested_buffer_growth(),
std::move(self));
}
logger_.on_read(ec, n);
// EOF is not treated as an error.
if (ec == asio::error::eof) {
logger_.trace("reader-op: EOF received. Exiting ...");
conn_->cancel(operation::run);
return self.complete({}); // EOFINAE: EOF is not an error.
}
// The connection is not viable after an error.
if (ec) {
logger_.trace("reader-op: error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
// Somebody might have canceled, implicitly or explicitly,
// while we were suspended and after queueing, so we have to
// check.
if (!conn_->is_open() || is_cancelled(self)) {
logger_.trace("reader-op: canceled. Exiting ...");
self.complete(ec);
return;
}
}
res_ = conn_->on_read(buffer_view(conn_->dbuf_), ec);
if (ec) {
logger_.trace("reader-op: parse error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
if (res_.first == parse_result::push) {
if (!conn_->receive_channel_.try_send(ec, res_.second)) {
BOOST_ASIO_CORO_YIELD
conn_->receive_channel_.async_send(ec, res_.second, std::move(self));
}
if (ec) {
logger_.trace("reader-op: error. Exiting ...");
conn_->cancel(operation::run);
self.complete(ec);
return;
}
if (!conn_->is_open() || is_cancelled(self)) {
logger_.trace("reader-op: canceled (2). Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
}
}
}
};
/** @brief Base class for high level Redis asynchronous connections.
* @ingroup high-level-api
*
* @tparam Executor The executor type.
*
*/
template <class Executor>
class connection_base {
public:
/// Executor type
using executor_type = Executor;
/// Type of the next layer
using next_layer_type = asio::ssl::stream<asio::basic_stream_socket<asio::ip::tcp, Executor>>;
using clock_type = std::chrono::steady_clock;
using clock_traits_type = asio::wait_traits<clock_type>;
using timer_type = asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;
using this_type = connection_base<Executor>;
/// Constructs from an executor.
connection_base(
executor_type ex,
asio::ssl::context ctx,
std::size_t max_read_size)
: ctx_{std::move(ctx)}
, stream_{std::make_unique<next_layer_type>(ex, ctx_)}
, writer_timer_{ex}
, receive_channel_{ex, 256}
, runner_{ex, {}}
, dbuf_{read_buffer_, max_read_size}
{
set_receive_response(ignore);
writer_timer_.expires_at((std::chrono::steady_clock::time_point::max)());
}
/// Returns the ssl context.
auto const& get_ssl_context() const noexcept
{ return ctx_;}
/// Resets the underlying stream.
void reset_stream()
{
stream_ = std::make_unique<next_layer_type>(writer_timer_.get_executor(), ctx_);
}
/// Returns a reference to the next layer.
auto& next_layer() noexcept { return *stream_; }
/// Returns a const reference to the next layer.
auto const& next_layer() const noexcept { return *stream_; }
/// Returns the associated executor.
auto get_executor() {return writer_timer_.get_executor();}
/// Cancels specific operations.
void cancel(operation op)
{
runner_.cancel(op);
if (op == operation::all) {
cancel_impl(operation::run);
cancel_impl(operation::receive);
cancel_impl(operation::exec);
return;
}
cancel_impl(op);
}
template <class Response, class CompletionToken>
auto async_exec(request const& req, Response& resp, CompletionToken token)
{
using namespace boost::redis::adapter;
auto f = boost_redis_adapt(resp);
BOOST_ASSERT_MSG(req.get_expected_responses() <= f.get_supported_response_size(), "Request and response have incompatible sizes.");
auto info = std::make_shared<req_info>(req, f, get_executor());
return asio::async_compose
< CompletionToken
, void(system::error_code, std::size_t)
>(exec_op<this_type>{this, info}, token, writer_timer_);
}
template <class Response, class CompletionToken>
[[deprecated("Set the response with set_receive_response and use the other overload.")]]
auto async_receive(Response& response, CompletionToken token)
{
set_receive_response(response);
return receive_channel_.async_receive(std::move(token));
}
template <class CompletionToken>
auto async_receive(CompletionToken token)
{ return receive_channel_.async_receive(std::move(token)); }
std::size_t receive(system::error_code& ec)
{
std::size_t size = 0;
auto f = [&](system::error_code const& ec2, std::size_t n)
{
ec = ec2;
size = n;
};
auto const res = receive_channel_.try_receive(f);
if (ec)
return 0;
if (!res)
ec = error::sync_receive_push_failed;
return size;
}
template <class Logger, class CompletionToken>
auto async_run(config const& cfg, Logger l, CompletionToken token)
{
runner_.set_config(cfg);
l.set_prefix(runner_.get_config().log_prefix);
return runner_.async_run(*this, l, std::move(token));
}
template <class Response>
void set_receive_response(Response& response)
{
using namespace boost::redis::adapter;
auto g = boost_redis_adapt(response);
receive_adapter_ = adapter::detail::make_adapter_wrapper(g);
}
usage get_usage() const noexcept
{ return usage_; }
auto run_is_canceled() const noexcept
{ return cancel_run_called_; }
private:
using receive_channel_type = asio::experimental::channel<executor_type, void(system::error_code, std::size_t)>;
using runner_type = runner<executor_type>;
using adapter_type = std::function<void(std::size_t, resp3::basic_node<std::string_view> const&, system::error_code&)>;
using receiver_adapter_type = std::function<void(resp3::basic_node<std::string_view> const&, system::error_code&)>;
using exec_notifier_type = receive_channel_type;
auto use_ssl() const noexcept
{ return runner_.get_config().use_ssl;}
auto cancel_on_conn_lost() -> std::size_t
{
// Must return false if the request should be removed.
auto cond = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
if (ptr->is_waiting()) {
return !ptr->req_->get_config().cancel_on_connection_lost;
} else {
return !ptr->req_->get_config().cancel_if_unresponded;
}
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return ptr->mark_waiting();
});
return ret;
}
auto cancel_unwritten_requests() -> std::size_t
{
auto f = [](auto const& ptr)
{
BOOST_ASSERT(ptr != nullptr);
return !ptr->is_waiting();
};
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);
auto const ret = std::distance(point, std::end(reqs_));
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->stop();
});
reqs_.erase(point, std::end(reqs_));
return ret;
}
void cancel_impl(operation op)
{
switch (op) {
case operation::exec:
{
cancel_unwritten_requests();
} break;
case operation::run:
{
// Protects the code below from being called more than
// once, see https://github.com/boostorg/redis/issues/181
if (std::exchange(cancel_run_called_, true)) {
return;
}
close();
writer_timer_.cancel();
receive_channel_.cancel();
cancel_on_conn_lost();
} break;
case operation::receive:
{
receive_channel_.cancel();
} break;
default: /* ignore */;
}
}
void on_write()
{
// We have to clear the payload right after writing it to use it
// as a flag that indicates there is no ongoing write.
write_buffer_.clear();
// Notice this must come before the for-each below.
cancel_push_requests();
// There is a small optimization possible here: traverse only the
// partition of unwritten requests instead of all of them.
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
BOOST_ASSERT_MSG(ptr != nullptr, "Expects non-null pointer.");
if (ptr->is_staged()) {
ptr->mark_written();
}
});
}
struct req_info {
public:
using node_type = resp3::basic_node<std::string_view>;
using wrapped_adapter_type = std::function<void(node_type const&, system::error_code&)>;
explicit req_info(request const& req, adapter_type adapter, executor_type ex)
: notifier_{ex, 1}
, req_{&req}
, adapter_{}
, expected_responses_{req.get_expected_responses()}
, status_{status::waiting}
, ec_{{}}
, read_size_{0}
{
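// Wrap the user-supplied adapter so that it also receives the index of
// the command (within the request) whose response is currently being
// parsed.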
adapter_ = [this, adapter](node_type const& nd, system::error_code& ec)
{
auto const i = req_->get_expected_responses() - expected_responses_;
adapter(i, nd, ec);
};
}
auto proceed()
{
notifier_.try_send(std::error_code{}, 0);
}
void stop()
{
notifier_.close();
}
[[nodiscard]] auto is_waiting() const noexcept
{ return status_ == status::waiting; }
[[nodiscard]] auto is_written() const noexcept
{ return status_ == status::written; }
[[nodiscard]] auto is_staged() const noexcept
{ return status_ == status::staged; }
void mark_written() noexcept
{ status_ = status::written; }
void mark_staged() noexcept
{ status_ = status::staged; }
void mark_waiting() noexcept
{ status_ = status::waiting; }
[[nodiscard]] auto stop_requested() const noexcept
{ return !notifier_.is_open();}
template <class CompletionToken>
auto async_wait(CompletionToken token)
{
return notifier_.async_receive(std::move(token));
}
//private:
enum class status
{ waiting
, staged
, written
};
exec_notifier_type notifier_;
request const* req_;
wrapped_adapter_type adapter_;
// Contains the number of commands that haven't been read yet.
std::size_t expected_responses_;
status status_;
system::error_code ec_;
std::size_t read_size_;
};
void remove_request(std::shared_ptr<req_info> const& info)
{
reqs_.erase(std::remove(std::begin(reqs_), std::end(reqs_), info), std::end(reqs_));
}
using reqs_type = std::deque<std::shared_ptr<req_info>>;
template <class, class> friend struct reader_op;
template <class, class> friend struct writer_op;
template <class, class> friend struct run_op;
template <class> friend struct exec_op;
template <class, class, class> friend struct run_all_op;
void cancel_push_requests()
{
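// Requests that have just been written (still marked as staged at this
// point) and expect no responses, e.g. subscribe-only requests, are
// completed and removed here since no direct response will arrive for
// them.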
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0);
});
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->proceed();
});
reqs_.erase(point, std::end(reqs_));
}
[[nodiscard]] bool is_writing() const noexcept
{
return !write_buffer_.empty();
}
void add_request_info(std::shared_ptr<req_info> const& info)
{
reqs_.push_back(info);
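// A request with HELLO priority (the handshake issued by the runner) is
// rotated to the front of the waiting partition so it is written before
// the other requests that are still waiting.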
if (info->req_->has_hello_priority()) {
auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
return e->is_waiting();
});
std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
}
if (is_open() && !is_writing())
writer_timer_.cancel();
}
template <class CompletionToken, class Logger>
auto reader(Logger l, CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(reader_op<this_type, Logger>{this, l}, token, writer_timer_);
}
template <class CompletionToken, class Logger>
auto writer(Logger l, CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(writer_op<this_type, Logger>{this, l}, token, writer_timer_);
}
template <class Logger, class CompletionToken>
auto async_run_lean(config const& cfg, Logger l, CompletionToken token)
{
runner_.set_config(cfg);
l.set_prefix(runner_.get_config().log_prefix);
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(run_op<this_type, Logger>{this, l}, token, writer_timer_);
}
[[nodiscard]] bool coalesce_requests()
{
// Coalesces the requests and marks them staged. After a
// successful write staged requests will be marked as written.
auto const point = std::partition_point(std::cbegin(reqs_), std::cend(reqs_), [](auto const& ri) {
return !ri->is_waiting();
});
std::for_each(point, std::cend(reqs_), [this](auto const& ri) {
// Stage the request.
write_buffer_ += ri->req_->payload();
ri->mark_staged();
usage_.commands_sent += ri->expected_responses_;
});
usage_.bytes_sent += std::size(write_buffer_);
return point != std::cend(reqs_);
}
bool is_waiting_response() const noexcept
{
if (std::empty(reqs_))
return false;
// Under load and on low-latency networks we might start
// receiving responses before the write operation has completed
// and the request is still marked as staged and not written. See
// https://github.com/boostorg/redis/issues/170
return !reqs_.front()->is_waiting();
}
void close()
{
if (stream_->next_layer().is_open()) {
system::error_code ec;
stream_->next_layer().close(ec);
}
}
auto is_open() const noexcept { return stream_->next_layer().is_open(); }
auto& lowest_layer() noexcept { return stream_->lowest_layer(); }
auto is_next_push()
{
BOOST_ASSERT(!read_buffer_.empty());
// Useful links to understand the heuristics below.
//
// - https://github.com/redis/redis/issues/11784
// - https://github.com/redis/redis/issues/6426
// - https://github.com/boostorg/redis/issues/170
// The message's resp3 type is a push.
if (resp3::to_type(read_buffer_.front()) == resp3::type::push)
return true;
// This is a non-push type and the request queue is empty. I have
// noticed this is possible, for example with -MISCONF. I don't
// know why such messages are not sent with a push type, which
// would let us distinguish them from responses to commands. If we
// are lucky enough to receive them while the command queue is
// empty they can be treated as server pushes, otherwise it is
// impossible to handle them properly.
if (reqs_.empty())
return true;
// The request does not expect any response but we got one. This
// may happen if, for example, SUBSCRIBE was sent with the wrong
// syntax.
if (reqs_.front()->expected_responses_ == 0)
return true;
// Added to deal with MONITOR and also to fix issue 170, which
// happens under load and on low-latency networks, where we
// might start receiving responses before the write operation
// has completed and the request is still marked as staged and
// not written.
return reqs_.front()->is_waiting();
}
auto get_suggested_buffer_growth() const noexcept
{
return parser_.get_suggested_buffer_growth(4096);
}
enum class parse_result { needs_more, push, resp };
using parse_ret_type = std::pair<parse_result, std::size_t>;
parse_ret_type on_finish_parsing(parse_result t)
{
if (t == parse_result::push) {
usage_.pushes_received += 1;
usage_.push_bytes_received += parser_.get_consumed();
} else {
usage_.responses_received += 1;
usage_.response_bytes_received += parser_.get_consumed();
}
on_push_ = false;
dbuf_.consume(parser_.get_consumed());
auto const res = std::make_pair(t, parser_.get_consumed());
parser_.reset();
return res;
}
parse_ret_type on_read(std::string_view data, system::error_code& ec)
{
// We arrive here in two states:
//
// 1. While we are parsing a message. In this case we
// don't want to determine the type of the message in the
// buffer (i.e. response vs push) but leave it untouched
// until the parsing of a complete message ends.
//
// 2. On a new message, in which case we have to determine
// whether the next message is a push or a response.
//
if (!on_push_) // Prepare for new message.
on_push_ = is_next_push();
if (on_push_) {
if (!resp3::parse(parser_, data, receive_adapter_, ec))
return std::make_pair(parse_result::needs_more, 0);
if (ec)
return std::make_pair(parse_result::push, 0);
return on_finish_parsing(parse_result::push);
}
BOOST_ASSERT_MSG(is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)");
BOOST_ASSERT(!reqs_.empty());
BOOST_ASSERT(reqs_.front() != nullptr);
BOOST_ASSERT(reqs_.front()->expected_responses_ != 0);
if (!resp3::parse(parser_, data, reqs_.front()->adapter_, ec))
return std::make_pair(parse_result::needs_more, 0);
if (ec) {
reqs_.front()->ec_ = ec;
reqs_.front()->proceed();
return std::make_pair(parse_result::resp, 0);
}
reqs_.front()->read_size_ += parser_.get_consumed();
if (--reqs_.front()->expected_responses_ == 0) {
// Done with this request.
reqs_.front()->proceed();
reqs_.pop_front();
}
return on_finish_parsing(parse_result::resp);
}
void reset()
{
write_buffer_.clear();
read_buffer_.clear();
parser_.reset();
on_push_ = false;
cancel_run_called_ = false;
}
asio::ssl::context ctx_;
std::unique_ptr<next_layer_type> stream_;
// Notice we use a timer to simulate a condition variable. It is
// also more suitable than a channel since its notify operation does
// not suspend.
timer_type writer_timer_;
receive_channel_type receive_channel_;
runner_type runner_;
receiver_adapter_type receive_adapter_;
using dyn_buffer_type = asio::dynamic_string_buffer<char, std::char_traits<char>, std::allocator<char>>;
std::string read_buffer_;
dyn_buffer_type dbuf_;
std::string write_buffer_;
reqs_type reqs_;
resp3::parser parser_{};
bool on_push_ = false;
bool cancel_run_called_ = false;
usage usage_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_CONNECTION_BASE_HPP


@@ -1,133 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_CONNECTOR_HPP
#define BOOST_REDIS_CONNECTOR_HPP
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <string>
#include <chrono>
namespace boost::redis::detail
{
template <class Connector, class Stream>
struct connect_op {
Connector* ctor_ = nullptr;
Stream* stream = nullptr;
asio::ip::tcp::resolver::results_type const* res_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> const& order = {}
, system::error_code const& ec1 = {}
, asio::ip::tcp::endpoint const& ep= {}
, system::error_code const& ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
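// Race the connect operation against a timeout timer. order[0] tells
// which of the two completed first; a plain timer expiry is reported
// as error::connect_timeout.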
ctor_->timer_.expires_after(ctor_->timeout_);
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token)
{
auto f = [](system::error_code const&, auto const&) { return true; };
return asio::async_connect(*stream, *res_, f, token);
},
[this](auto token) { return ctor_->timer_.async_wait(token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: {
ctor_->endpoint_ = ep;
self.complete(ec1);
} break;
case 1:
{
if (ec2) {
self.complete(ec2);
} else {
self.complete(error::connect_timeout);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class connector {
public:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
connector(Executor ex)
: timer_{ex}
{}
void set_config(config const& cfg)
{ timeout_ = cfg.connect_timeout; }
template <class Stream, class CompletionToken>
auto
async_connect(
Stream& stream,
asio::ip::tcp::resolver::results_type const& res,
CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(connect_op<connector, Stream>{this, &stream, &res}, token, timer_);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::connect:
case operation::all:
timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
auto const& endpoint() const noexcept { return endpoint_;}
private:
template <class, class> friend struct connect_op;
timer_type timer_;
std::chrono::steady_clock::duration timeout_ = std::chrono::seconds{2};
asio::ip::tcp::endpoint endpoint_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_CONNECTOR_HPP


@@ -1,124 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_SSL_CONNECTOR_HPP
#define BOOST_REDIS_SSL_CONNECTOR_HPP
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/ssl.hpp>
#include <string>
#include <chrono>
namespace boost::redis::detail
{
template <class Handshaker, class Stream>
struct handshake_op {
Handshaker* hsher_ = nullptr;
Stream* stream_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> const& order = {}
, system::error_code const& ec1 = {}
, system::error_code const& ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
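// Race the TLS handshake against a timeout timer; a plain timer expiry
// is reported as error::ssl_handshake_timeout.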
hsher_->timer_.expires_after(hsher_->timeout_);
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return stream_->async_handshake(asio::ssl::stream_base::client, token); },
[this](auto token) { return hsher_->timer_.async_wait(token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: {
self.complete(ec1);
} break;
case 1:
{
if (ec2) {
self.complete(ec2);
} else {
self.complete(error::ssl_handshake_timeout);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class handshaker {
public:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
handshaker(Executor ex)
: timer_{ex}
{}
template <class Stream, class CompletionToken>
auto
async_handshake(Stream& stream, CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(handshake_op<handshaker, Stream>{this, &stream}, token, timer_);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::ssl_handshake:
case operation::all:
timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
constexpr bool is_dummy() const noexcept
{return false;}
void set_config(config const& cfg)
{ timeout_ = cfg.ssl_handshake_timeout; }
private:
template <class, class> friend struct handshake_op;
timer_type timer_;
std::chrono::steady_clock::duration timeout_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_SSL_CONNECTOR_HPP


@@ -1,252 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_HEALTH_CHECKER_HPP
#define BOOST_REDIS_HEALTH_CHECKER_HPP
// Has to be included before promise.hpp to build on MSVC.
#include <boost/redis/request.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/config.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/consign.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <memory>
#include <chrono>
namespace boost::redis::detail {
template <class HealthChecker, class Connection, class Logger>
class ping_op {
public:
HealthChecker* checker_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_) for (;;)
{
if (checker_->checker_has_exited_) {
logger_.trace("ping_op: checker has exited. Exiting ...");
self.complete({});
return;
}
BOOST_ASIO_CORO_YIELD
conn_->async_exec(checker_->req_, checker_->resp_, std::move(self));
if (ec || is_cancelled(self)) {
logger_.trace("ping_op: error/cancelled (1).");
checker_->wait_timer_.cancel();
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
// Wait before pinging again.
checker_->ping_timer_.expires_after(checker_->ping_interval_);
BOOST_ASIO_CORO_YIELD
checker_->ping_timer_.async_wait(std::move(self));
if (ec || is_cancelled(self)) {
logger_.trace("ping_op: error/cancelled (2).");
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
}
}
};
template <class HealthChecker, class Connection, class Logger>
class check_timeout_op {
public:
HealthChecker* checker_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {})
{
BOOST_ASIO_CORO_REENTER (coro_) for (;;)
{
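// Give the server up to two ping intervals to answer. If no response
// to the health-check PING has arrived by then, the connection is
// considered unresponsive, async_run is cancelled and this operation
// completes with error::pong_timeout.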
checker_->wait_timer_.expires_after(2 * checker_->ping_interval_);
BOOST_ASIO_CORO_YIELD
checker_->wait_timer_.async_wait(std::move(self));
if (ec || is_cancelled(self)) {
logger_.trace("check-timeout-op: error/canceled. Exiting ...");
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
if (checker_->resp_.has_error()) {
logger_.trace("check-timeout-op: Response error. Exiting ...");
self.complete({});
return;
}
if (checker_->resp_.value().empty()) {
logger_.trace("check-timeout-op: Response has no value. Exiting ...");
checker_->ping_timer_.cancel();
conn_->cancel(operation::run);
checker_->checker_has_exited_ = true;
self.complete(error::pong_timeout);
return;
}
if (checker_->resp_.has_value()) {
checker_->resp_.value().clear();
}
}
}
};
template <class HealthChecker, class Connection, class Logger>
class check_health_op {
public:
HealthChecker* checker_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void
operator()(
Self& self,
std::array<std::size_t, 2> order = {},
system::error_code ec1 = {},
system::error_code ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro_)
{
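// A zero health-check interval disables health checking altogether:
// post once so the completion handler is not invoked inline and finish
// successfully.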
if (checker_->ping_interval_ == std::chrono::seconds::zero()) {
logger_.trace("check-health-op: timeout disabled.");
BOOST_ASIO_CORO_YIELD
asio::post(std::move(self));
self.complete({});
return;
}
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return checker_->async_ping(*conn_, logger_, token); },
[this](auto token) { return checker_->async_check_timeout(*conn_, logger_, token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
logger_.on_check_health(ec1, ec2);
if (is_cancelled(self)) {
logger_.trace("check-health-op: canceled. Exiting ...");
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: self.complete(ec1); return;
case 1: self.complete(ec2); return;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class health_checker {
private:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
public:
health_checker(Executor ex)
: ping_timer_{ex}
, wait_timer_{ex}
{
req_.push("PING", "Boost.Redis");
}
void set_config(config const& cfg)
{
req_.clear();
req_.push("PING", cfg.health_check_id);
ping_interval_ = cfg.health_check_interval;
}
template <
class Connection,
class Logger,
class CompletionToken = asio::default_completion_token_t<Executor>
>
auto
async_check_health(
Connection& conn,
Logger l,
CompletionToken token = CompletionToken{})
{
checker_has_exited_ = false;
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(check_health_op<health_checker, Connection, Logger>{this, &conn, l}, token, conn);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::health_check:
case operation::all:
ping_timer_.cancel();
wait_timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
private:
template <class Connection, class Logger, class CompletionToken>
auto async_ping(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(ping_op<health_checker, Connection, Logger>{this, &conn, l}, token, conn, ping_timer_);
}
template <class Connection, class Logger, class CompletionToken>
auto async_check_timeout(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(check_timeout_op<health_checker, Connection, Logger>{this, &conn, l}, token, conn, wait_timer_);
}
template <class, class, class> friend class ping_op;
template <class, class, class> friend class check_timeout_op;
template <class, class, class> friend class check_health_op;
timer_type ping_timer_;
timer_type wait_timer_;
redis::request req_;
redis::generic_response resp_;
std::chrono::steady_clock::duration ping_interval_ = std::chrono::seconds{5};
bool checker_has_exited_ = false;
};
} // boost::redis::detail
#endif // BOOST_REDIS_HEALTH_CHECKER_HPP


@@ -1,37 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_HELPER_HPP
#define BOOST_REDIS_HELPER_HPP
#include <boost/asio/cancellation_type.hpp>
namespace boost::redis::detail
{
template <class T>
auto is_cancelled(T const& self)
{
return self.get_cancellation_state().cancelled() != asio::cancellation_type_t::none;
}
#define BOOST_REDIS_CHECK_OP0(X)\
if (ec || redis::detail::is_cancelled(self)) {\
X\
self.complete(!!ec ? ec : asio::error::operation_aborted);\
return;\
}
#define BOOST_REDIS_CHECK_OP1(X)\
if (ec || redis::detail::is_cancelled(self)) {\
X\
self.complete(!!ec ? ec : asio::error::operation_aborted, {});\
return;\
}
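// Convenience macros used inside composed operations (see e.g.
// run_all_op): on error or cancellation they execute the clean-up
// statement X, complete the operation with the error (or
// operation_aborted when cancelled without an error) and return. The
// OP1 variant is for completion signatures that carry an additional
// result argument.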
} // boost::redis::detail
#endif // BOOST_REDIS_HELPER_HPP


@@ -1,137 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RESOLVER_HPP
#define BOOST_REDIS_RESOLVER_HPP
#include <boost/redis/config.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <string>
#include <chrono>
namespace boost::redis::detail
{
template <class Resolver>
struct resolve_op {
Resolver* resv_ = nullptr;
asio::coroutine coro{};
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, system::error_code ec1 = {}
, asio::ip::tcp::resolver::results_type res = {}
, system::error_code ec2 = {})
{
BOOST_ASIO_CORO_REENTER (coro)
{
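// Race the resolve operation against a timeout timer so that a slow
// DNS lookup completes with error::resolve_timeout instead of hanging.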
resv_->timer_.expires_after(resv_->timeout_);
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token)
{
return resv_->resv_.async_resolve(resv_->addr_.host, resv_->addr_.port, token);
},
[this](auto token) { return resv_->timer_.async_wait(token);}
).async_wait(
asio::experimental::wait_for_one(),
std::move(self));
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
switch (order[0]) {
case 0: {
// Resolver completed first.
resv_->results_ = res;
self.complete(ec1);
} break;
case 1: {
if (ec2) {
// Timer completed first with an error; perhaps a
// cancellation is in progress.
self.complete(ec2);
} else {
// Timer completed first without an error; this is a
// resolve timeout.
self.complete(error::resolve_timeout);
}
} break;
default: BOOST_ASSERT(false);
}
}
}
};
template <class Executor>
class resolver {
public:
using timer_type =
asio::basic_waitable_timer<
std::chrono::steady_clock,
asio::wait_traits<std::chrono::steady_clock>,
Executor>;
resolver(Executor ex) : resv_{ex} , timer_{ex} {}
template <class CompletionToken>
auto async_resolve(CompletionToken&& token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(resolve_op<resolver>{this}, token, resv_);
}
std::size_t cancel(operation op)
{
switch (op) {
case operation::resolve:
case operation::all:
resv_.cancel();
timer_.cancel();
break;
default: /* ignore */;
}
return 0;
}
auto const& results() const noexcept
{ return results_;}
void set_config(config const& cfg)
{
addr_ = cfg.addr;
timeout_ = cfg.resolve_timeout;
}
private:
using resolver_type = asio::ip::basic_resolver<asio::ip::tcp, Executor>;
template <class> friend struct resolve_op;
resolver_type resv_;
timer_type timer_;
address addr_;
std::chrono::steady_clock::duration timeout_;
asio::ip::tcp::resolver::results_type results_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_RESOLVER_HPP


@@ -1,268 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_RUNNER_HPP
#define BOOST_REDIS_RUNNER_HPP
#include <boost/redis/detail/health_checker.hpp>
#include <boost/redis/config.hpp>
#include <boost/redis/response.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/logger.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/detail/connector.hpp>
#include <boost/redis/detail/resolver.hpp>
#include <boost/redis/detail/handshaker.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <string>
#include <memory>
#include <chrono>
namespace boost::redis::detail
{
void push_hello(config const& cfg, request& req);
template <class Runner, class Connection, class Logger>
struct hello_op {
Runner* runner_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
runner_->add_hello();
BOOST_ASIO_CORO_YIELD
conn_->async_exec(runner_->hello_req_, runner_->hello_resp_, std::move(self));
logger_.on_hello(ec, runner_->hello_resp_);
if (ec || runner_->has_error_in_response() || is_cancelled(self)) {
logger_.trace("hello-op: error/canceled. Exiting ...");
conn_->cancel(operation::run);
self.complete(!!ec ? ec : asio::error::operation_aborted);
return;
}
self.complete({});
}
}
};
template <class Runner, class Connection, class Logger>
class runner_op {
private:
Runner* runner_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
public:
runner_op(Runner* runner, Connection* conn, Logger l)
: runner_{runner}
, conn_{conn}
, logger_{l}
{}
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 3> order = {}
, system::error_code ec0 = {}
, system::error_code ec1 = {}
, system::error_code ec2 = {}
, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
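// Run three operations in parallel and wait for all of them: the
// connection setup plus read/write loops (async_run_all), the periodic
// health check and the RESP3 HELLO handshake. The completion order and
// error codes are then combined below into a single result.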
BOOST_ASIO_CORO_YIELD
asio::experimental::make_parallel_group(
[this](auto token) { return runner_->async_run_all(*conn_, logger_, token); },
[this](auto token) { return runner_->health_checker_.async_check_health(*conn_, logger_, token); },
[this](auto token) { return runner_->async_hello(*conn_, logger_, token); }
).async_wait(
asio::experimental::wait_for_all(),
std::move(self));
logger_.on_runner(ec0, ec1, ec2);
if (is_cancelled(self)) {
self.complete(asio::error::operation_aborted);
return;
}
if (ec0 == error::connect_timeout || ec0 == error::resolve_timeout) {
self.complete(ec0);
return;
}
if (order[0] == 2 && !!ec2) {
self.complete(ec2);
return;
}
if (order[0] == 1 && ec1 == error::pong_timeout) {
self.complete(ec1);
return;
}
self.complete(ec0);
}
}
};
template <class Runner, class Connection, class Logger>
struct run_all_op {
Runner* runner_ = nullptr;
Connection* conn_ = nullptr;
Logger logger_;
asio::coroutine coro_{};
template <class Self>
void operator()(Self& self, system::error_code ec = {}, std::size_t = 0)
{
BOOST_ASIO_CORO_REENTER (coro_)
{
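// Connection establishment sequence: resolve the address, connect the
// TCP socket, optionally perform the TLS handshake and finally start
// the read/write loops (async_run_lean). Any failure cancels the run
// operation.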
BOOST_ASIO_CORO_YIELD
runner_->resv_.async_resolve(std::move(self));
logger_.on_resolve(ec, runner_->resv_.results());
BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);)
BOOST_ASIO_CORO_YIELD
runner_->ctor_.async_connect(conn_->next_layer().next_layer(), runner_->resv_.results(), std::move(self));
logger_.on_connect(ec, runner_->ctor_.endpoint());
BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);)
if (conn_->use_ssl()) {
BOOST_ASIO_CORO_YIELD
runner_->hsher_.async_handshake(conn_->next_layer(), std::move(self));
logger_.on_ssl_handshake(ec);
BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);)
}
BOOST_ASIO_CORO_YIELD
conn_->async_run_lean(runner_->cfg_, logger_, std::move(self));
BOOST_REDIS_CHECK_OP0(;)
self.complete(ec);
}
}
};
template <class Executor>
class runner {
public:
runner(Executor ex, config cfg)
: resv_{ex}
, ctor_{ex}
, hsher_{ex}
, health_checker_{ex}
, cfg_{cfg}
{ }
std::size_t cancel(operation op)
{
resv_.cancel(op);
ctor_.cancel(op);
hsher_.cancel(op);
health_checker_.cancel(op);
return 0U;
}
void set_config(config const& cfg)
{
cfg_ = cfg;
resv_.set_config(cfg);
ctor_.set_config(cfg);
hsher_.set_config(cfg);
health_checker_.set_config(cfg);
}
template <class Connection, class Logger, class CompletionToken>
auto async_run(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(runner_op<runner, Connection, Logger>{this, &conn, l}, token, conn);
}
config const& get_config() const noexcept {return cfg_;}
private:
using resolver_type = resolver<Executor>;
using connector_type = connector<Executor>;
using handshaker_type = detail::handshaker<Executor>;
using health_checker_type = health_checker<Executor>;
using timer_type = typename connector_type::timer_type;
template <class, class, class> friend struct run_all_op;
template <class, class, class> friend class runner_op;
template <class, class, class> friend struct hello_op;
template <class Connection, class Logger, class CompletionToken>
auto async_run_all(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(run_all_op<runner, Connection, Logger>{this, &conn, l}, token, conn);
}
template <class Connection, class Logger, class CompletionToken>
auto async_hello(Connection& conn, Logger l, CompletionToken token)
{
return asio::async_compose
< CompletionToken
, void(system::error_code)
>(hello_op<runner, Connection, Logger>{this, &conn, l}, token, conn);
}
void add_hello()
{
hello_req_.clear();
if (hello_resp_.has_value())
hello_resp_.value().clear();
push_hello(cfg_, hello_req_);
}
bool has_error_in_response() const noexcept
{
if (!hello_resp_.has_value())
return true;
auto f = [](auto const& e)
{
switch (e.data_type) {
case resp3::type::simple_error:
case resp3::type::blob_error: return true;
default: return false;
}
};
return std::any_of(std::cbegin(hello_resp_.value()), std::cend(hello_resp_.value()), f);
}
resolver_type resv_;
connector_type ctor_;
handshaker_type hsher_;
health_checker_type health_checker_;
request hello_req_;
generic_response hello_resp_;
config cfg_;
};
} // boost::redis::detail
#endif // BOOST_REDIS_RUNNER_HPP


@@ -1,55 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_WRITE_HPP
#define BOOST_REDIS_WRITE_HPP
#include <boost/asio/write.hpp>
#include <boost/redis/request.hpp>
namespace boost::redis::detail {
/** \brief Writes a request synchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
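*
* Example (a minimal sketch; assumes an already-connected
* asio::ip::tcp::socket named `sock`):
*
* @code
* boost::redis::request req;
* req.push("PING");
* boost::redis::detail::write(sock, req);
* @endcode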
*/
template<class SyncWriteStream>
auto write(SyncWriteStream& stream, request const& req)
{
return asio::write(stream, asio::buffer(req.payload()));
}
template<class SyncWriteStream>
auto write(SyncWriteStream& stream, request const& req, system::error_code& ec)
{
return asio::write(stream, asio::buffer(req.payload()), ec);
}
/** \brief Writes a request asynchronously.
* \ingroup low-level-api
*
* \param stream Stream to write the request to.
* \param req Request to write.
* \param token Asio completion token.
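*
* Example (a minimal sketch; assumes this code runs inside an
* asio::awaitable coroutine and `sock` is an already-connected
* asio::ip::tcp::socket):
*
* @code
* boost::redis::request req;
* req.push("PING");
* co_await boost::redis::detail::async_write(sock, req, boost::asio::use_awaitable);
* @endcode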
*/
template<
class AsyncWriteStream,
class CompletionToken = asio::default_completion_token_t<typename AsyncWriteStream::executor_type>
>
auto async_write(
AsyncWriteStream& stream,
request const& req,
CompletionToken&& token =
asio::default_completion_token_t<typename AsyncWriteStream::executor_type>{})
{
return asio::async_write(stream, asio::buffer(req.payload()), token);
}
} // boost::redis::detail
#endif // BOOST_REDIS_WRITE_HPP


@@ -1,49 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef BOOST_REDIS_IGNORE_HPP
#define BOOST_REDIS_IGNORE_HPP
#include <boost/system/result.hpp>
#include <tuple>
#include <type_traits>
namespace boost::redis
{
/** @brief Type used to ignore responses.
* @ingroup high-level-api
*
* For example
*
* @code
* response<ignore_t, std::string, ignore_t> resp;
* @endcode
*
* will ignore the first and third responses. RESP3 errors won't be
* ignored but will cause `async_exec` to complete with an error.
*/
using ignore_t = std::decay_t<decltype(std::ignore)>;
/** @brief Global ignore object.
* @ingroup high-level-api
*
* Can be used to ignore responses to a request
*
* @code
* conn->async_exec(req, ignore, ...);
* @endcode
*
* RESP3 errors won't be ignored but will cause `async_exec` to
* complete with an error.
*/
extern ignore_t ignore;
} // boost::redis
#endif // BOOST_REDIS_IGNORE_HPP


@@ -1,39 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/connection.hpp>
namespace boost::redis {
connection::connection(
executor_type ex,
asio::ssl::context ctx,
std::size_t max_read_size)
: impl_{ex, std::move(ctx), max_read_size}
{ }
connection::connection(
asio::io_context& ioc,
asio::ssl::context ctx,
std::size_t max_read_size)
: impl_{ioc.get_executor(), std::move(ctx), max_read_size}
{ }
void
connection::async_run_impl(
config const& cfg,
logger l,
asio::any_completion_handler<void(boost::system::error_code)> token)
{
impl_.async_run(cfg, l, std::move(token));
}
void connection::cancel(operation op)
{
impl_.cancel(op);
}
} // namespace boost::redis


@@ -1,12 +0,0 @@
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#include <boost/redis/ignore.hpp>
namespace boost::redis
{
ignore_t ignore;
}
