Mirror of https://github.com/boostorg/redis.git, synced 2026-02-03 09:22:18 +00:00

Compare commits: boost-1.84...boost-1.85 (24 commits)

Commit SHAs: b6e1280075, 5d553f5d71, 78792199ef, f5793ac9bc, dfc2bd1ac2,
0445e74fa3, 234f961e87, 8bb0004188, 4257b2eaec, 96da11a2cc, 3861c5de74,
168ee6148a, 723e72797f, 7caea928af, 71b9a4f428, d89a976729, 154d0b106d,
2b12525206, 0bcbf6d4e4, 6389daa783, ab2d6cdea8, 63ce40e365, f2a005a8c4,
0c06be66de

116  .github/workflows/ci.yml (vendored)
@@ -131,25 +131,98 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- { toolset: gcc-11, install: g++-11, os: ubuntu-22.04, cxxstd: '17', build-type: 'Debug', ldflags: '' }
|
||||
- { toolset: gcc-11, install: g++-11, os: ubuntu-22.04, cxxstd: '20', build-type: 'Release', ldflags: '' }
|
||||
- { toolset: clang-11, install: clang-11, os: ubuntu-22.04, cxxstd: '17', build-type: 'Debug', ldflags: '' }
|
||||
- { toolset: clang-11, install: clang-11, os: ubuntu-22.04, cxxstd: '20', build-type: 'Debug', ldflags: '' }
|
||||
- { toolset: clang-13, install: clang-13, os: ubuntu-22.04, cxxstd: '17', build-type: 'Release', ldflags: '' }
|
||||
- { toolset: clang-13, install: clang-13, os: ubuntu-22.04, cxxstd: '20', build-type: 'Release', ldflags: '' }
|
||||
- { toolset: clang-14, install: 'clang-14 libc++-14-dev libc++abi-14-dev', os: ubuntu-22.04, cxxstd: '17', build-type: 'Debug', cxxflags: '-stdlib=libc++', ldflags: '-lc++' }
|
||||
- { toolset: clang-14, install: 'clang-14 libc++-14-dev libc++abi-14-dev', os: ubuntu-22.04, cxxstd: '20', build-type: 'Release', cxxflags: '-stdlib=libc++', ldflags: '-lc++' }
|
||||
- toolset: gcc-11
|
||||
install: g++-11
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '17'
|
||||
build-type: 'Debug'
|
||||
ldflags: ''
|
||||
|
||||
- toolset: gcc-11
|
||||
install: g++-11
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '20'
|
||||
build-type: 'Release'
|
||||
ldflags: ''
|
||||
|
||||
- toolset: clang-11
|
||||
install: clang-11
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '17'
|
||||
build-type: 'Debug'
|
||||
ldflags: ''
|
||||
|
||||
- toolset: clang-11
|
||||
install: clang-11
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '20'
|
||||
build-type: 'Debug'
|
||||
ldflags: ''
|
||||
|
||||
- toolset: clang-13
|
||||
install: clang-13
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '17'
|
||||
build-type: 'Release'
|
||||
ldflags: ''
|
||||
|
||||
- toolset: clang-13
|
||||
install: clang-13
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '20'
|
||||
build-type: 'Release'
|
||||
ldflags: ''
|
||||
|
||||
- toolset: clang-14
|
||||
install: 'clang-14 libc++-14-dev libc++abi-14-dev'
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '17'
|
||||
build-type: 'Debug'
|
||||
cxxflags: '-stdlib=libc++'
|
||||
ldflags: '-lc++'
|
||||
|
||||
- toolset: clang-14
|
||||
install: 'clang-14 libc++-14-dev libc++abi-14-dev'
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: '20'
|
||||
build-type: 'Release'
|
||||
cxxflags: '-stdlib=libc++'
|
||||
ldflags: '-lc++'
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
container: ${{matrix.container}}
|
||||
env:
|
||||
CXXFLAGS: ${{matrix.cxxflags}} -Wall -Wextra
|
||||
LDFLAGS: ${{matrix.ldflags}}
|
||||
CMAKE_BUILD_PARALLEL_LEVEL: 4
|
||||
BOOST_REDIS_TEST_SERVER: redis
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup container environment
|
||||
if: matrix.container
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get -y install sudo python3 git g++ libssl-dev protobuf-compiler redis-server
|
||||
|
||||
- name: Install dependencies
|
||||
run: sudo apt-get -y install cmake protobuf-compiler redis-server python3 ${{ matrix.install }}
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install cmake protobuf-compiler redis-server python3 ${{ matrix.install }}
|
||||
|
||||
- name: Setup Boost
|
||||
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
|
||||
@@ -209,15 +282,32 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- { toolset: gcc-11, install: g++-11, cxxstd: "11,17,20" } # Having C++11 shouldn't break the build
|
||||
- { toolset: clang-14, install: clang-14, cxxstd: "17,20" }
|
||||
runs-on: ubuntu-22.04
|
||||
- toolset: gcc-11
|
||||
install: g++-11
|
||||
cxxstd: "11,17,20" # Having C++11 shouldn't break the build
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
- toolset: clang-14
|
||||
install: clang-14
|
||||
os: ubuntu-latest
|
||||
container: ubuntu:22.04
|
||||
cxxstd: "17,20"
|
||||
runs-on: ${{ matrix.os }}
|
||||
container: ${{matrix.container}}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup container environment
|
||||
if: matrix.container
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get -y install sudo python3 git g++ libssl-dev
|
||||
|
||||
- name: Install dependencies
|
||||
run: sudo apt-get -y install python3 ${{ matrix.install }}
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install python3 ${{ matrix.install }}
|
||||
|
||||
- name: Setup Boost
|
||||
run: ./tools/ci.py setup-boost --source-dir=$(pwd)
|
||||
|
||||
@@ -19,13 +19,77 @@ target_compile_features(boost_redis INTERFACE cxx_std_17)
|
||||
|
||||
# Dependencies
|
||||
if (BOOST_REDIS_MAIN_PROJECT)
|
||||
# If we're the root project, error if a dependency is not found
|
||||
find_package(Boost 1.83 REQUIRED COMPONENTS headers)
|
||||
# TODO: Understand why we have to list all dependencies below
|
||||
# instead of
|
||||
#set(BOOST_INCLUDE_LIBRARIES redis)
|
||||
#set(BOOST_EXCLUDE_LIBRARIES redis)
|
||||
#add_subdirectory(../.. boostorg/boost EXCLUDE_FROM_ALL)
|
||||
|
||||
set(deps
|
||||
system
|
||||
assert
|
||||
config
|
||||
throw_exception
|
||||
asio
|
||||
variant2
|
||||
mp11
|
||||
winapi
|
||||
predef
|
||||
align
|
||||
context
|
||||
core
|
||||
coroutine
|
||||
static_assert
|
||||
pool
|
||||
date_time
|
||||
smart_ptr
|
||||
exception
|
||||
integer
|
||||
move
|
||||
type_traits
|
||||
algorithm
|
||||
utility
|
||||
io
|
||||
lexical_cast
|
||||
numeric/conversion
|
||||
mpl
|
||||
range
|
||||
tokenizer
|
||||
tuple
|
||||
array
|
||||
bind
|
||||
concept_check
|
||||
function
|
||||
iterator
|
||||
regex
|
||||
unordered
|
||||
preprocessor
|
||||
container
|
||||
conversion
|
||||
container_hash
|
||||
detail
|
||||
optional
|
||||
function_types
|
||||
fusion
|
||||
intrusive
|
||||
describe
|
||||
typeof
|
||||
functional
|
||||
test
|
||||
json
|
||||
endian
|
||||
)
|
||||
|
||||
foreach(dep IN LISTS deps)
|
||||
add_subdirectory(../${dep} boostorg/${dep})
|
||||
endforeach()
|
||||
|
||||
find_package(Threads REQUIRED)
|
||||
find_package(OpenSSL REQUIRED)
|
||||
target_link_libraries(boost_redis
|
||||
INTERFACE
|
||||
Boost::headers
|
||||
Boost::system
|
||||
Boost::asio
|
||||
Threads::Threads
|
||||
OpenSSL::Crypto
|
||||
OpenSSL::SSL
|
||||
|
||||
38  README.md

@@ -27,7 +27,7 @@ examples and tests cmake is supported, for example

```cpp
# Linux
$ BOOST_ROOT=/opt/boost_1_81_0 cmake --preset g++-11
$ BOOST_ROOT=/opt/boost_1_84_0 cmake --preset g++-11

# Windows
$ cmake -G "Visual Studio 17 2022" -A x64 -B bin64 -DCMAKE_TOOLCHAIN_FILE=C:/vcpkg/scripts/buildsystems/vcpkg.cmake
```

@@ -676,7 +676,41 @@ https://lists.boost.org/Archives/boost/2023/01/253944.php.

## Changelog

### develop
### Boost 1.85

* ([Issue 170](https://github.com/boostorg/redis/issues/170))
  Under load and on low-latency networks it is possible to start
  receiving responses before the write operation has completed, while
  the request is still marked as staged and not written. This messes
  with the heuristics that classify responses as unsolicited or not.

* ([Issue 168](https://github.com/boostorg/redis/issues/168)).
  Provides a way of passing a custom SSL context to the connection.
  The design here differs from that of Boost.Beast and Boost.MySql,
  since in Boost.Redis the connection owns the context instead of only
  storing a reference to a user-provided one. This is acceptable because
  an application needs only one connection, which makes the overhead of
  one SSL context per connection negligible.
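  As an illustration, passing a caller-supplied context could look roughly
  like the sketch below. It is based on the constructor signature shown in
  the `connection.hpp` hunk further down in this change set; the surrounding
  setup is hypothetical.

  ```cpp
  asio::io_context ioc;

  // Hypothetical usage sketch: the connection takes ownership of the context.
  asio::ssl::context ctx{asio::ssl::context::tlsv12_client};
  ctx.set_verify_mode(asio::ssl::verify_peer);

  boost::redis::connection conn{ioc.get_executor(), std::move(ctx)};
  ```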

* ([Issue 181](https://github.com/boostorg/redis/issues/181)).
  See a detailed description of this bug in
  [this](https://github.com/boostorg/redis/issues/181#issuecomment-1913346983)
  comment.

* ([Issue 182](https://github.com/boostorg/redis/issues/182)).
  Sets `"default"` as the default value of `config::username`. This
  makes it simpler to use the `requirepass` configuration in Redis.

* ([Issue 189](https://github.com/boostorg/redis/issues/189)).
  Fixes a narrowing conversion by using `std::size_t` instead of
  `std::uint64_t` for the sizes of bulks and aggregates. The code now
  relies on `std::from_chars` returning an error if an oversized value
  is received on platforms on which `std::size_t` has 32 bits.

### Boost 1.84 (First release in Boost)

* Deprecates the `async_receive` overload that takes a response. Users
  should now first call `set_receive_response` to avoid constantly and
@@ -12,6 +12,8 @@ path-constant include_dir : ../include ;
|
||||
path-constant examples_dir : ../example ;
|
||||
path-constant readme : ../README.md ;
|
||||
path-constant layout_file : DoxygenLayout.xml ;
|
||||
path-constant header : header.html ;
|
||||
path-constant footer : footer.html ;
|
||||
|
||||
local stylesheet_files = [ path.glob $(this_dir) : *.css ] ;
|
||||
local includes = [ path.glob-tree $(include_dir) : *.hpp *.cpp ] ;
|
||||
@@ -27,7 +29,7 @@ doxygen doc.html
|
||||
$(includes) $(examples) $(readme)
|
||||
:
|
||||
<doxygen:param>"PROJECT_NAME=Boost.Redis"
|
||||
<doxygen:param>PROJECT_NUMBER="1.4.2"
|
||||
<doxygen:param>PROJECT_NUMBER="1.84.0"
|
||||
<doxygen:param>PROJECT_BRIEF="A redis client library"
|
||||
<doxygen:param>"STRIP_FROM_PATH=\"$(redis_root_dir)\""
|
||||
<doxygen:param>"STRIP_FROM_INC_PATH=\"$(include_dir)\""
|
||||
@@ -46,10 +48,13 @@ doxygen doc.html
|
||||
<doxygen:param>EXCLUDE_SYMBOLS=std
|
||||
<doxygen:param>"USE_MDFILE_AS_MAINPAGE=\"$(readme)\""
|
||||
<doxygen:param>SOURCE_BROWSER=YES
|
||||
<doxygen:param>"HTML_HEADER=\"$(header)\""
|
||||
<doxygen:param>"HTML_FOOTER=\"$(footer)\""
|
||||
<doxygen:param>"HTML_EXTRA_STYLESHEET=$(stylesheet_arg)"
|
||||
<doxygen:param>HTML_TIMESTAMP=YES
|
||||
<doxygen:param>GENERATE_TREEVIEW=YES
|
||||
<doxygen:param>FULL_SIDEBAR=NO
|
||||
<doxygen:param>FULL_SIDEBAR=YES
|
||||
<doxygen:param>DISABLE_INDEX=YES
|
||||
<doxygen:param>ENUM_VALUES_PER_LINE=0
|
||||
<doxygen:param>OBFUSCATE_EMAILS=YES
|
||||
<doxygen:param>USE_MATHJAX=YES
|
||||
|
||||
@@ -32,80 +32,104 @@ html {
|
||||
* Make sure it is wide enough to contain the page title (logo + title + version)
|
||||
*/
|
||||
--side-nav-fixed-width: 335px;
|
||||
--menu-display: none;
|
||||
|
||||
--top-height: 120px;
|
||||
--toc-sticky-top: -25px;
|
||||
--toc-max-height: calc(100vh - 2 * var(--spacing-medium) - 25px);
|
||||
}
|
||||
|
||||
#projectname {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
#page-wrapper {
|
||||
height: calc(100vh - 100px);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
@media screen and (min-width: 768px) {
|
||||
#content-wrapper {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
min-height: 0;
|
||||
}
|
||||
|
||||
#doc-content {
|
||||
overflow-y: scroll;
|
||||
flex: 1;
|
||||
height: auto !important;
|
||||
}
|
||||
|
||||
@media (min-width: 768px) {
|
||||
html {
|
||||
--searchbar-background: var(--page-background-color);
|
||||
}
|
||||
|
||||
#side-nav {
|
||||
#sidebar-wrapper {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
min-width: var(--side-nav-fixed-width);
|
||||
max-width: var(--side-nav-fixed-width);
|
||||
top: var(--top-height);
|
||||
overflow: visible;
|
||||
max-width: var(--side-nav-fixed-width);
|
||||
background-color: var(--side-nav-background);
|
||||
border-right: 1px solid rgb(222, 222, 222);
|
||||
}
|
||||
|
||||
#nav-tree, #side-nav {
|
||||
height: calc(100vh - var(--top-height)) !important;
|
||||
#search-box-wrapper {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
padding-left: 1em;
|
||||
padding-right: 1em;
|
||||
}
|
||||
|
||||
#MSearchBox {
|
||||
flex: 1;
|
||||
display: flex;
|
||||
padding-left: 1em;
|
||||
padding-right: 1em;
|
||||
}
|
||||
|
||||
|
||||
#MSearchBox .left {
|
||||
display: flex;
|
||||
flex: 1;
|
||||
position: static;
|
||||
align-items: center;
|
||||
justify-content: flex-start;
|
||||
width: auto;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
#MSearchBox .right {
|
||||
display: none;
|
||||
}
|
||||
|
||||
#MSearchSelect {
|
||||
padding-left: 0.75em;
|
||||
left: auto;
|
||||
background-repeat: no-repeat;
|
||||
}
|
||||
|
||||
#MSearchField {
|
||||
flex: 1;
|
||||
position: static;
|
||||
width: auto;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
#nav-tree {
|
||||
padding: 0;
|
||||
height: auto !important;
|
||||
}
|
||||
|
||||
#nav-sync {
|
||||
display: none;
|
||||
}
|
||||
|
||||
#top {
|
||||
display: block;
|
||||
border-bottom: none;
|
||||
height: var(--top-height);
|
||||
margin-bottom: calc(0px - var(--top-height));
|
||||
max-width: var(--side-nav-fixed-width);
|
||||
overflow: hidden;
|
||||
background: var(--side-nav-background);
|
||||
}
|
||||
#main-nav {
|
||||
float: left;
|
||||
padding-right: 0;
|
||||
}
|
||||
|
||||
.ui-resizable-handle {
|
||||
cursor: default;
|
||||
width: 1px !important;
|
||||
box-shadow: 0 calc(-2 * var(--top-height)) 0 0 var(--separator-color);
|
||||
}
|
||||
|
||||
#nav-path {
|
||||
position: fixed;
|
||||
right: 0;
|
||||
left: var(--side-nav-fixed-width);
|
||||
bottom: 0;
|
||||
width: auto;
|
||||
}
|
||||
|
||||
#doc-content {
|
||||
height: calc(100vh - 31px) !important;
|
||||
padding-bottom: calc(3 * var(--spacing-large));
|
||||
padding-top: calc(var(--top-height) - 80px);
|
||||
box-sizing: border-box;
|
||||
margin-left: var(--side-nav-fixed-width) !important;
|
||||
}
|
||||
|
||||
#MSearchBox {
|
||||
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)));
|
||||
}
|
||||
|
||||
#MSearchField {
|
||||
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - 65px);
|
||||
}
|
||||
|
||||
#MSearchResultsWindow {
|
||||
@@ -113,3 +137,9 @@ html {
|
||||
right: auto;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
#sidebar-wrapper {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -552,25 +552,6 @@ a.anchor {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
/* until Doxygen 1.9.4 */
|
||||
.left img#MSearchSelect {
|
||||
left: 0;
|
||||
user-select: none;
|
||||
padding-left: 8px;
|
||||
}
|
||||
|
||||
/* Doxygen 1.9.5 */
|
||||
.left span#MSearchSelect {
|
||||
left: 0;
|
||||
user-select: none;
|
||||
margin-left: 8px;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.left #MSearchSelect[src$=".png"] {
|
||||
padding-left: 0
|
||||
}
|
||||
|
||||
.SelectionMark {
|
||||
user-select: none;
|
||||
}
|
||||
@@ -614,9 +595,7 @@ a.anchor {
|
||||
|
||||
#MSearchField {
|
||||
font-size: var(--navigation-font-size);
|
||||
height: calc(var(--searchbar-height) - 2px);
|
||||
background: transparent;
|
||||
width: calc(var(--searchbar-width) - 64px);
|
||||
}
|
||||
|
||||
.MSearchBoxActive #MSearchField {
|
||||
|
||||
19
doc/footer.html
Normal file
19
doc/footer.html
Normal file
@@ -0,0 +1,19 @@
|
||||
<!-- HTML footer for doxygen 1.9.1-->
|
||||
<!-- start footer part -->
|
||||
</div> <!-- close #content-wrapper -->
|
||||
<!--BEGIN GENERATE_TREEVIEW-->
|
||||
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
|
||||
<ul>
|
||||
$navpath
|
||||
<li class="footer">$generatedby <a href="https://www.doxygen.org/index.html"><img class="footer" src="$relpath^doxygen.svg" width="104" height="31" alt="doxygen"/></a> $doxygenversion </li>
|
||||
</ul>
|
||||
</div>
|
||||
<!--END GENERATE_TREEVIEW-->
|
||||
<!--BEGIN !GENERATE_TREEVIEW-->
|
||||
<hr class="footer"/><address class="footer"><small>
|
||||
$generatedby <a href="https://www.doxygen.org/index.html"><img class="footer" src="$relpath^doxygen.svg" width="104" height="31" alt="doxygen"/></a> $doxygenversion
|
||||
</small></address>
|
||||
<!--END !GENERATE_TREEVIEW-->
|
||||
</div> <!-- #page-wrapper -->
|
||||
</body>
|
||||
</html>
|
||||
61
doc/header.html
Normal file
61
doc/header.html
Normal file
@@ -0,0 +1,61 @@
|
||||
<!-- HTML header for doxygen 1.9.1-->
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
|
||||
<meta name="generator" content="Doxygen $doxygenversion"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
||||
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
|
||||
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
|
||||
<script type="text/javascript" src="$relpath^jquery.js"></script>
|
||||
<script type="text/javascript" src="$relpath^dynsections.js"></script>
|
||||
$treeview
|
||||
$search
|
||||
$mathjax
|
||||
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
|
||||
$extrastylesheet
|
||||
</head>
|
||||
<body>
|
||||
<div id="page-wrapper">
|
||||
<div id="content-wrapper">
|
||||
<div id="sidebar-wrapper">
|
||||
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
|
||||
|
||||
<!--BEGIN TITLEAREA-->
|
||||
<div id="titlearea">
|
||||
<table cellspacing="0" cellpadding="0">
|
||||
<tbody>
|
||||
<tr style="height: 56px;">
|
||||
<!--BEGIN PROJECT_LOGO-->
|
||||
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
|
||||
<!--END PROJECT_LOGO-->
|
||||
<!--BEGIN PROJECT_NAME-->
|
||||
<td id="projectalign" style="padding-left: 0.5em;">
|
||||
<div id="projectname">$projectname
|
||||
<!--BEGIN PROJECT_NUMBER--> <span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
|
||||
</div>
|
||||
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
|
||||
</td>
|
||||
<!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME-->
|
||||
<!--BEGIN PROJECT_BRIEF-->
|
||||
<td style="padding-left: 0.5em;">
|
||||
<div id="projectbrief">$projectbrief</div>
|
||||
</td>
|
||||
<!--END PROJECT_BRIEF-->
|
||||
<!--END !PROJECT_NAME-->
|
||||
<!--BEGIN DISABLE_INDEX-->
|
||||
<!--END DISABLE_INDEX-->
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<!--BEGIN SEARCHENGINE-->
|
||||
<div id="search-box-wrapper">
|
||||
$searchbox
|
||||
</div>
|
||||
<!--END SEARCHENGINE-->
|
||||
<!--END TITLEAREA-->
|
||||
<!-- end header part -->
|
||||
671  doc/on-the-costs-of-async-abstractions.md (new file)

@@ -0,0 +1,671 @@

# On the costs of asynchronous abstractions

The biggest force behind the evolution of
[Boost.Redis](https://github.com/boostorg/redis) was my struggle to
come up with a high-level connection abstraction capable of
multiplexing Redis commands from independent sources while
concurrently handling server pushes. This journey taught me many
important lessons, many of which are related to the design and
performance of asynchronous programs based on Boost.Asio.

In this article I will share some of the lessons learned, especially
those related to the performance costs of _abstractions_ such as
`async_read_until` that tend to overschedule into the event-loop. In
this context I will also briefly comment on how the topics discussed
here influenced my views on the proposed
[P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
(a.k.a. Senders and Receivers), which is likely to become the basis of
networking in upcoming C++ standards.

Although the analysis presented in this article uses the Redis
communication protocol for illustration, I expect it to be useful in
general, since
[RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) shares
many similarities with other widely used protocols such as HTTP.

## Parsing `\r\n`-delimited messages

The Redis server communicates with its clients by exchanging data
serialized in
[RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) format.
Among the data types supported by this specification, the
`\r\n`-delimited messages are some of the most frequent in a typical
session. The table below shows some examples

Command | Response | Wire format | RESP3 name
---------|----------|---------------|---------------------
PING | PONG | `+PONG\r\n` | simple-string
INCR | 42 | `:42\r\n` | number
GET | null | `_\r\n` | null

Redis also supports command pipelines, which provide a way of
optimizing round-trip times by batching commands. A pipeline composed
of the commands shown in the previous table looks like this

```
                  | Sent in a    |
                  | single write |
   +--------+     |              |                          +-------+
   |        | --------> PING + INCR + GET -------->         |       |
   |        |                                               |       |
   | Client |                                               | Redis |
   |        |                                               |       |
   |        | <-------- "+PONG\r\n:42\r\n_\r\n" <--------   |       |
   +--------+            |<------>|<---->|<-->|             +-------+
                          |                   |
                          |     Responses     |
```

Messages that use delimiters are so common in networking that a
facility called `async_read_until` for reading them incrementally from
a socket is already part of Boost.Asio. The coroutine below uses it to
print message contents to the screen

```cpp
awaitable<void> parse_resp3_simple_msgs(tcp::socket socket)
{
   for (std::string buffer;;) {
      auto n = co_await async_read_until(socket, dynamic_buffer(buffer), "\r\n");

      std::cout << buffer.substr(1, n - 3) << std::endl;

      // Consume the buffer.
      buffer.erase(0, n);
   }
}
```

If we pay attention to the buffer content as it is parsed by the code
above, we can see that it is rotated fairly often, for example

```
"+PONG\r\n:100\r\n+OK\r\n_\r\n"
":100\r\n+OK\r\n_\r\n"
"+OK\r\n_\r\n"
"_\r\n"
""
```

When I first noticed these apparently excessive buffer rotations I
was concerned they would impact the performance of Boost.Redis in a
severe way. To measure the magnitude of this impact I came up with an
experimental implementation of Asio's `dynamic_buffer` that consumed
the buffer less eagerly than the `std::string::erase` call used
above. For that, the implementation increased a buffer offset up
to a certain threshold and only then triggered a (larger) rotation.
This is illustrated in the diagram below

```
   |<---- offset threshold ---->|

   "+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
   |                                 # Initial offset

   "+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
    |<------>|                       # After 1st message

   "+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
    |<-------------->|               # After 2nd message

   "+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
    |<--------------------->|        # After 3rd message

   "+PONG\r\n:100\r\n+OK\r\n_\r\n+PONG\r\n"
    |<-------------------------->|   # Threshold crossed after the 4th message

   "+PONG\r\n"
   |                                 # After rotation
```
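
For concreteness, the sketch below shows the general idea behind such a
lazily-consuming buffer. This is not the actual experimental implementation,
only an illustration of the offset technique described above: `consume` merely
advances an offset, and the (larger) erase happens once the offset crosses a
threshold.

```cpp
#include <cstddef>
#include <string>
#include <string_view>

// Illustrative sketch only: a string-backed buffer whose consume() defers the
// physical erase until the consumed prefix crosses a threshold.
class lazy_buffer {
public:
   explicit lazy_buffer(std::string& s, std::size_t threshold = 4096)
   : data_{s}, threshold_{threshold} {}

   // View of the not-yet-consumed part of the buffer.
   std::string_view data() const noexcept
   { return std::string_view{data_}.substr(offset_); }

   // Logically consumes n bytes; physically rotates only past the threshold.
   void consume(std::size_t n)
   {
      offset_ += n;
      if (offset_ >= threshold_) {
         data_.erase(0, offset_); // The single, larger rotation.
         offset_ = 0;
      }
   }

private:
   std::string& data_;
   std::size_t threshold_;
   std::size_t offset_ = 0;
};
```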

After comparing the performance of the two versions I was surprised to
find there was no difference! That was also very suspicious, since some
RESP3 aggregate types contain a considerable number of
separators. For example, a map with two pairs `[(key1, value1),
(key2, value2)]` encoded in RESP3 requires ten rotations in total

```
"%2\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"key1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
"$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n"
...
```

It was evident that something more costly was shadowing the buffer
rotations. It couldn't be the search for the separator, since it
performs equivalently to the rotations. It is also easy to show that the
overhead is not related to any IO operation, since the problem persists
if the buffer is never consumed (which causes the function to be
called with the same string repeatedly). Once these two factors
are removed from the table, we are driven to the conclusion that
calling `async_read_until` has an intrinsic cost; let us see what
that is.

### Async operations that complete synchronously considered harmful

Assume the scenario described earlier where `async_read_until` is used
to parse multiple `\r\n`-delimited messages. The following is a
detailed description of what happens behind the scenes

1. `async_read_until` calls `socket.async_read_some` repeatedly
   until the separator `\r\n` shows up in the buffer

   ```
   "<read1>"                                 # Read 1: needs more data.
   "<read1><read2>"                          # Read 2: needs more data.
   "<read1><read2>"                          # Read 3: needs more data.
   "<read1><read2><read3>"                   # Read 4: needs more data.
   "<read1><read2><read3>\r\n<bonus bytes>"  # separator found, done.
   ```

2. The last call to `socket.async_read_some` happens to read past
   the separator `\r\n` (depicted as `<bonus bytes>` above),
   resulting in bonus (maybe incomplete) messages in the buffer

   ```
   |        1st async_read_some           |       2nd async_read_some          |
   |                                      |                                    |
   "+message content here             \r\n:100\r\n+OK\r\n_\r\n+incomplete respo"
   |                                      |                   |                |
   |            Message wanted            |<-- bonus msgs --->|<--incomplete-->|
   |                                      |                   |      msg       |
   |                                      |                                    |
   |                                      |<---------- bonus bytes ----------->|
   ```

3. The buffer is consumed and `async_read_until` is called again.
   However, since the buffer already contains the next message this
   is an IO-less call

   ```
   ":100\r\n+OK\r\n_\r\n+not enough byt"
   |                   |               |
   |  No IO required   |   Need more   |
   |  to parse these   |   data        |
   |  messages.        |               |
   ```

The fact that step 3 doesn't perform any IO implies the operation could
complete synchronously, but because this is an asynchronous function
Boost.Asio by default won't call the continuation before the initiating
function returns. The implementation must therefore enqueue it for
execution, as depicted below

```
OP5 ---> OP4 ---> OP3 ---> OP2 ---> OP1   # Reschedules the continuation
                                     |
    OP1 schedules its continuation   |
  +----------------------------------+
  |
  |
OP6 ---> OP5 ---> OP4 ---> OP3 ---> OP2   # Reschedules the continuation
                                     |
    OP2 schedules its continuation   |
  +----------------------------------+
  |
  |
OP7 ---> OP6 ---> OP5 ---> OP4 ---> OP3
```

When summed up, this excessive rescheduling of continuations leads to
performance degradation at scale. But since this is an event-loop there
is no way around rescheduling, as doing otherwise would mean
allowing a task to monopolize the event-loop, preventing other tasks
from making progress. The best that can be done is to avoid
_overscheduling_, so let us determine how much rescheduling is too
much.

## The intrinsic latency of an event-loop

An event-loop is a design pattern originally used to handle events
external to the application, such as GUIs, networking and other forms
of IO. If we take this literally, it becomes evident that the way
`async_read_until` works is incompatible with an event-loop, since
_searching for the separator_ is not an external event and as such
should not have to be enqueued for execution.

Once we constrain ourselves to events that have an external origin,
such as anything related to IO, including any form of IPC, the
scheduling overhead is reduced considerably, since the latency
of the transport layer eclipses whatever time it takes to schedule the
continuation. For example, according to
[these](https://www.boost.org/doc/libs/develop/libs/cobalt/doc/html/index.html#posting_to_an_executor)
benchmarks, the time it takes to schedule a task in the
`asio::io_context` is approximately `50ns`.

To give the reader an idea of the magnitude of this number: if
rescheduling alone were to account for 1% of the runtime of an app
that uses asynchronous IO to move around data in chunks of size 128kb,
then this app would have a throughput of approximately 24Gb/s. At such
a high throughput multiple other factors kick in before any scheduling
overhead even starts to manifest.
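
For the record, the arithmetic below is one way to arrive at a number of
that order. The units are my own assumptions (50ns per reschedule, chunks
of 128 kibibits); the original figure may have been computed differently
and the point is only the order of magnitude.

```cpp
// Back-of-the-envelope estimate; the assumed units are this sketch's, not a
// statement of how the 24Gb/s figure above was originally computed.
constexpr double reschedule_s = 50e-9;               // one reschedule
constexpr double chunk_time_s = reschedule_s / 0.01; // 1% of per-chunk time -> 5us
constexpr double chunk_bits   = 128.0 * 1024.0;      // one 128 kibibit chunk
constexpr double gibit_per_s  = chunk_bits / chunk_time_s / (1024.0 * 1024.0 * 1024.0);
// gibit_per_s evaluates to roughly 24, i.e. rescheduling only starts to matter
// at throughputs in the tens of gigabits per second.
```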

It is therefore safe to say that only asynchronous operations that
don't perform, or are not bound to, any IO are ever likely to
overschedule in the sense described above. Those cases can usually be
avoided; this is what worked for Boost.Redis

1. `async_read_until` was replaced with calls to
   `socket.async_read_some` and an incremental parser that does not
   do any IO (see the sketch after this list).

2. Channel `try_` functions are used to check if send and receive
   operations can be called without suspension. For example,
   `try_send` before `async_send` and `try_receive` before
   `async_receive` ([see also](https://github.com/chriskohlhoff/asio/commit/fe4fd7acf145335eeefdd19708483c46caeb45e5)
   `try_send_via_dispatch` for a more aggressive optimization).

3. Coalescing of individual requests into a single payload to reduce
   the number of necessary writes on the socket; this is only
   possible because Redis supports pipelining (good protocols
   help!).

4. Increased the socket read size to 4kb to reduce the number of
   reads (the cost of rotating more data in the buffer is outweighed
   by the savings of performing fewer reads).

5. Dropped the `resp3::async_read` abstraction. When I started
   developing Boost.Redis there was convincing precedent for having
   a `resp3::async_read` function to read complete RESP3 messages
   from a socket

   Name | Description
   ---------------------------------------|-------------------
   `asio::ip::tcp::async_read` | Reads `n` bytes from a stream.
   `beast::http::async_read` | Reads a complete HTTP message.
   `beast::websocket::stream::async_read` | Reads a complete Websocket message.
   `redis::async_read` | Reads a complete RESP3 message.

   It turns out, however, that this function is also vulnerable to
   immediate completions, since with command pipelines multiple
   responses show up in the buffer after a single call to
   `socket.async_read_some`. When that happens each call to
   `resp3::async_read` is IO-less.
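
To make item 1 above concrete, the sketch below shows the general shape of
such a read loop. The `resp3_parser` type and its `parse_some` function are
hypothetical stand-ins for the real (and much richer) Boost.Redis parser; the
point is only that buffered data is parsed without touching the event-loop,
and `async_read_some` is called only when more bytes are actually needed.

```cpp
// Hypothetical sketch: parse_some() is assumed to consume as many complete
// messages as `data` currently holds and to return false when it needs more
// bytes. Only at that point do we go back to the socket (and the event-loop).
awaitable<void> read_loop(tcp::socket socket, resp3_parser parser)
{
   std::string data;
   std::array<char, 4096> chunk;

   for (;;) {
      // Drain whatever is already buffered. No IO and no rescheduling.
      while (parser.parse_some(data))
         ;

      // More bytes are really needed: a single read appends them to the buffer.
      auto n = co_await socket.async_read_some(buffer(chunk), use_awaitable);
      data.append(chunk.data(), n);
   }
}
```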

Sometimes it is not possible to avoid asynchronous operations that
complete synchronously; in the following sections we will see how to
favor throughput over fairness in Boost.Asio.

### Calling the continuation inline

In Boost.Asio it is possible to customize how an algorithm executes
the continuation when an immediate completion occurs, including the
ability to call it inline, thereby avoiding the costs of
excessive rescheduling. Here is how it works

```cpp
// (default) The continuation is enqueued for execution, regardless of
// whether it is immediate or not.
async_read_until(socket, buffer, "\r\n", continuation);

// Immediate completions are executed in exec2 (otherwise equal to the
// version above). The completion is called inline if exec2 is the
// same executor that is running the operation.
async_read_until(socket, buffer, "\r\n", bind_immediate_executor(exec2, completion));
```

To compare the performance of both cases I have written a small
function that calls `async_read_until` in a loop with a buffer that is
never consumed, so that all completions are immediate. The version
below uses the default behaviour

```cpp
void read_safe(tcp::socket& s, std::string& buffer)
{
   auto continuation = [&s, &buffer](auto ec, auto n)
   {
      read_safe(s, buffer); // Recursive call
   };

   // This won't cause stack exhaustion because the continuation is
   // not called inline but posted in the event loop.
   async_read_until(s, dynamic_buffer(buffer), "\r\n", continuation);
}
```

To optimize away some of the rescheduling, the version below uses the
`bind_immediate_executor` customization to call the continuation
reentrantly and then breaks the stack from time to time to avoid
exhausting it

```cpp
void read_reentrant(tcp::socket& s, std::string& buffer)
{
   static int counter = 0; // Counts the calls since the last manual post.
   ++counter;

   auto cont = [&](auto, auto)
   {
      read_reentrant(s, buffer); // Recursive call
   };

   // Breaks the callstack after 16 inline calls.
   if (counter % 16 == 0) {
      post(s.get_executor(), [cont](){cont({}, 0);});
      return;
   }

   // Continuation called reentrantly.
   async_read_until(s, dynamic_buffer(buffer), "\r\n",
      bind_immediate_executor(s.get_executor(), cont));
}
```

The diagram below shows what the reentrant chain of calls in the code
above looks like from the event-loop point of view

```
OP5 ---> OP4 ---> OP3 ---> OP2 ---> OP1a   # Completes immediately
                                     |
                                     |
    ...                              |
                                    OP1b   # Completes immediately
                                     |
    Waiting for OP5 to               |
    reschedule its                   |
    continuation                    OP1c   # Completes immediately
                                     |
                                     |
    ...                              |
                                    OP1d   # Break the call-stack
                                     |
  +----------------------------------+
  |
OP6 ---> OP5 ---> OP4 ---> OP3 ---> OP2
```

Unsurprisingly, the reentrant code is 3x faster than the one that
relies on the default behaviour (don't forget that this is a best-case
scenario; in the general case not all completions are immediate).
Although faster, this strategy has some downsides

- The overall operation is not as fast as possible, since it still
  has to reschedule from time to time to break the call stack. The
  less often it reschedules, the higher the risk of exhausting the
  stack.

- It is too easy to forget to break the stack. For example, the
  programmer might decide to branch somewhere into another chain of
  asynchronous calls that also use this strategy. To avoid
  exhaustion all such branches would have to be safeguarded with a
  manual rescheduling, i.e. a `post`.

- It requires additional layers of complexity, such as
  `bind_immediate_executor` in addition to `bind_executor`.

- It is non-compliant with stricter
  [guidelines](https://en.wikipedia.org/wiki/The_Power_of_10:_Rules_for_Developing_Safety-Critical_Code)
  that prohibit reentrant code.

- There is no simple way of choosing the maximum allowed number of
  reentrant calls for each function in a way that covers different
  use cases and users. Library writers and users would be tempted
  into using a small value, reducing the performance advantage.

- If the socket is always ready for reading, the task will
  monopolize IO for up to `16` iterations, which might cause
  stutter in unrelated tasks, as depicted below

  ```
                                    Unfairness

             +----+----+----+     +----+----+----+     +----+----+----+
  Socket-1   |    |    |    |     |    |    |    |     |    |    |    |
             +----+----+----+----+----+----+----+----+----+----+----+----+
  Socket-2                   |    |              |    |              |    |
                             +----+              +----+              +----+
  ```

From the aesthetic point of view the code above is also unpleasant, as
it breaks the function's asynchronous contract by injecting reentrant
behaviour. It gives me the same kind of feeling I have about
[recursive
mutexes](http://www.zaval.org/resources/library/butenhof1.html).

Note: it is worth mentioning here that a similar
[strategy](https://github.com/NVIDIA/stdexec/blob/6f23dd5b1d523541ce28af32fc2603403ebd36ed/include/exec/trampoline_scheduler.hpp#L52)
is used to break the call stack of repeating algorithms in
[stdexec](https://github.com/NVIDIA/stdexec), but this time
based on
[P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
and not on Boost.Asio.

### Coroutine tail-calls

In the previous section we have seen how to avoid overscheduling by
instructing the asynchronous operation to call the completion inline
on immediate completion. It turns out, however, that coroutine support
for _tail-calls_ provides a way to completely sidestep this problem.
This feature is described by
[Lewis Baker](https://lewissbaker.github.io/2020/05/11/understanding_symmetric_transfer)
as follows

> A tail-call is one where the current stack-frame is popped before
> the call and the current function's return address becomes the
> return-address for the callee. ie. the callee will return directly
> the the [sic] caller of this function.

This means (at least in principle) that a library capable of using
tail-calls when an immediate completion occurs neither has to
reschedule the continuation nor call it inline. To test how this
feature compares to the other styles I have used Boost.Cobalt. The
code looks as follows

```cpp
// Warning: risks unfairness and starvation of other tasks.
task<void> read_until_unfair()
{
   for (int i = 0; i != repeat; ++i) {
      co_await async_read_until(s, dynamic_buffer(buffer), "\r\n", cobalt::use_op);
   }
}
```

The result of this comparison is listed in the table below

Time/s | Style     | Configuration               | Library
-------|-----------|-----------------------------|-------------
1.0    | Coroutine | `await_ready` optimization  | Boost.Cobalt
4.8    | Callback  | Reentrant                   | Boost.Asio
10.3   | Coroutine | `use_op`                    | Boost.Cobalt
14.9   | Callback  | Regular                     | Boost.Asio
15.6   | Coroutine | `asio::deferred`            | Boost.Asio

As the reader can see, `cobalt::use_op` ranks 3rd and is considerably
faster (10.3 vs 15.6) than the Asio equivalent that uses
default-rescheduling. However, by trading rescheduling for tail-calls
the code above can now monopolize the event-loop, resulting in
unfairness if the socket happens to receive data at a higher rate
than other tasks. If by chance data is received continuously
on a socket that is always ready for reading, other tasks will starve

```
                                  Starvation

           +----+----+----+----+----+----+----+----+----+----+----+----+
Socket-1   |    |    |    |    |    |    |    |    |    |    |    |    |
           +----+----+----+----+----+----+----+----+----+----+----+----+

Socket-2   Starving ...
```

To avoid this problem the programmer is forced to reschedule from time
to time, in the same way we did for the reentrant calls

```cpp
task<void> read_until_fair()
{
   for (int i = 0; i != repeat; ++i) {
      if (i % 16 == 0) {
         // Reschedules to address unfairness and starvation of
         // other tasks.
         co_await post(cobalt::use_op);
         continue;
      }

      co_await async_read_until(s, dynamic_buffer(buffer), "\r\n", cobalt::use_op);
   }
}
```

Delegating fairness-safety to applications is a dangerous game.
This is a
[problem](https://tokio.rs/blog/2020-04-preemption) the Tokio
community had to deal with before the Tokio runtime started enforcing
rescheduling (after 256 successful operations)

> If data is received faster than it can be processed, it is possible
> that more data will have already been received by the time the
> processing of a data chunk completes. In this case, .await will
> never yield control back to the scheduler, other tasks will not be
> scheduled, resulting in starvation and large latency variance.

> Currently, the answer to this problem is that the user of Tokio is
> responsible for adding yield points in both the application and
> libraries. In practice, very few actually do this and end up being
> vulnerable to this sort of problem.

### Safety in P2300 (Senders and Receivers)

As of this writing, the C++ standards committee (WG21) has been
pursuing the standardization of a networking library for almost 20
years. One of the biggest obstacles that prevented it from happening
was a disagreement on what the _asynchronous model_ that underlies
networking should look like. Until 2021 that model was basically
Boost.Asio _executors_, but in this
[poll](https://www.reddit.com/r/cpp/comments/q6tgod/c_committee_polling_results_for_asynchronous/)
the committee decided to abandon that front and concentrate efforts on
the new [P2300](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p2300r7.html)
proposal, also known as _senders and receivers_. The decision was
quite [abrupt](https://isocpp.org/files/papers/P2464R0.html)

> The original plan about a week earlier than the actual writing of
> this paper was to write a paper that makes a case for standardizing
> the Networking TS.

and opinions turned out to be very strong against Boost.Asio (see
[this](https://api.csswg.org/bikeshed/?force=1&url=https://raw.githubusercontent.com/brycelelbach/wg21_p2459_2022_january_library_evolution_poll_outcomes/main/2022_january_library_evolution_poll_outcomes.bs)
for how each voter backed their vote)

> The whole concept is completely useless, there's no composed code
> you can write with it.

The part of that debate that interests us most here is stated in
[P2471](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p2471r1.pdf),
which compares Boost.Asio with P2300

> Yes, default rescheduling each operation and default not
> rescheduling each operation, is a poor trade off. IMO both options
> are poor. The one good option that I know of that can prevent stack
> exhaustion is first-class tail-recursion in library or language

> ASIO has chosen to require that every async operation must schedule
> the completion on a scheduler (every read, every write, etc..).

> sender/receiver has not decided to
> require that the completion be scheduled.

> This is why I consider tail-call the only good solution. Scheduling
> solutions are all inferior (give thanks to Lewis for this shift in
> my understanding :) ).

Although tail-calls solve the problem of stack exhaustion, as we have
seen above, they make the code vulnerable to unfairness and starvation,
and therefore they are not an alternative to default-rescheduling, as
the quotation above implies. To deal with the lack of
default-rescheduling, libraries and applications built on top of P2300
have to address the aforementioned problems, layer after layer. For
example,
[stdexec](https://github.com/NVIDIA/stdexec) has invented something
called a
_[trampoline-scheduler](https://github.com/NVIDIA/stdexec/blob/e7cd275273525dbc693f4bf5f6dc4d4181b639e4/include/exec/trampoline_scheduler.hpp)_
to protect repeating algorithms such as `repeat_effect_until` from
exhausting the stack. This construct, however, is built around
reentrancy, allowing
[sixteen](https://github.com/NVIDIA/stdexec/blob/83cdb92d316e8b3bca1357e2cf49fc39e9bed403/include/exec/trampoline_scheduler.hpp#L52)
levels of inline calls by default. While in Boost.Asio it is possible to use
reentrancy as an optimization for corner cases, here it is made the
_modus operandi_; the downsides of this approach have already been stated in a
previous section, so I won't repeat them here.

Also, the fact that a special scheduler is needed by specific
algorithms is a problem on its own, since it contradicts one of the
main selling points of P2300, namely that of being _generic_. For
example, [P2464R0](https://isocpp.org/files/papers/P2464R0.html) uses
the code below as an example

```cpp
void
run_that_io_operation(
   scheduler auto sched,
   sender_of<network_buffer> auto wrapping_continuation)
{
   // snip
}
```

and states

> I have no idea what the sched's concrete type is. I have no idea
> what the wrapping_continuation's concrete type is. They're none of
> my business, ...

Hence, by being generic, the algorithms built on top of P2300 are also
unsafe (against stack exhaustion, unfairness and starvation). Otherwise,
if library writers require a specific scheduler to ensure safety, then
the algorithms automatically become non-generic: pick your poison!

The proposers of P2300 claim that it doesn't address safety because it
should be seen as the low-level building blocks of asynchronous
programming and that it is the role of higher-level libraries to deal
with that. This claim, however, does not hold since, as we have just
seen, Boost.Asio also provides those building blocks but does so in a
safe way. In fact, during the whole development of Boost.Redis I never
had to think about these kinds of problems, because safety is built in
from the ground up.

### Avoiding coroutine suspension with `await_ready`

Now let us get back to the first place in the table above, which uses
the `await_ready` optimization from Boost.Cobalt. This API provides
users with the ability to avoid coroutine suspension altogether in
case the separator is already present in the buffer. It works by
defining a `struct` with the following interface

```cpp
struct read_until : cobalt::op<error_code, std::size_t> {
   ...

   void ready(cobalt::handler<error_code, std::size_t> handler) override
   {
      // Search for the separator in buffer and call the handler if found.
   }

   void initiate(cobalt::completion_handler<error_code, std::size_t> complete) override
   {
      // Regular call to async_read_until.
      async_read_until(socket, buffer, delim, std::move(complete));
   }
};
```

and the code that uses it

```cpp
for (int i = 0; i != repeat; ++i) {
   co_await read_until(socket, dynamic_buffer(buffer));
}
```

In essence, what the code above does is skip a call to
`async_read_until` by first checking with the `ready` function whether
the forthcoming operation is going to complete immediately. The
nice thing about it is that the programmer can use this optimization
only when a performance bottleneck is detected, without planning for it
in advance. The drawback, however, is that it requires reimplementing
the search for the separator in the body of the `ready` function,
defeating the purpose of using `async_read_until` in the first place,
as (again) it would have been simpler to reformulate the operation in
terms of `socket.async_read_some` directly.
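
For illustration, a minimal `ready` body could look like the sketch below.
The members `buffer` and `delim` are assumed from the (elided) struct above
and this is not the actual Boost.Redis code; note how it duplicates the
separator search that `async_read_until` already performs internally, which
is precisely the drawback just mentioned.

```cpp
// Sketch only: complete inline when the delimiter is already buffered,
// otherwise do nothing so that initiate() performs the real IO.
void ready(cobalt::handler<error_code, std::size_t> handler) override
{
   auto const pos = buffer.find(delim);
   if (pos != std::string::npos)
      handler(error_code{}, pos + delim.size());
}
```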

## Acknowledgements

Thanks to Klemens Morgenstern for answering questions about
Boost.Cobalt.

@@ -10,6 +10,9 @@ macro(make_example EXAMPLE_NAME STANDARD)
|
||||
if (${STANDARD} STREQUAL "20")
|
||||
target_link_libraries(${EXAMPLE_NAME} PRIVATE examples_main)
|
||||
endif()
|
||||
if (${EXAMPLE_NAME} STREQUAL "cpp20_json")
|
||||
target_link_libraries(${EXAMPLE_NAME} PRIVATE Boost::json Boost::container_hash)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
macro(make_testable_example EXAMPLE_NAME STANDARD)
|
||||
@@ -46,4 +49,4 @@ endif()
|
||||
|
||||
if (NOT MSVC)
|
||||
make_example(cpp20_chat_room 20)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -15,13 +15,11 @@
|
||||
|
||||
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
|
||||
|
||||
#define BOOST_JSON_NO_LIB
|
||||
#define BOOST_CONTAINER_NO_LIB
|
||||
#include <boost/json/serialize.hpp>
|
||||
#include <boost/json/parse.hpp>
|
||||
#include <boost/json/value_from.hpp>
|
||||
#include <boost/json/value_to.hpp>
|
||||
#include <boost/redis/resp3/serialization.hpp>
|
||||
#include <boost/json/src.hpp>
|
||||
|
||||
namespace asio = boost::asio;
|
||||
using namespace boost::describe;
|
||||
|
||||
@@ -38,7 +38,7 @@ struct config {
|
||||
* [HELLO](https://redis.io/commands/hello/) command. If left
|
||||
* empty `HELLO` will be sent without authentication parameters.
|
||||
*/
|
||||
std::string username;
|
||||
std::string username = "default";
|
||||
|
||||
/** @brief Password passed to the
|
||||
* [HELLO](https://redis.io/commands/hello/) command. If left
|
||||
|
||||
@@ -86,13 +86,19 @@ public:
|
||||
using other = basic_connection<Executor1>;
|
||||
};
|
||||
|
||||
/// Contructs from an executor.
|
||||
/** @brief Constructor
|
||||
*
|
||||
* @param ex Executor on which connection operation will run.
|
||||
* @param ctx SSL context.
|
||||
* @param max_read_size Maximum read size that is passed to
|
||||
* the internal `asio::dynamic_buffer` constructor.
|
||||
*/
|
||||
explicit
|
||||
basic_connection(
|
||||
executor_type ex,
|
||||
asio::ssl::context::method method = asio::ssl::context::tls_client,
|
||||
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
|
||||
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)())
|
||||
: impl_{ex, method, max_read_size}
|
||||
: impl_{ex, std::move(ctx), max_read_size}
|
||||
, timer_{ex}
|
||||
{ }
|
||||
|
||||
@@ -100,9 +106,9 @@ public:
|
||||
explicit
|
||||
basic_connection(
|
||||
asio::io_context& ioc,
|
||||
asio::ssl::context::method method = asio::ssl::context::tls_client,
|
||||
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
|
||||
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)())
|
||||
: basic_connection(ioc.get_executor(), method, max_read_size)
|
||||
: basic_connection(ioc.get_executor(), std::move(ctx), max_read_size)
|
||||
{ }
|
||||
|
||||
/** @brief Starts underlying connection operations.
|
||||
@@ -286,10 +292,6 @@ public:
|
||||
auto const& get_ssl_context() const noexcept
|
||||
{ return impl_.get_ssl_context();}
|
||||
|
||||
/// Returns the ssl context.
|
||||
auto& get_ssl_context() noexcept
|
||||
{ return impl_.get_ssl_context();}
|
||||
|
||||
/// Resets the underlying stream.
|
||||
void reset_stream()
|
||||
{ impl_.reset_stream(); }
|
||||
@@ -343,14 +345,14 @@ public:
|
||||
explicit
|
||||
connection(
|
||||
executor_type ex,
|
||||
asio::ssl::context::method method = asio::ssl::context::tls_client,
|
||||
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
|
||||
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)());
|
||||
|
||||
/// Contructs from a context.
|
||||
explicit
|
||||
connection(
|
||||
asio::io_context& ioc,
|
||||
asio::ssl::context::method method = asio::ssl::context::tls_client,
|
||||
asio::ssl::context ctx = asio::ssl::context{asio::ssl::context::tlsv12_client},
|
||||
std::size_t max_read_size = (std::numeric_limits<std::size_t>::max)());
|
||||
|
||||
/// Returns the underlying executor.
|
||||
@@ -423,6 +425,10 @@ public:
|
||||
usage get_usage() const noexcept
|
||||
{ return impl_.get_usage(); }
|
||||
|
||||
/// Returns the ssl context.
|
||||
auto const& get_ssl_context() const noexcept
|
||||
{ return impl_.get_ssl_context();}
|
||||
|
||||
private:
|
||||
void
|
||||
async_run_impl(
|
||||
|
||||
@@ -113,7 +113,7 @@ struct exec_op {
|
||||
asio::coroutine coro{};
|
||||
|
||||
template <class Self>
|
||||
void operator()(Self& self , system::error_code ec = {})
|
||||
void operator()(Self& self , system::error_code ec = {}, std::size_t = 0)
|
||||
{
|
||||
BOOST_ASIO_CORO_REENTER (coro)
|
||||
{
|
||||
@@ -130,7 +130,6 @@ struct exec_op {
|
||||
EXEC_OP_WAIT:
|
||||
BOOST_ASIO_CORO_YIELD
|
||||
info_->async_wait(std::move(self));
|
||||
BOOST_ASSERT(ec == asio::error::operation_aborted);
|
||||
|
||||
if (info_->ec_) {
|
||||
self.complete(info_->ec_, 0);
|
||||
@@ -140,18 +139,18 @@ EXEC_OP_WAIT:
|
||||
if (info_->stop_requested()) {
|
||||
// Don't have to call remove_request as it has already
|
||||
// been by cancel(exec).
|
||||
return self.complete(ec, 0);
|
||||
return self.complete(asio::error::operation_aborted, 0);
|
||||
}
|
||||
|
||||
if (is_cancelled(self)) {
|
||||
if (info_->is_written()) {
|
||||
if (!info_->is_waiting()) {
|
||||
using c_t = asio::cancellation_type;
|
||||
auto const c = self.get_cancellation_state().cancelled();
|
||||
if ((c & c_t::terminal) != c_t::none) {
|
||||
// Cancellation requires closing the connection
|
||||
// otherwise it stays in inconsistent state.
|
||||
conn_->cancel(operation::run);
|
||||
return self.complete(ec, 0);
|
||||
return self.complete(asio::error::operation_aborted, 0);
|
||||
} else {
|
||||
// Can't implement other cancelation types, ignoring.
|
||||
self.get_cancellation_state().clear();
|
||||
@@ -163,7 +162,7 @@ EXEC_OP_WAIT:
|
||||
} else {
|
||||
// Cancelation can be honored.
|
||||
conn_->remove_request(info_);
|
||||
self.complete(ec, 0);
|
||||
self.complete(asio::error::operation_aborted, 0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -395,9 +394,9 @@ public:
|
||||
/// Constructs from an executor.
|
||||
connection_base(
|
||||
executor_type ex,
|
||||
asio::ssl::context::method method,
|
||||
asio::ssl::context ctx,
|
||||
std::size_t max_read_size)
|
||||
: ctx_{method}
|
||||
: ctx_{std::move(ctx)}
|
||||
, stream_{std::make_unique<next_layer_type>(ex, ctx_)}
|
||||
, writer_timer_{ex}
|
||||
, receive_channel_{ex, 256}
|
||||
@@ -412,10 +411,6 @@ public:
|
||||
auto const& get_ssl_context() const noexcept
|
||||
{ return ctx_;}
|
||||
|
||||
/// Returns the ssl context.
|
||||
auto& get_ssl_context() noexcept
|
||||
{ return ctx_;}
|
||||
|
||||
/// Resets the underlying stream.
|
||||
void reset_stream()
|
||||
{
|
||||
@@ -511,11 +506,15 @@ public:
|
||||
usage get_usage() const noexcept
|
||||
{ return usage_; }
|
||||
|
||||
auto run_is_canceled() const noexcept
|
||||
{ return cancel_run_called_; }
|
||||
|
||||
private:
|
||||
using receive_channel_type = asio::experimental::channel<executor_type, void(system::error_code, std::size_t)>;
|
||||
using runner_type = runner<executor_type>;
|
||||
using adapter_type = std::function<void(std::size_t, resp3::basic_node<std::string_view> const&, system::error_code&)>;
|
||||
using receiver_adapter_type = std::function<void(resp3::basic_node<std::string_view> const&, system::error_code&)>;
|
||||
using exec_notifier_type = receive_channel_type;
|
||||
|
||||
auto use_ssl() const noexcept
|
||||
{ return runner_.get_config().use_ssl;}
|
||||
@@ -527,10 +526,10 @@ private:
|
||||
{
|
||||
BOOST_ASSERT(ptr != nullptr);
|
||||
|
||||
if (ptr->is_written()) {
|
||||
return !ptr->req_->get_config().cancel_if_unresponded;
|
||||
} else {
|
||||
if (ptr->is_waiting()) {
|
||||
return !ptr->req_->get_config().cancel_on_connection_lost;
|
||||
} else {
|
||||
return !ptr->req_->get_config().cancel_if_unresponded;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -543,8 +542,9 @@ private:
|
||||
});
|
||||
|
||||
reqs_.erase(point, std::end(reqs_));
|
||||
|
||||
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
|
||||
return ptr->reset_status();
|
||||
return ptr->mark_waiting();
|
||||
});
|
||||
|
||||
return ret;
|
||||
@@ -555,7 +555,7 @@ private:
|
||||
auto f = [](auto const& ptr)
|
||||
{
|
||||
BOOST_ASSERT(ptr != nullptr);
|
||||
return ptr->is_written();
|
||||
return !ptr->is_waiting();
|
||||
};
|
||||
|
||||
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);
|
||||
@@ -579,6 +579,12 @@ private:
|
||||
} break;
|
||||
case operation::run:
|
||||
{
|
||||
// Protects the code below from being called more than
|
||||
// once, see https://github.com/boostorg/redis/issues/181
|
||||
if (std::exchange(cancel_run_called_, true)) {
|
||||
return;
|
||||
}
|
||||
|
||||
close();
|
||||
writer_timer_.cancel();
|
||||
receive_channel_.cancel();
|
||||
@@ -605,8 +611,9 @@ private:
|
||||
// partition of unwritten requests instead of them all.
|
||||
std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
|
||||
BOOST_ASSERT_MSG(ptr != nullptr, "Expects non-null pointer.");
|
||||
if (ptr->is_staged())
|
||||
if (ptr->is_staged()) {
|
||||
ptr->mark_written();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -615,25 +622,15 @@ private:
|
||||
using node_type = resp3::basic_node<std::string_view>;
|
||||
using wrapped_adapter_type = std::function<void(node_type const&, system::error_code&)>;
|
||||
|
||||
enum class action
|
||||
{
|
||||
stop,
|
||||
proceed,
|
||||
none,
|
||||
};
|
||||
|
||||
explicit req_info(request const& req, adapter_type adapter, executor_type ex)
|
||||
: timer_{ex}
|
||||
, action_{action::none}
|
||||
: notifier_{ex, 1}
|
||||
, req_{&req}
|
||||
, adapter_{}
|
||||
, expected_responses_{req.get_expected_responses()}
|
||||
, status_{status::none}
|
||||
, status_{status::waiting}
|
||||
, ec_{{}}
|
||||
, read_size_{0}
|
||||
{
|
||||
timer_.expires_at((std::chrono::steady_clock::time_point::max)());
|
||||
|
||||
adapter_ = [this, adapter](node_type const& nd, system::error_code& ec)
|
||||
{
|
||||
auto const i = req_->get_expected_responses() - expected_responses_;
|
||||
@@ -643,18 +640,16 @@ private:
|
||||
|
||||
auto proceed()
|
||||
{
|
||||
timer_.cancel();
|
||||
action_ = action::proceed;
|
||||
notifier_.try_send(std::error_code{}, 0);
|
||||
}
|
||||
|
||||
void stop()
|
||||
{
|
||||
timer_.cancel();
|
||||
action_ = action::stop;
|
||||
notifier_.close();
|
||||
}
|
||||
|
||||
[[nodiscard]] auto is_waiting_write() const noexcept
|
||||
{ return !is_written() && !is_staged(); }
|
||||
[[nodiscard]] auto is_waiting() const noexcept
|
||||
{ return status_ == status::waiting; }
|
||||
|
||||
[[nodiscard]] auto is_written() const noexcept
|
||||
{ return status_ == status::written; }
|
||||
@@ -668,27 +663,26 @@ private:
|
||||
void mark_staged() noexcept
|
||||
{ status_ = status::staged; }
|
||||
|
||||
void reset_status() noexcept
|
||||
{ status_ = status::none; }
|
||||
void mark_waiting() noexcept
|
||||
{ status_ = status::waiting; }
|
||||
|
||||
[[nodiscard]] auto stop_requested() const noexcept
|
||||
{ return action_ == action::stop;}
|
||||
{ return !notifier_.is_open();}
|
||||
|
||||
template <class CompletionToken>
|
||||
auto async_wait(CompletionToken token)
|
||||
{
|
||||
return timer_.async_wait(std::move(token));
|
||||
return notifier_.async_receive(std::move(token));
|
||||
}
|
||||
|
||||
//private:
|
||||
enum class status
|
||||
{ none
|
||||
{ waiting
|
||||
, staged
|
||||
, written
|
||||
};
|
||||
|
||||
timer_type timer_;
|
||||
action action_;
|
||||
exec_notifier_type notifier_;
|
||||
request const* req_;
|
||||
wrapped_adapter_type adapter_;
|
||||
|
||||
@@ -716,7 +710,7 @@ private:
|
||||
void cancel_push_requests()
|
||||
{
|
||||
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
|
||||
return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0);
|
||||
return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0);
|
||||
});
|
||||
|
||||
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
|
||||
@@ -737,7 +731,7 @@ private:
|
||||
|
||||
if (info->req_->has_hello_priority()) {
|
||||
auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
|
||||
return e->is_waiting_write();
|
||||
return e->is_waiting();
|
||||
});
|
||||
|
||||
std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
|
||||
@@ -781,7 +775,7 @@ private:
|
||||
// Coalesces the requests and marks them staged. After a
|
||||
// successful write staged requests will be marked as written.
|
||||
auto const point = std::partition_point(std::cbegin(reqs_), std::cend(reqs_), [](auto const& ri) {
|
||||
return !ri->is_waiting_write();
|
||||
return !ri->is_waiting();
|
||||
});
|
||||
|
||||
std::for_each(point, std::cend(reqs_), [this](auto const& ri) {
|
||||
@@ -798,7 +792,14 @@ private:

bool is_waiting_response() const noexcept
{
return !std::empty(reqs_) && reqs_.front()->is_written();
if (std::empty(reqs_))
return false;

// Under load and on low-latency networks we might start
// receiving responses before the write operation has completed and
// the request is still marked as staged and not written. See
// https://github.com/boostorg/redis/issues/170
return !reqs_.front()->is_waiting();
}

void close()
@@ -814,36 +815,39 @@ private:

auto is_next_push()
{
// We handle unsolicited events in the following way:
//
// 1. Its resp3 type is a push.
//
// 2. A non-push type is received with an empty requests
// queue. I have noticed this is possible (e.g. -MISCONF).
// I expect them to have type push so we can distinguish
// them from responses to commands, but it is a
// simple-error. If we are lucky enough to receive them
// when the command queue is empty we can treat them as
// server pushes, otherwise it is impossible to handle
// them properly
//
// 3. The request does not expect any response but we got
// one. This may happen if, for example, SUBSCRIBE is used
// with the wrong syntax.
//
// Useful links:
BOOST_ASSERT(!read_buffer_.empty());

// Useful links to understand the heuristics below.
//
// - https://github.com/redis/redis/issues/11784
// - https://github.com/redis/redis/issues/6426
//
// - https://github.com/boostorg/redis/issues/170

BOOST_ASSERT(!read_buffer_.empty());
// The message's resp3 type is a push.
if (resp3::to_type(read_buffer_.front()) == resp3::type::push)
return true;

return
(resp3::to_type(read_buffer_.front()) == resp3::type::push)
|| reqs_.empty()
|| (!reqs_.empty() && reqs_.front()->expected_responses_ == 0)
|| !is_waiting_response(); // Added to deal with MONITOR.
// This is a non-push type and the requests queue is empty. I have
// noticed this is possible, for example with -MISCONF. I don't
// know why they are not sent with a push type so we can
// distinguish them from responses to commands. If we are lucky
// enough to receive them when the command queue is empty they
// can be treated as server pushes, otherwise it is impossible
// to handle them properly.
if (reqs_.empty())
return true;

// The request does not expect any response but we got one. This
// may happen if, for example, SUBSCRIBE is used with the wrong syntax.
if (reqs_.front()->expected_responses_ == 0)
return true;

// Added to deal with MONITOR and also to fix issue 170, which
// happens under load and on low-latency networks, where we
// might start receiving responses before the write operation
// has completed and the request is still marked as staged and not
// written.
return reqs_.front()->is_waiting();
}
|
||||
|
||||
auto get_suggested_buffer_growth() const noexcept
|
||||
@@ -928,6 +932,7 @@ private:
|
||||
read_buffer_.clear();
|
||||
parser_.reset();
|
||||
on_push_ = false;
|
||||
cancel_run_called_ = false;
|
||||
}
|
||||
|
||||
asio::ssl::context ctx_;
|
||||
@@ -949,6 +954,7 @@ private:
|
||||
reqs_type reqs_;
|
||||
resp3::parser parser_{};
|
||||
bool on_push_ = false;
|
||||
bool cancel_run_called_ = false;
|
||||
|
||||
usage usage_;
|
||||
};
|
||||
|
||||
@@ -30,6 +30,8 @@
|
||||
namespace boost::redis::detail
|
||||
{
|
||||
|
||||
void push_hello(config const& cfg, request& req);
|
||||
|
||||
template <class Runner, class Connection, class Logger>
|
||||
struct hello_op {
|
||||
Runner* runner_ = nullptr;
|
||||
@@ -42,9 +44,6 @@ struct hello_op {
|
||||
{
|
||||
BOOST_ASIO_CORO_REENTER (coro_)
|
||||
{
|
||||
runner_->hello_req_.clear();
|
||||
if (runner_->hello_resp_.has_value())
|
||||
runner_->hello_resp_.value().clear();
|
||||
runner_->add_hello();
|
||||
|
||||
BOOST_ASIO_CORO_YIELD
|
||||
@@ -232,17 +231,10 @@ private:
|
||||
|
||||
void add_hello()
|
||||
{
|
||||
if (!cfg_.username.empty() && !cfg_.password.empty() && !cfg_.clientname.empty())
|
||||
hello_req_.push("HELLO", "3", "AUTH", cfg_.username, cfg_.password, "SETNAME", cfg_.clientname);
|
||||
else if (cfg_.username.empty() && cfg_.password.empty() && cfg_.clientname.empty())
|
||||
hello_req_.push("HELLO", "3");
|
||||
else if (cfg_.clientname.empty())
|
||||
hello_req_.push("HELLO", "3", "AUTH", cfg_.username, cfg_.password);
|
||||
else
|
||||
hello_req_.push("HELLO", "3", "SETNAME", cfg_.clientname);
|
||||
|
||||
if (cfg_.database_index && cfg_.database_index.value() != 0)
|
||||
hello_req_.push("SELECT", cfg_.database_index.value());
|
||||
hello_req_.clear();
|
||||
if (hello_resp_.has_value())
|
||||
hello_resp_.value().clear();
|
||||
push_hello(cfg_, hello_req_);
|
||||
}
|
||||
|
||||
bool has_error_in_response() const noexcept
|
||||
|
||||
@@ -10,16 +10,16 @@ namespace boost::redis {
|
||||
|
||||
connection::connection(
|
||||
executor_type ex,
|
||||
asio::ssl::context::method method,
|
||||
asio::ssl::context ctx,
|
||||
std::size_t max_read_size)
|
||||
: impl_{ex, method, max_read_size}
|
||||
: impl_{ex, std::move(ctx), max_read_size}
|
||||
{ }
|
||||
|
||||
connection::connection(
|
||||
asio::io_context& ioc,
|
||||
asio::ssl::context::method method,
|
||||
asio::ssl::context ctx,
|
||||
std::size_t max_read_size)
|
||||
: impl_{ioc.get_executor(), method, max_read_size}
|
||||
: impl_{ioc.get_executor(), std::move(ctx), max_read_size}
|
||||
{ }
|
||||
|
||||
void
|
||||
|
||||
27
include/boost/redis/impl/runner.ipp
Normal file
@@ -0,0 +1,27 @@
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/

#include <boost/redis/detail/runner.hpp>

namespace boost::redis::detail
{

void push_hello(config const& cfg, request& req)
{
if (!cfg.username.empty() && !cfg.password.empty() && !cfg.clientname.empty())
req.push("HELLO", "3", "AUTH", cfg.username, cfg.password, "SETNAME", cfg.clientname);
else if (cfg.password.empty() && cfg.clientname.empty())
req.push("HELLO", "3");
else if (cfg.clientname.empty())
req.push("HELLO", "3", "AUTH", cfg.username, cfg.password);
else
req.push("HELLO", "3", "SETNAME", cfg.clientname);

if (cfg.database_index && cfg.database_index.value() != 0)
req.push("SELECT", cfg.database_index.value());
}

} // boost::redis::detail
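push_hello is the sans-io helper extracted in this commit: it maps a config to the HELLO/AUTH/SETNAME/SELECT commands that the runner issues on connect, and it is what the new config_to_hello unit tests near the end of this diff exercise. A small sketch combining the AUTH, SETNAME and SELECT branches; the payload follows from the RESP3 encoding used in those tests:

#include <boost/redis/detail/runner.hpp> // declares boost::redis::detail::push_hello
#include <boost/redis/request.hpp>
#include <boost/redis/config.hpp>

boost::redis::config cfg;   // default clientname is "Boost.Redis"
cfg.username = "foo";
cfg.password = "bar";
cfg.database_index = 10;

boost::redis::request req;
boost::redis::detail::push_hello(cfg, req);

// req.payload() now contains
//   "*7\r\n$5\r\nHELLO\r\n$1\r\n3\r\n$4\r\nAUTH\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
//   "$7\r\nSETNAME\r\n$11\r\nBoost.Redis\r\n"
//   "*2\r\n$6\r\nSELECT\r\n$2\r\n10\r\n"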
@@ -47,31 +47,31 @@ class request {
public:
/// Request configuration options.
struct config {
/** \brief If `true`
* `boost::redis::connection::async_exec` will complete with error if the
* connection is lost. Affects only requests that haven't been
* sent yet.
/** \brief If `true` calls to `connection::async_exec` will
* complete with error if the connection is lost while the
* request hasn't been sent yet.
*/
bool cancel_on_connection_lost = true;

/** \brief If `true` the request will complete with
* boost::redis::error::not_connected if `async_exec` is called before
* the connection with Redis was established.
/** \brief If `true` `connection::async_exec` will complete with
* `boost::redis::error::not_connected` if the call happens
* before the connection with Redis was established.
*/
bool cancel_if_not_connected = false;

/** \brief If `false` `boost::redis::connection::async_exec` will not
/** \brief If `false` `connection::async_exec` will not
* automatically cancel this request if the connection is lost.
* Affects only requests that have been written to the socket
* but remained unresponded when `boost::redis::connection::async_run`
* completed.
* but remained unresponded when
* `boost::redis::connection::async_run` completed.
*/
bool cancel_if_unresponded = true;

/** \brief If this request has a `HELLO` command and this flag is
* `true`, the `boost::redis::connection` will move it to the front of
* the queue of awaiting requests. This makes it possible to
* send `HELLO` and authenticate before other commands are sent.
/** \brief If this request has a `HELLO` command and this flag
* is `true`, the `boost::redis::connection` will move it to the
* front of the queue of awaiting requests. This makes it
* possible to send `HELLO` and authenticate before other
* commands are sent.
*/
bool hello_with_priority = true;
};
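The flags above control how a request interacts with connection loss and reconnection. A hedged sketch of a long-lived subscription request that should not be cancelled automatically, assuming request keeps its constructor taking a request::config as in previous releases and a running connection conn (rcfg, req and "events" are illustrative names):

#include <boost/redis/request.hpp>
#include <boost/redis/ignore.hpp>
#include <boost/asio/detached.hpp>

boost::redis::request::config rcfg;
rcfg.cancel_on_connection_lost = false; // keep it queued if the connection drops before it is sent
rcfg.cancel_if_unresponded = false;     // keep it alive if async_run completes before a response arrives
boost::redis::request req{rcfg};        // assumed constructor taking request::config
req.push("SUBSCRIBE", "events");

conn.async_exec(req, boost::redis::ignore, boost::asio::detached);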
@@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
|
||||
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
|
||||
*
|
||||
* Distributed under the Boost Software License, Version 1.0. (See
|
||||
* accompanying file LICENSE.txt)
|
||||
@@ -13,7 +13,7 @@
|
||||
|
||||
namespace boost::redis::resp3 {
|
||||
|
||||
void to_int(int_type& i, std::string_view sv, system::error_code& ec)
|
||||
void to_int(std::size_t& i, std::string_view sv, system::error_code& ec)
|
||||
{
|
||||
auto const res = std::from_chars(sv.data(), sv.data() + std::size(sv), i);
|
||||
if (res.ec != std::errc())
|
||||
@@ -29,7 +29,7 @@ void parser::reset()
|
||||
{
|
||||
depth_ = 0;
|
||||
sizes_ = {{1}};
|
||||
bulk_length_ = (std::numeric_limits<unsigned long>::max)();
|
||||
bulk_length_ = (std::numeric_limits<std::size_t>::max)();
|
||||
bulk_ = type::invalid;
|
||||
consumed_ = 0;
|
||||
sizes_[0] = 2; // The sentinel must be more than 1.
|
||||
@@ -189,7 +189,7 @@ parser::consume_impl(
|
||||
case type::attribute:
|
||||
case type::map:
|
||||
{
|
||||
int_type l = -1;
|
||||
std::size_t l = -1;
|
||||
to_int(l, elem, ec);
|
||||
if (ec)
|
||||
return {};
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
|
||||
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
|
||||
*
|
||||
* Distributed under the Boost Software License, Version 1.0. (See
|
||||
* accompanying file LICENSE.txt)
|
||||
@@ -16,8 +16,6 @@
|
||||
|
||||
namespace boost::redis::resp3 {
|
||||
|
||||
using int_type = std::uint64_t;
|
||||
|
||||
class parser {
|
||||
public:
|
||||
using node_type = basic_node<std::string_view>;
|
||||
@@ -38,7 +36,7 @@ private:
|
||||
std::array<std::size_t, max_embedded_depth + 1> sizes_;
|
||||
|
||||
// Contains the length expected in the next bulk read.
|
||||
int_type bulk_length_;
|
||||
std::size_t bulk_length_;
|
||||
|
||||
// The type of the next bulk. Contains type::invalid if no bulk is
|
||||
// expected.
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#include <boost/redis/impl/ignore.ipp>
|
||||
#include <boost/redis/impl/connection.ipp>
|
||||
#include <boost/redis/impl/response.ipp>
|
||||
#include <boost/redis/impl/runner.ipp>
|
||||
#include <boost/redis/resp3/impl/type.ipp>
|
||||
#include <boost/redis/resp3/impl/parser.ipp>
|
||||
#include <boost/redis/resp3/impl/serialization.ipp>
|
||||
|
||||
@@ -23,13 +23,15 @@ macro(make_test TEST_NAME STANDARD)
|
||||
boost_redis_src
|
||||
boost_redis_tests_common
|
||||
boost_redis_project_options
|
||||
Boost::unit_test_framework
|
||||
)
|
||||
target_compile_features(${EXE_NAME} PRIVATE cxx_std_${STANDARD})
|
||||
add_test(${EXE_NAME} ${EXE_NAME})
|
||||
endmacro()
|
||||
|
||||
make_test(test_conn_quit 17)
|
||||
make_test(test_conn_tls 17)
|
||||
# TODO: Configure a Redis server with TLS in the CI and reenable this test.
|
||||
#make_test(test_conn_tls 17)
|
||||
make_test(test_low_level 17)
|
||||
make_test(test_conn_exec_retry 17)
|
||||
make_test(test_conn_exec_error 17)
|
||||
@@ -46,6 +48,7 @@ make_test(test_conn_exec_cancel2 20)
|
||||
make_test(test_conn_echo_stress 20)
|
||||
make_test(test_conn_run_cancel 20)
|
||||
make_test(test_issue_50 20)
|
||||
make_test(test_issue_181 17)
|
||||
|
||||
# Coverage
|
||||
set(
|
||||
@@ -70,4 +73,4 @@ add_custom_target(
|
||||
COMMAND ${COVERAGE_HTML_COMMAND}
|
||||
COMMENT "Generating coverage report"
|
||||
VERBATIM
|
||||
)
|
||||
)
|
||||
|
||||
@@ -2,73 +2,10 @@ cmake_minimum_required(VERSION 3.5...3.22)
|
||||
|
||||
project(cmake_subdir_test LANGUAGES CXX)
|
||||
|
||||
# Generated by boostdep --brief redis
|
||||
set(_DEPENDENCIES
|
||||
# Primary dependencies
|
||||
asio
|
||||
assert
|
||||
core
|
||||
mp11
|
||||
system
|
||||
throw_exception
|
||||
|
||||
# Secondary dependencies
|
||||
align
|
||||
array
|
||||
bind
|
||||
chrono
|
||||
config
|
||||
context
|
||||
coroutine
|
||||
date_time
|
||||
exception
|
||||
"function"
|
||||
regex
|
||||
smart_ptr
|
||||
type_traits
|
||||
utility
|
||||
static_assert
|
||||
variant2
|
||||
winapi
|
||||
integer
|
||||
move
|
||||
mpl
|
||||
predef
|
||||
ratio
|
||||
typeof
|
||||
pool
|
||||
algorithm
|
||||
io
|
||||
lexical_cast
|
||||
numeric/conversion
|
||||
range
|
||||
tokenizer
|
||||
tuple
|
||||
preprocessor
|
||||
concept_check
|
||||
container_hash
|
||||
iterator
|
||||
unordered
|
||||
describe
|
||||
container
|
||||
conversion
|
||||
detail
|
||||
optional
|
||||
rational
|
||||
intrusive
|
||||
function_types
|
||||
fusion
|
||||
functional
|
||||
)
|
||||
set(BOOST_INCLUDE_LIBRARIES redis)
|
||||
|
||||
# Build our dependencies, so the targets Boost::xxx are defined
|
||||
set(_BOOST_ROOT ../../../..)
|
||||
foreach(_DEPENDENCY IN LISTS _DEPENDENCIES)
|
||||
add_subdirectory(${_BOOST_ROOT}/libs/${_DEPENDENCY} boostorg/${_DEPENDENCY})
|
||||
endforeach()
|
||||
|
||||
# Build our project
|
||||
add_subdirectory(${_BOOST_ROOT}/libs/redis boostorg/redis)
|
||||
add_subdirectory(../../../.. boostorg/boost)
|
||||
|
||||
add_executable(main main.cpp)
|
||||
target_link_libraries(main PRIVATE Boost::redis)
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#include "common.hpp"
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
#include <boost/asio/consign.hpp>
|
||||
#include <boost/asio/co_spawn.hpp>
|
||||
|
||||
|
||||
namespace net = boost::asio;
|
||||
|
||||
struct run_callback {
|
||||
@@ -29,6 +29,27 @@ run(
|
||||
conn->async_run(cfg, {l}, run_callback{conn, op, ec});
|
||||
}
|
||||
|
||||
std::string safe_getenv(const char* name, const char* default_value)
|
||||
{
|
||||
// MSVC doesn't like getenv
|
||||
#ifdef BOOST_MSVC
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4996)
|
||||
#endif
|
||||
const char* res = std::getenv(name);
|
||||
#ifdef BOOST_MSVC
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
return res ? res : default_value;
|
||||
}
|
||||
|
||||
boost::redis::config make_test_config()
|
||||
{
|
||||
boost::redis::config cfg;
|
||||
cfg.addr.host = safe_getenv("BOOST_REDIS_TEST_SERVER", "localhost");
|
||||
return cfg;
|
||||
}
|
||||
|
||||
#ifdef BOOST_ASIO_HAS_CO_AWAIT
|
||||
auto start(net::awaitable<void> op) -> int
|
||||
{
|
||||
@@ -48,5 +69,4 @@ auto start(net::awaitable<void> op) -> int
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
#endif // BOOST_ASIO_HAS_CO_AWAIT
|
||||
|
||||
@@ -15,10 +15,12 @@ auto redir(boost::system::error_code& ec)
|
||||
auto start(boost::asio::awaitable<void> op) -> int;
|
||||
#endif // BOOST_ASIO_HAS_CO_AWAIT
|
||||
|
||||
boost::redis::config make_test_config();
|
||||
|
||||
void
|
||||
run(
|
||||
std::shared_ptr<boost::redis::connection> conn,
|
||||
boost::redis::config cfg = {},
|
||||
boost::redis::config cfg = make_test_config(),
|
||||
boost::system::error_code ec = boost::asio::error::operation_aborted,
|
||||
boost::redis::operation op = boost::redis::operation::receive,
|
||||
boost::redis::logger::level l = boost::redis::logger::level::disabled);
|
||||
|
||||
@@ -22,11 +22,9 @@ using boost::redis::ignore;
|
||||
using boost::redis::operation;
|
||||
using boost::redis::generic_response;
|
||||
using boost::redis::consume_one;
|
||||
using redis::config;
|
||||
|
||||
// TODO: Test cancel(health_check)
|
||||
|
||||
|
||||
struct push_callback {
|
||||
connection* conn1;
|
||||
connection* conn2;
|
||||
@@ -73,14 +71,12 @@ struct push_callback {
|
||||
BOOST_AUTO_TEST_CASE(check_health)
|
||||
{
|
||||
net::io_context ioc;
|
||||
|
||||
|
||||
connection conn1{ioc};
|
||||
|
||||
request req1;
|
||||
req1.push("CLIENT", "PAUSE", "10000", "ALL");
|
||||
|
||||
config cfg1;
|
||||
auto cfg1 = make_test_config();
|
||||
cfg1.health_check_id = "conn1";
|
||||
cfg1.reconnect_wait_interval = std::chrono::seconds::zero();
|
||||
error_code res1;
|
||||
@@ -95,7 +91,7 @@ BOOST_AUTO_TEST_CASE(check_health)
|
||||
// sending MONITOR. I will therefore open a second connection.
|
||||
connection conn2{ioc};
|
||||
|
||||
config cfg2;
|
||||
auto cfg2 = make_test_config();
|
||||
cfg2.health_check_id = "conn2";
|
||||
error_code res2;
|
||||
conn2.async_run(cfg2, {}, [&](auto ec){
|
||||
|
||||
@@ -24,7 +24,6 @@ using boost::redis::response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::ignore_t;
|
||||
using boost::redis::logger;
|
||||
using boost::redis::config;
|
||||
using boost::redis::connection;
|
||||
using boost::redis::usage;
|
||||
using boost::redis::error;
|
||||
@@ -79,7 +78,7 @@ echo_session(
|
||||
auto async_echo_stress(std::shared_ptr<connection> conn) -> net::awaitable<void>
|
||||
{
|
||||
auto ex = co_await net::this_coro::executor;
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = std::chrono::seconds::zero();
|
||||
run(conn, cfg,
|
||||
boost::asio::error::operation_aborted,
|
||||
@@ -92,14 +91,14 @@ auto async_echo_stress(std::shared_ptr<connection> conn) -> net::awaitable<void>
|
||||
|
||||
// Number of coroutines that will send pings sharing the same
|
||||
// connection to redis.
|
||||
int const sessions = 1000;
|
||||
int const sessions = 150;
|
||||
|
||||
// The number of pings that will be sent by each session.
|
||||
int const msgs = 500;
|
||||
int const msgs = 200;
|
||||
|
||||
// The number of publishes that will be sent by each session with
|
||||
// each message.
|
||||
int const n_pubs = 100;
|
||||
int const n_pubs = 25;
|
||||
|
||||
// This is the total number of pushes we will receive.
|
||||
int total_pushes = sessions * msgs * n_pubs + 1;
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
|
||||
#include <boost/redis/connection.hpp>
|
||||
#include <boost/system/errc.hpp>
|
||||
#include <boost/asio/detached.hpp>
|
||||
#define BOOST_TEST_MODULE conn-exec
|
||||
#include <boost/test/included/unit_test.hpp>
|
||||
#include <iostream>
|
||||
@@ -17,13 +18,13 @@
|
||||
// container.
|
||||
|
||||
namespace net = boost::asio;
|
||||
using boost::redis::config;
|
||||
using boost::redis::connection;
|
||||
using boost::redis::request;
|
||||
using boost::redis::response;
|
||||
using boost::redis::generic_response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::operation;
|
||||
using boost::redis::config;
|
||||
using boost::redis::request;
|
||||
using boost::redis::response;
|
||||
|
||||
// Sends three requests where one of them has a hello with a priority
|
||||
// set, which means it should be executed first.
|
||||
@@ -122,7 +123,7 @@ BOOST_AUTO_TEST_CASE(cancel_request_if_not_connected)
|
||||
|
||||
BOOST_AUTO_TEST_CASE(correct_database)
|
||||
{
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.database_index = 2;
|
||||
|
||||
net::io_context ioc;
|
||||
@@ -154,3 +155,36 @@ BOOST_AUTO_TEST_CASE(correct_database)
|
||||
BOOST_CHECK_EQUAL(cfg.database_index.value(), index);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(large_number_of_concurrent_requests_issue_170)
|
||||
{
|
||||
// See https://github.com/boostorg/redis/issues/170
|
||||
|
||||
std::string payload;
|
||||
payload.resize(1024);
|
||||
std::fill(std::begin(payload), std::end(payload), 'A');
|
||||
|
||||
net::io_context ioc;
|
||||
auto conn = std::make_shared<connection>(ioc);
|
||||
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = std::chrono::seconds(0);
|
||||
conn->async_run(cfg, {}, net::detached);
|
||||
|
||||
int counter = 0;
|
||||
int const repeat = 8000;
|
||||
|
||||
for (int i = 0; i < repeat; ++i) {
|
||||
auto req = std::make_shared<request>();
|
||||
req->push("PING", payload);
|
||||
conn->async_exec(*req, ignore, [req, &counter, conn](auto ec, auto) {
|
||||
BOOST_TEST(!ec);
|
||||
if (++counter == repeat)
|
||||
conn->cancel();
|
||||
});
|
||||
}
|
||||
|
||||
ioc.run();
|
||||
|
||||
BOOST_CHECK_EQUAL(counter, repeat);
|
||||
}
|
||||
|
||||
|
||||
@@ -29,7 +29,6 @@ using boost::redis::response;
|
||||
using boost::redis::generic_response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::ignore_t;
|
||||
using boost::redis::config;
|
||||
using boost::redis::logger;
|
||||
using boost::redis::connection;
|
||||
using namespace std::chrono_literals;
|
||||
@@ -39,8 +38,8 @@ auto implicit_cancel_of_req_written() -> net::awaitable<void>
|
||||
auto ex = co_await net::this_coro::executor;
|
||||
auto conn = std::make_shared<connection>(ex);
|
||||
|
||||
config cfg;
|
||||
cfg.health_check_interval = std::chrono::seconds{0};
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = std::chrono::seconds::zero();
|
||||
run(conn, cfg);
|
||||
|
||||
// See NOTE1.
|
||||
@@ -106,7 +105,7 @@ BOOST_AUTO_TEST_CASE(test_cancel_of_req_written_on_run_canceled)
|
||||
|
||||
conn->async_exec(req0, ignore, c0);
|
||||
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = std::chrono::seconds{5};
|
||||
run(conn);
|
||||
|
||||
|
||||
@@ -25,7 +25,6 @@ using boost::redis::ignore_t;
|
||||
using boost::redis::error;
|
||||
using boost::redis::logger;
|
||||
using boost::redis::operation;
|
||||
using redis::config;
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
BOOST_AUTO_TEST_CASE(no_ignore_error)
|
||||
|
||||
@@ -57,12 +57,12 @@ BOOST_AUTO_TEST_CASE(request_retry_false)
|
||||
|
||||
auto c2 = [&](auto ec, auto){
|
||||
std::cout << "c2" << std::endl;
|
||||
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
|
||||
BOOST_CHECK_EQUAL(ec, boost::asio::error::operation_aborted);
|
||||
};
|
||||
|
||||
auto c1 = [&](auto ec, auto){
|
||||
std::cout << "c1" << std::endl;
|
||||
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
|
||||
BOOST_CHECK_EQUAL(ec, boost::asio::error::operation_aborted);
|
||||
};
|
||||
|
||||
auto c0 = [&](auto ec, auto){
|
||||
@@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(request_retry_false)
|
||||
|
||||
conn->async_exec(req0, ignore, c0);
|
||||
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = 5s;
|
||||
run(conn);
|
||||
|
||||
@@ -137,7 +137,7 @@ BOOST_AUTO_TEST_CASE(request_retry_true)
|
||||
|
||||
conn->async_exec(req0, ignore, c0);
|
||||
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = 5s;
|
||||
conn->async_run(cfg, {}, [&](auto ec){
|
||||
std::cout << ec.message() << std::endl;
|
||||
|
||||
@@ -27,7 +27,6 @@ using boost::redis::response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::ignore_t;
|
||||
using boost::system::error_code;
|
||||
using redis::config;
|
||||
using boost::redis::logger;
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
@@ -67,7 +66,7 @@ BOOST_AUTO_TEST_CASE(receives_push_waiting_resps)
|
||||
|
||||
conn->async_exec(req1, ignore, c1);
|
||||
|
||||
run(conn, {}, {});
|
||||
run(conn, make_test_config(), {});
|
||||
|
||||
bool push_received = false;
|
||||
conn->async_receive([&, conn](auto ec, auto){
|
||||
@@ -217,7 +216,8 @@ BOOST_AUTO_TEST_CASE(test_push_adapter)
|
||||
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
|
||||
});
|
||||
|
||||
conn->async_run({}, {}, [](auto ec){
|
||||
auto cfg = make_test_config();
|
||||
conn->async_run(cfg, {}, [](auto ec){
|
||||
BOOST_CHECK_EQUAL(ec, boost::redis::error::incompatible_size);
|
||||
});
|
||||
|
||||
@@ -257,9 +257,8 @@ BOOST_AUTO_TEST_CASE(many_subscribers)
|
||||
|
||||
auto c11 =[&](auto ec, auto...)
|
||||
{
|
||||
std::cout << "quit sent" << std::endl;
|
||||
std::cout << "quit sent: " << ec.message() << std::endl;
|
||||
conn->cancel(operation::reconnection);
|
||||
BOOST_TEST(!ec);
|
||||
};
|
||||
auto c10 =[&](auto ec, auto...)
|
||||
{
|
||||
@@ -319,7 +318,7 @@ BOOST_AUTO_TEST_CASE(many_subscribers)
|
||||
|
||||
conn->async_exec(req0, ignore, c0);
|
||||
|
||||
run(conn, {}, {});
|
||||
run(conn, make_test_config(), {});
|
||||
|
||||
net::co_spawn(ioc.get_executor(), push_consumer3(conn), net::detached);
|
||||
ioc.run();
|
||||
|
||||
@@ -18,7 +18,6 @@ using boost::redis::operation;
|
||||
using boost::redis::request;
|
||||
using boost::redis::response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::config;
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_eof_no_error)
|
||||
@@ -62,7 +61,7 @@ BOOST_AUTO_TEST_CASE(test_async_run_exits)
|
||||
auto c3 = [](auto ec, auto)
|
||||
{
|
||||
std::clog << "c3: " << ec.message() << std::endl;
|
||||
BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled);
|
||||
BOOST_CHECK_EQUAL(ec, boost::asio::error::operation_aborted);
|
||||
};
|
||||
|
||||
auto c2 = [&](auto ec, auto)
|
||||
@@ -83,7 +82,7 @@ BOOST_AUTO_TEST_CASE(test_async_run_exits)
|
||||
|
||||
// The healthy checker should not be the cause of async_run
|
||||
// completing, so we disable.
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = 0s;
|
||||
cfg.reconnect_wait_interval = 0s;
|
||||
run(conn, cfg);
|
||||
|
||||
@@ -19,7 +19,6 @@ using boost::system::error_code;
|
||||
using boost::redis::request;
|
||||
using boost::redis::response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::config;
|
||||
using boost::redis::logger;
|
||||
using boost::redis::operation;
|
||||
using boost::redis::connection;
|
||||
@@ -40,7 +39,7 @@ net::awaitable<void> test_reconnect_impl()
|
||||
int i = 0;
|
||||
for (; i < 5; ++i) {
|
||||
error_code ec1, ec2;
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
logger l;
|
||||
co_await conn->async_exec(req, ignore, net::redirect_error(net::use_awaitable, ec1));
|
||||
//BOOST_TEST(!ec);
|
||||
@@ -76,7 +75,7 @@ auto async_test_reconnect_timeout() -> net::awaitable<void>
|
||||
req1.push("BLPOP", "any", 0);
|
||||
|
||||
st.expires_after(std::chrono::seconds{1});
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
co_await (
|
||||
conn->async_exec(req1, ignore, redir(ec1)) ||
|
||||
st.async_wait(redir(ec3))
|
||||
@@ -100,7 +99,7 @@ auto async_test_reconnect_timeout() -> net::awaitable<void>
|
||||
|
||||
std::cout << "ccc" << std::endl;
|
||||
|
||||
BOOST_CHECK_EQUAL(ec1, boost::system::errc::errc_t::operation_canceled);
|
||||
BOOST_CHECK_EQUAL(ec1, boost::asio::error::operation_aborted);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_reconnect_and_idle)
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
namespace net = boost::asio;
|
||||
|
||||
using boost::redis::operation;
|
||||
using boost::redis::config;
|
||||
using boost::redis::connection;
|
||||
using boost::system::error_code;
|
||||
using net::experimental::as_tuple;
|
||||
@@ -41,7 +40,7 @@ auto async_cancel_run_with_timer() -> net::awaitable<void>
|
||||
st.expires_after(1s);
|
||||
|
||||
error_code ec1, ec2;
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
logger l;
|
||||
co_await (conn.async_run(cfg, l, redir(ec1)) || st.async_wait(redir(ec2)));
|
||||
|
||||
@@ -67,7 +66,7 @@ async_check_cancellation_not_missed(int n, std::chrono::milliseconds ms) -> net:
|
||||
for (auto i = 0; i < n; ++i) {
|
||||
timer.expires_after(ms);
|
||||
error_code ec1, ec2;
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
logger l;
|
||||
co_await (conn.async_run(cfg, l, redir(ec1)) || timer.async_wait(redir(ec2)));
|
||||
BOOST_CHECK_EQUAL(ec1, boost::asio::error::operation_aborted);
|
||||
|
||||
@@ -25,7 +25,7 @@ bool verify_certificate(bool, net::ssl::verify_context&)
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ping)
|
||||
config make_tls_config()
|
||||
{
|
||||
config cfg;
|
||||
cfg.use_ssl = true;
|
||||
@@ -34,7 +34,12 @@ BOOST_AUTO_TEST_CASE(ping)
|
||||
cfg.addr.host = "db.occase.de";
|
||||
cfg.addr.port = "6380";
|
||||
//cfg.health_check_interval = std::chrono::seconds{0};
|
||||
return cfg;
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ping_internal_ssl_context)
|
||||
{
|
||||
auto const cfg = make_tls_config();
|
||||
std::string const in = "Kabuf";
|
||||
|
||||
request req;
|
||||
@@ -59,14 +64,37 @@ BOOST_AUTO_TEST_CASE(ping)
|
||||
BOOST_CHECK_EQUAL(in, std::get<0>(resp).value());
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ping_custom_ssl_context)
|
||||
{
|
||||
auto const cfg = make_tls_config();
|
||||
std::string const in = "Kabuf";
|
||||
|
||||
request req;
|
||||
req.push("PING", in);
|
||||
|
||||
response<std::string> resp;
|
||||
|
||||
net::io_context ioc;
|
||||
net::ssl::context ctx{boost::asio::ssl::context::tls_client};
|
||||
connection conn{ioc, std::move(ctx)};
|
||||
conn.next_layer().set_verify_mode(net::ssl::verify_peer);
|
||||
conn.next_layer().set_verify_callback(verify_certificate);
|
||||
|
||||
conn.async_exec(req, resp, [&](auto ec, auto) {
|
||||
BOOST_TEST(!ec);
|
||||
conn.cancel();
|
||||
});
|
||||
|
||||
conn.async_run(cfg, {}, [](auto) { });
|
||||
|
||||
ioc.run();
|
||||
|
||||
BOOST_CHECK_EQUAL(in, std::get<0>(resp).value());
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(acl_does_not_allow_select)
|
||||
{
|
||||
config cfg;
|
||||
cfg.use_ssl = true;
|
||||
cfg.username = "aedis";
|
||||
cfg.password = "aedis";
|
||||
cfg.addr.host = "db.occase.de";
|
||||
cfg.addr.port = "6380";
|
||||
auto cfg = make_tls_config();
|
||||
cfg.database_index = 22;
|
||||
cfg.reconnect_wait_interval = std::chrono::seconds::zero();
|
||||
|
||||
|
||||
64
test/test_issue_181.cpp
Normal file
@@ -0,0 +1,64 @@
|
||||
/* Copyright (c) 2018-2024 Marcelo Zimbres Silva (mzimbres@gmail.com)
|
||||
*
|
||||
* Distributed under the Boost Software License, Version 1.0. (See
|
||||
* accompanying file LICENSE.txt)
|
||||
*/
|
||||
|
||||
#include <boost/redis/connection.hpp>
|
||||
#include <boost/redis/logger.hpp>
|
||||
#include <boost/asio/awaitable.hpp>
|
||||
#include <boost/asio/use_awaitable.hpp>
|
||||
#define BOOST_TEST_MODULE conn-quit
|
||||
#include <boost/test/included/unit_test.hpp>
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include "common.hpp"
|
||||
|
||||
namespace net = boost::asio;
|
||||
using boost::redis::request;
|
||||
using boost::redis::request;
|
||||
using boost::redis::response;
|
||||
using boost::redis::ignore;
|
||||
using boost::redis::logger;
|
||||
using boost::redis::config;
|
||||
using boost::redis::operation;
|
||||
using boost::redis::connection;
|
||||
using boost::system::error_code;
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
BOOST_AUTO_TEST_CASE(issue_181)
|
||||
{
|
||||
using connection_base = boost::redis::detail::connection_base<net::any_io_executor>;
|
||||
|
||||
auto const level = boost::redis::logger::level::debug;
|
||||
net::io_context ioc;
|
||||
auto ctx = net::ssl::context{net::ssl::context::tlsv12_client};
|
||||
connection_base conn{ioc.get_executor(), std::move(ctx), 1000000};
|
||||
net::steady_timer timer{ioc};
|
||||
timer.expires_after(std::chrono::seconds{1});
|
||||
|
||||
auto run_cont = [&](auto ec){
|
||||
std::cout << "async_run1: " << ec.message() << std::endl;
|
||||
};
|
||||
|
||||
auto cfg = make_test_config();
|
||||
cfg.health_check_interval = std::chrono::seconds{0};
|
||||
cfg.reconnect_wait_interval = std::chrono::seconds{0};
|
||||
conn.async_run(cfg, boost::redis::logger{level}, run_cont);
|
||||
BOOST_TEST(!conn.run_is_canceled());
|
||||
|
||||
// Uses a timer to wait some time until run has been called.
|
||||
auto timer_cont = [&](auto ec){
|
||||
std::cout << "timer_cont: " << ec.message() << std::endl;
|
||||
BOOST_TEST(!conn.run_is_canceled());
|
||||
conn.cancel(operation::run);
|
||||
BOOST_TEST(conn.run_is_canceled());
|
||||
};
|
||||
|
||||
timer.async_wait(timer_cont);
|
||||
|
||||
ioc.run();
|
||||
}
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <boost/test/included/unit_test.hpp>
|
||||
#include <tuple>
|
||||
#include <iostream>
|
||||
#include "common.hpp"
|
||||
|
||||
#if defined(BOOST_ASIO_HAS_CO_AWAIT)
|
||||
|
||||
@@ -86,13 +87,14 @@ periodic_task(std::shared_ptr<connection> conn) -> net::awaitable<void>
|
||||
conn->cancel(operation::reconnection);
|
||||
}
|
||||
|
||||
auto co_main(config cfg) -> net::awaitable<void>
|
||||
auto co_main(config) -> net::awaitable<void>
|
||||
{
|
||||
auto ex = co_await net::this_coro::executor;
|
||||
auto conn = std::make_shared<connection>(ex);
|
||||
|
||||
net::co_spawn(ex, receiver(conn), net::detached);
|
||||
net::co_spawn(ex, periodic_task(conn), net::detached);
|
||||
auto cfg = make_test_config();
|
||||
conn->async_run(cfg, {}, net::consign(net::detached, conn));
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
* accompanying file LICENSE.txt)
|
||||
*/
|
||||
|
||||
#include <boost/redis/detail/runner.hpp>
|
||||
#include <boost/redis/resp3/serialization.hpp>
|
||||
#include <boost/redis/adapter/adapt.hpp>
|
||||
#define BOOST_TEST_MODULE conn-quit
|
||||
@@ -11,6 +12,9 @@
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
|
||||
using boost::redis::request;
|
||||
using boost::redis::config;
|
||||
using boost::redis::detail::push_hello;
|
||||
using boost::redis::adapter::adapt2;
|
||||
using boost::redis::adapter::result;
|
||||
using boost::redis::resp3::detail::deserialize;
|
||||
@@ -31,3 +35,56 @@ BOOST_AUTO_TEST_CASE(low_level_sync_sans_io)
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(config_to_hello)
|
||||
{
|
||||
config cfg;
|
||||
cfg.clientname = "";
|
||||
request req;
|
||||
|
||||
push_hello(cfg, req);
|
||||
|
||||
std::string_view const expected = "*2\r\n$5\r\nHELLO\r\n$1\r\n3\r\n";
|
||||
BOOST_CHECK_EQUAL(req.payload(), expected);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(config_to_hello_with_select)
|
||||
{
|
||||
config cfg;
|
||||
cfg.clientname = "";
|
||||
cfg.database_index = 10;
|
||||
request req;
|
||||
|
||||
push_hello(cfg, req);
|
||||
|
||||
std::string_view const expected =
|
||||
"*2\r\n$5\r\nHELLO\r\n$1\r\n3\r\n"
|
||||
"*2\r\n$6\r\nSELECT\r\n$2\r\n10\r\n";
|
||||
|
||||
BOOST_CHECK_EQUAL(req.payload(), expected);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(config_to_hello_cmd_clientname)
|
||||
{
|
||||
config cfg;
|
||||
request req;
|
||||
|
||||
push_hello(cfg, req);
|
||||
|
||||
std::string_view const expected = "*4\r\n$5\r\nHELLO\r\n$1\r\n3\r\n$7\r\nSETNAME\r\n$11\r\nBoost.Redis\r\n";
|
||||
BOOST_CHECK_EQUAL(req.payload(), expected);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(config_to_hello_cmd_auth)
|
||||
{
|
||||
config cfg;
|
||||
cfg.clientname = "";
|
||||
cfg.username = "foo";
|
||||
cfg.password = "bar";
|
||||
request req;
|
||||
|
||||
push_hello(cfg, req);
|
||||
|
||||
std::string_view const expected = "*5\r\n$5\r\nHELLO\r\n$1\r\n3\r\n$4\r\nAUTH\r\n$3\r\nfoo\r\n$3\r\nbar\r\n";
|
||||
BOOST_CHECK_EQUAL(req.payload(), expected);
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(resolve_bad_host)
|
||||
{
|
||||
net::io_context ioc;
|
||||
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.addr.host = "Atibaia";
|
||||
cfg.addr.port = "6379";
|
||||
cfg.resolve_timeout = 10h;
|
||||
@@ -51,7 +51,7 @@ BOOST_AUTO_TEST_CASE(resolve_with_timeout)
|
||||
{
|
||||
net::io_context ioc;
|
||||
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.addr.host = "occase.de";
|
||||
cfg.addr.port = "6379";
|
||||
cfg.resolve_timeout = 1ms;
|
||||
@@ -68,7 +68,7 @@ BOOST_AUTO_TEST_CASE(connect_bad_port)
|
||||
{
|
||||
net::io_context ioc;
|
||||
|
||||
config cfg;
|
||||
auto cfg = make_test_config();
|
||||
cfg.addr.host = "127.0.0.1";
|
||||
cfg.addr.port = "1";
|
||||
cfg.resolve_timeout = 10h;
|
||||
|
||||
@@ -115,7 +115,7 @@ def _setup_boost(
|
||||
# Install Boost dependencies
|
||||
_run(["git", "config", "submodule.fetchJobs", "8"])
|
||||
_run(["git", "submodule", "update", "-q", "--init", "tools/boostdep"])
|
||||
_run(["python", "tools/boostdep/depinst/depinst.py", "--include", "example", "redis"])
|
||||
_run(["python3", "tools/boostdep/depinst/depinst.py", "--include", "example", "redis"])
|
||||
|
||||
# Bootstrap
|
||||
if _is_windows:
|
||||