
Compare commits


9 Commits

Author | SHA1 | Message | Date
Marcelo Zimbres | ffc4230368 | Fixes documentation. | 2022-07-16 13:50:21 +02:00
Marcelo Zimbres | 59b5d35672 | Small corrections. | 2022-07-16 12:33:02 +02:00
Marcelo Zimbres | 835a1decf4 | Progresses with benchmarks. | 2022-07-16 11:03:48 +02:00
Marcelo Zimbres | 3fb018ccc6 | Some changes in the benchmarks. | 2022-07-15 23:15:08 +02:00
Marcelo Zimbres | 1fe4a87287 | Adds go-redis | 2022-07-14 22:13:58 +02:00
Marcelo Zimbres | 70cdff41e0 | Fixes some bugs. | 2022-07-14 21:44:50 +02:00
Marcelo Zimbres | 2edd9f3d87 | Some improvements in the benchmarks. | 2022-07-11 00:00:09 +02:00
Marcelo Zimbres | fa4181b197 | New version. | 2022-07-10 20:53:44 +02:00
Marcelo Zimbres | 9e2cd8855e | Small documentation improvements. | 2022-07-10 19:19:42 +02:00
23 changed files with 602 additions and 474 deletions

View File

@@ -70,7 +70,6 @@ nobase_include_HEADERS =\
$(top_srcdir)/aedis/resp3/detail/parser.hpp\
$(top_srcdir)/aedis/resp3/type.hpp\
$(top_srcdir)/aedis/resp3/read.hpp\
$(top_srcdir)/aedis/resp3/exec.hpp\
$(top_srcdir)/aedis/resp3/write.hpp\
$(top_srcdir)/aedis/resp3/request.hpp\
$(top_srcdir)/aedis/resp3/detail/impl/parser.ipp\
@@ -88,22 +87,25 @@ EXTRA_DIST += $(top_srcdir)/doc/DoxygenLayout.xml
EXTRA_DIST += $(top_srcdir)/doc/aedis.css
EXTRA_DIST += $(top_srcdir)/doc/htmlfooter.html
EXTRA_DIST += $(top_srcdir)/doc/htmlheader.html
EXTRA_DIST += $(top_srcdir)/benchmarks/cpp/libuv/echo_server_direct.c
EXTRA_DIST += $(top_srcdir)/benchmarks/cpp/libuv/README.md
EXTRA_DIST += $(top_srcdir)/benchmarks/benchmarks.md
EXTRA_DIST += $(top_srcdir)/benchmarks/benchmarks.tex
EXTRA_DIST += $(top_srcdir)/benchmarks/c/libuv/echo_server_direct.c
EXTRA_DIST += $(top_srcdir)/benchmarks/c/libuv/README.md
EXTRA_DIST += $(top_srcdir)/benchmarks/go/echo_server_direct.go
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/package-lock.json
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/Cargo.lock
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct/echo_server_direct.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_direct/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis/echo_server_over_redis.js
EXTRA_DIST += $(top_srcdir)/benchmarks/nodejs/echo_server_over_redis/package.json
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/Cargo.toml
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_direct/src/main.rs
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/Cargo.lock
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/Cargo.toml
EXTRA_DIST += $(top_srcdir)/benchmarks/rust/echo_server_over_redis/src/main.rs
.PHONY: doc
doc:
rm -rf ../aedis-gh-pages/*
doxygen doc/Doxyfile
.PHONY: bench
bench:
pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex
pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex

View File

@@ -13,7 +13,6 @@
#include <aedis/connection.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/exec.hpp>
#include <aedis/resp3/request.hpp>
#include <aedis/adapter/adapt.hpp>
@@ -37,7 +36,7 @@
\li Zero asymptotic allocations by means of memory reuse.
\li Health checks, back pressure and low latency.
The Aedis API hides most of the low level asynchronous operations
Aedis API hides most of the low level asynchronous operations
away from the user, for example, the code below pings a message to
the server
@@ -45,27 +44,28 @@
int main()
{
request req;
req.push("HELLO", 3);
req.push("PING");
req.push("QUIT");
std::tuple<std::string, std::string> resp;
std::tuple<aedis::ignore, std::string, aedis::ignore> resp;
net::io_context ioc;
connection db{ioc};
db.async_exec("127.0.0.1", "6379", req, adapt(resp),
[](auto ec, auto) { std::cout << ec.message() << std::endl; });
ioc.run();
// Print
std::cout << std::get<0>(resp) << std::endl;
// Print the ping message.
std::cout << std::get<1>(resp) << std::endl;
}
\endcode
For a detailed comparison of Redis clients and the design
rationale behind Aedis jump to \ref why-aedis.
rationale behind Aedis jump to \ref why-aedis. For benchmarks see https://github.com/mzimbres/aedis/blob/master/benchmarks/benchmarks.md
\section requests Requests
@@ -79,7 +79,7 @@
// Command with variable length of arguments.
req.push("SET", "key", "some value", value, "EX", "2");
// Pushes a set.
// Pushes a list.
std::list<std::string> list
{"channel1", "channel2", "channel3"};
req.push_range("SUBSCRIBE", list);
@@ -175,34 +175,32 @@
subset of the RESP3 specification. Now let us see some examples
@code
auto dbuffer = dynamic_buffer(buffer);
// To ignore the response.
co_await resp3::async_read(socket, dbuffer, adapt());
co_await db->async_exec(req, adapt());
// Read in a std::string e.g. get.
std::string str;
co_await resp3::async_read(socket, dbuffer, adapt(str));
co_await db->async_exec(req, adapt(resp));
// Read in a long long e.g. rpush.
long long number;
co_await resp3::async_read(socket, dbuffer, adapt(number));
long long resp;
co_await db->async_exec(req, adapt(resp));
// Read in a std::set e.g. smembers.
std::set<T, U> set;
co_await resp3::async_read(socket, dbuffer, adapt(set));
std::set<T, U> resp;
co_await db->async_exec(req, adapt(resp));
// Read in a std::map e.g. hgetall.
std::map<T, U> set;
co_await resp3::async_read(socket, dbuffer, adapt(map));
std::map<T, U> resp;
co_await db->async_exec(req, adapt(resp));
// Read in a std::unordered_map e.g. hgetall.
std::unordered_map<T, U> umap;
co_await resp3::async_read(socket, dbuffer, adapt(umap));
std::unordered_map<T, U> resp;
co_await db->async_exec(req, adapt(resp));
// Read in a std::vector e.g. lrange.
std::vector<T> vec;
co_await resp3::async_read(socket, dbuffer, adapt(vec));
std::vector<T> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
In other words, it is straightforward, just pass the result of \c
@@ -218,8 +216,8 @@
wrap your type around \c boost::optional like this
@code
boost::optional<std::unordered_map<T, U>> umap;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(umap));
boost::optional<std::unordered_map<T, U>> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
Everything else stays the same, before accessing data, users will
@@ -260,17 +258,22 @@
can be read in the following way
@code
std::tuple<
boost::optional<std::string>, // Response to get
boost::optional<std::vector<std::string>>, // Response to lrange
boost::optional<std::map<std::string, std::string>> // Response to hgetall
> trans;
using trans_type =
std::tuple<
boost::optional<std::string>, // get
boost::optional<std::vector<std::string>>, // lrange
boost::optional<std::map<std::string, std::string>> // hgetall
>;
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore multi
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore get
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore lrange
co_await resp3::async_read(socket, dynamic_buffer(buffer)); // Ignore hgetall
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(trans));
std::tuple<
aedis::ignore, // multi
aedis::ignore, // get
aedis::ignore, // lrange
aedis::ignore, // hgetall
trans_type // exec
> resp;
co_await db->async_exec(req, adapt(resp));
@endcode
Note that above we are not ignoring the response to the commands
@@ -316,7 +319,7 @@
\subsection gen-case The general case
As already mentioned, there are cases where responses to Redis
There are cases where responses to Redis
commands won't fit in the model presented above, some examples are
@li Commands (like \c set) whose responses don't have a fixed
@@ -355,11 +358,11 @@
@code
// Receives any RESP3 simple data type.
node<std::string> resp;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
co_await db->async_exec(req, adapt(resp));
// Receives any RESP3 simple or aggregate data type.
std::vector<node<std::string>> resp;
co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp));
co_await db->async_exec(req, adapt(resp));
@endcode
For example, suppose we want to retrieve a hash data structure
@@ -550,48 +553,7 @@
In Aedis there is no difference between sending one command, a
pipeline or a transaction because creating the request is decoupled
from the IO objects, for example
@code
std::string request;
auto sr = make_serializer(request);
sr.push("HELLO", 3);
sr.push("MULTI");
sr.push("PING");
sr.push("SET", "low-level-key", "some content", "EX", "2");
sr.push("EXEC");
sr.push("PING", "Another message.");
net::write(socket, net::buffer(request));
@endcode
The request created above will be sent to Redis in a single
pipeline and imposes no restriction on what it contains e.g. the
number of commands, transactions etc. The problems mentioned above
simply do not exist in Aedis. The way responses are read is
also more flexible
@code
std::string buffer;
auto dbuffer = net::dynamic_buffer(buffer);
std::tuple<std::string, boost::optional<std::string>> response;
resp3::read(socket, dbuffer); // hello
resp3::read(socket, dbuffer); // multi
resp3::read(socket, dbuffer); // ping
resp3::read(socket, dbuffer); // set
resp3::read(socket, dbuffer, adapt(response));
resp3::read(socket, dbuffer); // ping
@endcode
@li The response objects are passed by the caller to the read
functions so that the caller has fine control over memory allocations and
object lifetime.
@li The user can either use error-code or exceptions.
@li Each response can be read individually in the response object
avoiding temporaries.
@li It is possible to ignore responses.
This was the blocking API, now let us compare the async interface
from the IO objects.
> redis-plus-plus also supports async interface, however, async
> support for Transaction and Subscriber is still on the way.

View File

@@ -26,7 +26,7 @@ namespace aedis {
// https://redis.io/docs/reference/sentinel-clients
/** \brief A high level Redis connection.
/** \brief A high level Redis connection class.
* \ingroup any
*
* This class keeps a healthy connection to the Redis instance where
@@ -139,7 +139,7 @@ public:
>(detail::run_op<connection>{this, host, port}, token, resv_);
}
/** @brief Executes a request on the redis server.
/** @brief Executes a command on the redis server.
*
* \param req Request object.
* \param adapter Response adapter.
@@ -169,11 +169,11 @@ public:
>(detail::exec_op<connection, Adapter>{this, &req, adapter}, token, resv_);
}
/** @brief Connects and executes a single request.
/** @brief Connects and executes a single command.
*
* Combines \c async_run and the other \c async_exec overload in a
* single function. This function is useful for users that want to
* send a single request to the server.
* send a single request to the server and then close the connection.
*
* \param host Address of the Redis server.
* \param port Port of the Redis server.
@@ -246,7 +246,7 @@ public:
>(detail::read_push_op<connection, decltype(f)>{this, f}, token, resv_);
}
/** @brief Cancel all pending sessions and push operations to return
/** @brief Cancel all pending requests.
*
* \returns The number of requests that have been canceled.
*/
@@ -265,16 +265,14 @@ public:
/** @brief Closes the connection with the database.
*
* Calling this function will cause \c async_run to return. It is
* safe to try a reconnect after that i.e. calling it again.
* safe to try a reconnect after that; when that happens, all
* pending requests will be sent automatically.
*
* Note however that the preferred way to close a connection is to
* send a \c quit command if you are actively closing it.
* Otherwise an unresponsive Redis server will cause the
* idle-checks to fail, which will also lead to \c async_run
* returning.
*
* @remark This function won't cancel pending requests, see
* \c cancel_requests.
* idle-checks to kick in and lead to \c async_run
* returning with idle_timeout.
*/
void cancel_run()
{
@@ -284,7 +282,6 @@ public:
writer_timer_.cancel();
ping_timer_.cancel();
// TODO: How to avoid calling this here.
push_channel_.cancel();
// Cancel own pings if there is any waiting.
@@ -307,6 +304,7 @@ private:
resp3::request const* req = nullptr;
std::size_t cmds = 0;
bool stop = false;
bool written = false;
};
using time_point_type = std::chrono::time_point<std::chrono::steady_clock>;
@@ -325,17 +323,17 @@ private:
template <class T> friend struct detail::check_idle_op;
template <class T> friend struct detail::start_op;
void cancel_push_requests(typename reqs_type::iterator end)
void cancel_push_requests()
{
auto point = std::stable_partition(std::begin(reqs_), end, [](auto const& ptr) {
return ptr->req->commands() != 0;
auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
return !(ptr->written && ptr->req->commands() == 0);
});
std::for_each(point, end, [](auto const& ptr) {
ptr->timer.cancel();
std::for_each(point, std::end(reqs_), [](auto const& ptr) {
ptr->timer.cancel();
});
reqs_.erase(point, end);
reqs_.erase(point, std::end(reqs_));
}
void add_request_info(std::shared_ptr<req_info> const& info)
@@ -419,12 +417,12 @@ private:
}
template <class Adapter, class CompletionToken>
auto async_exec_read(Adapter adapter, CompletionToken token)
auto async_exec_read(Adapter adapter, std::size_t cmds, CompletionToken token)
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_read_op<connection, Adapter>{this, adapter}, token, resv_);
>(detail::exec_read_op<connection, Adapter>{this, adapter, cmds}, token, resv_);
}
void coalesce_requests()
@@ -438,6 +436,7 @@ private:
for (auto i = 0UL; i < size; ++i) {
write_buffer_ += reqs_.at(i)->req->payload();
cmds_ += reqs_.at(i)->req->commands();
reqs_.at(i)->written = true;
}
}

View File

@@ -22,7 +22,6 @@
#include <aedis/resp3/type.hpp>
#include <aedis/resp3/detail/parser.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/exec.hpp>
#include <aedis/resp3/write.hpp>
#include <aedis/resp3/request.hpp>
@@ -118,6 +117,7 @@ template <class Conn, class Adapter>
struct exec_read_op {
Conn* conn;
Adapter adapter;
std::size_t cmds = 0;
std::size_t read_size = 0;
std::size_t index = 0;
boost::asio::coroutine coro;
@@ -132,7 +132,7 @@ struct exec_read_op {
{
// Loop reading the responses to this request.
BOOST_ASSERT(!conn->reqs_.empty());
while (conn->reqs_.front()->cmds != 0) {
while (cmds != 0) {
BOOST_ASSERT(conn->cmds_ != 0);
//-----------------------------------
@@ -177,8 +177,8 @@ struct exec_read_op {
read_size += n;
BOOST_ASSERT(conn->reqs_.front()->cmds != 0);
--conn->reqs_.front()->cmds;
BOOST_ASSERT(cmds != 0);
--cmds;
BOOST_ASSERT(conn->cmds_ != 0);
--conn->cmds_;
@@ -230,7 +230,9 @@ struct exec_op {
return;
}
yield conn->async_exec_read(adapter, std::move(self));
BOOST_ASSERT(!conn->reqs_.empty());
BOOST_ASSERT(conn->reqs_.front() != nullptr);
yield conn->async_exec_read(adapter, conn->reqs_.front()->cmds, std::move(self));
if (ec) {
self.complete(ec, 0);
return;
@@ -396,6 +398,10 @@ struct run_op {
return;
}
std::for_each(std::begin(conn->reqs_), std::end(conn->reqs_), [](auto const& ptr) {
return ptr->written = false;
});
yield conn->async_start(std::move(self));
self.complete(ec);
}
@@ -405,7 +411,6 @@ struct run_op {
template <class Conn>
struct writer_op {
Conn* conn;
typename Conn::reqs_type::iterator end;
boost::asio::coroutine coro;
template <class Self>
@@ -419,7 +424,6 @@ struct writer_op {
{
while (!conn->reqs_.empty() && conn->cmds_ == 0 && conn->write_buffer_.empty()) {
conn->coalesce_requests();
end = conn->reqs_.end();
yield boost::asio::async_write(*conn->socket_, boost::asio::buffer(conn->write_buffer_), std::move(self));
if (ec) {
self.complete(ec);
@@ -430,7 +434,7 @@ struct writer_op {
// order to use it as a flag that informs there is no
// ongoing write.
conn->write_buffer_.clear();
conn->cancel_push_requests(end);
conn->cancel_push_requests();
}
if (conn->socket_->is_open()) {

View File

@@ -1,176 +0,0 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
#ifndef AEDIS_RESP3_EXEC_HPP
#define AEDIS_RESP3_EXEC_HPP
#include <boost/assert.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/compose.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <aedis/error.hpp>
#include <aedis/resp3/read.hpp>
#include <aedis/resp3/request.hpp>
namespace aedis {
namespace resp3 {
namespace detail {
#include <boost/asio/yield.hpp>
template <
class AsyncStream,
class Adapter,
class DynamicBuffer
>
struct exec_op {
AsyncStream* socket;
request const* req;
Adapter adapter;
DynamicBuffer dbuf;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, boost::system::error_code ec = {}
, std::size_t n = 0)
{
reenter (coro)
{
yield
boost::asio::async_write(
*socket,
boost::asio::buffer(req->payload()),
std::move(self));
if (ec) {
self.complete(ec, 0);
return;
}
yield resp3::async_read(*socket, dbuf, adapter, std::move(self));
self.complete(ec, n);
}
}
};
#include <boost/asio/unyield.hpp>
} // detail
template <
class AsyncStream,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &req, adapter, dbuf}, token, socket);
}
namespace detail {
#include <boost/asio/yield.hpp>
template <
class AsyncStream,
class Adapter,
class DynamicBuffer
>
struct exec_with_timeout_op {
AsyncStream* socket;
boost::asio::steady_timer* timer;
request const* req;
Adapter adapter;
DynamicBuffer dbuf;
boost::asio::coroutine coro;
template <class Self>
void operator()( Self& self
, std::array<std::size_t, 2> order = {}
, boost::system::error_code ec1 = {}
, std::size_t n = 0
, boost::system::error_code ec2 = {})
{
reenter (coro)
{
yield
boost::asio::experimental::make_parallel_group(
[this](auto token) { return resp3::async_exec(*socket, *req, adapter, dbuf, token);},
[this](auto token) { return timer->async_wait(token);}
).async_wait(
boost::asio::experimental::wait_for_one(),
std::move(self));
switch (order[0]) {
case 0:
{
if (ec1) {
self.complete(ec1, 0);
return;
}
} break;
case 1:
{
if (!ec2) {
self.complete(aedis::error::idle_timeout, 0);
return;
}
} break;
default: BOOST_ASSERT(false);
}
self.complete({}, n);
}
}
};
#include <boost/asio/unyield.hpp>
} // detail
template <
class AsyncStream,
class Adapter,
class DynamicBuffer,
class CompletionToken = boost::asio::default_completion_token_t<typename AsyncStream::executor_type>
>
auto async_exec(
AsyncStream& socket,
boost::asio::steady_timer& timer,
request const& req,
Adapter adapter,
DynamicBuffer dbuf,
CompletionToken token = CompletionToken{})
{
return boost::asio::async_compose
< CompletionToken
, void(boost::system::error_code, std::size_t)
>(detail::exec_with_timeout_op<AsyncStream, Adapter, DynamicBuffer>
{&socket, &timer, &req, adapter, dbuf}, token, socket, timer);
}
} // resp3
} // aedis
#endif // AEDIS_RESP3_EXEC_HPP

benchmarks/benchmarks.md (new file, 91 lines)
View File

@@ -0,0 +1,91 @@
# TCP echo server performance.
This document benchmarks the performance of TCP echo servers
I implemented in different languages using different Redis clients.
The main motivations for choosing a TCP echo server as a benchmark
program are
* Simple to implement and does not require expert-level knowledge of most languages.
* I/O bound: echo servers have very low CPU consumption in general
and are therefore an excellent measure of a program's ability to
serve concurrent requests.
* It simulates a typical backend well with regard to concurrency.
I also imposed some constraints on the implementations; a minimal
sketch of a direct echo server respecting these constraints is shown
after the list.
* It should be simple enough and not require writing too much code.
* Favor the use of standard idioms and avoid optimizations that require expert-level knowledge.
* Avoid complex machinery such as connection and thread pools.
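To make the setup concrete, below is a minimal sketch of the kind of
direct echo server being measured, written with Boost.Asio C++20
coroutines as a simplified variation of the Asio example linked in the
next section. The port number (55555, as in the other servers in this
changeset) and the 1024-byte buffer are illustrative choices, not part
of any specification.

```cpp
#include <boost/asio/awaitable.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/this_coro.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/write.hpp>
#include <exception>

namespace net = boost::asio;
using net::ip::tcp;

// Echoes whatever the peer sends until the connection is closed.
net::awaitable<void> echo(tcp::socket socket)
{
   try {
      char data[1024];
      for (;;) {
         auto n = co_await socket.async_read_some(net::buffer(data), net::use_awaitable);
         co_await net::async_write(socket, net::buffer(data, n), net::use_awaitable);
      }
   } catch (std::exception const&) {
      // Peer closed the connection.
   }
}

// Accepts connections and spawns one echo coroutine per client.
net::awaitable<void> listener()
{
   auto ex = co_await net::this_coro::executor;
   tcp::acceptor acceptor{ex, {tcp::v4(), 55555}};
   for (;;) {
      tcp::socket socket = co_await acceptor.async_accept(net::use_awaitable);
      net::co_spawn(ex, echo(std::move(socket)), net::detached);
   }
}

int main()
{
   net::io_context ioc;
   net::co_spawn(ioc, listener(), net::detached);
   ioc.run();
}
```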
## No Redis
First I tested a pure TCP echo server, i.e. one that sends the messages
directly back to the client without interacting with Redis. The result can
be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-direct.png)
The tests were performed with 1000 concurrent TCP connections on
localhost, where latency is 0.07ms on average on my machine. On
higher-latency networks the difference among libraries is expected to
decrease.
### Remarks:
* I was not expecting Asio to perform so much better than Tokio and libuv.
* I did expect nodejs to come in a little behind given that it is
javascript code. Otherwise I expected it to have performance similar
to libuv, since that is the framework behind it.
* Go's performance did not surprise me: decent and not far behind nodejs.
The code used in the benchmarks can be found at
* [Asio](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/cpp/asio/echo_server_direct.cpp): A variation of [this](https://github.com/chriskohlhoff/asio/blob/4915cfd8a1653c157a1480162ae5601318553eb8/asio/src/examples/cpp20/coroutines/echo_server.cpp) Asio example.
* [Libuv](https://github.com/mzimbres/aedis/tree/835a1decf477b09317f391eddd0727213cdbe12b/benchmarks/c/libuv): Taken from [this](https://github.com/libuv/libuv/blob/06948c6ee502862524f233af4e2c3e4ca876f5f6/docs/code/tcp-echo-server/main.c) libuv example.
* [Tokio](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/rust/echo_server_direct): Taken from [here](https://docs.rs/tokio/latest/tokio/).
* [Nodejs](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_direct)
* [Go](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_direct.go)
## Echo over Redis
This is similar to the echo server described above, but messages are
echoed by Redis rather than by the echo server itself, which acts
as a proxy between the client and the Redis server. The result
can be seen below
![](https://mzimbres.github.io/aedis/tcp-echo-over-redis.png)
The tests were performed on a network where latency is 35ms on
average; otherwise the setup is the same as in the benchmarks above
regarding the number of TCP connections.
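As a rough illustration of the proxy pattern described above, a
per-client session might look like the sketch below. It reuses the
Aedis names from the library documentation in this changeset
(connection, request, adapt, async_exec); the helper name echo_session,
the shared_ptr to the connection, the newline-delimited framing and the
use of PING to bounce the message back are illustrative assumptions,
not the exact benchmark code.

```cpp
// Sketch only: assumes the includes and namespace aliases from the library
// overview in this changeset (aedis/connection.hpp, aedis/resp3/request.hpp,
// aedis/adapter/adapt.hpp, Boost.Asio) and namespace net = boost::asio.
net::awaitable<void> echo_session(tcp::socket socket, std::shared_ptr<connection> db)
{
   std::string line;
   for (;;) {
      // Read one newline-terminated message from the TCP client.
      auto n = co_await net::async_read_until(
         socket, net::dynamic_buffer(line, 1024), "\n", net::use_awaitable);

      // Ask Redis to echo it back: PING with an argument returns that argument.
      request req;
      req.push("PING", line.substr(0, n));

      std::string resp;
      co_await db->async_exec(req, adapt(resp), net::use_awaitable);

      // Forward the reply to the client and consume the input we just handled.
      co_await net::async_write(socket, net::buffer(resp), net::use_awaitable);
      line.erase(0, n);
   }
}
```

The idea is that all sessions share one connection object, so their
commands can be coalesced into pipelines (see coalesce_requests in the
connection diff above), which is the property this benchmark exercises.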
### Remarks
As the reader can see, the libuv and the Rust tests are not depicted
in the graph, for the following reasons
* [redis-rs](https://github.com/redis-rs/redis-rs): This client
comes so far behind that it can't even be represented together
with the other benchmarks without making them look insignificant. I
don't know for sure why it is so slow; I suppose it has
something to do with its lack of proper
[pipelining](https://redis.io/docs/manual/pipelining/) support.
In fact, the more TCP connections I launch, the worse its
performance gets.
* Libuv: I left it out because it would require too much work to
write and to achieve good performance. More specifically,
I would have to use hiredis and implement support for pipelining
manually.
The code used in the benchmarks can be found at
* [Aedis](https://github.com/mzimbres/aedis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/examples/echo_server.cpp)
* [node-redis](https://github.com/redis/node-redis): [code](https://github.com/mzimbres/aedis/tree/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/nodejs/echo_server_over_redis)
* [go-redis](https://github.com/go-redis/redis): [code](https://github.com/mzimbres/aedis/blob/3fb018ccc6138d310ac8b73540391cdd8f2fdad6/benchmarks/go/echo_server_over_redis.go)
## Contributing
If you spot any performance improvement in any of the examples or
would like to include other clients, please open a PR and I will
gladly merge it.

benchmarks/benchmarks.tex (new file, 73 lines)
View File

@@ -0,0 +1,73 @@
\documentclass{article}
\usepackage{pgfplots}
\pgfrealjobname{echo}
\pgfplotsset{compat=newest}
\begin{document}
\beginpgfgraphicnamed{echo-f0}
% time ./echo_server_client 1000 5000
\begin{tikzpicture}[scale=1.0]
\begin{axis}[
y dir=reverse,
%xbar stacked,
xbar, xmin=0,
%hide x axis,
bar shift=0pt,
width=15cm, height=6cm, enlarge y limits=0.5,
title={TCP Echo Server Performance},
xlabel={Seconds},
symbolic y coords={Asio,Tokio,Libuv,Nodejs,Go},
ytick=data,
%bar width=1cm,
nodes near coords,
nodes near coords align={horizontal},
]
\addplot coordinates {
(31.1,Asio)
(43.2,Tokio)
(43.6,Libuv)
(74.2,Nodejs)
(81.0,Go)
};
\end{axis}
\end{tikzpicture}
\endpgfgraphicnamed
\beginpgfgraphicnamed{echo-f1}
%debian2[0]$ time ./echo_server_client 1000 1000
%Go (1): 1.000s
%C++ (1): 0.07s
\begin{tikzpicture}[scale=1.0]
\begin{axis}[
y dir=reverse,
%xbar stacked,
xbar, xmin=0,
%hide x axis,
bar shift=0pt,
width=12cm, height=6cm, enlarge y limits=0.5,
title={TCP Echo Server Performance (over Redis)},
xlabel={Seconds},
symbolic y coords={Aedis,Rust-rs,Libuv,Node-redis,Go-redis},
ytick=data,
%bar width=1cm,
nodes near coords,
nodes near coords align={horizontal},
]
\addplot coordinates {
(12.6,Aedis)
(28.8,Node-redis)
(352.4,Go-redis)
};
%\addplot coordinates {
% (30.0,Asio)
% (90.6,Rust-rs)
% (0.0,Libuv)
% (68.9,Nodejs)
% (0.0,Go)
%};
\end{axis}
\end{tikzpicture}
\endpgfgraphicnamed
\end{document}

View File

@@ -4,7 +4,7 @@
#include <uv.h>
#define DEFAULT_PORT 55555
#define DEFAULT_BACKLOG 128
#define DEFAULT_BACKLOG 1024
uv_loop_t *loop;
struct sockaddr_in addr;

View File

@@ -1,8 +1,12 @@
/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com)
*
* Distributed under the Boost Software License, Version 1.0. (See
* accompanying file LICENSE.txt)
*/
//
// echo_server.cpp
// ~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2022 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>

View File

@@ -0,0 +1,54 @@
package main
import (
"context"
"github.com/go-redis/redis/v8"
"bufio"
"fmt"
"io"
"net"
"os"
)
var ctx = context.Background()
var rdb = redis.NewClient(&redis.Options{Addr: "db.occase.de:6379", Password: "", DB: 0,})
func echo(conn net.Conn) {
r := bufio.NewReader(conn)
for {
line, err := r.ReadBytes(byte('\n'))
switch err {
case nil:
break
case io.EOF:
default:
fmt.Println("ERROR", err)
}
err2 := rdb.Ping(ctx).Err()
if err2 != nil {
fmt.Println("ERROR", err2)
panic(err2)
}
conn.Write(line)
}
}
func main() {
l, err := net.Listen("tcp", "0.0.0.0:55555")
if err != nil {
fmt.Println("ERROR", err)
os.Exit(1)
}
for {
conn, err := l.Accept()
if err != nil {
fmt.Println("ERROR", err)
continue
}
go echo(conn)
}
}

View File

@@ -0,0 +1,105 @@
import java.nio.*;
import java.nio.channels.*;
import java.net.*;
import java.util.*;
import java.io.IOException;
public class TcpEchoServer {
public static int DEFAULT_PORT = 55555;
public static void main(String[] args) {
int port;
try {
port = Integer.parseInt(args[0]);
}
catch (Exception ex) {
port = DEFAULT_PORT;
}
//System.out.println("Listening for connections on port " + port);
ServerSocketChannel serverChannel;
Selector selector;
try {
serverChannel = ServerSocketChannel.open( );
ServerSocket ss = serverChannel.socket( );
InetSocketAddress address = new InetSocketAddress(port);
ss.bind(address);
serverChannel.configureBlocking(false);
selector = Selector.open( );
serverChannel.register(selector, SelectionKey.OP_ACCEPT);
}
catch (IOException ex) {
ex.printStackTrace( );
return;
}
while (true) {
try {
selector.select( );
}
catch (IOException ex) {
ex.printStackTrace( );
break;
}
Set readyKeys = selector.selectedKeys( );
Iterator iterator = readyKeys.iterator( );
while (iterator.hasNext( )) {
SelectionKey key = (SelectionKey) iterator.next( );
iterator.remove( );
try {
if (key.isAcceptable( )) {
ServerSocketChannel server = (ServerSocketChannel ) key.channel( );
SocketChannel client = server.accept( );
//System.out.println("Accepted connection from " + client);
client.configureBlocking(false);
SelectionKey clientKey = client.register(
selector, SelectionKey.OP_WRITE | SelectionKey.OP_READ);
ByteBuffer buffer = ByteBuffer.allocate(100);
clientKey.attach(buffer);
//System.out.println(buffer.toString());
}
if (key.isReadable( )) {
SocketChannel client = (SocketChannel) key.channel( );
ByteBuffer output = (ByteBuffer) key.attachment( );
client.read(output);
}
if (key.isWritable( )) {
SocketChannel client = (SocketChannel) key.channel( );
ByteBuffer output = (ByteBuffer) key.attachment( );
output.flip( );
client.write(output);
output.compact( );
}
}
catch (IOException ex) {
key.cancel( );
try {
key.channel().close();
}
catch (IOException cex) {}
}
}
}
}
}

View File

@@ -0,0 +1,2 @@
{
}

View File

@@ -1,7 +1,7 @@
import { createClient } from 'redis';
import * as net from 'net';
const client = createClient();
const client = createClient({url: 'redis://db.occase.de:6379' });
client.on('error', (err) => console.log('Redis Client Error', err));
await client.connect();

View File

@@ -0,0 +1,169 @@
{
"name": "echo_server_over_redis",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"redis": "^4.2.0"
}
},
"node_modules/@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
"integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
"dependencies": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/redis": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
"integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
"dependencies": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.2.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
},
"dependencies": {
"@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"requires": {}
},
"@redis/client": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.2.0.tgz",
"integrity": "sha512-a8Nlw5fv2EIAFJxTDSSDVUT7yfBGpZO96ybZXzQpgkyLg/dxtQ1uiwTc0EGfzg1mrPjZokeBSEGTbGXekqTNOg==",
"requires": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
}
},
"@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"requires": {}
},
"@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"requires": {}
},
"@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"requires": {}
},
"@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"requires": {}
},
"cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw=="
},
"generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg=="
},
"redis": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.2.0.tgz",
"integrity": "sha512-bCR0gKVhIXFg8zCQjXEANzgI01DDixtPZgIUZHBCmwqixnu+MK3Tb2yqGjh+HCLASQVVgApiwhNkv+FoedZOGQ==",
"requires": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.2.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
}
}

View File

@@ -0,0 +1,6 @@
{
"type": "module",
"dependencies": {
"redis": "^4.2.0"
}
}

View File

@@ -1,169 +0,0 @@
{
"name": "aedis",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"redis": "^4.1.0"
}
},
"node_modules/@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.1.0.tgz",
"integrity": "sha512-xO9JDIgzsZYDl3EvFhl6LC52DP3q3GCMUer8zHgKV6qSYsq1zB+pZs9+T80VgcRogrlRYhi4ZlfX6A+bHiBAgA==",
"dependencies": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/redis": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.1.0.tgz",
"integrity": "sha512-5hvJ8wbzpCCiuN1ges6tx2SAh2XXCY0ayresBmu40/SGusWHFW86TAlIPpbimMX2DFHOX7RN34G2XlPA1Z43zg==",
"dependencies": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.1.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
},
"dependencies": {
"@redis/bloom": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.0.2.tgz",
"integrity": "sha512-EBw7Ag1hPgFzdznK2PBblc1kdlj5B5Cw3XwI9/oG7tSn85/HKy3X9xHy/8tm/eNXJYHLXHJL/pkwBpFMVVefkw==",
"requires": {}
},
"@redis/client": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.1.0.tgz",
"integrity": "sha512-xO9JDIgzsZYDl3EvFhl6LC52DP3q3GCMUer8zHgKV6qSYsq1zB+pZs9+T80VgcRogrlRYhi4ZlfX6A+bHiBAgA==",
"requires": {
"cluster-key-slot": "1.1.0",
"generic-pool": "3.8.2",
"yallist": "4.0.0"
}
},
"@redis/graph": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.0.1.tgz",
"integrity": "sha512-oDE4myMCJOCVKYMygEMWuriBgqlS5FqdWerikMoJxzmmTUErnTRRgmIDa2VcgytACZMFqpAOWDzops4DOlnkfQ==",
"requires": {}
},
"@redis/json": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.3.tgz",
"integrity": "sha512-4X0Qv0BzD9Zlb0edkUoau5c1bInWSICqXAGrpwEltkncUwcxJIGEcVryZhLgb0p/3PkKaLIWkjhHRtLe9yiA7Q==",
"requires": {}
},
"@redis/search": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.0.6.tgz",
"integrity": "sha512-pP+ZQRis5P21SD6fjyCeLcQdps+LuTzp2wdUbzxEmNhleighDDTD5ck8+cYof+WLec4csZX7ks+BuoMw0RaZrA==",
"requires": {}
},
"@redis/time-series": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.0.3.tgz",
"integrity": "sha512-OFp0q4SGrTH0Mruf6oFsHGea58u8vS/iI5+NpYdicaM+7BgqBZH8FFvNZ8rYYLrUO/QRqMq72NpXmxLVNcdmjA==",
"requires": {}
},
"cluster-key-slot": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz",
"integrity": "sha512-2Nii8p3RwAPiFwsnZvukotvow2rIHM+yQ6ZcBXGHdniadkYGZYiGmkHJIbZPIV9nfv7m/U1IPMVVcAhoWFeklw=="
},
"generic-pool": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.8.2.tgz",
"integrity": "sha512-nGToKy6p3PAbYQ7p1UlWl6vSPwfwU6TMSWK7TTu+WUY4ZjyZQGniGGt2oNVvyNSpyZYSB43zMXVLcBm08MTMkg=="
},
"redis": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.1.0.tgz",
"integrity": "sha512-5hvJ8wbzpCCiuN1ges6tx2SAh2XXCY0ayresBmu40/SGusWHFW86TAlIPpbimMX2DFHOX7RN34G2XlPA1Z43zg==",
"requires": {
"@redis/bloom": "1.0.2",
"@redis/client": "1.1.0",
"@redis/graph": "1.0.1",
"@redis/json": "1.0.3",
"@redis/search": "1.0.6",
"@redis/time-series": "1.0.3"
}
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
}
}
}

View File

@@ -1,4 +0,0 @@
{
"type": "module",
"dependencies": { "redis": "^4.1.0" }
}

View File

@@ -7,3 +7,6 @@ edition = "2021"
[dependencies]
tokio = { version = "0.2", features = ["full"] }
[build]
target-dir = "/home/marcelo/local/rust/target"

View File

@@ -7,7 +7,7 @@ use std::sync::{Arc};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let listener = TcpListener::bind("127.0.0.1:55555").await?;
let client = redis::Client::open("redis://127.0.0.1/").unwrap();
let client = redis::Client::open("redis://db.occase.de/").unwrap();
let con = Arc::new(Mutex::new(client.get_async_connection().await?));
loop {

View File

@@ -1,5 +1,5 @@
AC_PREREQ([2.69])
AC_INIT([Aedis], [0.1.2], [mzimbres@gmail.com])
AC_INIT([Aedis], [0.2.1], [mzimbres@gmail.com])
AC_CONFIG_MACRO_DIR([m4])
#AC_CONFIG_SRCDIR([src/aedis.cpp])
AC_CONFIG_HEADERS([config.h])

View File

@@ -4,6 +4,9 @@
* accompanying file LICENSE.txt)
*/
//#define BOOST_ASIO_HAS_IO_URING
//#define BOOST_ASIO_DISABLE_EPOLL
#include <string>
#include <iostream>
#include <boost/asio.hpp>