diff --git a/build-preview.sh b/build-preview.sh new file mode 100755 index 0000000..0687d24 --- /dev/null +++ b/build-preview.sh @@ -0,0 +1,95 @@ +#!/bin/sh +# Build preview site from website-v2-docs and swap in boostlook-v3.css +# +# Usage: +# ./build-preview.sh # build lib docs + site docs, sync into preview/ +# ./build-preview.sh --css-only # just rebuild CSS and swap it in +# ./build-preview.sh --serve # just start the local server + +set -e + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +DOCS_DIR="$SCRIPT_DIR/../website-v2-docs" +BOOST_DIR="$HOME/boost" +PREVIEW_DIR="$SCRIPT_DIR/preview" +BUILD_DIR="$DOCS_DIR/build" +PORT=8000 + +css_swap() { + echo "Swapping boostlook-v3.css into preview..." + cp "$SCRIPT_DIR/boostlook-v3.css" "$PREVIEW_DIR/_/css/boostlook.css" +} + +serve() { + echo "Serving preview at http://localhost:$PORT/" + open "http://localhost:$PORT/" + cd "$PREVIEW_DIR" + python3 -m http.server "$PORT" +} + +if [ "$1" = "--serve" ]; then + serve + exit 0 +fi + +if [ ! -d "$DOCS_DIR" ]; then + echo "Error: website-v2-docs not found at $DOCS_DIR" + exit 1 +fi + +if [ ! -d "$BOOST_DIR" ]; then + echo "Error: boost superproject not found at $BOOST_DIR" + exit 1 +fi + +# Rebuild CSS from sources +echo "Building boostlook-v3.css..." +sh "$SCRIPT_DIR/build-css.sh" + +if [ "$1" = "--css-only" ]; then + css_swap + echo "Done (CSS only)." + exit 0 +fi + +# Build library docs +echo "Building library docs..." +cd "$DOCS_DIR" +sh libdoc.sh develop + +# Build site docs +echo "Building site docs..." +sh sitedoc.sh develop + +# Sync capy library docs into preview/capy/ +echo "Syncing capy docs into preview..." +mkdir -p "$PREVIEW_DIR/capy" +rsync -a --delete "$BUILD_DIR/lib/doc/capy/" "$PREVIEW_DIR/capy/" + +# Sync UI assets (fonts, JS, images) +echo "Syncing UI assets..." +rsync -a "$BUILD_DIR/lib/doc/_/" "$PREVIEW_DIR/_/" + +# Sync site docs +echo "Syncing site docs..." 
+for dir in user-guide contributor-guide formal-reviews; do + if [ -d "$BUILD_DIR/$dir" ]; then + rsync -a "$BUILD_DIR/$dir/" "$PREVIEW_DIR/$dir/" + fi +done + +# Build charconv (asciidoctor + b2) +echo "Building charconv docs..." +cp "$SCRIPT_DIR/boostlook-v3.css" "$BOOST_DIR/tools/boostlook/boostlook.css" +cd "$BOOST_DIR/libs/charconv/doc" +"$BOOST_DIR/b2" html_ +mkdir -p "$PREVIEW_DIR/charconv" +cp "$BOOST_DIR/libs/charconv/doc/html/charconv.html" "$PREVIEW_DIR/charconv/index.html" + +# Swap in our CSS +css_swap + +echo "Done. Preview ready at preview/" + +# Start local server +serve diff --git a/netlify.toml b/netlify.toml index c35c28c..e92579d 100644 --- a/netlify.toml +++ b/netlify.toml @@ -1,3 +1,3 @@ [build] publish = "preview/" - command = "rm preview/_/css/boostlook.css && cp boostlook-v3.css preview/_/css/boostlook.css" + command = "rm preview/_/css/boostlook.css && cp boostlook-v3.css preview/_/css/boostlook.css" \ No newline at end of file diff --git a/preview/_/css/boostlook.css b/preview/_/css/boostlook.css deleted file mode 120000 index ba7e560..0000000 --- a/preview/_/css/boostlook.css +++ /dev/null @@ -1 +0,0 @@ -../../../boostlook-v3.css \ No newline at end of file diff --git a/preview/_/css/boostlook.css b/preview/_/css/boostlook.css new file mode 100644 index 0000000..e86977c --- /dev/null +++ b/preview/_/css/boostlook.css @@ -0,0 +1,5012 @@ +/** + * Boost Look v3 — Development Build + * New design and development for the Boost C++ Libraries website (boost.io), + * in collaboration with MetaLab. + * Built from modular sources in src/css/ via build-css.sh + * Note: boostlook.css remains the current production stylesheet. 
+ * License: BSL-1.0 + */ + +/** + * File Structure Overview (src/css/): + * 00-header.css - License, file overview + * 01-variables.css - Root CSS custom properties, spacing, typography, icons + * 02-themes.css - Light/dark theme variable mappings + * 03-fonts.css - @font-face declarations (Noto Sans, Monaspace) + * 04-reset.css - CSS reset (box-sizing, margins, defaults) + * 05-global-typography.css - Base container, headings h1-h6, heading anchors + * 06-global-links.css - Paragraph styling, links, footnotes + * 07-global-code.css - Code blocks, inline code, syntax highlighting (hljs) + * 08-global-components.css - Quotes, pagination, admonitions, lists, edit-page link + * 09-global-tables-images.css - Tables, image styles + * 10-scrollbars.css - Scrollbar styling (Firefox + WebKit) + * 11-template-layout.css - Template-specific scrolling, iframe, TOC common styles + * 12-asciidoctor.css - AsciiDoctor-specific styles, Rouge syntax highlighting + * 13-antora.css - Antora navigation, toolbar, breadcrumbs, tabs, search + * 14-quickbook.css - Quickbook legacy wrapper, titles, TOC, tables, footer + * 15-readme.css - Library README styles + * 16-responsive-toc.css - AsciiDoctor responsive TOC layout (768px → 1920px) + */ + +/** + * Template Structure: + * The framework supports these main documentation templates: + * + * 1. AsciiDoctor Template: + * example: https://www.boost.io/doc/libs/1_87_0/libs/charconv/doc/html/charconv.html + *
env→stop_token — A stop token for cooperative cancellation
env→allocator — An optional allocator for frame allocation
env→frame_allocator — An optional frame allocator
Capy abstracts away sockets, files, and asynchrony with type-erased streams and buffer sequences—code compiles fast because the implementation is hidden. It provides the framework for concurrent algorithms that transact in buffers of memory: networking, serial ports, console, timers, and any platform I/O. This is only possible because Capy is coroutine-only, enabling optimizations and ergonomics that hybrid approaches must sacrifice.
+Lazy coroutine tasks — task<T> with forward-propagating stop tokens and automatic cancellation
Buffer sequences — taken straight from Asio and improved
+Stream concepts — ReadStream, WriteStream, ReadSource, WriteSink, BufferSource, BufferSink
Type-erased streams — any_stream, any_read_stream, any_write_stream for fast compilation
Concurrency facilities — executors, strands, thread pools, when_all, when_any
Test utilities — mock streams, mock sources/sinks, error injection
+Networking — no sockets, acceptors, or DNS; that’s what Corosio provides
+Protocols — no HTTP, WebSocket, or TLS; see the Http and Beast2 libraries
+Platform event loops — no io_uring, IOCP, epoll, or kqueue; Capy is the layer above
+Callbacks or futures — coroutine-only means no other continuation styles
+Sender/receiver — Capy uses the IoAwaitable protocol, not std::execution
Users of Corosio — portable coroutine networking
+Users of Http — sans-I/O HTTP/1.1 clients and servers
+Users of Websocket — sans-I/O WebSocket
+Users of Beast2 — high-level HTTP/WebSocket servers
+Users of Burl — high-level HTTP client
+All of these are built on Capy. Understanding its concepts—tasks, buffer sequences, streams, executors—unlocks the full power of the stack.
+Use case first. Buffer sequences, stream concepts, executor affinity—these exist because I/O code needs them, not because they’re theoretically elegant.
+Coroutines-only. No callbacks, futures, or sender/receiver. Hybrid support forces compromises; full commitment unlocks optimizations that adapted models cannot achieve.
+Address the complaints of C++. Type erasure at boundaries, minimal dependencies, and hidden implementations keep builds fast and templates manageable.
+| + + | +
+
+
+Unless otherwise specified, all code examples in this documentation assume the following: +
+
+
+
+
+ |
+
This example demonstrates a minimal coroutine that reads from a stream and echoes the data back:
+#include <boost/capy.hpp>
+
+using namespace boost::capy;
+
+task<> echo(any_stream& stream)
+{
+ char buf[1024];
+ for(;;)
+ {
+ auto [ec, n] = co_await stream.read_some(mutable_buffer(buf));
+ if(ec.failed())
+ co_return;
+ auto [wec, wn] = co_await write(stream, const_buffer(buf, n));
+ if(wec.failed())
+ co_return;
+ }
+}
+
+int main()
+{
+ thread_pool pool;
+ // In a real application, you would obtain a stream from Corosio
+ // and call: run_async(pool.get_executor())(echo(stream));
+ return 0;
+}
+The echo function accepts an any_stream&—a type-erased wrapper that works with any concrete stream implementation. The function reads data into a buffer, then writes it back. Both operations use co_await to suspend until the I/O completes.
The task<> return type (equivalent to task<void>) creates a lazy coroutine that does not start executing until awaited or launched with run_async.
Quick Start — Set up your first Capy project
+C++20 Coroutines Tutorial — Learn coroutines from the ground up
+Concurrency Tutorial — Understand threads, mutexes, and synchronization
+Coroutines in Capy — Deep dive into task<T> and the IoAwaitable protocol
Buffer Sequences — Master the concept-driven buffer model
+Stream Concepts — Understand the six stream concepts
+class const_buffer
- : public /* implementation-defined */
+class const_buffer;
| Name | -Description | -
|---|---|
|
-- |
Description |
|||||
|
+
|
Constructors |
|||
Description |
|||||
| - | The allocator for coroutine frame allocation. |
-||||
The executor for coroutine resumption. |
|||||
| + | The frame allocator for coroutine frame allocation. |
+||||
The stop token for cancellation propagation. |
| Name | -Description | -
|---|---|
|
-- |
Description |
|||||
| - | Tag type for coroutine allocator retrieval. |
-||||
Tag type for coroutine environment retrieval. |
Tag type for coroutine executor retrieval. |
||||
| + | Tag type for coroutine frame allocator retrieval. |
+||||
Tag type for coroutine stop token retrieval. |
Description |
||||
| - | Tag object that yields the current frame allocator when awaited. |
-||||
Tag object that yields the current environment when awaited. |
Tag object that yields the current executor when awaited. |
||||
| + | Tag object that yields the current frame allocator when awaited. |
+||||
Tag object that yields the current stop token when awaited. |
Description |
||||
| - | Tag type for coroutine allocator retrieval. |
-||||
Tag type for coroutine environment retrieval. |
Tag type for coroutine executor retrieval. |
||||
| + | Tag type for coroutine frame allocator retrieval. |
+||||
Tag type for coroutine stop token retrieval. |
Description |
||||
| - | Tag object that yields the current frame allocator when awaited. |
-||||
Tag object that yields the current environment when awaited. |
Tag object that yields the current executor when awaited. |
||||
| + | Tag object that yields the current frame allocator when awaited. |
+||||
Tag object that yields the current stop token when awaited. |
|
+ Important
+ |
++libquadmath is only available on supported platforms (e.g. Linux with x86, x86_64, PPC64, and IA64). + | +
This library depends on: Boost.Assert, Boost.Config, Boost.Core, and optionally libquadmath (see above).
+#include <boost/charconv.hpp>
+
+const char* buffer = "42";
+int v = 0;
+boost::charconv::from_chars_result r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v);
+assert(r.ec == std::errc());
+assert(v == 42);
+
+char buffer[64];
+int v = 123456;
+boost::charconv::to_chars_result r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer), v);
+assert(r.ec == std::errc());
+assert(!strncmp(buffer, "123456", 6)); // strncmp returns 0 on match
+from_chars is a set of functions that parse a string from [first, last) in an attempt to convert the string into value according to the chars_format specified (if applicable).
+The parsing of numbers is locale-independent (e.g. equivalent to the "C" locale).
+The result of from_chars is from_chars_result which on success returns ptr == last and ec == std::errc(), and on failure returns ptr equal to the last valid character parsed or last on underflow/overflow, and ec == std::errc::invalid_argument or std::errc::result_out_of_range respectively. from_chars does not require the character sequence to be null terminated.
namespace boost { namespace charconv {
+
+struct from_chars_result
+{
+ const char* ptr;
+ std::errc ec;
+
+ friend constexpr bool operator==(const from_chars_result& lhs, const from_chars_result& rhs) noexcept = default;
+ constexpr explicit operator bool() const noexcept { return ec == std::errc{}; }
+};
+
+template <typename Integral>
+BOOST_CXX14_CONSTEXPR from_chars_result from_chars(const char* first, const char* last, Integral& value, int base = 10) noexcept;
+
+template <typename Integral>
+BOOST_CXX14_CONSTEXPR from_chars_result from_chars(boost::core::string_view sv, Integral& value, int base = 10) noexcept;
+
+BOOST_CXX14_CONSTEXPR from_chars_result from_chars<bool>(const char* first, const char* last, bool& value, int base) = delete;
+
+template <typename Real>
+from_chars_result from_chars(const char* first, const char* last, Real& value, chars_format fmt = chars_format::general) noexcept;
+
+template <typename Real>
+from_chars_result from_chars(boost::core::string_view sv, Real& value, chars_format fmt = chars_format::general) noexcept;
+
+// See note below Usage notes for from_chars for floating point types
+
+template <typename Real>
+from_chars_result from_chars_erange(const char* first, const char* last, Real& value, chars_format fmt = chars_format::general) noexcept;
+
+template <typename Real>
+from_chars_result from_chars_erange(boost::core::string_view sv, Real& value, chars_format fmt = chars_format::general) noexcept;
+
+}} // Namespace boost::charconv
+first, last - pointers to a valid range to parse
sv - string view of a valid range to parse.
+Compatible with boost::core::string_view, std::string, and std::string_view
value - where the output is stored upon successful parsing
base (integer only) - the integer base to use. Must be between 2 and 36 inclusive
fmt (floating point only) - The format of the buffer. See chars_format overview for description.
ptr - On return from from_chars it is a pointer to the first character not matching the pattern, or pointer to last if all characters are successfully parsed.
ec - the error code. Values returned by from_chars are:
Return Value |
+Description |
+
|
+Successful Parsing |
+
|
+1) Parsing a negative into an unsigned type +2) Leading 3) Leading space +4) Incompatible formatting (e.g. exponent on |
+
|
+1) Overflow +2) Underflow |
+
operator== - compares the values of ptr and ec for equality
All built-in integral types are allowed except bool which is deleted
+These functions have been tested to support __int128 and unsigned __int128
from_chars for integral types is constexpr when compiled using -std=c++14 or newer
One known exception is GCC 5 which does not support constexpr comparison of const char*.
A valid string must only contain the characters for numbers. Leading spaces are not ignored, and will return std::errc::invalid_argument.
On std::errc::result_out_of_range we return ±0 for small values (e.g. 1.0e-99999) or ±HUGE_VAL for large values (e.g. 1.0e+99999) to match the handling of std::strtod.
+This is a divergence from the standard which states we should return the value argument unmodified.
from_chars has an open issue with LWG here: https://cplusplus.github.io/LWG/lwg-active.html#3081.
+The standard for <charconv> does not distinguish between underflow and overflow like strtod does.
+Let’s say you are writing a JSON library, and you replace std::strtod with boost::charconv::from_chars for performance reasons.
+Charconv returns std::errc::result_out_of_range on some conversion.
+You would then have to parse the string again yourself to figure out which of the four possible reasons you got std::errc::result_out_of_range.
+Charconv can give you that information by using boost::charconv::from_chars_erange instead of boost::charconv::from_chars throughout the code base.
+By implementing the resolution to the LWG issue that matches the established strtod behavior I think we are providing the correct behavior without waiting on the committee’s decision.
These functions have been tested to support all built-in floating-point types and those from C++23’s <stdfloat>
Long doubles can be 64, 80, or 128-bit, but must be IEEE 754 compliant. An example of a non-compliant, and therefore unsupported, format is __ibm128.
Use of __float128 or std::float128_t requires compiling with -std=gnu++xx and linking GCC’s libquadmath.
+This is done automatically when building with CMake.
const char* buffer = "42";
+int v = 0;
+from_chars_result r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v);
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(v == 42);
+
+std::string str_buffer (buffer);
+boost::core::string_view sv(str_buffer);
+int v2;
+auto r2 = boost::charconv::from_chars(sv, v2);
+assert(r2);
+assert(v2 == v);
+const char* buffer = "1.2345";
+double v = 0;
+auto r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v);
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(v == 1.2345);
+
+std::string str_buffer(buffer);
+double v2;
+auto r2 = boost::charconv::from_chars(buffer, v2);
+assert(r2);
+assert(v == v2);
+const char* buffer = "2a";
+unsigned v = 0;
+auto r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v, 16);
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(v == 42);
+const char* buffer = "1.3a2bp-10";
+double v = 0;
+auto r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v, boost::charconv::chars_format::hex);
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(v == 8.0427e-18);
+The below is invalid because a negative value is being parsed into an unsigned integer.
+const char* buffer = "-123";
+unsigned v = 0;
+auto r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v);
+assert(r.ec == std::errc::invalid_argument);
+assert(!r); // Same as above but less verbose. Added in C++26.
+The below is invalid because a fixed format floating-point value can not have an exponent.
+const char* buffer = "-1.573e-3";
+double v = 0;
+auto r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v, boost::charconv::chars_format::fixed);
+assert(r.ec == std::errc::invalid_argument);
+assert(!r); // Same as above but less verbose. Added in C++26.
+Note: In the event of std::errc::invalid_argument, v is not modified by from_chars
const char* buffer = "1234";
+unsigned char v = 0;
+auto r = boost::charconv::from_chars(buffer, buffer + std::strlen(buffer), v);
+assert(r.ec == std::errc::result_out_of_range);
+assert(!r); // Same as above but less verbose. Added in C++26.
+assert(v == 0);
+Note: In the event of std::errc::result_out_of_range, v is not modified by from_chars
to_chars is a set of functions that attempts to convert value into a character buffer specified by [first, last).
+The result of to_chars is to_chars_result which on success returns ptr equal to one-past-the-end of the characters written and ec == std::errc() and on failure returns std::errc::value_too_large and ptr == last.
+to_chars does not null-terminate the returned characters.
namespace boost { namespace charconv {
+
+struct to_chars_result
+{
+ char* ptr;
+ std::errc ec;
+
+ friend constexpr bool operator==(const to_chars_result& lhs, const to_chars_result& rhs) noexcept = default;
+ constexpr explicit operator bool() const noexcept { return ec == std::errc{}; }
+};
+
+template <typename Integral>
+BOOST_CHARCONV_CONSTEXPR to_chars_result to_chars(char* first, char* last, Integral value, int base = 10) noexcept;
+
+template <typename Integral>
+BOOST_CHARCONV_CONSTEXPR to_chars_result to_chars<bool>(char* first, char* last, Integral value, int base) noexcept = delete;
+
+template <typename Real>
+to_chars_result to_chars(char* first, char* last, Real value, chars_format fmt, int precision) noexcept;
+
+}} // Namespace boost::charconv
+first, last - pointers to the beginning and end of the character buffer
value - the value to be parsed into the buffer
base (integer only) - the integer base to use. Must be between 2 and 36 inclusive
fmt (float only) - the floating point format to use.
+See chars_format overview for description.
precision (float only) - the number of decimal places required
ptr - On return from to_chars points to one-past-the-end of the characters written on success or last on failure
ec - the error code. Values returned by to_chars are:
Return Value |
+Description |
+
|
+Successful Parsing |
+
|
+1) Overflow +2) Underflow |
+
operator== - compares the value of ptr and ec for equality
All built-in integral types are allowed except bool which is deleted
+to_chars for integral types is constexpr (BOOST_CHARCONV_CONSTEXPR is defined) when:
+compiled using -std=c++14 or newer
using a compiler with __builtin_is_constant_evaluated
These functions have been tested to support __int128 and unsigned __int128
The following will be returned when handling different values of NaN
qNaN returns "nan"
-qNaN returns "-nan(ind)"
sNaN returns "nan(snan)"
-sNaN returns "-nan(snan)"
These functions have been tested to support all built-in floating-point types and those from C++23’s <stdfloat>
Long doubles can be 64, 80, or 128-bit, but must be IEEE 754 compliant. An example of a non-compliant, and therefore unsupported, format is __ibm128.
Use of __float128 or std::float128_t requires compiling with -std=gnu++xx and linking GCC’s libquadmath.
+This is done automatically when building with CMake.
char buffer[64] {};
+int v = 42;
+to_chars_result r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer) - 1, v);
+assert(r.ec == std::errc());
+assert(!strcmp(buffer, "42")); // strcmp returns 0 on match
+char buffer[64] {};
+double v = 1e300;
+to_chars_result r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer) - 1, v);
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(!strcmp(buffer, "1e+300"));
+char buffer[64] {};
+int v = 42;
+to_chars_result r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer) - 1, v, 16);
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(!strcmp(buffer, "2a")); // strcmp returns 0 on match
+char buffer_u[64] {};
+double u = -1.08260383390082950e+20;
+
+char buffer_v[64] {};
+double v = -1.08260383390082946e+20;
+
+to_chars(buffer_u, buffer_u + sizeof(buffer_u) - 1, u, chars_format::hex);
+to_chars(buffer_v, buffer_v + sizeof(buffer_v) - 1, v, chars_format::hex);
+
+std::cout << "U: " << buffer_u << "\nV: " << buffer_v << std::endl;
+
+// U: -1.779a8946bb5fap+66
+// V: -1.779a8946bb5f9p+66
+//
+// With hexfloats we can see the ULP distance between U and V is a - 9 == 1.
+char buffer[3] {};
+int v = -1234;
+to_chars_result r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer) - 1, v, 16);
+assert(r.ec == std::errc::value_too_large);
+assert(!r); // Same as above but less verbose. Added in C++26.
+char buffer[3] {};
+double v = 1.2345;
+auto r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer) - 1, v);
+assert(r.ec == std::errc::value_too_large);
+assert(!r); // Same as above but less verbose. Added in C++26.
+In the event of std::errc::value_too_large, to_chars_result.ptr is equal to last
boost::charconv::chars_format is an enum class used to define the format of floating point types with from_chars and to_chars.
namespace boost { namespace charconv {
+
+enum class chars_format : unsigned
+{
+ scientific = 1 << 0,
+ fixed = 1 << 1,
+ hex = 1 << 2,
+ general = fixed | scientific
+};
+
+}} // Namespace boost::charconv
+Scientific format will be of the form 1.3e+03.
+The integer part will be between 0 and 9 inclusive. The fraction and exponent will always appear.
+The exponent will always have a minimum of 2 digits.
Fixed format will be of the form 2.30 or 3090. An exponent will not appear with this format.
+If the precision of to_chars exceeds that of the type (e.g. std::numeric_limits<double>::max_digits10), 0s will be appended to the end of the significant digits.
Hex format will be of the form 1.0cp+05. The integer part will always be 0 or 1.
+The exponent will be with a p instead of an e as used with base 10 formats, because e is a valid hex value.
+Note: Every binary floating-point number has a unique representation as a hexfloat, but not every hexfloat has a unique representation as a binary floating-point number.
+This is due to the fact that the number of bits in the significand of an IEEE754 binary32 and binary64 are not divisible by 4.
For those unfamiliar with hexfloats, they are valuable in specific instances:
+Precision control: Hexfloats can offer finer control over the precision of floating-point values. +In hexadecimal notation, each digit represents four bits (one hexit), allowing you to directly manipulate the precision of the number by specifying a certain number of hexadecimal digits. +This can be useful when you need to precisely control the level of accuracy required for your calculations.
+Bit-level representation: Hexfloats provide a direct representation of the underlying bits of a floating-point number. +Each hexadecimal digit corresponds to a specific group of bits, making it easier to visualize and understand the internal structure of the floating-point value. +This can be helpful for debugging or analyzing floating-point arithmetic operations (e.g. Computing ULP distances).
+General format will be the shortest representation of a number in either fixed or scientific format (e.g. 1234 instead of 1.234e+03).
The contents of <boost/charconv/limits.hpp> are designed to help the user optimize the size of the buffer required for to_chars.
namespace boost { namespace charconv {
+
+template <typename T>
+constexpr int limits<T>::max_chars10;
+
+template <typename T>
+constexpr int limits<T>::max_chars;
+
+}} // Namespace boost::charconv
+The minimum size of the buffer that needs to be
+passed to to_chars to guarantee successful conversion for all values of type T, when either no base is passed, or base 10 is passed.
The minimum size of the buffer that needs to be passed to to_chars to guarantee successful conversion for all values of type T, for any value of base.
The following two examples are for max_chars10 to optimize the buffer size with to_chars for an integral type and a floating-point type respectively.
char buffer [boost::charconv::limits<std::int32_t>::max_chars10];
+auto r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer), std::numeric_limits<std::int32_t>::max());
+
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(!strcmp(buffer, "2147483647")); // strcmp returns 0 on match
+char buffer [boost::charconv::limits<float>::max_chars10];
+auto r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer), std::numeric_limits<float>::max());
+
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(!strcmp(buffer, "3.40282347e+38")); // strcmp returns 0 on match
+The following example is a usage of max_chars when used to serialize an integer in binary (base = 2).
char buffer [boost::charconv::limits<std::uint16_t>::max_chars];
+auto r = boost::charconv::to_chars(buffer, buffer + sizeof(buffer), std::numeric_limits<std::uint16_t>::max(), 2);
+
+assert(r.ec == std::errc());
+assert(r); // Same as above but less verbose. Added in C++26.
+assert(!strcmp(buffer, "1111111111111111")); // strcmp returns 0 on match
+This section describes a range of performance benchmarks that have been run comparing this library with the standard library, and how to run your own benchmarks if required.
+The values are relative to the performance of std::printf and std::strtoX.
+Larger numbers are more performant (e.g. 2.00 means twice as fast, and 0.50 means it takes twice as long).
+std::printf and std::strtoX are always listed first as they will be the reference value.
To run the benchmarks yourself, navigate to the test folder and define BOOST_CHARCONV_RUN_BENCHMARKS when running the tests.
+An example on Linux with b2: ../../../b2 cxxstd=20 toolset=gcc-13 define=BOOST_CHARCONV_RUN_BENCHMARKS STL_benchmark linkflags="-lfmt" -a release .
Additionally, you will need the following:
+A compiler with full <charconv> support:
GCC 11 or newer
+MSVC 19.24 or newer
+Data in tables 1 - 4 were run on Ubuntu 23.04 with x86_64 architecture using GCC 13.1.0 with libstdc++.
+| Function | +Relative Performance (float / double) | +
|---|---|
std::printf |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.56 / 0.49 |
+
Boost.spirit.karma |
+1.70 / 2.62 |
+
std::to_chars |
+4.01 / 6.03 |
+
Boost.Charconv.to_chars |
+4.46 / 6.20 |
+
Google double-conversion |
+1.26 / 1.91 |
+
{fmt} |
+2.52 / 3.63 |
+
| Function | +Relative Performance (float / double) | +
|---|---|
std::strto(f/d) |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.33 / 0.42 |
+
Boost.spirit.qi |
+3.17 / 4.65 |
+
std::from_chars |
+3.23 / 5.77 |
+
Boost.Charconv.from_chars |
+3.28 / 5.75 |
+
Google double-conversion |
+1.16 / 1.30 |
+
| Function | +Relative Performance (uint32_t / uint64_t) | +
|---|---|
std::printf |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+1.80 / 1.38 |
+
Boost.spirit.karma |
+2.81 / 1.62 |
+
std::to_chars |
+4.06 / 2.45 |
+
Boost.Charconv.to_chars |
+4.13 / 2.48 |
+
{fmt} |
+2.88 / 2.21 |
+
| Function | +Relative Performance (uint32_t / uint64_t) | +
|---|---|
std::strto(ul,ull) |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.53 / 0.52 |
+
Boost.spirit.qi |
+2.24 / 1.49 |
+
std::from_chars |
+1.97 / 1.68 |
+
Boost.Charconv.from_chars |
+2.54 / 1.78 |
+
Data in tables 5 - 8 were run on Windows 11 with x86_64 architecture using MSVC 14.3 (V17.7.0).
+| Function | +Relative Performance (float / double) | +
|---|---|
std::printf |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.50 / 0.70 |
+
Boost.spirit.karma |
+2.23 / 7.58 |
+
std::to_chars |
+5.58 / 15.77 |
+
Boost.Charconv.to_chars |
+5.62 / 15.26 |
+
| Function | +Relative Performance (float / double) | +
|---|---|
std::strto(f/d) |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.14 / 0.20 |
+
Boost.spirit.qi |
+2.03 / 4.58 |
+
std::from_chars |
+1.01 / 1.23 |
+
Boost.Charconv.from_chars |
+2.06 / 5.21 |
+
| Function | +Relative Performance (uint32_t / uint64_t) | +
|---|---|
std::printf |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.68 / 0.68 |
+
Boost.spirit.karma |
+2.75 / 1.67 |
+
std::to_chars |
+2.75 / 2.10 |
+
Boost.Charconv.to_chars |
+2.75 / 2.06 |
+
| Function | +Relative Performance (uint32_t / uint64_t) | +
|---|---|
std::strto(ul,ull) |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.46 / 0.39 |
+
Boost.spirit.qi |
+1.94 / 1.63 |
+
std::from_chars |
+2.43 / 2.18 |
+
Boost.Charconv.from_chars |
+2.68 / 2.27 |
+
Data in tables 9-12 were run on MacOS Ventura 13.5.2 with M1 Pro architecture using Homebrew GCC 13.2.0 with libstdc++.
+| Function | +Relative Performance (float / double) | +
|---|---|
std::printf |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.58 / 0.16 |
+
Boost.spirit.karma |
+1.39 / 1.22 |
+
std::to_chars |
+6.78 / 6.47 |
+
Boost.Charconv.to_chars |
+7.25 / 6.86 |
+
Google double-conversion |
+2.26 / 2.16 |
+
{fmt} |
+3.78 / 3.38 |
+
| Function | +Relative Performance (float / double) | +
|---|---|
std::strto(f/d) |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+0.06 / 0.06 |
+
Boost.spirit.qi |
+1.12 / 1.06 |
+
std::from_chars |
+1.32 / 1.65 |
+
Boost.Charconv.from_chars |
+1.28 / 1.63 |
+
Google double-conversion |
+0.45 / 0.32 |
+
| Function | +Relative Performance (uint32_t / uint64_t) | +
|---|---|
std::printf |
+1.00 / 1.00 |
+
Boost.lexical_cast |
+2.08 / 1.75 |
+
Boost.spirit.karma |
+4.17 / 2.06 |
+
std::to_chars |
+6.25 / 4.12 |
+
Boost.Charconv.to_chars |
+6.25 / 4.12 |
+
{fmt} |
+5.29 / 3.47 |
+
| Function | Relative Performance (uint32_t / uint64_t) |
|---|---|
| std::strto(ul,ull) | 1.00 / 1.00 |
| Boost.lexical_cast | 0.56 / 0.54 |
| Boost.spirit.qi | 1.39 / 1.33 |
| std::from_chars | 1.92 / 1.65 |
| Boost.Charconv.from_chars | 2.27 / 1.65 |
The following papers and blog posts serve as the basis for the algorithms used in the library:
+J.R. Parker A General Character to Integer Conversion Method, Software: Practice and Experience 15 (8), 1985.
+Junekey Jeon, Faster integer formatting - James Anhalt (jeaiii)’s algorithm
+Junekey Jeon, Dragonbox: A New Floating-Point Binary-to-Decimal Conversion Algorithm
+Junekey Jeon, Fixed-precision formatting of floating-point numbers
+William D. Clinger, How to Read Floating Point Numbers Accurately, 1990
+Daniel Lemire, Number Parsing at a Gigabyte per Second, Software: Practice and Experience 51 (8), 2021.
+Noble Mushtak, Daniel Lemire, Fast Number Parsing Without Fallback, Software: Practice and Experience (to appear)
+Ulf Adams, Ryū revisited: printf floating point conversion, Proceedings of the ACM on Programming Languages Volume 3, 2019
+Special thanks to the following people (non-inclusive list):
+Peter Dimov for providing technical guidance and contributing to the library throughout development.
+Junekey Jeon for developing and answering my questions about his integer-formatting, Dragonbox, and Floff.
+Chris Kormanyos for serving as the library review manager.
+Stephan T. Lavavej for providing the basis for the benchmarks.
+All that reviewed the library and provided feedback to make it better.
This documentation is copyright 2022-2023 Peter Dimov and Matt Borland and is distributed under the Boost Software License, Version 1.0.
+This section contains guidance on how to reduce development time from concept through to publishing of a new library, whilst ensuring a quality and maintainability to the open source project that will be the result.
+Some libraries are published with the intended primary audience, or in some cases the sole audience, being developers of other libraries. These libraries are published to make some of the time consuming and awkward processes of Boost-compliance easier. It is good practice for a new library developer to read the introductions to each of these libraries, and ascertain if they might be of value to the library they are developing.
| Library | Description |
|---|---|
| Boost.Config | Helps Boost library developers adapt to compiler idiosyncrasies. The range of macros can be extended, if required, with Boost.Predef. |
| Boost.Core | A collection of simple core utilities with minimal dependencies. The range of utilities can be extended, if required, with Boost.Utility. |
| Boost.Assert | Customizable assert macros. |
| Boost.ThrowException | A common infrastructure for throwing exceptions from Boost libraries. |
| Boost.Mp11 | Provides a template metaprogramming framework, useful if metaprogramming is a feature of your new library. |
Always submit changes to a Boost repo using a Pull Request (PR), never use the GitHub website to change the contents of a repository directly.
+It is good practice to bring your local repo up to date before submitting a PR, perhaps on a weekly or even daily basis - depending on the activity in the repo.
Merge commits are to be avoided. These commits happen when a local origin branch and remote branch are out of sync.
+The focus is on strategies that emphasize linear history, rebasing, and collaborative discipline. What follows are the best practices that can help.
+When integrating changes from one branch into another, use git rebase instead of git merge. This rewrites the commit history, applying your changes on top of the target branch and maintains a linear history. Keep branches short-lived and focused on specific features or bug fixes. This reduces the likelihood of conflicts and simplifies rebasing onto the main branch. Regularly rebase your feature branch onto main or the target branch to keep up with upstream changes and avoid a large, complex integration at the end. For example:
git checkout feature-branch
+git rebase main
+Then push changes after resolving any conflicts:
+git push --force
+Configure your Git client to rebase when pulling changes, rather than creating merge commits:
+git config --global pull.rebase true
+When working collaboratively on a branch, use git pull --rebase to fetch and reapply your local changes on top of the latest commits from the remote branch.
git pull --rebase origin main
+Use interactive rebasing (git rebase -i) to clean up your branch history and squash multiple commits into one meaningful commit. This keeps the repository history tidy and avoids unnecessary merge commits.
git rebase -i HEAD~n # Replace 'n' with the number of commits to squash
+After squashing, you can fast-forward the branch without creating a merge commit:
+git checkout main
+git rebase feature-branch
+Enable fast-forward-only merges to ensure the branch history is linear. This avoids creating a merge commit.
+git merge --ff-only feature-branch
+Configure the repository to enforce fast-forward merges:
+git config --global merge.ff only
+Avoid unnecessary commits and ensure that every commit is meaningful. Use git add -p or git commit --amend to refine commits before pushing.
git commit --amend
+This ensures that when changes are rebased or fast-forwarded, the history remains clean and easy to understand.
+By following these practices, you’ll avoid merge commits and maintain a clean, linear history in your Git repository while keeping the dev community happy!
+Boost has informal coding standards that encourage clear, concise, and useful comments. The following are the main policy recommendations:
+Doxygen-style comments for documenting APIs.
+Explanatory comments preceding a function for complex logic and important decisions.
+Minimal but meaningful inline comments.
+Boost libraries can use Doxygen to generate API reference documentation from specifically formatted comments. Many libraries follow this structure, for example:
+/// \brief Brief description of the function
+/// \details More detailed explanation if necessary.
+/// \param x Description of parameter x
+/// \return Description of return value
+int my_function(int x);
+This makes it easier to generate consistent and readable documentation for users, as the build process picks up the triple-slash comments and creates API documentation automatically from them. Here is a more complete example using all the most useful annotations:
+#include <cmath>
+#include <stdexcept>
+
+/**
+ * @brief Computes the area of a triangle using Heron's formula.
+ *
+ * This function calculates the area of a triangle given the lengths of its three sides.
+ * It uses Heron's formula, which states that for a triangle with sides a, b, and c:
+ *
+ * \f[
+ * A = \sqrt{s \cdot (s - a) \cdot (s - b) \cdot (s - c)}
+ * \f]
+ *
+ * where \f$s\f$ is the semi-perimeter:
+ *
+ * \f[
+ * s = \frac{a + b + c}{2}
+ * \f]
+ *
+ * @param a The length of the first side (must be positive).
+ * @param b The length of the second side (must be positive).
+ * @param c The length of the third side (must be positive).
+ * @return The computed area of the triangle.
+ * @throws std::invalid_argument if the sides do not form a valid triangle.
+ * @throws std::domain_error if the computed area is invalid due to floating-point errors.
+ */
+double computeTriangleArea(double a, double b, double c) {
+ if (a <= 0 || b <= 0 || c <= 0) {
+ throw std::invalid_argument("All side lengths must be positive.");
+ }
+
+ // Check for the triangle inequality
+ if (a + b <= c || a + c <= b || b + c <= a) {
+ throw std::invalid_argument("The given sides do not form a valid triangle.");
+ }
+
+ // Calculate semi-perimeter
+ double s = (a + b + c) / 2.0;
+
+ // Compute area using Heron's formula
+ double area = std::sqrt(s * (s - a) * (s - b) * (s - c));
+
+ if (std::isnan(area) || area <= 0) {
+ throw std::domain_error("Computed area is invalid due to floating-point errors.");
+ }
+
+ return area;
+}
+The most useful Doxygen annotations are:
| Annotation | Description |
|---|---|
| `@brief` | A short summary of the function's purpose. |
| `@param` | Describes the function parameters and their constraints. |
| `@return` | Explains the function's return value. |
| `@throws` | Lists the possible exceptions that the function may throw. |
For mathematical formulas the \f[ … \f] tags render inline LaTeX-style math formulas in the generated documentation.
@brief is used inside block comments (/** … */), while \brief works with both block and single-line (///) comments. Good practice is simply being consistent with your preference.
Since many Boost libraries aim to be compatible with (or eventually integrated into) the Standard Library, you might adopt commenting styles similar to standard library headers, keeping explanations brief, precise, and technical. Sometimes though, the comments are more numerous and helpful in specific implementations, such as Clang libc++, GNU libstdc++ or MSVC STL. The following code comes from std::vector::resize in libc++.
/**
+ * @brief Resizes the container to contain @p __sz elements.
+ *
+ * If @p __sz is smaller than the current size, the container is reduced to its first @p __sz elements.
+ * If @p __sz is greater than the current size, additional default-constructed elements are appended.
+ *
+ * @param __sz The new size of the container.
+ *
+ * If an expansion is needed and sufficient capacity exists, no reallocation occurs.
+ * Otherwise, new storage is allocated and existing elements are moved.
+ *
+ * @exception If an exception is thrown during element construction or move, the container remains unchanged.
+ *
+ * Complexity: Linear in the difference between old and new size.
+ */
+template <class _Tp, class _Allocator>
+void vector<_Tp, _Allocator>::resize(size_type __sz) {
+ if (__sz < size()) {
+
+ // Shrink: Destroy extra elements
+ erase(begin() + __sz, end());
+ } else if (__sz > size()) {
+
+ // Grow: Append default-constructed elements
+ insert(end(), __sz - size(), _Tp());
+ }
+}
+Both exception safety and performance considerations are covered in the comments above, which are good practices!
+Boost encourages documenting exception safety guarantees (noexcept, strong guarantee, basic guarantee), and thread-safety considerations if applicable. For example:
/// \pre `ptr` must not be null.
+/// \post Returns a valid shared_ptr managing `ptr`.
+/// \throws std::bad_alloc if allocation fails.
+std::shared_ptr<T> safe_wrap(T* ptr);
+Some Boost libraries include comments explaining design choices, performance considerations, or trade-offs. These are typically found in complex implementations like Boost.Hana, Boost.Asio or Boost.Spirit. Here’s an example from the Boost.Hana library, which demonstrates the use of comments to explain the code’s purpose and functionality:
+/*!
+@file
+Defines `boost::hana::transform`.
+*/
+
+namespace boost { namespace hana {
+
+ //! Transform each element of a sequence with a given function.
+ //! @ingroup group-Sequence
+ //!
+ //! Example:
+ //! @code
+ //! auto doubled = hana::transform(hana::make_tuple(1, 2, 3), [](auto x) { return x * 2; });
+ //! @endcode
+ //! doubled == hana::make_tuple(2, 4, 6)
+ //!
+ template <typename Xs, typename F>
+ constexpr auto transform(Xs&& xs, F&& f) {
+
+ // See below for the commented version of this function.
+ }
+}}
+The @file entry provides an overview of the file contents. The //! syntax precedes a function-level Doxygen comment, providing an example usage of the function hana::transform in the code snippet above.
Inline comments, throughout the source code, are used to explain the purpose of specific statements. This example is taken from hana::transform, mentioned previously.
constexpr auto transform(Xs&& xs, F&& f) {
+ return hana::adjust_if(
+
+ static_cast<Xs&&>(xs), // Forward the sequence `xs`
+
+ [](auto const&) { return true; }, // Always apply the transformation
+
+ static_cast<F&&>(f) // Forward the transformation function
+ );
+ }
+Here is another example of inline commenting, from the Boost.Asio library, notice how the comments make understanding the flow easy.
+void start_read() {
+
+ // Prepare a buffer to store incoming data.
+ socket_.async_read_some(boost::asio::buffer(data_, max_length),
+ [this](boost::system::error_code ec, std::size_t length) {
+ if (!ec) {
+
+ // Successfully read some data, process it.
+ handle_data(data_, length);
+
+ // Initiate another asynchronous read to continue receiving data.
+ start_read();
+ } else {
+
+ // An error occurred, log and handle it.
+ handle_error(ec);
+ }
+ });
+}
+Here is a more in-depth example, showing how to comment non-trivial code behavior (for example, shared pointers, async operations). The comments also describe purpose rather than restating code (for example, "Keep session alive" rather than "Creates a shared pointer"). And finally the comments guide the reader through the flow (such as explaining what happens after a read or write).
+#include <boost/asio.hpp>
+#include <iostream>
+#include <memory>
+#include <utility>
+
+using boost::asio::ip::tcp;
+
+class Session : public std::enable_shared_from_this<Session> {
+public:
+ explicit Session(tcp::socket socket)
+
+ : socket_(std::move(socket)) {} // Move socket into this session
+
+ void start() {
+
+ read(); // Begin reading data from the client
+ }
+
+private:
+ void read() {
+
+ auto self = shared_from_this(); // Ensure session remains alive during async operation
+
+ // Asynchronous read operation
+ socket_.async_read_some(boost::asio::buffer(data_, max_length),
+ [self](boost::system::error_code ec, std::size_t length) {
+ if (!ec) {
+
+ // Successfully received data, now send a response
+ self->write(length);
+ } else {
+
+ // Handle connection errors (for example, client disconnected)
+ std::cerr << "Read error: " << ec.message() << std::endl;
+ }
+ });
+ }
+
+ void write(std::size_t length) {
+
+ auto self = shared_from_this(); // Keep session alive for async write
+
+ // Asynchronous write operation
+ boost::asio::async_write(socket_, boost::asio::buffer(data_, length),
+ [self](boost::system::error_code ec, std::size_t /*bytes_transferred*/) {
+ if (!ec) {
+
+ // Successfully wrote data, continue reading for more client input
+ self->read();
+ } else {
+
+ // Handle write error (for example, broken pipe)
+ std::cerr << "Write error: " << ec.message() << std::endl;
+ }
+ });
+ }
+
+ tcp::socket socket_;
+ enum { max_length = 1024 };
+
+ char data_[max_length]; // Buffer to store incoming data
+};
+
+// Server class that listens for incoming connections
+class Server {
+public:
+ Server(boost::asio::io_context& io_context, short port)
+ : acceptor_(io_context, tcp::endpoint(tcp::v4(), port)) {
+
+ accept(); // Start listening for connections
+ }
+
+private:
+ void accept() {
+ acceptor_.async_accept(
+ [this](boost::system::error_code ec, tcp::socket socket) {
+ if (!ec) {
+
+ // Successfully accepted a connection, create a session
+ std::make_shared<Session>(std::move(socket))->start();
+ } else {
+
+ // Log accept error
+ std::cerr << "Accept error: " << ec.message() << std::endl;
+ }
+
+ // Continue accepting new connections
+ accept();
+ });
+ }
+
+ tcp::acceptor acceptor_;
+};
+
+// Main function to run the server
+int main() {
+ try {
+ boost::asio::io_context io_context;
+
+ Server server(io_context, 12345); // Start server on port 12345
+
+ io_context.run(); // Run the IO context to handle async operations
+ } catch (std::exception& e) {
+ std::cerr << "Exception: " << e.what() << std::endl;
+ }
+}
+Inline comments are clearer if they are preceded by a blank line. Many libraries do not strictly stick to this practice, but it should be clear from the above example that the preceding-blank-line is a best practice for readability.
+Ideally, when writing error message text, use the following rules of thumb:
+Name the library (so user knows the source)
+Show the input and expected type/value
+Give context (function name, file, line, operation)
+Provide standard error codes (for example: POSIX, errno)
+Avoid low-level noise (memory addresses)
+Be actionable (help the user fix the error)
+Here are some example error messages that have issues:
+| Message | +Issue | +
|---|---|
|
+Too verbose, compiler jargon, doesn’t say what the user did wrong. |
+
|
+Just the type name, no context, no error code, no description. |
+
|
+What input? What target type? Which file/line? |
+
|
+Too vague - “input stream error” — which input, which archive format? |
+
|
+Very low-level, memory address is useless to most users. |
+
|
+No filename, no condition, no hint at what caused it. |
+
Here are some useful error messages:
+| Message | +Notes | +
|---|---|
|
+Names the library, the invalid input, and the problem. |
+
|
+Explains what call failed and gives a standard POSIX error code. |
+
|
+Shows the input ("abc") and the target type ( |
+
|
+Explains the archive issue and gives the file name. |
+
|
+Shows both the stored and requested types — no guesswork. |
+
|
+Includes condition, function, file, and line number. |
+
Well written error messages can be one of the hidden superpowers of your library. A well-crafted message can save hours of debugging, while a cryptic one can scare away even experienced devs - part of the approachable vs hostile experience!
+Content under construction.
+Content under construction.
+The Boost Contributors Community is a dedicated group of developers and enthusiasts committed to advancing the Boost libraries. +There are many different ways of Getting Involved with this community.
+The Boost Developers Mailing List is focused on the development and maintenance of the libraries. +It is a platform for proposing new libraries, discussing design and implementation details, and coordinating efforts among contributors. +Participation in these mailing lists is open to all. +In addition to mailing lists, the Boost community also leverages Slack for real-time communication. +The Slack Workspace hosts various channels dedicated to specific libraries, general discussions, and coordination among contributors.
+When discussing Boost-related topics on social media, contributors are encouraged to follow our Tweeting guidelines. +These guidelines help ensure that the community maintains a positive and informative presence on social media.
+New library developers usually have a lot of questions on their mind, before asking for help or information refer to the Contributors Frequently Asked Questions for answers to many common questions and dilemmas.
+A consistent documentation style, that emphasizes readability and accessibility, is encouraged. +This includes all the library Documentation Guidelines and contributions to this website (refer to Site-docs Style Guide).
+Refer to The Fiscal Sponsorship Committee for an overview of the role of this committee, the assets it maintains, and contact information if needed.
+This section contains answers to the common questions that new contributors to Boost often have.
+Do Boost libraries typically come from gauging outside interest, or perhaps more from developers following their own interests?
+Most Boost Libraries originate from individual developers, or small teams, following their own interests, rather than from organized external demand. Typically, self-motivated developers identify a gap in the C++ ecosystem or have a strong interest in a particular domain. To be clear, Boost is not driven by corporate feature requests, community polling or public wishlists, nor top-down direction (though The C++ Alliance might sponsor, or partially sponsor, work it deems of specific value, such as it did with Boost.Json). The community acts as a gatekeeper to ensure quality, generality, and long-term usefulness, rather than as a source for library concepts.
+When considering developing a library for Boost, what is the right balance between ambitious functionality and limited functionality?
+The keys to a successful development of a new library are to identify core functionality and start simple. Prioritize functionality that provides significant value to developers and aligns with Boost’s goals of promoting high-quality, reusable libraries. Avoid adding unnecessary features that may increase complexity without adding much value. It’s often beneficial to start with a simpler implementation that addresses a specific problem or use case effectively. A library with a narrow focus and a clear, intuitive API is more likely to be accepted by the Boost community than one that attempts to solve too many problems at once or introduces unnecessary complexity.
+In addition to this, consider future extensibility, performance, portability, current trends, and always remember you can seek community advice and feedback through the Boost mailing lists.
+Am I right in thinking that Boost libraries are typically low-level and very specific, rather than higher-level and for tackling large scale issues such as generative AI, a modern game engine, or quantum computing, to name a few examples?
+Yes, Boost libraries have traditionally excelled in areas such as high-performance computing, cross-platform, embedded systems, and networking. Taking your example of generative AI, many Boost libraries could be used as components of a new AI model - such as parsing, networking, serialization, errors and exception handling, math and statistics, randomness, and so on. A new quality generative AI model is a considerable undertaking, and Boost libraries could do a lot of the grunt work and leave a development team with the most creative aspects. Simulating quantum computing, or creating a modern game engine, gets a similar answer, a development team could focus on the interactive UI, with Boost libraries handling a myriad of administrative and operational tasks.
+A key aspect of a successful Boost library is focus: what is the problem that the library in question is the solution for?
+If there is social media discussion on new libraries, typically what does the discussion entail?
+The new libraries most requested tend to be more like updates to existing libraries. There’s a common desire for more comprehensive asynchronous programming support, often for higher-level abstractions or more extensive async features. Another example is more programming options, say for an enhanced version of Boost.ProgramOptions, or for libraries supporting earlier or later C++ standards, such as requests to support pre-C++17 standards.
+Where can I read the current version of the Boost Software License?
+Here: The Boost Software License.
+How should Boost programmers apply the license to source and header files?
+Add a multi-line comment based on the following template, substituting appropriate text for the name and date on the top line:
+// Copyright Joe Coder 2004 - 2006.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// https://www.boost.org/LICENSE_1_0.txt)
+Leave an empty line before and after the above comment block.
+It is fine if the copyright and license messages are not on different lines; in no case should there be other intervening text.
+Do not include "All rights reserved" anywhere.
+Other ways of licensing source files have been considered, but some of them turned out to unintentionally nullify legal elements of the license. Having fixed language for referring to the license helps corporate legal departments evaluate the Boost distribution.
+Creativity in license reference language is strongly discouraged, but judicious changes in the use of whitespace are fine.
+How should the license be applied to documentation files rather than source files?
+Similarly to the way it is applied to source files: the user should see the very same text indicated in the template above, with the only difference that your local copy of LICENSE_1_0.txt should also be linked to.
+How should Boost programmers maintain the copyright messages?
+Copyright is only claimed for changes meeting a certain threshold of originality. Therefore, the copyright message only covers expressions of creativity. It is up to authors of changes to add themselves to the copyright message if they so decide. Typically, a new claimant is added when someone takes over maintenance of a library or a new version of an existing library is developed. In principle, do not remove previous copyright claims - just add new claims and/or claimants.
+How is the Boost Software License (BSL) different from the GNU General Public License (GPL)?
+The GNU General Public License is longer, and may be harder to understand. The Boost license permits the creation of derivative works for any use with no legal requirement to release your source code. Other differences include BSL not requiring reproduction of copyright messages for object code redistribution, and the fact that BSL is not "viral": if you distribute your own code along with some Boost code, the BSL applies only to the Boost code (and modified versions thereof); you are free to license your own code under any terms you like.
+Why the phrase "machine-executable object code generated by a source language processor"?
+To distinguish cases where we do not require reproduction of the copyrights and license (such as object libraries, shared libraries, and final program executables), from cases where reproduction is still required (such as distribution of self-extracting archives of source code or precompiled header files). More detailed wording was rejected as not being legally necessary, and reducing readability.
+Why is the "disclaimer" paragraph of the license entirely in uppercase?
+Capitalization of these particular provisions is a US legal mandate for consumer protection.
+Does the copyright and license cover interfaces too?
+The conceptual interface to a library is not covered. The particular representation expressed in the header is covered, as is the documentation, examples, test programs, and all the other material that goes with the library. A different implementation is free to use the same logical interface, however. Interface issues have been fought out in court several times; refer to a lawyer if this is likely to be an issue.
+Why doesn’t the license prohibit the copyright holder from patenting the covered software?
+No one who distributes their code under the terms of this license could turn around and sue a user for patent infringement. Boost’s lawyers were well aware of patent provisions in licenses like the GPL and CPL, and would have included such provisions in the Boost license if they were believed to be legally useful.
+Why doesn’t the copyright message say "All rights reserved"?
+This provision does not belong in the copyright notice for anything (software, electronic documentation, etc.) that is being licensed. It belongs in books that are sold where, in fact, all rights (for example, to reproduce the book, etc.) are being reserved by the publisher or author.
+Do I have to copyright/license trivial files?
+Yes, even a test file that just contains an empty main() should have a copyright notice. Files without copyright notices make corporate lawyers nervous, and that’s a barrier to adoption. The more Boost is uniformly copyrighted and licensed, the better.
Can I use the Boost Software License for my own projects outside of Boost?
+Yes, there are no restrictions on the use of the license itself.
+Is the Boost license Open Source?
+Yes. The Open Source Initiative certified the Boost Software License 1.0 in early 2008.
+Many developers opt for lightweight integrated developer environments (IDEs), rather than the full-fledged IDE. What lightweight IDEs are popular for C++ development?
+There are several popular options for both Windows and Linux. CLion, developed by JetBrains, is a cross-platform IDE that offers advanced code analysis, refactoring tools, and integration with the CMake build system, which is commonly used in C++ projects.
+GNU Emacs and Vim are highly configurable and popular among developers who prefer a more minimalistic environment. They offer powerful features for editing code, and many plugins are available to enhance development workflows.
+Qt Creator provides features like code completion, syntax highlighting, and debugging support for C++ and Qt (projects that use the Qt framework).
+Sublime Text is a lightweight yet powerful text editor known for its speed and simplicity. It offers features like syntax highlighting, multiple selections, and a wide range of plugins for enhancing functionality, including support for C++ development.
+Atom is an open-source text editor developed by GitHub. It’s highly customizable and extensible through packages, and provides features like syntax highlighting, auto-completion, and project navigation.
+There are many other tools, Microsoft’s Visual Studio provides a full IDE and is well respected as a professional development environment, and Visual Studio Code is a lighter weight but versatile code editor that can be extended and customized with various extensions.
+What are the biggest pain points that developers are running into, that are not addressed by current Boost libraries?
+Some Boost libraries have a steep learning curve, especially for newcomers to C++. Simplifying the API design, providing extensive documentation, and offering beginner-friendly tutorials helps lower the barrier to entry and make your library more accessible to a wider audience. Other pain points include support for modern language features, working with concurrency and parallelism, providing a seamless experience across different platforms, and providing optimal performance.
+For reference, what libraries are good examples of ones that are easy to learn?
+One library known for its relatively straightforward API and ease of learning compared to some others is Boost.Filesystem. +This library provides portable facilities to work with files and directories, offering an intuitive interface for common file system operations such as file creation, deletion, copying, moving, and directory traversal. Its design is user-friendly and follows familiar patterns. Boost.Filesystem documentation is comprehensive and well-structured. Overall, Boost.Filesystem is often recommended as a starting point for those looking to dip their toes into Boost libraries due to its simplicity, practicality, and broad applicability across various projects.
+Other libraries that are known for their shallow learning curve include Boost.Optional which is particularly useful for handling functions that may return an optional value or dealing with nullable data types in a safe and clear manner. Boost.Any allows developers to store objects of different types in a single container and retrieve them without typecasting. Boost.TypeIndex provides facilities for obtaining type information at runtime, making it easy to work with types dynamically.
+What libraries have the steepest learning curve?
+While all Boost libraries have their complexities, some are known to have steeper learning curves due to their advanced nature or the intricacies of the domain they address. Boost.Spirit is a parsing and generation library that uses a domain-specific embedded language (DSEL) implemented as C++ template metaprograms. It allows developers to define parsers and generators directly within code using EBNF-like syntax. However, the template-based approach and the metaprogramming techniques used can make it challenging for newcomers to grasp, especially those unfamiliar with advanced template programming or parsing theory.
+Boost.Mp11 (Meta-Programming Library) is a powerful library for metaprogramming, providing tools for compile-time computation, type manipulation, and template metaprogramming. It allows developers to perform complex compile-time computations and transformations using a functional programming style. However, the functional programming paradigm and the intricacies of template metaprogramming can be daunting for beginners and require a solid understanding of C++ templates and meta-programming concepts.
+What libraries were the most ambitious in what they attempted to achieve?
+Some notable examples include:
+Boost.Graph provides a generic and efficient framework for working with graphs, making it suitable for a variety of applications in areas such as network analysis, optimization, and data visualization.
+Boost.Compute provides abstractions for memory management, kernel execution, and data parallelism, enabling developers to harness the computational power of modern hardware for tasks such as numerical simulations, image processing, and machine learning.
+Boost.Spirit is ambitious in its goal of providing a high-level and composable framework for parsing complex data formats and domain-specific languages entirely within C++ code, without the need for external tools or preprocessors.
+Boost.Hana aims to simplify and modernize metaprogramming in C++, making it more accessible and powerful for developing generic libraries and applications.
+What libraries were the least ambitious technically?
+Useful utilities such as Boost.Any, Boost.Variant, and Boost.Optional offer relatively simple functionality. Another simpler library is Boost.Bimap, which provides a container for maintaining one-to-one mappings between keys and values. While bidirectional maps are a useful data structure, the functionality provided is relatively straightforward and focused on this specific use case.
+I have always been interested in Artificial Intelligence (AI), and would like to contribute an AI component library to Boost. Within the field of Generative AI, what components would work well as a C++ library?
+In simple terms, generative AI works by first breaking down known constructs (for example, text or images) into small reusable components. This might be tokens, subwords, or characters for textual input, or pixels, patches, or semantic elements (sky, tree, car, etc.) for an image. Then, using statistical models, patterns, or learned rules, generative AI assembles these atomic components into something new, ideally in novel and interesting ways.
+Of course, text and images are not the only complex constructs you might want to work with. There are too many others to list, but high-value constructs include audio and speech (breaking them down into phonemes, spectral features, or waveforms), video (decomposing into frames, objects, motion vectors, or scene segments), and time series data such as sensor data or stock prices (breaking down into patterns, cycles, and perhaps anomalies). More esoteric examples would include molecular structures and chemical compounds, social graph data, handwriting and gesture data, 3D models, and so on.
+A new Boost library could address one or more of the tasks involved in decomposing existing structures into atomic components, then the processes involved in rebuilding these components into something new that adheres to a significant set of rules/patterns/behavior. Handling user-input to guide the process is another challenging component.
+Perhaps take inspiration from the following table:
+| Construct | +Subcomponents / Atomic Units | +Notes | +
|---|---|---|
Text |
+Subwords, Characters, Tokens, Words |
+BPE (Byte Pair Encoding), WordPiece, SentencePiece, or character-based tokenization |
+
Images |
+Pixels, Patches, Segments, Regions, Object Masks |
+Vision Transformers often use image patches; segmentation maps are used for context |
+
Audio |
+Frames, Spectrogram Windows, Mel-Frequency Cepstral Coefficients (MFCCs), Waveform Samples |
+Typically converted into spectrograms or embeddings for processing. MFCCs determine how humans perceive sound. |
+
Speech |
+Phonemes, Syllables, Graphemes, Acoustic Frames |
+Combines audio processing and linguistic modeling |
+
Video |
+Frames, Clips, Objects per Frame, Motion Vectors, Scene Changes |
+Often handled as sequences of images with temporal dependencies |
+
Time Series |
+Time Steps, Sliding Windows, Seasonal Components, Trends |
+Used in forecasting models like Long-Short Term Memory (LSTMs), Transformers, etc. |
+
3D Models |
+Mesh Vertices, Faces, Point Clouds, Voxels, Primitives |
+Decomposed for neural rendering or reconstruction |
+
Code |
+Tokens, AST Nodes, Lines, Statements, Functions |
+Abstract Syntax Trees (ASTs) used by code Large Language Models (LLMs) |
+
Music |
+Notes, Chords, Bars, Timing Events, MIDI Tokens |
+Representation varies: symbolic (MIDI), waveform, or spectrogram |
+
Sensor Data |
+Events, Packets, Timestamps, Multimodal Vectors |
+Used in robotics and IoT, often real-time |
+
+Many current AI libraries are built using Python or Rust; is there a need for C++ versions of these libraries?
+Perhaps not in all cases, but many applications will need performance, cross-platform portability, or integration with existing or embedded systems, all of which C++ excels at. Imagine adding real-time generative AI into a game or visual simulation — the performance requirement is the deciding factor.
+Can you give me some ideas for libraries that could be created and added to Boost?
+Here are some good candidates for AI libraries, with their respective use-cases:
+Boost.TokenStream - efficiently tokenizes words into subwords and characters so that a command such as "Turn on the lights" is understood. A C++ version could support inference on an edge device such as a microcontroller to run offline voice assistance.
+Boost.AIGen - rapidly prototypes models that generate descriptions of simulation states, and returns generated descriptions or structured images. This could be a lightweight generative model abstraction layer that enables experimentation with text, image, audio, or multi-modal generation.
+Boost.Autograd - provides a lightweight automatic differentiation engine to +simulate and optimize fluid flow using neural networks that respect physical laws. This requires differentiation of physical equations.
+Boost.MLGraph - defines and executes computation graphs with typed nodes and edges, enabling graph-based machine learning research using custom model formats.
+Boost.Prompting - a C++ toolkit to structure, serialize, and test prompts for Large Language Model (LLM)-based applications. Prompts could be built dynamically and used by assistants, chatbots, games, and perhaps robotics.
+Would the project structure of a generative AI library be any different for any other Boost library?
+Not at all, if you were to take our Boost.TokenStream idea and develop it, the project structure could look like this:
+boost-token-stream/
+├── include/
+│ └── boost/
+│ └── token-stream/
+│ ├── bpe.hpp # Public API
+│ ├── vocab.hpp # Vocab structure
+│ ├── merge_rules.hpp # Merge rules structure
+│ └── error.hpp # Error handling and outcome types
+├── src/
+│ └── bpe.cpp # Implementation (if not header-only)
+├── test/
+│ ├── test_bpe.cpp # Unit tests
+│ └── test_vocab.cpp # Vocab loading/lookup tests
+├── CMakeLists.txt
+└── README.md
+I want to experiment with creating a library for scene based generative AI, but I find all the necessary components somewhat daunting. Are there Boost libraries that can lighten the load?
+For an experimental project, consider structuring it around the following, assuming the input is a raw still image, and the output is a generated image:
+Boost.Gil : Loads your image and provides pixel access
+Boost.Graph : Represents the layout/scene structure
+Boost.Variant2 : Stores object types (components such as Tree, Sky, Road, Building, etc.)
+Boost.Fusion: Serializes scene components
+Boost.Log : Records scene parsing statistics
+Boost.ProgramOptions : CLI for batch parsing and config
+For more ideas, refer to Machine Learning.
+What is considered to be best practices when testing a generative AI model, given we can never be sure when it has got it all right?
+Testing a generative AI model, or library component, is fundamentally different from traditional software testing because there’s no single correct output — outputs are often subjective, diverse, and probabilistic. However, there are best practices that help ensure quality, safety, and usefulness. Start by engaging the following methods:
+| Method | +Description | +Pros | +Cons | +
|---|---|---|---|
Automated Metrics |
+BLEU, ROUGE, METEOR, Perplexity, FID (for images), etc. |
+Fast, repeatable |
+Poor at capturing nuance |
+
Human Evaluation |
+Judges rate quality, relevance, etc. |
+High-quality insights |
+Time-consuming, subjective |
+
Adversarial Testing |
+Try to break the model with edge cases or trick inputs |
+Uncovers weaknesses |
+Requires creativity and care |
+
Behavioral Unit Tests |
+Small, targeted tests for expected responses |
+Precise |
+Limited coverage |
+
Perfect doesn’t apply in generative AI. Instead, strive for consistent quality, clear boundaries, and safe behavior:
+Define clear evaluation goals and test across diverse datasets
+Simulate misuse - prompt injection, toxic output, sensitive topics
+Track hallucinations - the AI term for clearly incorrect statements or images
+Track consistency - does the model contradict itself?
+Conduct temperature sweeps - AI term for measuring the balance between boring/repetitive and overly chaotic output
+Be transparent and document limitations
+Consider continuous monitoring in production - collect and analyze feedback
+A prospective generative AI Boost library would only need testing within its own domain of functionality, but the design should be cognizant of the testing a finished application is going to require.
+What is meant by "Modular Boost"?
+Technically, Modular Boost consists of the Boost super-project and separate projects for each individual library in Boost. In terms of Git, the Boost super-project treats the individual libraries as submodules. Currently (early 2024) when the Boost libraries are downloaded and installed, the build organization does not match the modular arrangement of the Git super-project. This is largely a legacy issue, and there are advantages to the build layout matching the super-project layout. This concept, and the effort behind it, is now known as "Modular Boost".
+In the past, the term has been used more broadly to refer simply to libraries in different repositories. This definition has now been tightened to mean a flat layout where each library is in its own sub-module, and there are no sub-libraries as there have been in the past (for example, the numeric libraries).
Refer to Super-Project Layout for a full description of the super-project.
+What exactly is a "modular arrangement"?
+It’s when the libraries can be used, and hence built, without creating the monolithic headers, without needing the root build files, and without needing the libraries to be arranged in the usual root/libs/<name> format.
Will the move to Modular Boost change testing?
+No, unless you want to. You will still be able to test with the current non-modular way. But you could also test the modular way.
+How will modular Boost work if there is no root/libs/<name> structure? Or is the structure still required?
The structure is still required for things like testing and documentation building.
+What happens to the numeric libraries that are currently sub-libraries, when sub-libraries are no longer supported?
+The numeric libraries have been divided into four packages: libboost-numeric-conversion/, libboost-numeric-interval/, libboost-numeric-odeint/, libboost-numeric-ublas/.
+Through what channels do Boost library authors typically receive the most feedback?
+Primarily the Boost Users Mailing List and the Boost Developers Mailing List. In addition, checkout GitHub Issues and Pull Requests. Also, the Boost forums on Slack can be active.
+Example of communication flow:
+A new library release is announced on the Boost mailing lists.
+Users start discussing the new release on the mailing lists, reporting initial impressions and any issues encountered.
+Users report bugs and request features on the relevant GitHub repository, leading to active discussions in the issues and pull requests sections.
+Boost library authors and contributors discuss technical details and implementation strategies on the Boost Developers Mailing List.
+Users seeking immediate help might turn to Slack or sometimes Discord for quick responses, or directly communicate with the authors if email or forum addresses are made public.
+What kind of communication from the C++ developer community have library authors received after public release of their library into the Boost collection?
+Authors often receive a variety of feedback, which should be welcomed to help authors gauge the acceptance and usability of their library. Feedback can be categorized into several types:
+General feedback includes both positive and negative comments about the overall design, usability, and documentation of the library. The most common comments include praise for a well-designed API, criticism of complex or confusing documentation, and suggestions for improving user experience.
+Developers may ask for help or clarification on how to use certain features of the library, especially clarifications on API usage. Another common request is for example code to demonstrate specific use cases, or help with integrating the library into their own projects.
+Bug reports or issues developers encounter while using the library. These can range from minor issues, such as documentation typos, to major bugs that affect the functionality of the library. The more serious examples include: inconsistent behavior across different platforms, crashes or memory leaks in certain use cases, or incorrect results from specific functions.
+Feature requests are not uncommon, such as support for additional platforms or compilers, new algorithms or data structures, or performance improvements and optimizations.
+Performance feedback related to the performance of the library, including benchmarking results and suggestions for optimizations. Typically this centers around reports of slow performance in certain scenarios, comparisons with similar libraries, or suggestions for algorithmic improvements.
+Portability issues are also to be expected, related to building or running the library on different platforms, operating systems, or compilers. Obvious issues include compilation errors on specific platforms, incompatibilities with certain compiler versions, or issues with platform-specific dependencies.
+There can be integration feedback on how well the library integrates with other libraries, frameworks, or tools in the C++ ecosystem. This can include integrating even with other Boost libraries as well as other third-party libraries.
+Finally, it is important to remember that members of the community can contribute to a library by submitting patches, improvements, or additional features. These updates tend to focus on code optimizations, and bug fixes. Less often they are enhancements to the library’s functionality.
+Can you give me some examples of user requests for library improvements and describe the response from the authors or maintainers?
+Boost.Asio received numerous bug reports and feature requests related to its asynchronous I/O capabilities. Users asked for better documentation and examples for common use cases, such as implementing network protocols. The author, Christopher M. Kohlhoff, responded by improving documentation and providing more examples and tutorials. The library also received several community-contributed enhancements and bug fixes.
+Boost.Python users reported issues related to compatibility with different versions of Python and C++ compilers. There were also many requests for new features to better support modern C++ standards and Python 3.x. The maintainers addressed these issues by updating the library to support newer Python versions and C++ standards. The community also contributed patches to fix compatibility issues and add new features.
+Boost.Spirit, a library for creating parsers and generators, received feedback about its steep learning curve and complex documentation. Users requested more tutorials and simpler examples. The authors and the community worked on improving the documentation and providing more examples. The library also saw several enhancements to make it more user-friendly and easier to learn.
+Boost.Thread ran into issues with changes in the C++11 standard library, which introduced its own threading support. +The authors had to adapt Boost.Thread to coexist with and complement the standard library’s threading facilities, leading to significant refactoring.
+Boost.Graph initially included some incorrect assumptions about graph properties and algorithms, this led to bugs that required reworking the design to support a wider range of graph types and use cases.
+Boost.Math initially had issues in mathematical algorithms that produced incorrect results in some cases, and required redesigning the affected components to ensure greater robustness.
+Boost.Interprocess had some bugs related to shared memory management that led to memory corruption, requiring a redesign of the allocation and synchronization mechanisms.
+Boost.Filesystem early versions had an API that was inconsistent and difficult to extend. The introduction of the v3 API addressed these issues, providing a more robust and user-friendly interface.
+Boost.MultiIndex had performance issues with certain types of queries and modifications which required rethinking the internal data structures and algorithms to improve efficiency.
+Boost.Geometry had performance bottlenecks in spatial indexing and query algorithms. This required redesigning parts of the library to handle large datasets more efficiently.
+Boost.Regex had issues with regular expression processing that could be exploited for denial-of-service attacks. This required redesigning parts of the matching engine to improve security and robustness. Refer to Security for more details.
+Boost.Serialization also had Security issues - vulnerabilities in handling serialized data could lead to arbitrary code execution or data corruption. This was addressed with changes to the serialization mechanisms.
+What have been the most unpleasant surprises that Boost library authors have to deal with?
+Here are some of the most notable ones:
+Changes in the C++ standard or the introduction of new features can sometimes break backward compatibility, leading to user complaints and requiring significant effort to fix.
+Authors have encountered unexpected behavior or compilation errors on less common platforms, requiring extensive debugging and platform-specific fixes.
+Boost has its own B2 build system (though CMake can also be used), which can be complex and difficult for new users to understand. Authors have received feedback about difficulties in building the library, leading to extensive support and documentation efforts to help users get started.
+Authors have had to deal with unexpected slowdowns (performance regressions) reported by users, requiring detailed performance analysis and sometimes reworking or reverting changes.
+Users often have high expectations, and meeting these expectations can be challenging, especially for volunteer-driven projects. Authors sometimes face criticism for perceived shortcomings in these areas.
+Dependency hell : there can be a complex web of dependencies, and managing these dependencies can be challenging, especially when changes in one library affect others. Authors sometimes find it difficult to keep everything in sync.
+Like all software, Boost libraries can have security vulnerabilities that need to be addressed promptly. Carefully read the Security section.
+Managing contributions from the community, ensuring code quality, and handling disagreements can be challenging.
+Writing and maintaining comprehensive documentation is crucial but often neglected due to the focus on coding.
+What mitigation strategies have Boost library authors employed to mitigate these unpleasant surprises?
+Primarily implementing a comprehensive Test Matrix to catch issues early. Continuous Integration (CI) systems ensure consistent builds and tests across multiple platforms and configurations. Also actively engaging with the community through forums, mailing lists, and GitHub to gather feedback and address issues promptly. Last but not least, investing time in writing clear, detailed Documentation and tutorials to help users get started and understand complex features.
+After the initial release, consider releasing regular updates to address bugs, improve performance, and add features as necessary, based on user feedback. Refer to Version Control.
+Whereas promptly addressing issues is important, haste is not always the right approach. When critical bugs are identified, detailed bug reports are needed, and the library authors and contributors analyze the bugs, discuss possible solutions, and plan the necessary changes. Significant redesign or refactoring of the affected parts of the library may be needed to address the issues. This will involve extensive testing and should not be rushed.
+About how long following release of a library does it take before communication dies down, say to just an occasional email or forum question?
+The duration and intensity of communication following the release of a Boost library obviously varies widely depending on several factors, the complexity and popularity of the library, the initial quality of the release, and the responsiveness of the authors to early feedback. However, a general pattern has been observed:
+During the initial surge (0-3 months) the communication intensity is high. Authors are typically engaged and active during this period, addressing issues promptly, releasing patches or updates, and improving documentation based on feedback.
+A stabilization period (3-12 months) follows, with moderate feedback. Authors continue to be engaged but may start shifting focus to other projects or new features for the library, so response times can slow.
+The maturity phase (12+ months) involves sporadic communication, often related to edge cases or specific environments. Authors may check in periodically but are generally less active unless major issues arise or a significant update is planned.
+More complex libraries (for example, Boost.Spirit or Boost.Asio) tend to have longer periods of active communication due to their advanced features and higher potential for integration challenges. Libraries that quickly gain a large user base will have prolonged and more intense periods of communication. Popular libraries like Boost.Python tend to have more sustained engagement. Libraries with thorough initial documentation and fewer bugs tend to stabilize faster. Poor documentation or frequent bugs can extend the period of high communication.
+Say developers greatly appreciate a new library, what kind of praise have they given, or have they remained fairly silent?
+Silence is common but praise does come in various ways, both publicly and privately. The feedback can be quite enthusiastic and detailed, highlighting the library’s impact on their work and its overall quality. Developers often post messages of appreciation on the Boost mailing lists or Slack forums. These posts can range from simple thank-yous to detailed accounts of how the library has helped solve specific problems: "Boost.Python has significantly reduced the complexity of integrating C++ with Python in our project. The documentation is clear, and the API is intuitive. Kudos to the developers!"
+Praise is also shared on social media platforms like X, LinkedIn, and personal or company blogs. Developers might write blog posts detailing their experiences and the benefits they’ve gained from using the library: "Just integrated Boost.Asio into our server application. The performance improvements are phenomenal! Big thanks to the Boost community!"
+Developers might mention and praise Boost libraries in their talks at conferences or meetups. They often showcase how they used the library to solve challenging problems: for example, a presentation at CppCon highlighted the use of Boost.Hana for metaprogramming and how it simplified complex template code.
+Private emails to the library authors expressing their gratitude and sharing success stories are also not uncommon. +For example "I wanted to thank you for your work on Boost.Graph. It has been instrumental in our network analysis tool. Your dedication and support are greatly appreciated."
+"The documentation for Boost.Spirit is outstanding. The examples made it easy to get started and understand the complex concepts."
+"Boost.MultiIndex has drastically improved our query performance. The library’s efficiency and flexibility are top-notch."
+"Integrating Boost.Beast for our HTTP server was seamless. The design and ease of use are commendable."
+"Boost.Filesystem has been rock-solid in our cross-platform application. It handles all edge cases gracefully."
+Positive feedback and praise from developers not only encourages the library authors but also helps promote the library within the wider community. This can lead to increased adoption, further contributions, and continuous improvement of the library by the community. It can of course lead to additional communication.
+Financial contributions or sponsorships as a token of appreciation are rare!
+As a contributor of a library to Boost, what do I need to know about Safe C++?
+The current lack of memory-safety makes it too easy for malicious software to exploit C++ language vulnerabilities and perform a variety of attacks. However, retrofitting the C++ language with memory-safe constructs has proven to be daunting. The Safe C++ proposal for a memory-safe set of operations is currently in a state of indefinite hiatus.
+Clearly there could be significant interest in safe versions of Boost libraries, though the level of work involved extends well beyond rewriting a library using safe extensions, as all dependencies would also have to be safe versions too.
+Currently, an astute developer should use known safe practices (some of which are shown below), avoid unsafe libraries if there is a choice, and be aware of the discussions on safe coding practices going on in social media.
+What kind of feedback did the proposal for Safe C++ receive?
+Positive feedback centered on appreciation of the initiative to address longstanding safety concerns in C++. More challenging feedback has included concerns about the complexity of integrating new safety features into the existing C++ framework, balancing enhanced safety with the language’s core design features of performance and flexibility, and competition from the Rust and Swift programming languages.
+Are there references I can read that will help me understand safe concepts and so understand the online discussions?
+Yes, in addition to the, now stalled, Safe C++ proposal, the C++ safety, in context blog post, by Herb Sutter, has been written for a broad audience. Also by Herb Sutter, there is a paper entitled Core safety Profiles: Specification, adoptability, and impact.
+If you refer to the References section of any of these papers, you will find a range of books, papers, presentations and the like that delve to various depths into safety issues. For example, the Safety Profiles: Type-and-resource Safe programming in ISO Standard C++, by Bjarne Stroustrup and Gabriel Dos Reis, outlines a talk on the broad spectrum of safety issues in a chattier style than the more formal programming papers - and might be a good place to start!
+Can you recommend some Boost libraries that demonstrate current best safe-coding practices?
+By examining the source code and documentation for any of these libraries, you should be able to educate yourself on a robust approach to safe programming, using current development tools.
+For memory-safety, Boost.SmartPtr provides smart pointer types like boost::shared_ptr, boost::weak_ptr, and boost::scoped_ptr to manage dynamic memory safely and avoid common pitfalls like memory leaks and dangling pointers. Boost.Pool offers memory pooling utilities that enable efficient management of memory allocations while minimizing fragmentation. It can help show how to avoid unsafe manual memory management.
For type-safety, Boost.StaticAssert facilitates compile-time checks with BOOST_STATIC_ASSERT, ensuring that certain conditions are met during compilation, thus improving type-safety. Also, Boost.TypeTraits supplies a set of tools for type introspection, enabling safer template programming by providing ways to query and manipulate types.
For resource-safety, Boost.Filesystem is designed to work with file paths and directories safely, minimizing errors in handling filesystem resources and ensuring proper cleanup. Boost.ScopeExit provides a mechanism for ensuring cleanup of resources (e.g., releasing locks or closing file handles) when a scope is exited, whether normally or due to an exception. And Boost.Interprocess facilitates safe and efficient interprocess communication (IPC), managing shared memory and other resources in a resource-safe way.
+For thread-safety Boost.Thread offers portable thread management and synchronization primitives (such as boost::mutex, boost::lock_guard) to help developers write thread-safe code. Boost.Asio enables asynchronous I/O operations with an emphasis on thread-safety, making it easier to build safe and scalable networked applications. At a lower level, Boost.Atomic provides atomic operations for thread-safe programming, avoiding data races in concurrent applications.
For a more general approach to safety, Boost.Optional introduces a way to handle optional values safely, avoiding issues like null pointer dereferencing.
+Boost.Variant2 provides a type-safe union type, ensuring that only one active type is stored at any time, preventing type misuse errors. Boost.Coroutine2 implements stackful coroutines with resource management in mind, preventing unsafe usage patterns.
Using current development tools what are the design principles of safe programming?
+Current best practices start with the use of static and compile-time checks to enforce constraints early. For resource-safety the idiom is Resource Acquisition Is Initialization (RAII). This idiom ties the lifetime of a resource to a programming object, so that when the object is created the resource is initialized, and when the object is destroyed the resource is released. However, the central theme of current safety is Encapsulation - the encapsulation of known unsafe operations in well-tested, robust, reusable abstractions, for example:
+Instead of exposing raw pointers, use smart pointers or custom encapsulation to ensure safe memory management:
+//
+// Unsafe code
+//
+
+int* allocateArray(size_t size) {
+ return new int[size];
+}
+
+void useArray() {
+ int* arr = allocateArray(10);
+
+ // No bounds checking.
+ arr[10] = 42;
+
+ // Forgetting to delete could cause memory leaks.
+ delete[] arr;
+}
+
+//
+// Safe encapsulation
+//
+
+#include <vector>
+#include <memory>
+
+class SafeArray {
+private:
+ std::unique_ptr<int[]> data;
+ size_t size;
+
+public:
+ SafeArray(size_t size) : data(std::make_unique<int[]>(size)), size(size) {}
+
+ int& operator[](size_t index) {
+ if (index >= size) {
+ throw std::out_of_range("Index out of range");
+ }
+ return data[index];
+ }
+
+ size_t getSize() const { return size; }
+};
+
+void useSafeArray() {
+ SafeArray arr(10);
+
+ // Safe access
+ arr[0] = 42;
+ try {
+
+ // Throws an exception
+ arr[10] = 13;
+ } catch (const std::out_of_range& e) {
+ std::cerr << e.what() << std::endl;
+ }
+}
+Handle file operations safely by ensuring that the file is properly closed after use.
+//
+// Unsafe code
+//
+
+void writeFile(const std::string& filename) {
+ FILE* file = fopen(filename.c_str(), "w");
+ if (file) {
+ fputs("Hello, World!", file);
+
+ // Forgetting fclose could cause resource leaks.
+ }
+}
+
+//
+// Safe encapsulation
+//
+
+#include <fstream>
+#include <string>
+
+class FileHandler {
+private:
+ std::ofstream file;
+
+public:
+ explicit FileHandler(const std::string& filename) {
+ file.open(filename, std::ios::out);
+ if (!file) {
+ throw std::ios_base::failure("Failed to open file");
+ }
+ }
+
+ ~FileHandler() {
+ if (file.is_open()) {
+ file.close();
+ }
+ }
+
+ void write(const std::string& content) {
+ if (!file) {
+ throw std::ios_base::failure("File not open");
+ }
+ file << content;
+ }
+};
+
+void safeWriteFile(const std::string& filename) {
+ try {
+ FileHandler fh(filename);
+ fh.write("Hello, World!");
+ } catch (const std::exception& e) {
+ std::cerr << "Error: " << e.what() << std::endl;
+ }
+}
+Prevent race conditions by wrapping shared resources in a thread-safe interface.
+//
+// Unsafe code
+//
+
+#include <iostream>
+#include <thread>
+#include <vector>
+
+int counter = 0;
+
+void incrementCounter() {
+ for (int i = 0; i < 1000; ++i) {
+
+ // Race condition
+ ++counter;
+ }
+}
+
+void unsafeThreads() {
+ std::thread t1(incrementCounter);
+ std::thread t2(incrementCounter);
+ t1.join();
+ t2.join();
+
+ // Undefined behavior
+ std::cout << "Counter: " << counter << std::endl;
+}
+
+//
+// Safe encapsulation
+//
+
+#include <iostream>
+#include <thread>
+#include <vector>
+#include <mutex>
+
+class ThreadSafeCounter {
+private:
+ int counter = 0;
+ std::mutex mtx;
+
+public:
+ void increment() {
+ std::lock_guard<std::mutex> lock(mtx);
+ ++counter;
+ }
+
+ int get() const {
+ return counter;
+ }
+};
+
+void safeThreads() {
+ ThreadSafeCounter counter;
+
+ auto worker = [&counter]() {
+ for (int i = 0; i < 1000; ++i) {
+ counter.increment();
+ }
+ };
+
+ std::thread t1(worker);
+ std::thread t2(worker);
+ t1.join();
+ t2.join();
+
+ // Guaranteed correct result
+ std::cout << "Counter: " << counter.get() << std::endl;
+}
+Instead of using raw sockets, encapsulate them in a class that ensures proper resource cleanup.
+//
+// Unsafe code
+//
+
+#include <sys/socket.h>
+#include <unistd.h>
+
+int createSocket() {
+ int sock = socket(AF_INET, SOCK_STREAM, 0);
+ if (sock == -1) {
+ perror("Socket creation failed");
+ return -1;
+ }
+
+ // Forgetting close(sock) could cause resource leaks.
+ return sock;
+}
+
+//
+// Safe encapsulation
+//
+
+#include <sys/socket.h>
+#include <unistd.h>
+#include <stdexcept>
+
+class SafeSocket {
+private:
+ int sock;
+
+public:
+ SafeSocket() {
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+ if (sock == -1) {
+ throw std::runtime_error("Socket creation failed");
+ }
+ }
+
+ ~SafeSocket() {
+ if (sock != -1) {
+ close(sock);
+ }
+ }
+
+ int getSocket() const {
+ return sock;
+ }
+};
+By wrapping low-level operations in safe abstractions, you make the code easier to use and much harder to misuse!
+What are the known security vulnerabilities of the language C++ that I should be aware of when developing my Boost library?
+There are several known security vulnerabilities and pitfalls associated with the C++ language that developers should be aware of when developing libraries. Leveraging security tools, static analysis, and code reviews can help identify and address security issues early in the development lifecycle. In particular, be aware of:
+Buffer overflows occur when data is written beyond the boundaries of a fixed-size buffer, leading to memory corruption and potential exploitation. This vulnerability can be exploited by attackers to execute arbitrary code, crash the application, or manipulate program behavior.
+Null Pointer Dereferences (accessing memory at address 0) can lead to undefined behavior, crashes, or security vulnerabilities. Null pointer dereferences are a common source of application instability and can be exploited by attackers to cause denial-of-service conditions or execute arbitrary code.
+Memory leaks occur when memory allocated dynamically is not properly deallocated, leading to the exhaustion of available memory over time. While memory leaks may not directly result in security vulnerabilities, they can indirectly impact system stability and performance, potentially facilitating denial-of-service attacks or other security incidents.
+Deprecated functions and APIs may be insecure or outdated, exposing applications to known vulnerabilities or security risks. Developers should avoid using deprecated functions and select modern, secure alternatives provided by the latest Boost or Standard libraries.
+Integer overflows and underflows occur when arithmetic operations result in values that exceed the range of representable integer types. These vulnerabilities can lead to unexpected behavior, data corruption, or security vulnerabilities, especially in security-critical code paths such as input validation or memory allocation.
+Insecure input handling, such as failure to validate input data or sanitize user input, can lead to injection attacks or buffer overflows. Developers should validate and sanitize input data to ensure that it meets expected criteria and is safe to process further.
+Unsafe type conversions, such as casting pointers between incompatible types or using implicit type conversions without validation, can lead to memory corruption or data integrity issues. Developers should use explicit type conversions and perform appropriate validation to prevent unintended behavior.
+Concurrency and synchronization issues, such as data races, deadlocks, and race conditions, can lead to unpredictable behavior and security vulnerabilities in multithreaded applications. Developers should use thread-safe synchronization primitives and adopt best practices for concurrent programming.
+Are there certain kinds of tests or certain testing styles that work well when trying to identify and remove security liabilities in C++ code?
+There are several types of tests and testing techniques that can be particularly effective for identifying and mitigating security vulnerabilities in C++ code. Consider:
+Unit testing involves testing individual components or units of code in isolation to ensure they behave as expected. Writing comprehensive unit tests for critical functions, classes, and modules helps verify their correctness and robustness, including edge cases, boundary conditions, and error handling paths. Refer to Writing Tests.
+Fuzz testing, also known as fuzzing, involves providing invalid, unexpected, or random input data to the program to identify potential vulnerabilities such as buffer overflows, null pointer dereferences, and other memory-related issues. Fuzz testing tools generate large volumes of test cases automatically and monitor the program’s behavior for crashes, hangs, or unexpected outputs. Refer to Fuzz Testing.
+Static analysis tools analyze source code without executing it and identify potential security vulnerabilities, code smells, and best practice violations. Static analysis tools for C++ can detect issues such as buffer overflows, null pointer dereferences, integer overflows, uninitialized variables, and unsafe type conversions.
+Dynamic analysis involves analyzing the behavior of the program during execution to identify security vulnerabilities, memory leaks, and runtime errors. Dynamic analysis tools for C++ can detect issues such as memory corruption, resource leaks, concurrency issues, and other runtime anomalies. Dynamic analysis techniques include memory sanitizers, address sanitizers, thread sanitizers, and runtime instrumentation. Refer to Sanitize Your Code.
+Penetration testing, also known as pen testing or sometimes ethical hacking, involves simulating real-world attacks against the software to identify security vulnerabilities and assess the effectiveness of existing security measures.
+Finally, code reviews are conducted by peers, security experts, or automated tools, and focus on identifying potential security vulnerabilities, design flaws, and implementation errors.
+Are there Boost libraries that would help me guard against null pointer dereferencing?
+While there is not a specific library dedicated solely to null pointer dereference prevention, you can leverage several libraries:
+Boost.SmartPtr provides smart pointer classes such as shared_ptr, unique_ptr, and weak_ptr, which help manage dynamic memory allocation and deallocation automatically. Smart pointers implement RAII (Resource Acquisition Is Initialization) semantics, ensuring that memory is properly released when it goes out of scope or is no longer needed. By using smart pointers instead of raw pointers, you can reduce the risk of null pointer dereferencing errors, as smart pointers automatically handle null checks and memory deallocation.
Boost.Optional provides a type-safe wrapper for optional values, allowing you to represent nullable objects without resorting to raw pointers or null references.
+Boost.Assert provides macros and utilities for defining runtime assertions and preconditions in your code. You can use assertions to validate assumptions and guard against null pointer dereferences by checking for null pointers before dereferencing them.
+Boost.Contract provides a framework for specifying and enforcing function contracts, including preconditions, postconditions, and invariants. You can use contracts to define and enforce conditions that must be satisfied by function parameters, return values, and object states, including null pointer checks.
+Are there Boost libraries that I could include in my library project that help with secure input validation?
+There are several libraries that provide functionalities for input validation, sanitization, and handling, helping to mitigate security vulnerabilities related to invalid or malicious input data:
+Boost.String_Algo provides a collection of algorithms for string manipulation, including functions for removing leading or trailing whitespace, case conversion, tokenization, and search.
+Boost.Tokenizer provides a tokenizer class for splitting input strings into tokens based on delimiter characters or regular expressions. This can be useful for parsing and validating input data that is structured or delimited, such as CSV files, configuration files, or network protocols. The tokenizer class allows you to define custom tokenization rules and handle edge cases effectively, improving the reliability and security of input data processing.
+Boost.PropertyTree provides a hierarchical data structure for representing and manipulating structured data, such as XML, JSON, INI, or property list formats. You can use it to parse, validate, and sanitize input data in various formats, ensuring that it conforms to expected schema or constraints before further processing.
+Boost.Regex provides a comprehensive regular expression library for pattern matching and text processing. Regular expressions can be powerful tools for validating and sanitizing input data, such as validating email addresses, URLs, or other structured formats.
+Boost.Spirit is a parsing and generation library that allows you to define parsers and generators directly within C++ code using a domain-specific embedded language (DSEL).
+Are there Boost libraries that help with secure memory management?
+You can leverage various libraries to help ensure memory-safety, prevent memory-related vulnerabilities, and manage resources efficiently:
+Boost.SmartPtr provides smart pointer classes such as shared_ptr, unique_ptr, and weak_ptr, which help manage dynamic memory allocation and deallocation automatically. By using smart pointers, you can prevent common memory-related vulnerabilities such as memory leaks, dangling pointers, and double frees.
Boost.Pool provides memory pool classes that allow you to efficiently allocate and deallocate fixed-size memory blocks from preallocated memory pools. Memory pools can help reduce memory fragmentation, improve memory locality, and minimize overhead associated with dynamic memory allocation.
+Boost.Interprocess provides classes and utilities for interprocess communication and shared memory management. This library allows multiple processes to share memory regions securely and efficiently, facilitating communication and data exchange between them, and offers features such as named shared memory, mutexes, condition variables, and allocators for managing shared memory resources robustly.
+Boost.PtrContainer provides container classes that manage ownership and lifetime of dynamically allocated objects stored within them. These containers, such as ptr_vector, ptr_list, and ptr_map, automatically delete contained objects when the container is destroyed or when objects are removed from it. By using pointer containers, you can simplify memory management and ensure proper cleanup of dynamically allocated objects, reducing the risk of memory leaks and resource exhaustion.
Boost.CircularBuffer provides a circular buffer data structure that manages a fixed-size buffer with automatic wrapping behavior. Circular buffers can be used to manage memory efficiently in scenarios where a fixed-size buffer is sufficient and where memory allocation overhead, deallocation overhead, and fragmentation need to be minimized.
+What penetration testing frameworks might work well with a new Boost library?
+While penetration testing frameworks typically focus on testing web applications, network services, and software systems, they are still useful for identifying security vulnerabilities and weaknesses in a new library, typically by developing a test application that fully engages the features of the library:
+Metasploit is one of the most popular penetration testing frameworks, offering a wide range of tools and modules for exploiting vulnerabilities, conducting network reconnaissance, and assessing security posture. Metasploit modules can be customized to target specific vulnerabilities or attack vectors, such as buffer overflows, injection attacks, or memory corruption issues.
+OWASP ZAP is an open-source web application security testing tool designed for finding security vulnerabilities in web applications and APIs. Boost libraries used in web applications or services may benefit from integration with ZAP to identify vulnerabilities related to input validation, injection attacks, and other web security issues.
+Nmap (Network Mapper) is a powerful network scanning and reconnaissance tool used for discovering hosts and services on a network, identifying open ports, and detecting potential security vulnerabilities. Boost libraries used in networked applications or services may benefit from integration with Nmap to identify potential attack vectors, misconfigurations, or exposed services.
+NESSUS is a widely-used vulnerability scanning tool designed for identifying security vulnerabilities, misconfigurations, and compliance violations in networked environments.
+Burp Suite is a comprehensive web application security testing tool designed for finding security vulnerabilities in web applications and APIs, including input validation and injection attacks.
+In the past, can you give me some examples of where Boost libraries have fallen short and not prevented a malicious attack?
+Security vulnerabilities in Boost libraries are rare compared to many other software projects, thanks to the rigorous testing, code reviews, and scrutiny they undergo. Nevertheless, there have been a few instances where security issues have been identified in Boost libraries. Here are a couple of examples:
+A vulnerability affected multiple versions of Boost (versions 1.61 through 1.63) and was related to the Boost.Filesystem library. The vulnerability allowed an attacker to bypass security restrictions and potentially execute arbitrary code by exploiting a symbolic link issue in the remove_all function. This issue was addressed in later versions of Boost, and users were advised to update their installations to mitigate the risk.
Another vulnerability affected a later version of Boost (version 1.70). This vulnerability was related to the Boost.Asio library and could allow an attacker to cause a denial-of-service condition by triggering a stack overflow via a recursive function call. The issue was addressed in subsequent versions of Boost.Asio, and users were encouraged to upgrade to the latest version to prevent potential exploitation.
+If I write a library for Boost, what are the legal ramifications if there are security breaches using features of my library?
+As a developer contributing a library to the Boost C++ libraries, you generally retain ownership of the copyright to your code, but you also grant a license to the Boost Software License (BSL) for distribution as part of the Boost libraries. The Boost Software License is a permissive open-source license that allows users to freely use, modify, and distribute the library, subject to certain conditions.
+The Boost Software License includes a disclaimer of liability clause, which limits the liability of the library author and contributors for damages arising from the use or distribution of the library. This means that as the library author, you are generally not held legally responsible for any damages or losses resulting from security breaches or vulnerabilities in your library.
+Users of your library are responsible for their own use and deployment of the library, including ensuring the security of their applications and systems. While you have a duty to exercise reasonable care in the development and maintenance of your library, users are ultimately responsible for assessing and mitigating any security risks associated with its usage.
+In the event of a security breach or vulnerability in your library, it is important to respond promptly and responsibly by disclosing the issue, providing mitigations or workarounds, and releasing updates or patches to address the vulnerability. Prompt and transparent communication with the community helps minimize the impact of security incidents and demonstrates your commitment to security and accountability.
+Depending on the circumstances and applicable laws, there may be legal obligations to report security breaches or vulnerabilities, especially if they involve personal data or sensitive information. It’s important to familiarize yourself with relevant legal requirements and best practices for handling security incidents, including data breach notification laws and industry-specific regulations. Engaging with the Boost community and collaborating with security researchers can help identify and address security vulnerabilities proactively. Encouraging responsible disclosure of security issues, providing clear channels for reporting vulnerabilities, and acknowledging contributions from security researchers fosters a culture of security awareness and helps improve the overall security posture of your library.
+Can you recommend a book that would give me best practices for threat modeling for my Boost library?
+There are several books that cover threat modeling principles, techniques, and applicable best practices:
+Threat Modeling: Designing for Security by Adam Shostack. This book includes a comprehensive introduction to threat modeling, covering fundamental concepts, methodologies, and practical techniques for identifying and mitigating security threats in software systems.
+Threat Modeling: Uncover Security Design Flaws Using the STRIDE Approach by Frank Swiderski and Window Snyder. This book introduces the STRIDE threat modeling framework, which helps identify and analyze security threats based on six categories: Spoofing, Tampering, Repudiation, Information Disclosure, Denial of Service, and Elevation of Privilege.
+Threat Modeling: A Practical Guide for Development Teams by Mark E. Donaldson, James B. Ransome, and Andrew N. Nelson. This book offers practical guidance, real-world examples, and insights for integrating threat modeling into the software development process. It covers a range of threat modeling techniques, tools, and best practices, including data flow diagrams, attack trees, and risk analysis.
+Software Security Engineering: A Guide for Project Managers by Julia H. Allen, Sean Barnum, and Robert J. Ellison. This book provides a comprehensive overview of software security engineering principles, practices, and processes. It covers a wide range of topics related to software security, including threat modeling, security requirements analysis, security architecture, secure coding practices, and security testing.
+What Boost libraries are useful examples of how to add Continuous Integration (CI) into the library testing process?
+The following libraries are solid examples of how Continuous Integration (CI) is integrated into the testing process:
+Boost.Asio is a cross-platform library for network and low-level I/O programming that relies heavily on CI systems for testing and validation.
+Boost.Test supports unit testing in C++ and provides a framework for writing and running test cases, as well as utilities for organizing and reporting test results. Boost.Test leverages CI to ensure the correctness and reliability of its functionality across different platforms, compilers, and network configurations.
+Boost.Thread, a set of classes and functions for multithreading, is tested rigorously using CI systems to verify its correctness, performance, and portability across various platforms and environments. CI helps identify threading-related issues, including feared and difficult-to-debug race conditions.
+Boost.PropertyTree reads, manipulates and writes structured data. CI is used to validate the correctness and robustness of the parsing, serialization and manipulation features across diverse use cases and data sources.
+Boost.Filesystem relies on CI systems to validate its functionality across different operating systems, file systems, and compiler environments, from basic file I/O operations to more complex file management tasks.
+By studying how these libraries implement CI into their testing processes, newcomers can gain valuable insights into best practices for ensuring the quality and reliability of their own library contributions.
+Refer also to Continuous Integration.
+Debug Visualizers offer a powerful way to simplify the debugging process by allowing developers to see complex data structures in a more understandable format. In this section, we explore how to use and write Debug Visualizers for both Microsoft Visual Studio (MSVC) and the GNU Debugger (GDB), two of the most popular debugging tools available today.
+Debug Visualizers are tools that allow developers to customize how complex data structures are displayed during a debugging session. Instead of manually parsing through raw data, Visualizers can present information in a human-readable format, making it easier to understand the state of a program and identify issues.
+For example, if you’re working with a linked list, a Debug Visualizer can display the elements of the list in a clear, ordered format rather than as a series of memory addresses. This can significantly reduce the time and effort required to diagnose and fix bugs.
+This section covers:
+Microsoft Visual Studio comes with built-in support for Debug Visualizers. MSVC includes several built-in Visualizers for common data types like std::vector, std::map, and std::string. However, one of the most powerful features of Visual Studio is the ability to create custom Visualizers to suit your specific needs. These Visualizers are often referred to as Natvis files — short for Native Visualizer — a script-style display language for creating custom views of C++ objects.
Using built-in Visualizers in Visual Studio is straightforward. When you hit a breakpoint or pause the execution of your program, the debugger will automatically use the appropriate Visualizer to display the contents of variables.
+For example, if you’re debugging a std::vector, Visual Studio will show the number of elements, their values, and the current capacity of the vector in a neatly organized format. You can also hover over variables to see a quick summary, or expand them in the Watch window to see more details.
To create a custom Visualizer, you need to write a Natvis file. Natvis files are XML files that allow you to present data in a way that makes sense for your application, whether that means showing a simplified view or expanding complex structures, and annotating the output appropriately.
+The syntax of a Natvis file is both straightforward and flexible. Here’s a basic structure of a Natvis file:
+<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="MyNamespace::MyClass">
+ <DisplayString>{{m_member1}}, {{m_member2}}</DisplayString>
+ <Expand>
+ <Item Name="Member 1">m_member1</Item>
+ <Item Name="Member 2">m_member2</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+In this example, the DisplayString element defines a simple summary of the data that will be shown when you hover over a variable of type MyClass. The Expand element defines what will be shown when you expand the variable in the debugger.
Let’s consider an example where you have a custom linked list class:
+namespace MyNamespace {
+ struct Node {
+ int value;
+ Node* next;
+ };
+
+ class LinkedList {
+ public:
+ Node* head;
+ };
+}
+Now, write a Natvis file to visualize this linked list in a user-friendly manner:
+<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="MyNamespace::LinkedList">
+ <DisplayString>LinkedList with head at {head}</DisplayString>
+ <Expand>
+ <Item Name="Head">head</Item>
+ <LinkedListItems>
+ <Size>size</Size>
+ <ValuePointer>head</ValuePointer>
+ <NextPointer>next</NextPointer>
+ </LinkedListItems>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Natvis file will show the head of the linked list and allow you to expand it to see all the elements in a list format.
+Once you have written your Natvis file, you can deploy it in Visual Studio by placing it in the My Documents\Visual Studio <Version>\Visualizers directory or by including it directly in your project. After loading your project and hitting a breakpoint, Visual Studio will use your custom Visualizer automatically.
When writing Natvis files, keep the following best practices in mind:
+Visualizers should simplify the debugging process, so avoid overly complex representations.
+Visualizers run during debugging, so inefficient Natvis files can slow down the debugger.
+Ensure that your Visualizer works correctly with all possible states of the data structure, including having a single entry, or a NULL empty state, or being highly populated.
+Document and comment your Natvis file so that others (or your future self) can understand and maintain it.
+GNU Debugger (GDB) is a powerful and flexible debugger that is widely used in the open-source Unix community. While GDB does not have a direct equivalent to MSVC’s Natvis files, it supports a feature called pretty-printers, which serve a similar purpose. Pretty-printers are written in Python and allow developers to customize the output of data structures during debugging.
+GDB comes with several built-in pretty-printers, particularly for standard library containers like std::vector and std::map. These pretty-printers can be enabled by loading the appropriate scripts during your debugging session.
For example, to enable STL pretty-printers, you might add the following to your .gdbinit file:
python
+import sys
+sys.path.insert(0, '/usr/share/gcc-<version>/python')
+from libstdcxx.v6.printers import register_libstdcxx_printers
+register_libstdcxx_printers(gdb.current_objfile())
+end
+Once enabled, GDB will automatically use these pretty-printers to display STL containers in a more readable format.
+Here’s a simple Python template for a GDB pretty-printer:
+class MyClassPrinter:
+ "Print a MyNamespace::MyClass"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ return "MyClass: member1 = {}, member2 = {}".format(
+ self.val['member1'], self.val['member2'])
+
+def lookup_function(val):
+ if str(val.type) == "MyNamespace::MyClass":
+ return MyClassPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+Let’s write a pretty-printer for the same linked list example used in the MSVC section:
+class LinkedListPrinter:
+ "Print a MyNamespace::LinkedList"
+
+ class Iterator:
+ def __init__(self, head):
+ self.node = head
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.node == 0:
+ raise StopIteration
+ value = self.node['value']
+ self.node = self.node['next']
+ return value
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ return "LinkedList"
+
+ def children(self):
+ return enumerate(self.Iterator(self.val['head']))
+
+def lookup_function(val):
+ if str(val.type) == "MyNamespace::LinkedList":
+ return LinkedListPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This script will allow GDB to display the elements of the linked list in a way that is easy to understand.
+Write modular pretty-printers that can be easily extended or reused.
+Keep performance in mind, as pretty-printers run in real-time during debugging.
+Ensure that your pretty-printer works correctly with all possible states of the data structure, including having a single entry, or a NULL empty state, or being highly populated.
+Document and comment your pretty-printers so that others (or your future self) can understand and maintain them.
+While both MSVC and GDB support custom visualization of data structures during debugging, they differ significantly in their approach:
+Natvis files are XML-based and tightly integrated with the Visual Studio IDE, offering a more graphical and user-friendly experience.
+GDB’s pretty-printers are written in Python, providing greater flexibility but requiring more manual setup and scripting.
+Debug Visualizers are particularly useful in scenarios where data structures are complex and difficult to interpret from raw memory views. This includes debugging custom containers, graphical objects, or any data structure with a non-trivial internal representation.
+Consider a case where a developer is working on a 3D game engine. The engine uses complex data structures to represent scenes, including trees of graphical objects and spatial partitions. Without Debug Visualizers, diagnosing issues with these structures would involve manually traversing pointers and interpreting binary data. With custom Visualizers, the developer can see these structures as they are meant to be seen, such as a tree view of the scene graph or a grid of spatial partitions, making it much easier to identify and fix problems.
+The following examples refer to Boost.Optional, Boost.Variant, and Boost.Container.
+The boost::optional type represents an object that may or may not contain a value. When debugging code that uses boost::optional, it’s helpful to quickly see whether a value is present and, if so, what that value is.
Here’s an example of a Natvis file that visualizes boost::optional in MSVC:
<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="boost::optional<*>" Priority="High">
+ <DisplayString Condition="!is_initialized">empty</DisplayString>
+ <DisplayString Condition="is_initialized">Value = {*(this->storage_.data_)}</DisplayString>
+ <Expand>
+ <Item Name="Value" Condition="is_initialized">*(this->storage_.data_)</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Visualizer checks if the boost::optional contains a value using the is_initialized method. If a value is present, it displays the content; otherwise, it shows "empty".
For GDB, you can create a pretty-printer in Python:
+class OptionalPrinter:
+ "Print a boost::optional"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ is_initialized = self.val['m_initialized']
+ if is_initialized:
+ return "Value = {}".format(self.val['m_storage']['m_storage']['data'])
+ else:
+ return "empty"
+
+def lookup_function(val):
+ if str(val.type).startswith('boost::optional'):
+ return OptionalPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This pretty-printer works similarly to the Natvis example, displaying either the value stored in the boost::optional or indicating that it is empty.
boost::variant is a type-safe union that can hold one of several types. Visualizing it during debugging can be tricky, as you need to see which type is currently stored and what its value is.
The following Natvis file visualizes boost::variant:
<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="boost::variant<*>">
+ <DisplayString>{ which = {which}, value = {*(void*)&storage_ + 16} }</DisplayString>
+ <Expand>
+ <Item Name="Which">which</Item>
+ <Item Name="Value">{*(void*)&storage_ + 16}</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Visualizer displays the active type stored in the boost::variant and its value. The which member determines which of the possible types is currently in use, and the corresponding value is extracted and displayed.
Here’s how you might implement a pretty-printer for boost::variant in GDB:
class VariantPrinter:
+ "Print a boost::variant"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ which = self.val['which_']
+ value = gdb.parse_and_eval('((void*)&{})->boost::detail::variant::which_types::types[{}]'.format(self.val.address, which))
+ return "which = {}, value = {}".format(which, value)
+
+def lookup_function(val):
+ if str(val.type).startswith('boost::variant'):
+ return VariantPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This pretty-printer identifies the active type using which_ and displays its value.
boost::container::vector is a drop-in replacement for std::vector with improved performance in certain scenarios. Like std::vector, it benefits greatly from a Visualizer that can show the contents of the container in a user-friendly way.
Here’s a Natvis file for visualizing boost::container::vector:
<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="boost::container::vector<*>">
+ <DisplayString>Size = {size()}</DisplayString>
+ <Expand>
+ <Item Name="[size() elements]">[ptr_, ptr_ + size()]</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Visualizer displays the size of the vector, and allows you to expand the vector to see all its elements.
+For GDB, you can use the following pretty-printer:
+class BoostVectorPrinter:
+ "Print a boost::container::vector"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ size = self.val['m_holder']['m_size']
+ return "Size = {}".format(size)
+
+ def children(self):
+ size = int(self.val['m_holder']['m_size'])
+ start = self.val['m_holder']['m_start']
+ return (('[{}]'.format(i), start[i]) for i in range(size))
+
+def lookup_function(val):
+ if str(val.type).startswith('boost::container::vector'):
+ return BoostVectorPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This pretty-printer shows the size of the boost::container::vector, and lists its elements.
Now, let’s look at debugging a more complex library.
+Boost.Asio is a powerful and widely used library, with the challenge of debugging asynchronous code. Debug Visualizers can make this process significantly easier by providing insights into the state of your Asio objects during debugging.
+The boost::asio::io_context (formerly io_service) is a core component of the library, used to initiate and manage asynchronous operations. When debugging, it can be helpful to see the state of the io_context, including the number of pending tasks and whether it is currently running.
Here’s an example of a Natvis file that visualizes boost::asio::io_context in MSVC:
<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="boost::asio::io_context">
+ <DisplayString>Work = {this->impl_.work_count_}, Threads = {this->impl_.thread_count_}</DisplayString>
+ <Expand>
+ <Item Name="Work Count">this->impl_.work_count_</Item>
+ <Item Name="Thread Count">this->impl_.thread_count_</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Visualizer displays the number of pending tasks (work_count_) and the number of threads currently running in the io_context. This information is crucial for understanding the load and activity level of the io_context.
For GDB, you can create a pretty-printer in Python:
+class IoContextPrinter:
+ "Print a boost::asio::io_context"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ work_count = self.val['impl_']['work_count_']
+ thread_count = self.val['impl_']['thread_count_']
+ return "Work = {}, Threads = {}".format(work_count, thread_count)
+
+def lookup_function(val):
+ if str(val.type).startswith('boost::asio::io_context'):
+ return IoContextPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This pretty-printer provides similar information as the Natvis file, showing the number of pending tasks and threads in the io_context.
The boost::asio::steady_timer is used for scheduling asynchronous operations to occur after a specified time period. Visualizing its state can help you understand when the next operation is scheduled to run.
The following Natvis file visualizes boost::asio::steady_timer:
<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="boost::asio::steady_timer">
+ <DisplayString>Expires At = {this->impl_.expiry_}</DisplayString>
+ <Expand>
+ <Item Name="Expiry Time">this->impl_.expiry_</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Visualizer displays the time at which the timer is set to expire, helping you to easily track the timing of scheduled operations.
+Here’s a pretty-printer for boost::asio::steady_timer in GDB:
class SteadyTimerPrinter:
+ "Print a boost::asio::steady_timer"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ expiry_time = self.val['impl_']['expiry_']
+ return "Expires At = {}".format(expiry_time)
+
+def lookup_function(val):
+ if str(val.type).startswith('boost::asio::steady_timer'):
+ return SteadyTimerPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This pretty-printer shows when the timer is set to expire, similar to the Natvis Visualizer.
+Sockets are one of the most commonly used components in Boost.Asio, allowing for network communication. Visualizing socket states and addresses during debugging can provide clarity on the connections being managed.
+Here’s a Natvis file that visualizes a TCP socket:
+<?xml version="1.0" encoding="utf-8"?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="boost::asio::ip::tcp::socket">
+ <DisplayString>Local = {this->impl_.socket_.local_address_}:{this->impl_.socket_.local_port_}, Remote = {this->impl_.socket_.remote_address_}:{this->impl_.socket_.remote_port_}</DisplayString>
+ <Expand>
+ <Item Name="Local Address">{this->impl_.socket_.local_address_}:{this->impl_.socket_.local_port_}</Item>
+ <Item Name="Remote Address">{this->impl_.socket_.remote_address_}:{this->impl_.socket_.remote_port_}</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
+This Visualizer shows the local and remote addresses and ports for a TCP socket, giving you immediate insight into the connection being managed.
+A pretty-printer for a TCP socket in GDB might look like this:
+class TcpSocketPrinter:
+ "Print a boost::asio::ip::tcp::socket"
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ local_address = self.val['impl_']['socket_']['local_address_']
+ local_port = self.val['impl_']['socket_']['local_port_']
+ remote_address = self.val['impl_']['socket_']['remote_address_']
+ remote_port = self.val['impl_']['socket_']['remote_port_']
+ return "Local = {}:{}, Remote = {}:{}".format(local_address, local_port, remote_address, remote_port)
+
+def lookup_function(val):
+ if str(val.type).startswith('boost::asio::ip::tcp::socket'):
+ return TcpSocketPrinter(val)
+ return None
+
+gdb.pretty_printers.append(lookup_function)
+This pretty-printer displays the local and remote addresses and ports, providing clear information about the socket’s connections.
+By understanding how to use and write Debug Visualizers, you can gain deeper insights into your code, catch bugs more quickly, and ultimately produce higher-quality software. Whether you’re new to debugging or an experienced developer, taking the time to master these tools will pay off in the long run.
+Consider downloading sample Natvis and Python pretty-printer files from the Boost library’s GitHub repository.
+Boost libraries generally have a large and diverse user base. To ensure successful transitions from old APIs to newer APIs under those circumstances, library authors are encouraged to follow a few guidelines when introducing breaking changes in their library:
+Non-breaking changes can be done without restriction.
+Small breaking changes can be made, but users should be given notice a few releases before the change is published. Most breaking changes fall into this category.
+For large breaking changes with a migration path from the old API to the new API (for example Boost.Filesystem v2 to v3), the new API should be introduced in a separate directory/namespace, and users should be notified and given a few releases to move over. The old API can be removed after some time.
+For large breaking changes without a migration path (for example Boost.Spirit v2 to v3), the new API should be provided in a separate directory/namespace, and the old API should be preserved (because there’s no migration path). Removing the API should be considered the same as removing a Boost library, which can be done but needs a more extensive deprecation period.
+Large breaking changes that are equivalent to a redesign or rewrite of the library should be treated as a new library and a formal review (or at least a mini review) is encouraged.
+It is a general aim for boost libraries to be Portable. The primary means for achieving this goal is to adhere to ISO Standard C++. However, ISO C++ is a broad and complex standard and most compilers are not fully conformant to ISO C++ yet. In order to achieve portability in the light of this restriction, it seems advisable to get acquainted with those language features that some compilers do not fully implement yet.
+This topic gives portability hints on some language features of the Borland C++ version 5.5.1 compiler. Furthermore, the appendix presents additional problems with Borland C++ version 5.5. Borland C++ 5.5.1 is a freely available command-line compiler for Win32: Download and Install Borland C++ Compiler on Windows 10 & 11.
+Each section below describes a particular issue, complete with sample source code to demonstrate the effect. Most sample code herein has been verified to compile with gcc 2.95.2 and Comeau C++ 4.2.44.
+The preprocessor symbol __BORLANDC__ is defined for all Borland C++ compilers. Its value is the version number of the compiler interpreted as a hexadecimal number. The following table lists some known values.
| Compiler | __BORLANDC__ value |
+
|---|---|
Borland C++ Builder 4 |
+0x0540 |
+
Borland C++ Builder 5 |
+0x0550 |
+
Borland C++ 5.5 |
+0x0550 |
+
Borland C++ 5.5.1 |
+0x0551 |
+
Borland C++ Builder 6 |
+0x0560 |
+
Mixing using-declarations and using-directives

Mixing using-directives (which refer to whole namespaces) and namespace-level using-declarations (which refer to individual identifiers within foreign namespaces) causes ambiguities where there are none. The following code fragment illustrates this:
namespace N {
+ int x();
+}
+
+using N::x;
+using namespace N;
+
+int main()
+{
+ &x; // Ambiguous overload
+}
+using-declarations for Class Templates

Identifiers for class templates can be used as arguments to using-declarations as any other identifier. However, the following code fails to compile with Borland C++:
template<class T>
+class X { };
+
+namespace N
+{
+// "cannot use template 'X<T>' without specifying specialization parameters"
+ using ::X;
+};
+Template function type deduction should omit top-level constness. However, this code fragment instantiates f<const int>(int):
template<class T>
+void f(T x)
+{
+ x = 1; // works
+ (void) &x;
+ T y = 17;
+ y = 20; // "Cannot modify a const object in function f<const int>(int)"
+ (void) &y;
+}
+
+int main()
+{
+ const int i = 17;
+ f(i);
+}
+Addresses of overloaded functions are not in all contexts properly resolved (std:13.4 [over.over]); here is a small example:
template<class Arg>
+void f( void(\*g)(Arg) );
+
+void h(int);
+void h(double);
+
+template<class T>
+void h2(T);
+
+int main()
+{
+ void (\*p)(int) = h; // this works (std:13.4-1.1)
+ void (\*p2)(unsigned char) = h2; // this works as well (std:13.4-1.1)
+ f<int>(h2); // this also works (std:13.4-1.3)
+
+ // "Cannot generate template specialization from h(int)",
+ // "Could not find a match for f<Arg>(void (\*)(int))"
+ f<double>(h); // should work (std:13.4-1.3)
+
+ f( (void(\*)(double))h); // C-style cast works (std:13.4-1.6 with 5.4)
+
+ // "Overloaded 'h' ambiguous in this context"
+ f(static_cast<void(\*)(double)>(h)); // should work (std:13.4-1.6 with 5.2.9)
+}
+Always use C-style casts when determining addresses of (potentially) overloaded functions.
+const char * to std::string

Implicitly converting const char * parameters to std::string arguments fails if template functions are explicitly instantiated (it works in the usual cases, though):
#include <string>
+
+template<class T>
+void f(const std::string & s)
+{}
+
+int main()
+{
+ f<double>("hello"); // "Could not find a match for f<T>(char \*)"
+}
+Avoid explicit template function instantiations (they have significant problems with Microsoft Visual C++) and pass default-constructed unused dummy arguments with the appropriate type. Alternatively, if you wish to keep to the explicit instantiation, you could use an explicit conversion to std::string or declare the template function as taking a const char * parameter.
Template value parameters which default to an expression dependent on previous template parameters don’t work:
+template<class T>
+struct A
+{
+ static const bool value = true;
+};
+
+// "Templates must be classes or functions", "Declaration syntax error"
+template<class T, bool v = A<T>::value>
+struct B {};
+
+int main()
+{
+ B<int> x;
+}
+If the relevant non-type template parameter is an implementation detail, use inheritance and a fully qualified identifier (for example, ::N::A<T>::value).
Partial ordering of function templates, as described in std:14.5.5.2 [temp.func.order], does not work:
#include <iostream>
+
+template<class T> struct A {};
+
+template<class T1>
+void f(const A<T1> &)
+{
+ std::cout << "f(const A<T1>&)\n";
+}
+
+template<class T>
+void f(T)
+{
+ std::cout << "f(T)\n";
+}
+
+int main()
+{
+ A<double> a;
+ f(a); // output: f(T) (wrong)
+ f(1); // output: f(T) (correct)
+}
+Declare all such functions uniformly as either taking a value or a reference parameter.
+When directly instantiating a template with some member function pointer, which is itself dependent on some template parameter, the compiler cannot cope:
+template<class U> class C { };
+template<class T>
+class A
+{
+ static const int v = C<void (T::\*)()>::value;
+};
+Use an intermediate typedef:
template<class U> class C { };
+template<class T>
+class A
+{
+ typedef void (T::\*my_type)();
+ static const int v = C<my_type>::value;
+};
+Extracted from an e-mail exchange between David Abrahams, Fernando Cacciola, and Peter Dimov; not actually tested.
+double std::abs(double) Missing

The function double std::abs(double) should be defined (std:26.5-5 [lib.c.math]), but it is not:
#include <cmath>
+
+int main()
+{
+ double (\*p)(double) = std::abs; // error
+}
+int std::abs(int) will be used without warning if you write std::abs(5.1).
Similar remarks apply to seemingly all of the other standard math functions, where Borland C++ fails to provide float and long double overloads.
Use std::fabs instead if type genericity is not required.
These issues are documented mainly for historic reasons. If you are still using Borland C++ version 5.5, you are strongly encouraged to obtain an upgrade to version 5.5.1, which fixes the issues described in the following section.
+If a friend function of some class has not been declared before the friend function declaration, the function is declared at the namespace scope surrounding the class definition. Together with class templates and inline definitions of friend functions, the code in the following fragment should declare (and define) a non-template function bool N::f(int,int), which is a friend of class N::A<int>. However, Borland C++ v5.5 expects the function f to be declared beforehand:
namespace N {
+ template<class T>
+ class A
+ {
+ // "f is not a member of 'N' in function main()"
+ friend bool f(T x, T y) { return x < y; }
+ };
+}
+
+int main()
+{
+ N::A<int> a;
+}
+This technique is extensively used in boost/operators.hpp. Giving in to the wish of the compiler doesn’t work in this case, because then the instantiate one template, get lots of helper functions at namespace scope approach doesn’t work anymore. Defining BOOST_NO_OPERATORS_IN_NAMESPACE (a define BOOST_NO_INLINE_FRIENDS_IN_CLASS_TEMPLATES would match this case better) works around this problem and leads to another one, see the template discussion in Core Language.
A Boost library does not generally depend on other libraries, except for Boost or the C++ Standard Library. There are some exceptions. For example, Boost.Iostreams, and Boost.Beast, have an optional dependency on zlib, to provide support for gzip and zlib compression and decompression. If zlib is not available, Boost.Iostreams will still work, but without the ability to handle gzip and zlib formats. Another example is Boost.Asio, which has an optional dependency on OpenSSL, to provide support for SSL and TLS protocols. These protocols are widely used for secure network communications. Again, if OpenSSL is not available, Boost.Asio will still work, but without support for SSL or TLS.
+Other examples include Boost.Python interfaces with the Python C API, so obviously depends on a Python installation. Boost.Locale depends on the International Components for Unicode (ICU) to provide Unicode and localization support. On Unix systems, Boost.Thread relies on the pthread library. Boost.Compute has a dependency on OpenCL.
+For other libraries that you might want to take a dependency on, get a discussion going first with the Boost developers' mailing list. The general rule is avoid unreasonable dependencies.
+A Boost library should use other Boost Libraries, or the C++ Standard Library, when the benefits outweigh the costs. In general, Boost libraries are designed to be as independent as possible, so that users can pick and choose the libraries they need without being forced to include unnecessary code. Many Boost libraries are indeed standalone and can be used separately without any dependencies on other libraries.
+The benefits of using components from other libraries may include clearer, more understandable code, reduced development and maintenance costs, and the assurance which comes from reusing well-known and trusted building blocks. The costs may include undesirable coupling between components, and added compilation and runtime costs. If the interface to the additional component is complex, using it may make code less readable, and thus actually increase development and maintenance costs.
+Negative effects of coupling become obvious when one library uses a second library which uses a third, and so on. The worst form of coupling requires the user understand each of the coupled libraries. Coupling may also reduce the portability of a library - even in cases when all used libraries are self-sufficient (see Examples of Questionable Dependencies).
+The Boost.Graph library depends on Boost.Iterator and Boost.PropertyMap. Similarly, Boost.Asio, mentioned above, depends on several other Boost libraries including Boost.System, Boost.DateTime, and Boost.Bind, and others. Boost.Asio itself is recommended if your library needs networking.
+A good example where another boost component should certainly be used is Boost.Core, as it has considerable benefits; it simplifies code, improves readability, and signals intent. Boost.Core contains small utilities, usually polyfills for standard components, which you use when targeting C++ standards where they are not available. Costs are low as coupling is limited; the Boost.Core header includes only lightweight headers. There are no runtime costs at all. With costs so low and benefits so high, other boost libraries should use Boost.Core when the need arises.
+Other Boost libraries you might consider as good foundational components include:
+Boost.Compatibility has been updated recently and has a similar role to Boost.Core. Its classes provide polyfills for std classes, and have no extensions.
+Boost.Config is used by almost all libraries. It provides macro definitions to make your code portable. It enables you to detect standard levels prior to C++17, to check which platform/compiler you are building for, and to add the relevant platform-specific code to create compiled libraries.
+Boost.Assert is in general recommended over plain <cassert>, as it provides a source_location polyfill. Boost.Assert is used by almost all Boost libraries, so using it makes your library more interoperable with the others.
Similarly, Boost.ThrowException is in general recommended over plainly throwing exceptions, as it adds more info to the thrown exceptions and makes behavior more configurable. As it too is widely used in existing Boost libraries, good interoperability applies.
+Boost.Utility includes several non-templated, non-data structure related classes and functions, such as base-from-member idiom, checked delete, next and prior functions, noncopyable, and result_of.
+Boost.ConceptCheck provides tools for specifying and checking that types meet the requirements of generic algorithms. It’s used by many other libraries to ensure that template parameters meet the necessary requirements.
+For handling data types and structures, one of Boost.FunctionTypes, Boost.Fusion, Boost.Any, Boost.Variant, or Boost.Variant2 might provide what you need.
+As well as supporting data types, Boost.Variant2 is preferred over std::variant, as it enforces better invariants and is never valueless, unlike its standard counterpart.
For metaprogramming, Boost.Mp11 is the latest metaprogramming library and provides many of the building blocks for this style of programming. Boost.Mp11 should be used in preference over the older Boost.Mpl and Boost.Preprocessor.
+Boost.Describe is valuable if you need reflection as part of your interface, which will occur when your library users are passing in user-defined types for your library to process.
+An example where another Boost component should not be used is simply where the use of the library is minimal, and does not justify the cost of having the dependency. Or perhaps when a C++ Standard Library has the same functionality for a lower cost.
+Other examples of questionable dependencies, outside of Boost, include libraries with unstable interfaces (libraries that change frequently), libraries that are platform-specific, not widely supported, not public, or are internal in some way. Obviously libraries with heavy runtime requirements should largely be avoided altogether.
+Use these guidelines as recommendations based on past author’s experiences of preparing content for a library submission.
+When designing a new library:
+Aim first for clarity and correctness; optimization should be only a secondary concern in most Boost libraries.
+Aim for ISO Standard C++. That means making effective use of the standard features of the language, and avoiding non-standard compiler extensions. It also means using the Standard Library where applicable.
+Headers should be good neighbors. See Headers and Naming Consistency.
+Follow quality programming practices. Recommended texts include Effective C++ 2nd Edition and More Effective C++, both by Scott Meyers and published by Addison Wesley.
+Use the C++ Standard Library or other Boost libraries, but only when the benefits outweigh the costs. Except in special cases, do not use libraries other than the C++ Standard Library or Boost.
+Read Implementation Variation Techniques to see how to supply performance, platform, or other implementation variations.
+Read the guidelines for Separate Compilation, to see how to ensure that compiled link libraries meet user expectations.
+Begin all source files (including programs, headers, scripts, etc.) with:
+A comment line describing the contents of the file.
+Comments describing copyright and licensing: again, refer to License Requirements. Note that developers are allowed to provide a copy of the license text in LICENSE_1_0.txt, LICENSE.txt or LICENSE file within repositories of their libraries.
+A comment line referencing your library on the Boost web site. For example: `// See https://www.boost.org/libs/foo for library home page.`
+Where foo is the directory name (see below) for the library. As well as aiding users who come across a Boost file detached from its documentation, some of Boost’s automatic tools depend on this comment to identify which library a header file belongs to.
+Although some Boost members use proportional fonts, tabs, and unrestricted line lengths in their own code, Boost’s widely distributed source code should follow more conservative guidelines:
+Use fixed-width fonts. See Source Code Fonts Rationale.
+Use spaces rather than tabs. See Tabs Rationale.
+Limit line lengths to 80 characters.
+End all documentation files (HTML or otherwise) with a copyright message and a licensing message. Refer to published library documentation for examples.
+Use the naming conventions of the C++ Standard Library (See Naming Conventions Rationale):
+Names (except as noted below) should be all lowercase, with words separated by underscores.
+Acronyms should be treated as ordinary names (e.g. xml_parser instead of XML_parser).
+Template parameter names begin with an uppercase letter.
+Macro names all uppercase and begin with BOOST_.
+Choose meaningful names - explicit is better than implicit, and readability counts. There is a strong preference for clear and descriptive names, even if lengthy.
+As library developers and users have gained experience with Boost, the following consistent naming approach has come to be viewed as helpful, particularly for larger libraries that need their own header subdirectories and namespaces.
+The library is given a name that describes the contents of the library. Cryptic abbreviations are strongly discouraged. Following the practice of the C++ Standard Library, names are usually singular rather than plural. For example, a library dealing with file systems might choose the name "filesystem", but not "filesystems", "fs" or "nicecode".
+The library’s primary directory (in parent boost-root/libs) is given that same name. For example, boost-root/libs/filesystem.
+The library’s primary header directory (in boost-root/libs/name/include) is given that same name. For example, boost-root/libs/filesystem/include/boost/filesystem.
+The library’s primary namespace (in parent ::boost) is given that same name, except when there’s a component with that name (e.g., boost::tuple), in which case the namespace name is pluralized. For example, ::boost::filesystem.
+The first letter of the library name is capitalized.
+A period between "Boost" and the library name (e.g., Boost.Bind) is used if and only if the library name is not followed by the word "library".
+The word "library" is not part of the library name and is therefore lowercased.
+Here are a few example sentences of how to apply these conventions:
+"Boost.Bind was written by Peter Dimov."
+"The Boost Asio library was written by Christopher Kohlhoff."
+"I regularly use Spirit, a Boost library written by Joel de Guzman and Hartmut Kaiser."
+Naming requirements ensure that file and directory names are relatively portable, including to ISO 9660:1999 (with extensions) and other relatively limited file systems. Superscript links are provided to detailed rationale for each choice.
+Names must contain only lowercase ASCII letters ('a'-'z'), numbers ('0'-'9'), underscores ('_'), hyphens ('-'), and periods ('.'). Spaces are not allowed.
+Some legacy file systems require single-case names. Single-case names eliminate casing mistakes when moving from case-insensitive to case-sensitive file systems.
+To quote the POSIX standard, "Filenames should be constructed from the portable filename character set because the use of other characters can be confusing or ambiguous in certain contexts."
+Directory names must not contain periods ('.').
+Strict implementations of ISO 9660:1999 and some legacy operating systems prohibit dots in directory names. The need for this restriction is fading, and may be removed in time.
+The first and last character of a file name must not be a period ('.').
+POSIX has special rules for names beginning with a period. Windows prohibits names ending in a period.
+The first character of names must not be a hyphen ('-'), as this would be too confusing or ambiguous in certain contexts.
+The maximum length of directory and file names is 31 characters. We had to draw the line somewhere, and so the limit imposed by a now obsolete Apple file system was chosen years ago.
+The total path length must not exceed 207 characters (ISO 9660:1999).
+Other conventions ease communication:
+Files intended to be processed by a C++ compiler as part of a translation unit should have a three-letter filename extension ending in "pp" (typically .cpp and .hpp). Other files should not use extensions ending in "pp". This convention makes it easy to identify all of the source in Boost.
All libraries have at their highest level a primary directory named for the particular library. See Naming Consistency. The primary directory may have sub-directories.
+Provide sample programs or confidence tests so potential users can see how to use your library.
+Provide a regression test program or programs which follow the Test Policy.
+Use exceptions to report errors where appropriate, and write code that is safe in the face of exceptions.
+Avoid exception-specifications. See Exception Specification Rationale.
+It is recommended you add runtime assertions to your code (including library headers). Avoid C’s assert macro and use Boost’s BOOST_ASSERT macro (in boost/assert.hpp) instead as it is more configurable.
Make sure your code compiles in the presence of the min() and max() macros. Some platform headers define min() and max() macros which cause some common C++ constructs to fail to compile. To protect your code from inappropriate macro substitution:
If you want to call std::min() or std::max():
If you do not require argument-dependent look-up, use (std::min)(a,b).
If you do require argument-dependent look-up, you should:
+#include <boost/config.hpp>
Use BOOST_USING_STD_MIN(); to bring std::min() into the current scope.
Use min BOOST_PREVENT_MACRO_SUBSTITUTION (a,b); to make an argument-dependent call to min(a,b).
If you want to call std::numeric_limits<int>::max(), use (std::numeric_limits<int>::max)() instead.
If you want to call a min() or max() member function, instead of doing obj.min(), use (obj.min)().
If you want to declare or define a function or a member function named min or max, then you must use the BOOST_PREVENT_MACRO_SUBSTITUTION macro. Instead of writing int min() { return 0; } you should write int min BOOST_PREVENT_MACRO_SUBSTITUTION () { return 0; }. This is true regardless if the function is a free (namespace scope) function, a member function or a static member function, and it applies for the function declaration as well as for the function definition.
The primary directory should always contain a file named index.html. Authors have requested this so that they can publish URL’s in the form https://www.boost.org/libs/lib-name with the assurance a documentation reorganization won’t invalidate the URL. Boost’s internal tools are also simplified if a library’s documentation is always reachable via the simplified URL.
The primary directory index.html file should do an automatic redirection to the doc/html subdirectory. For example, the json library contains the following index.html file:
<html>
+ <head>
+ <title>Boost.JSON</title>
+ <meta http-equiv="refresh" content="0; URL=./doc/html/index.html">
+ </head>
+ <body>
+ Automatic redirection failed, please go to
+ <a href="./doc/html/index.html">./doc/html/index.html</a>
+ <hr>
+ <tt>
+ Boost.JSON<br>
+ <br>
+ Copyright (C) 2019 Vinnie Falco<br>
+ Copyright (C) 2020 Krystian Stasiowski<br>
+ <br>
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ <a href=http://www.boost.org/LICENSE_1_0.txt>http://www.boost.org/LICENSE_1_0.txt</a>) <br>
+ <br>
+ </tt>
+ </body>
+</html>
+Rationale is defined as "The fundamental reasons for something; basis" by the American Heritage Dictionary.
+Beman Dawes comments: "Failure to supply contemporaneous rationale for design decisions is a major defect in many software projects. Lack of accurate rationale causes issues to be revisited endlessly, causes maintenance bugs when a maintainer changes something without realizing it was done a certain way for some purpose, and shortens the useful lifetime of software."
+Rationale is fairly easy to provide at the time decisions are made, but hard to accurately recover even a short time later. Rationale for some of the requirements and guidelines follows.
+Exception specifications (ISO 15.4) are sometimes coded to indicate what exceptions may be thrown, or because the programmer hopes they will improve performance. But consider the following member from a smart pointer:
+T& operator*() const throw() { return *ptr; }
+This function calls no other functions; it only manipulates fundamental data types like pointers. Therefore, no runtime behavior of the exception-specification can ever be invoked. The function is completely exposed to the compiler; indeed it is declared inline. Therefore, a smart compiler can easily deduce that the functions are incapable of throwing exceptions, and make the same optimizations it would have made based on the empty exception-specification. A "dumb" compiler, however, may make all kinds of pessimizations.
+For example, some compilers turn off inlining if there is an exception-specification. Some compilers add try/catch blocks. Such pessimizations can be a performance disaster which makes the code unusable in practical applications.
+Although initially appealing, an exception-specification tends to have consequences that require very careful thought to understand. The biggest problem with exception-specifications is that programmers use them as though they have the effect the programmer would like, instead of the effect they actually have.
+A non-inline function is the one place a "throws nothing" exception-specification may have some benefit with some compilers.
+The C++ standard committee’s Library Working Group discussed this issue in detail, and over a long period of time. The discussion was repeated again in early Boost postings. A short summary:
+Naming conventions are contentious, and although several are widely used, no one style predominates.
+Given the intent to propose portions of Boost for the next revision of the C++ Standard Library, Boost decided to follow the Standard Library’s conventions.
+Once a library settles on a particular convention, a vast majority of stakeholders want that style to be consistently used.
+Dave Abrahams comments: "An important purpose (I daresay the primary purpose) of source code is communication: the documentation of intent. This is a doubly important goal for Boost, I think. Using a fixed-width font allows us to communicate with more people, in more ways (diagrams are possible) right there in the source. Code written for fixed-width fonts using spaces will read reasonably well when viewed with a variable-width font, and as far as I can tell every editor supporting variable-width fonts also supports fixed width. I don’t think the converse is true".
+Tabs are banned because of the practical problems caused by tabs in multi-developer projects like Boost, rather than any dislike in principle. See mailing list archives. Problems include maintenance of a single source file by programmers using tabs and programmers using spaces, and the difficulty of enforcing a consistent tab policy other than just "no tabs". Discussions concluded that Boost files should either all use tabs, or all use spaces, and thus the decision to stick with spaces for indentation.
+ECMAScript/JavaScript use is allowed but discouraged. Before the 1.29.0 release, two Boost libraries added ECMAScript/JavaScript documentation. Controversy followed (see mailing list archives), and the developers were asked to remove the ECMAScript/JavaScript. Reasons given for banning included:
+Incompatible with some older browsers and some text based browsers.
+Makes printing docs pages difficult.
+Often results in really bad user interface design.
+Would require Boost to test web pages for ECMAScript/JavaScript compliance.
+Makes docs maintenance by other than the original developer more difficult.
+Consider those reasons if you decide that JavaScript is something you must use. In particular keep in mind that the Boost community is not responsible for testing your use of JavaScript. And hence it is up to you to ensure that the above issues are fully resolved in your use case.
+As a library matures, it almost always accumulates improvements suggested to the authors by other Boost members. It is a part of the culture of boost.org to acknowledge such contributions, identifying the person making the suggestion. Major contributions are usually acknowledged in the documentation, while minor fixes are often mentioned in comments within the code itself.
+The following section provides examples of good and not-so-good API design, with examples.
+A class should represent one concept (for example boost::filesystem::path, boost::asio::ip::address, boost::gregorian::date). Avoid “god objects” that know or do everything. If a class has too many unrelated members, split into smaller classes or helpers. If more than seven private members, ask: “should this be two classes?”, and if a class requires long docs to explain, maybe the abstraction is too fat.
Most Boost classes behave like values, not like heavy opaque handles: copyable, movable, comparable, assignable. This makes them easy to store in containers and pass by value.
+Keep private data small and tightly scoped. Avoid storing things that can be recomputed cheaply.
+If something doesn’t need state, make it a free function or template, not a class. Boost prefers non-member functions when possible (for example boost::algorithm::to_upper_copy vs. a string utility class).
Classes don’t usually embed “last error” members. Errors are surfaced via exceptions, or dual APIs with error_code&.
Namespaces in C++ are a balancing act - they prevent name collisions (especially in template-heavy libraries), they clarify context (boost::asio::ip::tcp::socket is very precise), but they can get verbose and intimidating for new users.
The rule of thumb for namespaces is to have a single obvious point of entry, with a namespace depth of no more than three. If you need more, consider grouping with traits or helper classes instead of new namespaces.
+Consider using inline namespaces for versioning, as they are useful when evolving APIs without breaking existing code.
+Repetition does not work well with namespaces: if a higher-level namespace already disambiguates the purpose, keep lower-level namespace names short.
+Implementation details can be encapsulated in a detail namespace, such as boost::asio::detail, and all entries in the detail namespace should be private.
Namespaces should reflect what the user thinks about, not how you implemented it.
+| Less-Good | +Good | +
|---|---|
|
+
|
+
|
+
|
+
It is easier to understand multiple clear, simple overloads instead of one monster function.
+| Less-Good | +Good | +
|---|---|
|
+
|
+
|
+
|
+
Group each concern into option objects, which are practically self-documenting, extensible, and avoids parameter overload. Boolean parameters in particular quickly become unreadable (true, false, true). Wrap them in option types.
| Less-Good | +Good | +
|---|---|
|
+
|
+
Clear constructors, each handling one case. Use factory functions for anything fancy.
+| Less-Good | +Good | +
|---|---|
|
+
|
+
Named keyword parameters (via boost::parameter) give clarity without giant parameter lists.
| Less-Good | +Good | +
|---|---|
|
+
|
+
|
+
|
+
Allow both exception-throwing and error_code style.
+| Less-Good | +Good | +
|---|---|
|
+
|
+
Return the result directly if there is only one natural result. Use output parameters (or error_code) only when performance matters (avoiding temporaries), or multiple results must be returned. Avoid returning int or bool for multi-state results. Use enums, optionals, variants to make meaning clear.
When algorithms partially consume input, return iterators to mark progress, for example the following calls return iterators and not counts: boost::sregex_iterator it(text.begin(), text.end(), pattern); and auto new_end = boost::remove(vec, 42);. Another example of returning a more complex object is the use of lazy proxy objects that behave like containers but defer computation. This enables zero-cost abstraction and makes pipelines composable. An example is auto rng = boost::adaptors::filter(vec, pred);. A key to good return values, though, is not to get too clever; consistency is king.
| Less-Good | +Good | +
|---|---|
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
|
+
If a beginner can do something useful in one line of code, and an expert can still optimize/control everything with traits/policies, you’ve designed a good interface.
+Keep interfaces minimal but composable. Boost often prefers non-member, non-friend functions to avoid bloating classes. Free functions are easier to extend later without breaking ABI.
+Avoid int or std::string everywhere when domain types make intent clear.
| Less-Good | +Good | +
|---|---|
|
+
|
+
|
+
|
+
Header files are the place where a library comes into contact with user code and other libraries. To co-exist peacefully and productively, headers must be "good neighbors".
+Here are the standards for boost headers. Many of these are also reasonable guidelines for general use.
+Header filenames should have a .hpp (lowercase) extension.
+Unless multiple inclusion is intended, wrap the header in #ifndef guards. Use a naming convention that minimizes the chance of clashes with macro names from other’s code. The Sample uses the Boost convention of all uppercase letters, with the header name prefixed by the namespace name, followed by the relative path, and suffixed with HPP, separated by underscores. Refer also to the Example Guards.
Wrap the header contents in a namespace to prevent global namespace pollution. The namespace approach to pollution control is strongly preferred to older approaches such as adding funny prefixes to global names. Libraries which are designed to work well with other Boost libraries should be placed in namespace boost.
Make sure that a translation unit, consisting of just the contents of the header file, will compile successfully.
+Place the header file in a sub-directory to prevent conflict with identically named header files in other libraries. The parent directory is added to the compiler’s include search path. Then both your code and user code specifies the sub-directory in #include directives. Thus the header Sample would be included by #include <boost/furball.hpp>. Including from the current file directory using #include "furball.hpp" syntax is discouraged.
The preferred ordering for class definitions is public members, protected members, and finally private members.
Include the boost/config.hpp if there is a need to deal with compiler or platform configuration issues.
// Boost general library furball.hpp header file ---------------------------//
+
+// (C) Copyright <Your Name> 2023. Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// See https://www.boost.org/ for latest version.
+
+#ifndef BOOST_FURBALL_HPP
+#define BOOST_FURBALL_HPP
+
+namespace boost {
+
+// Furball class declaration -----------------------------------------------//
+
+ class furball
+ {
+ public:
+ void throw_up();
+ protected:
+ double duration();
+ private:
+ int whatever;
+ }; // furball
+
+} // namespace
+
+#endif // include guard
+Many libraries will include a large number of .hpp header files in a tree structure of folders. Taking Boost.Beast as an example. The header guard code is:
+#ifndef BOOST_BEAST_HPP
+#define BOOST_BEAST_HPP
+The subfolder beast includes several more headers, including core.hpp, which has the header guard:
#ifndef BOOST_BEAST_CORE_HPP
+#define BOOST_BEAST_CORE_HPP
+In the beast/core subfolder there is the async_base.hpp header file. Its guard is:
#ifndef BOOST_BEAST_CORE_ASYNC_BASE_HPP
+#define BOOST_BEAST_CORE_ASYNC_BASE_HPP
+And in the beast/core/impl subfolder there is another header named async_base.hpp, this time with the guard:
#ifndef BOOST_BEAST_CORE_IMPL_ASYNC_BASE_HPP
+#define BOOST_BEAST_CORE_IMPL_ASYNC_BASE_HPP
+Refer to beast/include/boost to view the full hierarchy of folders and headers for this library.
+The alert reader will have noticed that the Sample header employs a certain coding style for indentation, positioning braces, commenting ending braces, and similar formatting issues. These stylistic issues are viewed as personal preferences and are not part of the Boost Header Policy.
+These guidelines are designed for authors of Boost libraries which have separate sources that need compiling in order to use the library. Throughout, this guide refers to a fictitious "whatever" library, so replace all occurrences of "whatever" (or "WHATEVER") with your own library’s name when copying the examples.
+There are some compilers (mostly Microsoft Windows compilers), which feature a range of compiler switches that alter the (Application Binary Interface) ABI of C++ classes and functions. By way of example, consider Borland’s compiler which has the following options:
+| Option | +Description | +
|---|---|
|
+on or off - effects enum sizes |
+
|
+on or off - empty members |
+
|
+on or off - empty base classes |
+
|
+alignment - 5 options |
+
|
+Calling convention - 4 options |
+
|
+member pointer size and layout - 5 options |
+
|
+on or off, changes name mangling |
+
|
+on or off, changes |
+
These options are provided in addition to those affecting which runtime library is used (more later); the total number of combinations of options can be obtained by multiplying together the individual options above, so that gives 2x2x2x5x4x5x2x2 = 3200 combinations!
+The problem is that users often expect to be able to build the Boost libraries and then just link to them and have everything just plain work, no matter what their project settings are. Irrespective of whether this is a reasonable expectation or not, without some means of managing this issue, the user may well find that their program will experience strange and hard to track down crashes at runtime unless the library they link to was built with the same options as their project (changes to the default alignment setting are a prime culprit). One way to manage this is with "prefix and suffix" headers: these headers invoke compiler specific #pragma directives to instruct the compiler that whatever code follows was built (or is to be built) with a specific set of compiler ABI settings.
Boost.Config provides the macro BOOST_HAS_ABI_HEADERS which is set whenever there are prefix and suffix headers available for the compiler in use, typical usage in a header like this:
#ifndef BOOST_WHATEVER_HPP
+#define BOOST_WHATEVER_HPP
+
+#include <boost/config.hpp>
+
+// this must occur after all of the includes and before any code appears:
+#ifdef BOOST_HAS_ABI_HEADERS
+# include BOOST_ABI_PREFIX
+#endif
+//
+// this header declares one class, and one function by way of examples:
+//
+class whatever
+{
+ // details.
+};
+
+whatever get_whatever();
+
+// the suffix header occurs after all of our code:
+#ifdef BOOST_HAS_ABI_HEADERS
+# include BOOST_ABI_SUFFIX
+#endif
+
+#endif
+You can include this code in your library source files as well if you want, although you probably shouldn’t need to:
+If you don’t use these in the library source files (but do in your library’s headers) and the user attempts to compile the library source with a non-default ABI setting, then they will get compiler errors if there are any conflicts.
+If you do include them in both the library’s headers and the library source files, then the code should always compile no matter what the compiler settings used, although the result might not match what the user was expecting: since we’ve forced the ABI back into default mode.
+Without some means of managing this issue, users often report bugs along the line of "Your silly library always crashes when I try and call it" and so on. These issues can be extremely difficult and time consuming to track down, only to discover in the end that it’s a compiler setting that’s changed the ABI of the class and/or function types of the program compared to those in the pre-compiled library. The use of prefix/suffix headers can minimize this problem, although probably not remove it completely.
+Prefix/suffix headers have a tendency to "spread" to other boost libraries - for example if boost::shared_ptr<> forms part of your class’s ABI, then including prefix/suffix headers in your code will be of no use unless shared_ptr.hpp also uses them. Authors of header-only boost libraries may not be so keen on this solution - with some justification - since they don’t face the same problem.
When the users runtime is dynamically linked the Boost libraries can be built either as dynamic libraries (.so on Unix platforms, .dll on Windows, .dylib on macOS) or as static libraries (.a on Unix or macOS, .lib on Windows). So we have a choice as to which is supported by default:
On Unix platforms it typically makes no difference to the code: the user just selects in their makefile which library they prefer to link to.
+On Windows platforms, the code has to be specially annotated to support DLL’s, so we need to pick one option as the default and one as an alternative.
+On Windows platforms, we can inject special code to automatically select which library variant to link against: so again we need to decide which is to be the default (see the section on Auto-linking below).
+The recommendation is to pick static linking by default.
+There is no one policy that fits all here.
+The rationale for the current behaviour was inherited from Boost.Regex (and its ancestor regex++): this library originally used dynamic linking by default whenever the runtime was dynamic. It’s actually safer that way should you be using regex from a dll for example. However, this behavior brought a persistent stream of user complaints: mainly about deployment, all asking if static linking could be the default. After regex changed behavior the complaints stopped, and the author hasn’t had one complaint about static linking by default being the wrong decision.
+Note that other libraries might need to make other choices: for example libraries that are intended to be used to implement DLL plugins would likely need to use dynamic linking in almost all cases.
+On most Unix-like platforms no special annotations of source code are required in order for that source to be compiled as a shared library because all external symbols are exposed. However the majority of Windows compilers require that symbols that are to be imported or exported from a dll, be prefixed with __declspec(dllimport) or __declspec(dllexport). Without this mangling of source code, it is not possible to correctly build shared libraries on Windows (historical note - originally these declaration modifiers were required on 16-bit Windows where the memory layout for exported classes was different from that of "local" classes - although this is no longer an issue, there is still no way to instruct the linker to "export everything", it also remains to be seen whether 64-bit Windows will resurrect the segmented architecture that led to this problem in the first place. Note also that the mangled names of exported symbols are different from non-exported ones, so __declspec(dllimport) is required in order to link to code within a dll).
In order to support the building of shared libraries on MS Windows your code will have to prefix all the symbols that your library exports with a macro (let's call it BOOST_WHATEVER_DECL) that your library will define to expand to either __declspec(dllexport) or __declspec(dllimport) or nothing, depending upon how your library is being built or used. Typical usage would look like this:
#ifndef BOOST_WHATEVER_HPP
+#define BOOST_WHATEVER_HPP
+
+#include <boost/config.hpp>
+
+#ifdef BOOST_HAS_DECLSPEC // defined in config system
+// we need to import/export our code only if the user has specifically
+// asked for it by defining either BOOST_ALL_DYN_LINK if they want all boost
+// libraries to be dynamically linked, or BOOST_WHATEVER_DYN_LINK
+// if they want just this one to be dynamically linked:
+#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_WHATEVER_DYN_LINK)
+// export if this is our own source, otherwise import:
+#ifdef BOOST_WHATEVER_SOURCE
+# define BOOST_WHATEVER_DECL __declspec(dllexport)
+#else
+# define BOOST_WHATEVER_DECL __declspec(dllimport)
+#endif // BOOST_WHATEVER_SOURCE
+#endif // DYN_LINK
+#endif // BOOST_HAS_DECLSPEC
+//
+// if BOOST_WHATEVER_DECL isn't defined yet define it now:
+#ifndef BOOST_WHATEVER_DECL
+#define BOOST_WHATEVER_DECL
+#endif
+
+//
+// this header declares one class, and one function by way of examples:
+//
+class BOOST_WHATEVER_DECL whatever
+{
+ // details.
+};
+
+BOOST_WHATEVER_DECL whatever get_whatever();
+
+#endif
+And then in the source code for this library one would use:
+//
+// define BOOST_WHATEVER_SOURCE so that our library's
+// setup code knows that we are building the library (possibly exporting code),
+// rather than using it (possibly importing code):
+//
+#define BOOST_WHATEVER_SOURCE
+#include <boost/whatever.hpp>
+
+// class members don't need any further annotation:
+whatever::whatever() { }
+// but functions do:
+BOOST_WHATEVER_DECL whatever get_whatever()
+{
+ return whatever();
+}
+As well as exporting your main classes and functions (those that are actually documented), Microsoft Visual C++ will warn loudly and often if you try to import/export a class whose dependencies are not also exported. Dependencies include: any base classes, any user defined types used as data members, plus all of the dependencies of your dependencies and so on. This causes particular problems when a dependency is a template class, because although it is technically possible to export these, it is not at all easy, especially if the template itself has dependencies which are implementation-specific details. In most cases it’s probably better to simply suppress the warnings using:
+#ifdef BOOST_MSVC
+# pragma warning(push)
+# pragma warning(disable : 4251 4231 4660)
+#endif
+
+// code here
+
+#ifdef BOOST_MSVC
+#pragma warning(pop)
+#endif
+This is safe provided that there are no dependencies that are (template) classes with non-constant static data members, these really do need exporting, otherwise there will be multiple copies of the static data members in the program, and that’s really really bad.
+Historical note: on 16-bit Windows you really did have to export all dependencies or the code wouldn’t work, however since the latest Visual Studio .NET supports the import/export of individual member functions, it’s a reasonably safe bet that Windows compilers won’t do anything nasty - like changing the class’s ABI - when importing/exporting a class.
+Why bother - doesn’t the import/export mechanism take up more code that the classes themselves?
+A good point, and probably true, however there are some circumstances where library code must be placed in a shared library - for example when the application consists of multiple dll’s as well as the executable, and more than one those dll’s link to the same Boost library - in this case if the library isn’t dynamically linked and it contains any global data (even if that data is private to the internals of the library) then really bad things can happen - even without global data, we will still get a code bloating effect. Incidentally, for larger applications, splitting the application into multiple dll’s can be highly advantageous - by using Microsoft’s "delay load" feature the application will load only those parts it really needs at any one time, giving the impression of a much more responsive and faster-loading application.
+Why static linking by default?
+In the worked example above, the code assumes that the library will be statically linked unless the user asks otherwise. Most users seem to prefer this (there are no separate dll’s to distribute, and the overall distribution size is often significantly smaller this way as well: i.e. you pay for what you use and no more), but this is a subjective call, and some libraries may even only be available in dynamic versions (Boost.Thread for example).
+This section describes selection and linking with auto_link.hpp (find this file in the boost/config folder after installation).
Many Windows compilers ship with multiple runtime libraries - for example Microsoft Visual Studio .NET comes with 6 versions of the C and C++ runtime. It is essential that the Boost library that the user links to is built against the same C runtime as the program is built against. If that is not the case, then the user will experience linker errors at best, and runtime crashes at worst. The Boost build system manages this by providing different build variants, each of which is built against a different runtime, and gets a slightly different mangled name depending upon which runtime it is built against. For example the regex libraries get named as follows when built with Visual Studio .NET 2003:
+boost_regex-vc71-mt-1_31.lib
+boost_regex-vc71-mt-gd-1_31.lib
+libboost_regex-vc71-mt-1_31.lib
+libboost_regex-vc71-mt-gd-1_31.lib
+libboost_regex-vc71-mt-s-1_31.lib
+libboost_regex-vc71-mt-sgd-1_31.lib
+libboost_regex-vc71-s-1_31.lib
+libboost_regex-vc71-sgd-1_31.lib
+The difficulty now is selecting which of these the user should link his or her code to.
+In contrast, most Unix compilers typically only have one runtime (or sometimes two if there is a separate thread safe option). For these systems the only choice in selecting the right library variant is whether they want debugging info, and possibly thread safety.
+Historically Microsoft Windows compilers have managed this issue by providing a #pragma option that allows the header for a library to automatically select the library to link to. This makes everything automatic and extremely easy for the end user: as soon as they include a header file that has separate source code, the name of the right library build variant gets embedded in the object file, and as long as that library is in the linker search path, it will get pulled in by the linker without any user intervention.
+Automatic library selection and linking can be enabled for a Boost library by including the header <boost/config/auto_link.hpp>, after first defining BOOST_LIB_NAME and, if applicable, BOOST_DYN_LINK.
//
+// Automatically link to the correct build variant where possible.
+//
+#if !defined(BOOST_ALL_NO_LIB) && !defined(BOOST_WHATEVER_NO_LIB) && !defined(BOOST_WHATEVER_SOURCE)
+//
+// Set the name of our library, this will get undef'ed by auto_link.hpp
+// once it's done with it:
+//
+#define BOOST_LIB_NAME boost_whatever
+//
+// If we're importing code from a dll, then tell auto_link.hpp about it:
+//
+#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_WHATEVER_DYN_LINK)
+# define BOOST_DYN_LINK
+#endif
+//
+// And include the header that does the work:
+//
+#include <boost/config/auto_link.hpp>
+#endif // auto-linking disabled
+The library’s user documentation should note that the feature can be disabled by defining either BOOST_ALL_NO_LIB or BOOST_WHATEVER_NO_LIB:
If for any reason you need to debug this feature, the header <boost/config/auto_link.hpp> will output some helpful diagnostic messages if you first define BOOST_LIB_DIAGNOSTIC.
The Jamfile for building library "whatever" typically lives in boost-root/libs/whatever/build, the only extra step required is to add a <define> requirement to the library target so that your code knows whether it’s building a dll or static library, a typical Jamfile would look like this:
lib boost_regex : ../src/whatever.cpp :
+ <link>shared:<define>BOOST_WHATEVER_DYN_LINK=1 ;
+https://antora.org/[Antora] is a static site generator designed for creating documentation sites from AsciiDoc content. +The tool renders the documentation for Boost modules whose documentation is defined as components in the Antora playbook.+
Antora requires Node.js:
+$ node -v
+This command should return the Node.js version number:
+v16.18.0
+Antora requires Node.js version 16 or later. +If you have Node.js installed but need to upgrade it:
+$ nvm install --lts
+The following instructions also require Git to clone the repository:
+$ git --version
+This command should return the Git version number:
+git version 2.25.1
+To clone the repository that defines the Antora playbook for the Boost documentation:
+$ git clone https://github.com/boostorg/website-v2-docs
+This command clones the repository into a directory named website-v2-docs.
+This directory contains the Antora playbook files site.playbook.yml (website documentation) and libs.playbook.yml (library documentation).
After cloning the project, you need to install its dependencies using the Node.js package manager, npm:
+$ npm ci
+Then build the Antora UI bundle used for the documentation:
+$ cd antora-ui
+$ npx gulp
+$ cd ..
+The npx command, which comes with npm, can be used to build any of the playbooks in the repository.
$ npx antora --fetch libs.playbook.yml
+Or to build the website documentation:
+$ npx antora --fetch site.playbook.yml
+This command will build the documentation in the build/ directory.
npx will download the Antora CLI and the Antora site generator, and then run the antora command with the specified playbook.
+These dependencies are cached locally, so the next time you run npx antora, it will be faster.
In the release process, Antora is called with extra attributes used by the documentation components. +For instance:
+$ npx antora --fetch --attribute page-boost-branch=master --attribute page-boost-ui-branch=master --attribute page-commit-id=151c2ac libs.playbook.yml
+NOTE: Instead of using the Antora versioning control system, we render the documentation for a single version by setting a single component version (version: ~) in each component descriptor.
The libdoc.sh script simplifies the process by building the UI bundle, identifying these attributes, and running the Antora command with the specified playbook.
$ ./libdoc.sh master
+Or to build the website documentation:
+$ ./sitedoc.sh master
+Site generation complete!
+Open file:///home/user/path/to/antora/build/master/doc in a browser to view your site.
+Site generation complete!
+Open file:///home/user/path/to/antora/build/doc in a browser to view your site.
+The build.sh script identifies the branch of the current repository and runs the sitedoc.sh script with the branch name as an argument:
$ ./build.sh
+Although not necessary, you also have the option of installing Antora globally so that the antora command is available on your PATH.
$ npm i -g @antora/cli @antora/site-generator
+$ antora -v
+Read more about Antora in its Quickstart guide.
+The website is composed of components defined in the content.sources field of its playbook file
+site.playbook.yml.
+All components of the website are relative to the website-v2-docs repository.
The process for generating the documentation for all libraries is similar.
+However, the components are defined in the libs.playbook.yml file and their URLs are relative to the boostorg organization.
+Each library documentation is defined as a component in the playbook file libs.playbook.yml:
content:
+ sources:
+ - url: https://github.com/boostorg/url
+ start_path: doc
+ # ...
+The complete libdoc.sh command syntax is:
Usage: ./libdoc.sh { branch | version | 'release' | 'all' }...
+
+ branch : 'develop' | 'master' | 'release'
+ version: [0-9]+ '.' [0-9]+ '.' [0-9]+
+ 'release': builds master to build/doc/html
+ 'all': rebuilds develop, master, and every version
+
+Examples:
+
+ ./libdoc.sh develop master # build develop and master
+ ./libdoc.sh 1.83.0 # build tagged version boost-1.83.0
+Each positional argument identifies a version to be built; multiple arguments can be passed to build several versions in one invocation.
+branch: valid arguments are master or develop.
+Builds the master or develop versions of the documentation in build/master/libs or build/develop/libs.
+It checks out all libraries in their master or develop branches.
version: a semver version, such as 1.82.0 describing a Boost version.
+This allows us to generate the documentation content of an old Boost version with the current version of the Antora UI.
'release': generate the master version to build/doc/html with the release UI layout.
+This layout omits the header, Google analytics, and Edit this Page.
+This version of the documentation is meant to be distributed with sources files in the Boost release.
'all': retroactively iterate and generate the documentation for all versions of Boost
+with the most recent Antora UI. This command iterates each playbook in the history directory.
The master/develop branches of the library documentation are designed to co-exist alongside the per-release documentation and thus the branch name (or release version) does appear in its URLs.
+Each Antora-enabled library includes the component version descriptor file doc/antora.yml.
+Each library should contain an antora.yml describing the component.
+For instance,
name: mp11
+title: Boost.Mp11
+version: ~
+nav:
+ - modules/ROOT/nav.adoc
+After defining the doc/antora.yml file, the source files should be organized in the modules directory.
+Typically, doc/modules/ROOT/nav.adoc is the main navigation file for the library documentation and doc/modules/ROOT/pages/index.adoc is the main page.
+You can find more information about the Component Version Descriptor and Pages in the Antora documentation.
Once these files are in place, the library can be registered as a component in the libs.playbook.yml file with a Pull Request to the website-v2-docs repository:
content:
+ sources:
+ # ...
+ - <library-name>: https://github.com/boostorg/<library-name>
+ start_path: doc
+When working locally on an individual component, it’s usually desirable to create a local playbook for your project so that you can render the documentation locally for a single component. +The local playbook is a copy of the main playbook that removes all components except the one you are working on.
+For instance, you can create a copy of libs.playbook.yml as doc/local-playbook.yml, remove all components except the one you are working on, and adjust the component URL to point to your local filesystem:
# ...
+content:
+ sources:
+ - url: ..
+ start_path: doc
+ edit_url: 'https://github.com/boostorg/<library-name>/edit/{refname}/{path}'
+# ...
+This way, you can render the documentation locally for your component without having to render the entire Boost documentation:
+$ npx antora --fetch local-playbook.yml
+When writing a Boost library proposal, include your library in this local playbook.
+Antora supports extensions that can be used to augment the functionality of the generator.
+The playbooks in the website-v2-docs repository include a number of extensions that are available to all components.
The @cppalliance/antora-cpp-tagfiles-extension extension allows components to include links to C++ symbols defined in the library or external libraries.
+For instance, cpp:std::string[] will generate a link to the std::string symbol in the documentation.
+Note that after the cpp: prefix from custom inline macros, the syntax is similar to the one used to generate regular links in AsciiDoc, where the link is replaced by the symbol name.
The link for each symbol is generated from a tagfile provided by the main playbook or by the extension.
+The playbook can define tagfiles for other libraries by including the cpp-tagfiles field in the extension configuration:
antora:
+ extensions:
+ # ...
+ - require: '@cppalliance/antora-cpp-tagfiles-extension'
+ cpp-tagfiles:
+ files:
+ - file: ./doc/tagfiles/boost-url-doxygen.tag.xml
+ base_url: 'xref:reference:'
+ - file: ./doc/tagfiles/boost-system-doxygen.tag.xml
+ base_url: https://www.boost.org/doc/libs/master/libs/system/doc/html/
+ using-namespaces:
+ - 'boost::'
+ # ...
+Note that the files field is a list of tagfiles that are used to generate links to symbols in the documentation.
+These tagfiles can be generated by other tools like Doxygen or MrDocs.
+In some cases, users might want to write their own tagfiles to include symbols from other libraries.
+As tagfiles only describe relative links to symbols, the base_url field is used to generate the full URL to the symbol.
Also note the using-namespaces field, which is a list of namespaces that are used to generate links to symbols in the documentation.
+In the example above, cpp:small_vector[] will generate a link to the boost::small_vector symbol in the documentation unless there’s a tagfile that defines a symbol with the same name in the global namespace.
Each component can also define its own tagfiles by including the cpp-tagfiles field in the component descriptor file:
ext:
+ cpp-tagfiles:
+ files:
+ - file: ./doc/tagfiles/boost-url-doxygen.tag.xml
+ base_url: 'xref:reference:'
+ - file: ./doc/tagfiles/boost-system-doxygen.tag.xml
+ base_url: https://www.boost.org/doc/libs/master/libs/system/doc/html/
+ - file: ./doc/tagfiles/boost-core-doxygen.tag.xml
+ base_url: https://www.boost.org/doc/libs/master/libs/core/doc/html/
+ - file: ./doc/tagfiles/boost-filesystem-doxygen.tag.xml
+ base_url: https://www.boost.org/doc/libs/master/libs/filesystem/doc/
+ using-namespaces:
+ - boost::urls
+ - boost::urls::grammar
+ - boost::system
+ - boost::core
+Files and namespaces defined in components are only applied to that component.
+More information about the extension can be found in its repository and issues should be reported in its issue tracker.
+The @cppalliance/antora-cpp-reference-extension extension generates reference documentation for C++ symbols in your codebase and creates an Antora module with its pages.
+The asciidoc documentation pages are generated with MrDocs and populated in the reference Antora module.
This means, the generated reference pages can be linked in your doc/modules/ROOT/nav.adoc file as:
// ...
+* Reference
+** xref:reference:index.adoc[]
+// ...
+To enable the extension for your component, include the extension configuration in the antora.yml file:
# ...
+ext:
+ cpp-reference:
+ config: doc/mrdocs.yml
+# ...
+The mrdocs.yml file will typically include parameters to generate a compile_commands.json file used to generate the reference documentation.
+For more information about MrDocs and configuration files, see https://www.mrdocs.com/docs.
The process to generate compile_commands.json typically depends on third-party libraries used to compile the project.
+In the case of Boost libraries, other Boost libraries should be available to the command that generates the compile_commands.json file.
+The dependencies available to components are defined in the libs.playbook.yml file.
antora:
+ extensions:
+ - require: '@cppalliance/antora-cpp-reference-extension'
+ dependencies:
+ - name: 'boost'
+ repo: 'https://github.com/boostorg/boost.git'
+ tag: 'develop'
+ variable: 'BOOST_SRC_DIR'
+ system-env: 'BOOST_SRC_DIR'
+The extension will download each dependency defined in this list and expose it to the MrDocs environment via the environment variable defined in variable.
+If the library is already available in the system, the system-env field can be used to expose it to Antora, so it uses this existing path instead of downloading the library.
More information about the extension can be found in its repository and issues should be reported in its issue tracker.
+The @cppalliance/asciidoctor-boost-links extension allows component pages to include links to Boost libraries and tools. +For instance:
+boost:core[]
+This will render as if the equivalent AsciiDoc code was used:
+https://www.boost.org/libs/core[Boost.Core]
+When processed by AsciiDoc, this renders as "Boost.Core":
+<a href="https://www.boost.org/libs/core">Boost.Core</a>
+The extension supports Boost libraries and tools.
+When no custom text is provided, the extension will use the library name in PascalCase as the link text.
+When a Boost author has a preference for a different default link text, these are implemented directly in the extension.
More information about the extension can be found in its repository and issues should be reported in its issue tracker.
+The @cppalliance/antora-playbook-macros-extension extension allows playbooks to include macros that can be used to generate content in the playbook.
+Each macro has a default value that can be overridden with environment variables, the Antora --attribute command line option, or directly in the playbook with the asciidoc.attributes field.
The macro is used to implement the branch functionality described in section Running Antora. +More information about the extension can be found in its repository and issues should be reported in its issue tracker.
+Each Antora playbook includes a UI bundle that defines the layout of the documentation.
+ui:
+ bundle:
+ url: ./antora-ui/build/ui-bundle.zip
+ snapshot: true
+This provides a consistent layout across all components of the playbook.
+The source code for the UI bundle is located in the antora-ui directory of the repository.
The bundle includes a few options to customize the Boost UI by setting the following options in the main playbook:
+asciidoc:
+ attributes:
+ # Enable pagination
+ page-pagination: ''
+ # Remove the sidenav and include TOC in index.adoc page
+ remove-sidenav: ''
+ # Include the contents panel with the TOC for the current page
+ page-toc: ''
+By default, all options are disabled. +Setting the options to any string (including the empty string) enables it. +This is a convention used by Antora to enable/disable options in bundles.
+The settings defined in the playbook apply to all documentation components.
+The UI bundle documentation is available in the antora-ui/README.adoc file. This file describes the structure of the UI bundle and how to customize it.
This section describes the style of writing and formatting to use when contributing to Boost documentation.
+Our documentation priorities are:
+Clarity first - clear and accurate.
+Brevity next.
+Consistency is good.
+All else.
+The mark up tool we are using is AsciiDoc. Throughout this topic examples of AsciiDoc format follow for each topic.
+Headings are crucial elements to guide readers to find the information they’re looking for.
+Title Case: Capitalize all the important words (not: to, and, a, at, with, etc.). In title case, conjunctions, articles, and prepositions typically remain in lowercase unless they’re the first or last word in the title. For example: "To Install a Boost Library" is title cased.
+Top Level: Use only one top-level heading per page, as the first entry.
+Engagement: Use active headings for procedures. For example "Install Boost" is active, "Boost Installation" is inactive.
+Acronyms and Jargon: Do not use acronyms in headings. For example, do not use "Working with CI", instead use "Working with Continuous Integration", and then in the text "Continuous Integration (CI) means ….". Avoid jargon too, as it is notoriously confusing for a non-English-as-a-first-language reader.
+Clarity: Avoid vague or generic headings.
+Avoid Overuse: Too many levels of headings can confuse your readers. Try not to exceed four levels of headings.
+The main priorities of brevity and clarity apply as top priority to all text. Avoid jargon altogether, and spell out acronyms on first use.
+Active Voice: Use the active voice as much as possible. It’s generally more direct and easier to understand than the passive voice.
+Logical Flow: Ensure there’s a logical flow from one paragraph to the next. Each paragraph should build on the previous one, adding new information or further detail.
+Use Contractions: Contractions make for a chattier and easier to read style. For example, use "doesn’t" instead of "does not", or "would’ve" instead of "would have".
+Avoid Walls of Text: Break up large text blocks with images, lists, diagrams where appropriate.
+Avoid Duplication: Sometimes duplication is necessary, for example within a tutorial to keep the flow within the tutorial itself. Otherwise, avoid duplication whenever possible, and simply link to one source of truth.
+Text in AsciiDoc is entered as is. Be sure to leave a blank line between paragraphs.
+
+Next paragraph.
+When writing about the Boost libraries for this website, library documentation or manuals, blog posts, social media, and any official communications - take the following guidelines as pertinent:
+The name of the project as a whole is the "Boost C++ Libraries", noting that "the" is not part of the project name, and that "libraries" is plural.
+After first use of the full project name, avoid over-use of the word "Boost" in following paragraphs. Subsequent references to the project can select from terms such as "project", "libraries", and "collection", as determined by context. It is acceptable to use the single word "Boost" if it is needed for clarity (such as when comparing library collections).
+Avoid the use of the word "Boost" in section or topic headings, especially those that will appear in the table of contents.
+Generally avoid the off-putting user-experience of over-long pages.
+Landing Page: A landing page is best kept fairly short, say around 500 words, though clarity is the top priority.
+Information Pages: For Search Engine Optimization (SEO) a length of 1500 to 2000 words per page works well. Avoid over-long pages - split them logically with a clear heading for each section.
+We assume the users of Boost are using Windows, macOS, or Linux based systems. Examples of installation and running code examples should be provided for each of these three systems. There are many variants of Linux, and providing examples for the most popular variants is encouraged.
+Popular Windows tools are Microsoft Visual C++, Visual Studio Code, and GCC (MinGW or Cygwin). On macOS, Clang is a popular compiler, and on Linux, GCC and Clang are popular compilers, with CMake a widely used build tool.
+The recommendation for Boost examples is to use a popular tool in the various steps and processes, but not to recommend one tool over any other. And to mention other tools that we know work well with Boost.
+It is important that processes in our documentation work, so it will be necessary to state which tool you are using when creating working examples.
+Our code snippets include C++ examples on using libraries, but also Command Prompt entries on installing and building, and other text commands.
+Accuracy: Remember that code snippets are going to be copied and pasted by users, who are going to (rightly or wrongly) assume that the code conforms to best practices.
+Simplicity: The code snippet should be as simple as possible. Avoid complex or confusing code structures, as they can distract from the point you’re trying to make.
+Commenting: Include comments in your code to explain what’s happening. This can help readers understand the code, especially if it’s complex or non-obvious.
+Consistency: Be consistent in your coding style throughout your documentation. This includes things like indentation, naming conventions, comments, and the use of spaces or tabs.
+Context: Provide enough context around the code snippet. Explain what the code does and how it relates to the text.
+Complete: If a code snippet is intended to be run by the reader, make sure it includes all necessary parts to actually run.
+Versioning: Indicate the version of the programming language, library or framework that the code snippet is intended for. This can help prevent confusion or issues with running the code.
+To include a snippet of source code like the following, precede it with [source, cpp] on one line, followed by ----, then the code itself, and finish it with a second ----, again on its own line.
#include <boost/lambda/lambda.hpp>
+#include <iostream>
+#include <iterator>
+#include <algorithm>
+
+int main()
+{
+ using namespace boost::lambda;
+ typedef std::istream_iterator<int> in;
+
+ std::for_each(
+ in(std::cin), in(), std::cout << (_1 * 3) << " ");
+}
+Whether ordered (with numbers), or unordered (with bullets), these are the general best practices for all lists:
+Parallelism: Start each point with the same part of speech (noun, verb, etc.) to keep the list parallel. This makes the list easier to read and understand.
+Punctuation: If your points are not complete sentences, they typically do not need to be punctuated. If the points are complete sentences or if each point is a distinct idea that forms a multi-sentence paragraph, use proper punctuation.
+Length: Keep your points concise. If a point is running longer than two lines, consider breaking it down further.
+Introduction: Always introduce a list with a lead-in sentence or phrase.
+Numbered lists are best used when describing a process, a sequence of steps, or priorities.
+If the sequence or order of points does not matter, use a bulleted list instead. If the sequence matters, use a numbered list (sometimes called "ordered lists").
+Numbered list entries start with a period (.). There is no need to enter any numbers, the renderer will work them out correctly. Be sure to leave a blank line before and after a list.
Introductory sentence.
+
+. point A
+. point B
+.. Point B.1
+.. Point B.2
+. Point C
+. Point D
+Introductory sentence.
+point A
+point B
+Point B.1
+Point B.2
+Point C
+Point D
+If the sequence or order of points matters, use a numbered list instead. If the sequence doesn’t matter, use a bulleted list (sometimes called "unordered" lists).
+Order: Arrange your bullet points logically. This could be in order of importance, chronologically, or in some other meaningful way for the reader.
+Avoid Overuse: Bulleted lists are most effective when used sparingly. Too many lists can make your document hard to read.
+Note the [disc] entry determining the symbol. Alternatives are [square] and [circle]. Be sure to leave a blank line before and after a list.
Introductory sentence.
+
+[disc]
+* point A
+* point B
+** Point B.1
+** Point B.2
+
+[circle]
+* Point C
+* Point D
+Introductory sentence.
+point A
+point B
+Point B.1
+Point B.2
+Point C
+Point D
+If content naturally falls into a row/column format, then encapsulate as a table.
+Title: Every table should have a clear, concise title that describes its content and purpose.
+Headers: Use headers for each column to indicate what information is contained in that column.
+Consistency: Maintain consistent formatting and structure across all tables in a document to enhance readability and avoid confusion.
+Simplicity: Keep the table as simple as possible. Avoid unnecessary columns or rows, and ensure that the data presented is relevant and necessary.
+Size: The table should fit the page size. If the table is too large, consider breaking it down into several smaller tables.
+Striping: If your table has many rows, consider using striping (alternating row colors) to make it easier to follow across large tables.
+Units: If your table includes measures, ensure to specify the units.
+Notes and References: If necessary, include footnotes or references right below the table for any clarifications.
+Data Order: Consider the most logical order to present your data. This could be alphabetical, numerical, chronological, or in order of importance.
+The following example AsciiDoc source would produce the table shown below. Note the relative column widths (1 and 2). This means the first column uses 1/3rd of the width available, and the second column 2/3rds of the width. Also, a header row is required, and zebra striping. Be sure to leave a blank line before and after a table.
+[cols="1,2",options="header",stripes=even,frame=none]
+|===
+| *Head1* | *Head2*
+| row1 | text
+| row2 | text
+|===
| Head1 | Head2 |
|---|---|
| row1 | text |
| row2 | text |
Images work well in tutorials, and other process-style documentation, where the reader can find visual confirmation that they have followed the correct procedure.
+Relevance: Ensure the images used are relevant and directly aid in understanding the content. Avoid using images as mere decorations or fillers. Don’t overload diagrams or images with too much information. They should aid understanding, not create confusion.
+Quality: Images should be of high quality. They should be clear and easy to read/understand, even when printed.
+Referencing: Always reference images in the text. This not only directs the reader’s attention to the image but also clarifies what the image is meant to illustrate.
+Accessibility: Ensure images are accessible for people with visual impairments. This can include providing alt text for online documents, and detailed captions for printed documents. Be aware that color choices can have an impact on readability, especially for people with color blindness.
+Consistency: Try to maintain a consistent style, quality, and appearance for all images throughout the document.
+File Type and Compression: Use the correct file type for your images. JPEGs are best for photographs, while PNGs are better for screenshots, SVGs for logos and diagrams. Also, be aware of file size - compress images if they are large, but ensure this doesn’t compromise quality.
+Copyright: Only use images that you have the right to use. Always attribute images correctly according to the terms of the license.
+Boost does not require any specific documentation structure. However, +there are some important considerations that influence content and +structure. For example, many Boost libraries wind up being proposed for +inclusion in the C++ Standard, so writing them initially with text +suitable for inclusion in the Standard may be helpful.
+Also, Boost library documentation is often accessed via the World Wide Web, including via search engines, so context is often important for every page.
+Finally, Boost libraries should provide additional documentation, +such as introductory, tutorial, example, and rationale content. With +those things in mind, we suggest the following guidelines for Boost +library documentation.
+The documentation structure required for the C++ Standard is an +effective way to describe the technical specifications for a library. +Although terse, that format is familiar to many Boost users and is far +more precise than most ad hoc formats. The following description is +based upon §17.3 of the Standard. (Note that while final Standard +proposals must include full standard-ese wording, which the committee +will not do for you, that level of detail is not expected of Boost +library documentation.)
+Each document contains the following elements, as +applicable. (1):
+The Summary provides a synopsis of the category, and introduces the +first-level subclauses. Each subclause also provides a summary, listing the headers specified in the subclause and the library entities provided in each header.
+Paragraphs labeled "Note(s):" or "Example(s):" are informative, other +paragraphs are normative.
+The summary and the detailed specifications are presented in the order:
+Macros
+Values
+Types
+Classes
+Functions
+Objects
+The library can be extended by a C++ program. Each clause, as +applicable, describes the requirements that such extensions must meet. +Such extensions are generally one of the following:
+Template arguments
+Derived classes
+Containers, iterators, and/or algorithms that meet an interface +convention
+Interface convention requirements are stated as generally as possible.
+For example, instead of stating "class X has to define a member function operator++()", say "the interface requires that for any object x of class X, ++x is defined" (noting that whether the operator is a member or not is unspecified).
Requirements are stated in terms of well-defined expressions, which +define valid terms of the types that satisfy the requirements. For every set of requirements there is a table that specifies an initial set of +the valid expressions and their semantics. Any generic algorithm that +uses the requirements is described in terms of the valid expressions for its formal type parameters.
+Template argument requirements are sometimes referenced by name.
+In some cases the semantic requirements are presented as C++ code. Such +code is intended as a specification of equivalence of a construct to +another construct, not necessarily as the way the construct must be +implemented.(2)
+The detailed specifications each contain the following elements:
+Name and brief description
+Synopsis (class definition or function prototype, as appropriate)
+Restrictions on template arguments, if any
+Description of class invariants
+Description of function semantics
+Descriptions of class member functions follow the order (as +appropriate) (3):
+Constructor(s) and destructor
+Copying and assignment functions
+Comparison functions
+Modifier functions
+Observer functions
+Operators and other non-member functions
+Descriptions of function semantics contain the following +elements (as appropriate) (4):
+Requires: the preconditions for calling the function
+Effects: the actions performed by the function
+Post-conditions: the observable results established by the function
+Returns: a description of the value(s) returned by the function
+Throws: any exceptions thrown by the function, and the conditions that would cause the exception
+Complexity: the time and/or space complexity of the function
+Rationale: the rationale for the function’s design or existence
+Complexity requirements specified in the library clauses are upper bounds, and implementations that provide better complexity guarantees satisfy the requirements.
+The function semantic element description above is taken directly from the C++ standard, and is quite terse. Here is a +more detailed explanation of each of the elements.
+Note the use of the <code> … </code> font tag to distinguish actual C++ usage from English prose.
Preconditions for calling the function, typically expressed as predicates. The most common preconditions are requirements on the value of arguments, often in the form of C++ expressions. For example,
+void limit( int * p, int min, int max );
+Requires: p != 0 && min <= max
Requirements already enforced by the C++ language rules (such as the +type of arguments) are not repeated in Requires paragraphs.
+The actions performed by the function, described either in prose or in +C++. A description in prose is often less limiting on implementors, but +is often less precise than C++ code.
+If an effect is specified in one of the other elements, particularly +post-conditions, returns, or throws, it is not also described in +the effects paragraph. Having only a single description ensures that +there is one and only one specification, and thus eliminates the risk of +divergence.
+The observable results of the function, such as the value of variables. +Post-conditions are often expressed as predicates that are true after the +function completes, in the form of C++ expressions. For example:
+void make_zero_if_negative( int & x );
+Post-condition: x >= 0
The value returned by the function, usually in the form of a C++ +expression. For example:
+int sum( int x, int y );
+Returns: x + y
Only specify the return value; the type is already dictated by C++ +language rules.
+Specify both the type of exception thrown, and the condition that causes
+the exception to be thrown. For example, the std::basic_string class
+specifies:
void resize(size_type n, charT c);
+Throws: length_error if n > max_size().
Specifying the time and/or space complexity of a function is often not +desirable because it over-constrains implementors and is hard to specify +correctly. Complexity is thus often best left as a quality of +implementation issue.
+A library component, however, can become effectively non-portable if +there is wide variation in performance between conforming +implementations. Containers are a prime example. In these cases it +becomes worthwhile to specify complexity.
+Complexity is often specified in generalized +"Big-O" notation.
+Boost library documentation is often accessed via the World Wide Web. Using +search engines, a page deep in the reference content could be viewed +without any further context. Therefore, it is helpful to add extra +context, such as the following, to each page:
+Describe the enclosing namespace or use fully scoped identifiers.
+Document required headers for each type or function.
+Link to relevant tutorial information.
+Link to related example code.
+Include the library name.
+Include navigation elements to the beginning of the documentation.
+It is also useful to consider the effectiveness of a description in +search engines. Terse or cryptic descriptions are less likely to help +the curious find a relevant function or type.
+(1) To save space, items that do not apply to +a clause are omitted. For example, if a clause does not specify any +requirements, there will be no "Requirements" subclause.
+(2) Although in some cases the code is +unambiguously the optimum implementation.
+(3) To save space, items that do not apply to +a class are omitted. For example, if a class does not specify any +comparison functions, there will be no "Comparison functions" subclause.
+(4) To save space, items that do not apply to +a function are omitted. For example, if a function does not specify any +precondition, there will be no "Requires" paragraph.
+Revised April, 2023
+Distributed under the Boost Software License, Version 1.0. Refer to http://www.boost.org/LICENSE_1_0.txt.
+It is not a requirement for a Boost library for the documentation to adhere to the following structure. +However, it is listed here as a guide, if needed.
+The following structure for Boost library documentation should work for most libraries. +Take each section in the order listed below, and fill in the details for your library. +Alternatively, if you want your documentation to be closer to the C++ Standard, refer to Documentation Components.
+Although library documentation can use any format in +standalone documentation, the instructions on this page will use AsciiDoc as the format. +Visit AsciiDoc Syntax Quick Reference for more information on AsciiDoc syntax.
+Where you see <LibraryName> in the templates below, replace with the name of your library.
Provide a brief overview of the focus and features of your library.
+Mention the portability of the library, platforms and compilers. +List dependencies.
+A developer should have a good idea if the library is right for their project, after reading your Overview.
+Note that footnote references link to the footnotes section, and the entries in the footnote section link back to the references.
+== Overview
+
+Add an introduction to your library here. Refer to previous libraries on the content of an Overview.
+
+== First Topic
+
+[#footnote1-location]
+text
+text that requires a footnote. link:#footnote1[(1)]
+
+== Second Topic
+
+[#footnote2-location]
+text
+text that requires a footnote. link:#footnote2[(2)]
+
+== Third Topic
+
+text
+
+== Footnotes
+
+[#footnote1]
+link:#footnote1-location[(1)]: footnote 1 text
+
+[#footnote2]
+link:#footnote2-location[(2)]: footnote 2 text
+A Rationale provides a description of the motivation behind the library. +Describe the current problems that exist, and the goals of the library in addressing those problems.
+== Introduction
+
+Add an introduction to the rationale for your library here. Refer to previous libraries on the content of a Rationale.
+
+== First Topic
+
+[#footnote1-location]
+text
+text that requires a footnote. link:#footnote1[(1)]
+
+== Second Topic
+
+[#footnote2-location]
+text
+text that requires a footnote. link:#footnote2[(2)]
+
+== Third Topic
+
+text
+
+
+== Footnotes
+
+[#footnote1]
+link:#footnote1-location[(1)]: footnote 1 text
+
+[#footnote2]
+link:#footnote2-location[(2)]: footnote 2 text
+The contents of the guide should be enough to get a new user up and running with your library.
+Provide a complete API reference to your library, without duplicating the contents of the Configuration or Definitions sections, which follow.
+== Introduction
+
+Introductory text
+
+== Macros
+
+=== Macro1
+
+=== Macro2
+
+== Values
+
+=== Value1
+
+=== Value2
+
+== Types
+
+=== Type1
+
+=== Type2
+
+== Classes
+
+=== Class `class name`
+
+class overview text
+
+==== Class `class name` synopsis
+
+....
+namespace boost
+{
+ class <class name>
+ {
+ };
+}
+....
+
+==== Class `class name` constructors and destructor
+
+....
+constructor
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+....
+destructor
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+==== Class `class name` comparison functions
+
+....
+comparison-function
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+==== Class `class name` modifier functions
+
+....
+modifier-function
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+==== Class `class name` observer functions
+
+....
+observer-function
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+==== Class `class name` static functions
+
+....
+static-function
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+== Functions
+
+....
+function1
+....
+
+*Requires:* text
+
+*Effects:* text
+
+*Post-conditions:* text
+
+*Returns:* text
+
+*Throws:* text
+
+*Complexity:* text
+
+*Note:* text
+
+*Danger:* text
+
+*Rationale:* text
+
+== Objects
+
+== Object specifications
+
+== Examples
+If your documentation is defined as an Antora component, the @cppalliance/antora-cpp-reference-extension extension can be used to generate the reference documentation from the source code.
+Refer to Antora Guide for more details.
Describe the configuration macros that are used in your library.
+== `<LibraryName>` Configuration
+
+== Introduction
+
+`<LibraryName>` uses several configuration macros in
+http://www.boost.org/libs/config/config.htm[<boost/config.hpp>], as well as configuration macros meant to be supplied by the application. These macros are documented here.
+
+== Application Defined Macros
+
+These are the macros that may be defined by an application using `<LibraryName>`.
+
+[cols="1,2",options="header",stripes=even,frame=none]
+|===
+| *Macro* | *Meaning*
+|`macro` |meaning text
+|`macro` |meaning text
+|===
+
+== Public Library Defined Macros
+
+These macros are defined by `<LibraryName>`, but are also expected to be used by application code.
+
+[cols="1,2",options="header",stripes=even,frame=none]
+|===
+| *Macro* | *Meaning*
+|`macro` |meaning text
+|`macro` |meaning text
+|===
+
+== Library Defined Implementation Macros
+
+These macros are defined by `<LibraryName>` and are implementation details of interest only to implementers.
+
+[cols="1,2",options="header",stripes=even,frame=none]
+|===
+| *Macro* | *Meaning*
+|`macro` |meaning text
+|`macro` |meaning text
+|===
+Application Defined Macros
+These are the macros that may be defined by an application using <LibraryName>, for example:
| Macro | Meaning |
|---|---|
| `macro` | The x and y values are added together. |
| `macro` | The x and y values are multiplied together. |
If your library uses any terminology that might benefit from a description, consider adding a "Definitions" page to your documentation.
+Each definition is typically preceded by an anchor, so can be linked to from any other section of your documentation. +This can help reduce duplication of explanations: link to your definitions rather than repeat explanations.
+== <LibraryName> Definitions
+
+Introductory text.
+
+== Definitions
+
+[#definition-term1]
+*Term1*::
+definition-text1
+
+[#definition-term2]
+*Term2*::
+definition-text2
+Assume there is a String-Container library, and that String container algorithms work using some pre-defined concepts:
+A Finder is a function which searches for an arbitrary part of a container. +For example (add example logic here).
+Formatters are used by string replace algorithms. +For example (add example logic here).
+Advanced topics include advanced tutorials or examples, and also cover porting, customization, synchronization, and performance tuning.
+A Frequently Asked Questions (FAQ) section might add value to your documentation, by aiding developers with answers to known issues or complexities.
+If there are a large number of questions and answers, group them into sections with headings such as Errors and Exceptions, Performance, and so on.
+Note that every question is in bold, and always ends with a question mark.
+=== FAQ
+
+==== *question1?*
+
+answer1
+
+==== *question2?*
+
+answer2
+Does this library work with COM methods?
+Yes, if you add #define BOOST_ENABLE_STDCALL to your code.
Does this library support Windows earlier than Windows 10?
+No, the only supported versions of Windows are 10 and 11.
+Make sure to version your library correctly, and provide release notes for each release. +Refer to Version Control and Release Notes for details.
+If bibliographic references are required in your documentation for your library, add a bibliography to the documentation.
+The book title can be text, or can be a link to a site too if the text of the book is available online. +The ISBN number can be replaced by another reference number if the reference is to an academic paper, or other reference that is not published in book form.
+Ideally, list the bibliography in alphabetical order.
+=== Bibliography
+[Surname/s] Authors full names. _Book title_. ISBN number, Publication date.
+[Surname/s] Authors full names. _Book title_. ISBN number, Publication date.
+[Turcan, Wasson] Peter Turcan, Mike Wasson. Fundamentals of Audio and Video Programming for Games. +ISBN: 073561945X, 2003.
+If acknowledgements are required for your library, add an acknowledgements section to the documentation. +As a rule of thumb, the acknowledgements should be ordered with the most important contributions coming first. +Links can be included, if required.
+=== Acknowledgements
+
+The author appreciates the contributions to the library made by the following:
+
+* text1
+* text2
+The author appreciates the contributions to the library made by the following:
+John Doe and Jane Doe for editing the original draft documentation.
+John Doe for input on the architecture and design of the API interfaces.
+Jane Doe for numerous improvements and suggestions on the text of the error messages.
+Revised April, 2023
+Distributed under the Boost Software License, Version 1.0. Refer to http://www.boost.org/LICENSE_1_0.txt.
+This section covers how to structure library documentation.
+In the Boost release, the entry point for the documentation for a library should be the libs/<LIBRARY NAME>/index.html file.
+This means all projects should contain an index.html file in the root of the library directory.
Since the Boost releases including all source files and artifacts are available from https://www.boost.org/doc/libs/<VERSION>/, the location for published library documentation will match the following:
https://www.boost.org/doc/libs/<VERSION>/libs/<LIBRARY NAME>/index.html
+For example:
+https://www.boost.org/doc/libs/latest/libs/json/index.html
+https://www.boost.org/doc/libs/latest/libs/serialization/index.html
+https://www.boost.org/doc/libs/latest/libs/beast/index.html
+Although the content of the index.html file is up to the library author, libraries use this index.html entry point to redirect to the main documentation page located elsewhere.
+For instance, assuming the final library documentation is located in doc/index.html, the index.html file might contain the following:
<html>
<head>
+ <meta http-equiv="refresh" content="0; URL=doc/index.html">
+</head>
+<body>
+ Automatic redirection failed, please go to
+ <a href="doc/index.html">doc/index.html</a>.
+</body>
+</html>
+In principle, the page to which index.html redirects can be another source file in the library directory.
+In most cases, however, it redirects to another HTML file generated by the build system in the release process.
In order for a library’s documentation to be built correctly and the finished product written to the location described above, you must follow the Organization Requirements, in particular the Building Documentation section.
+In particular, the source files for the documentation must be located in the doc directory of the library.
+Two types of documentation layouts are supported:
Antora component documentation
+Standalone documentation
+Antora is a documentation site generator used to build the Boost website documentation and library documentation for libraries that opt to use it.
+A library can choose to use Antora for its documentation by including a doc/antora.yml file in the repository, such as:
name: mp11
+title: Boost.Mp11
+version: ~
+nav:
+ - modules/ROOT/nav.adoc
+The navigation structure is defined in doc/modules/ROOT/nav.adoc and the documentation pages are defined in doc/modules/ROOT/pages/.
+All pages are written in AsciiDoc format.
Once the component is defined, it can be enabled in the Boost website repository by adding the component to the libs.playbook.yml file.
In the Boost release process, this Antora playbook is built with all the other documentation components, and the resulting HTML files are written to the doc/antora directory of the release.
+At this point, the Documentation Entry Point for the library documentation should be updated, so it redirects to ../../doc/antora/<LIBRARY NAME>/index.html.
For detailed instructions on how to set up an Antora component for a library, please refer to Antora Guide.
+If a library chooses not to use Antora, the doc directory of the library should contain a doc/Jamfile file that specifies how the documentation should be built.
+Although this build script can be used to invoke any documentation tool, common tools used for building documentation include: AsciiDoc, Doxygen, and Boost.Quickbook.
+When deciding to include standalone documentation with your library, the doc/Jamfile build scripts from other Boost libraries are typically used as reference for new libraries.
In the Boost release process, these scripts defined in doc/Jamfile are built for each library.
+The script typically builds the HTML documentation files in-place, often to doc/html/.
+These in-place files are then copied as-is to the release directory.
+For instance, if your library’s documentation is built to doc/html/index.html, the Documentation Entry Point should be updated to redirect to doc/html/index.html.
Revised April, 2023
+Distributed under the Boost Software License, Version 1.0. Refer to http://www.boost.org/LICENSE_1_0.txt.
+The Boost C++ Library collection (or “Libraries”) is a community-driven, open source project published under the terms of The Boost Software License (the “BSL”). It is governed by a core team (the “Developers”), consisting of the members of the official Boost GitHub organization located at https://github.com/boostorg (the “Boost GitHub”) having the Owner role, plus any proxies they designate.
+The Libraries are sponsored in part by The C++ Alliance, Inc. (or “Alliance”), a California 501(c)(3) non-profit which owns and protects the Boost logo trademark (the “Boost Logo”) shown below. This document provides information about the use of the Logo, as well as examples of common ways people might want to use this trademark, with explanations as to whether those uses are permitted or not or require additional written permission.
+The Boost Logo:
+
+The background is transparent. The source for the image is available at boost-logo-transparent.svg.
+A trademark is a name or design that tells the world the source of a good or service. Protecting trademarks for an open-source project is particularly important. Anyone can change the source code and produce a product from that code, so it’s important that only the original product, or variations that have been approved by the project, use the project’s trademarks. By limiting use of the Boost Logo, the Alliance and the Developers can help users and contributors know they are getting an official product and not someone else’s modified version, or something unrelated. The trademark assures users and developers of the quality and safety of the product they are using.
+The Boost Logo (bitmap and vectors) are owned by the Alliance and distributed under the terms of the license granted by this policy.
+The use of the Boost Logo is also governed by trademark, whose policy is described herein.
+The Boost Logo makes it possible to determine whether or not a library is officially a part of Boost, and whether or not a communication is from the Developers. So we’re careful about where we allow it to appear. But at the same time, we want the community to have confidence using it in prescribed ways without written approval. The policy laid out here explains how we strike a balance. If you have any doubts about whether your intended use requires permission, please contact us (Boost developers mailing list).
+The Boost Logo may not be used in ways that appear (to a casual observer) official, affiliated, or endorsed by the Developers or part of the Libraries, unless you have written permission from the Alliance.
+As with any trademark, the Boost Logo can be used with minimal restriction to refer to the Libraries, or on official communication from the Developers such as when announcing the results of a formal review, or announcing the publishing of a new version of the Libraries. If you use the Boost Logo to distribute any product or content created by someone else, then you must give appropriate credit to the creator(s) of the content and you must indicate if you made changes to the content.
+The Boost Logo may not be used:
+to refer to any other library or collection of libraries,
+in a way that is misleading or may imply an association between the Libraries and unrelated modules, tools, documentation, or other resources, or
+in a way that implies an endorsement or communication from the Developers, or
+in ways that confuse the community as to whether the Libraries are open source and free to use.
+Indicating accurately that a work (components such as source code, documentation, and tests) is part of the Libraries. In this case you may reflect this by using the Boost Logo without prior approval, for both commercial and non-commercial uses.
+Watermarking official communications from the Developers when referencing milestones or events related to the Libraries such as formal review results or releases is allowed.
+Using the Boost Logo on websites, brochures, documentation, academic papers, and books to refer to the Libraries is allowed.
+Using the Boost Logo where the products are not part of the Libraries. In this case, written approval from the Alliance is required to ensure that the usage of the Boost Logo does not confuse users or indicate an official association.
+Using the Boost Logo on any thing you wear such as T-shirts, hats, and other artwork or merchandise, requires written approval from the Alliance to ensure that the presentation of the mark reflects the high quality of the brand, and does not confuse users or indicate an official association.
+Using the Boost Logo for social events like meetups, tutorials, and the like is allowed for events that are related to the Libraries and free to attend. For commercial events (including sponsored ones), prior written approval from the Alliance is required. The event cannot appear to be officially endorsed or run by the Developers without written permission.
+Reproduction of the Boost Logo in compliance with the terms and conditions stated above must meet the following additional requirements:
+Color reproductions must use Boost Mustard (or the closest possible color match), defined as:
+Freetone: P 17-8 C
+CIE Lab: 69.52, 21.09, 66.9
+Black and white reproductions of the Boost Logo must be either pure black or pure white.
+The background color must maintain at least 50% contrast with the logo color.
+The logo must not be rotated or have its aspect ratio changed.
+The logo may only appear once on any individual page, artwork, or product packaging.
+The logo cannot be combined with any other artwork without prior written approval from the Alliance.
+Permission to use the Boost Logo is granted for as long as the usage is consistent with the terms and conditions herein. Usage of the Boost Logo not in accordance with this document and without written approval from Alliance is expressly prohibited.
+There are many ways to get involved with Boost, up to and including submitting your own library. In roughly ascending order of time and work commitment, here are the ways you can contribute to Boost:
+The Boost mailing list is a platform for discussions and communications related to the Boost Libraries. The mailing list serves several key purposes:
+Announcements: The mailing list is a channel for updates, announcements about new releases, bug fixes, events, or any other relevant news.
+Collaboration: The mailing list also serves as a platform for collaboration, fostering connections between developers and encouraging team-based projects.
+Discussion: Developers discuss ideas, techniques, issues, and potential improvements related to Boost libraries.
+Support: Newcomers or users encountering difficulties can seek help from the community. Experienced members provide solutions, advice, and share their experiences.
+If you do join, follow the Boost Discussion Policy of the mailing list, as it helps in maintaining a respectful and productive environment.
+To subscribe to the Developers Mailing List, go to the Boost developers' mailing list.
+There are other mailing lists you might want to join, such as the Boost Users mailing list, or project-specific lists. Visit Boost Mailing Lists for more details.
+There are different locations to report issues, depending on which content needs to be updated.
+To formally report an issue on an individual library, locate the repo for that library. Many of these repos exist in the Boost Super-project on GitHub. Search under the libs folder. For example, to file an issue on the Boost.Json library, click New Issue at https://github.com/boostorg/json/issues. If you have difficulty locating the correct repo, then ask the question of the Boost developers' mailing list.
Before filing an issue, it is good practice to identify the admins (authors or maintainers) of a library. This information can be found in the meta folder for the library. For example: https://github.com/boostorg/json/blob/develop/meta/libraries.json contains the authors for the Boost.Json library. Also, library documentation and commit history may well specify who is actively involved. Make a connection first to ensure the library admins are open to updates.
Whichever list you add your issue to, the issue will be triaged and addressed appropriately. Providing full details, examples and screenshots where appropriate, should give you a quicker response.
+There are sources of information on the Boost libraries outside of this website and the library documentation, including:
+If you notice content that needs to be updated in these sources, perhaps out-of-date information, incorrect or missing information, or perhaps just language that could be improved, then complete the same process as To Report an Issue with this Website. Feedback like this is welcome.
+To report an issue with the website, including this documentation, click the New Issue button on GitHub page: website-v2-docs/issues.
+After clicking New Issue, you will be able to add a Title and Description. Provide as much detail as possible, including links where helpful and suggestions for the updated content.
+All libraries are reviewed by the community before becoming part of Boost. There are several roles in the review process, from managing the steps of the review process, to testing and providing feedback. Read the Formal Review Guide for details.
+Rather than report an issue, or suggested improvement, and wait for another developer to address it, you can make the code changes yourself (after verifying with the library admins that they are open to updates). To be successful you will need to be familiar with GitHub, and obviously a competent C++ developer. After finding the issue, you will need to go to the GitHub repository for that library, and create a fork and clone the library.
+With your fork of the library in your own GitHub account, you can make any changes and updates you wish. It is certainly recommended to thoroughly test the changes, and be aware of the test matrix for the library. It is also recommended you thoroughly comment your code changes, potentially every line of code you add should be commented as to its purpose.
+When you are satisfied you have met the bar for testing, you can commit the changes, push them to GitHub, and create a Pull Request. Most libraries have Continuous Integration (CI) set up, and will check your changes on several operating systems and compilers. If CI is not set up, there will be more onus on you and your testers to verify the test matrix.
+After this, you can take a step back, and the owners or maintainers of the library will review your code. They might ask for some modifications or improvements. Incorporate the feedback provided by the reviewers and push your modifications.
+If all goes well, your changes will be merged by the library admins, and you will have made a welcome contribution.
+There may be an existing library that you have a deep interest in, that for some reason no longer has an active owner or maintainer. Or, perhaps, is being less well maintained than might be ideal. Find what you can out by asking questions of the existing maintainers (listed in the Library Metadata json file) and, if no reply, on the Boost developers' mailing list. If the replies suggest you could take a role, consider requesting, again via the mailing list, to become a library maintainer. For larger libraries there can certainly be more than one maintainer.
+Maintaining an existing library is similar in steps to Contribute to an Existing Library, in that you will have to create a fork and clone the library. However, more is expected. You will have to spend time educating yourself on the full functionality of the library, and be prepared to make changes to the code anywhere changes are needed.
+For a larger library this education can turn out to be a significant, and perhaps onerous task. It can be quite difficult to reverse engineer in your mind the processes and purpose of code written by another developer, with all the expected personal style and idiosyncrasies that make their way into source code. However, that being said, it can be a valuable contribution to take an existing, but not fully maintained, library you are interested in and give it a new lease of life.
+| + + | ++All libraries in Boost are tested when the super-project is tested, so even without a dedicated maintainer the libraries must pass a series of automated tests. + | +
This is the big dog of contributions. There are developers who have contributed several libraries to Boost. Start by reading the Library Requirements section, and make sure to engage the Boost community before getting too deep into a massive coding project.
+This Contributor Guide is for C++ developers who wish to submit a library to the Boost collection. Or to contribute in some other way, such as testing or evaluating library submissions.
+Feedback on any aspect of the documentation is encouraged, and is available by creating a New Issue. Also, if you have feedback on any particular library, create an issue in the repo for that library.
+Start by reading about how to get involved in Boost.
+The information in this appendix is only pertinent to those contributors who are involved with the server hosting, operations and infrastructure for the Website v2. This section contains links to the eclectic set of documentation maintained on GitHub which provides the detailed implementation notes required by developers and system administrators for the website.
+Provides details on the staging and production processes.
+| Document Link | +Description | +
|---|---|
| + | If you are involved in testing the Boost website-v2, you might not need a full cloud deployment. This document describes potentially more convenient options, including using a local Docker composition, the existing staging site, or the existing production site. If you are involved in the C++ Alliance’s development efforts, code may be checked in directly to the staging and production sites, so another site isn’t needed. |
+
| + | The website is hosted on Google Cloud Platform in a project named "boostorg-project1" within a CPPAlliance account. This document describes the Kubernetes cluster running in the us-central1 region, the Memorystore instance for each environment, and Archive Registry of the Docker images for each website release. |
+
| + | In the us-east-2 region AWS S3 buckets store the described content. |
+
| + | A Fastly CDN is configured as a front-end to the stage and production sites. This document describes the configuration steps. |
+
| + | Mailman-core servers have been installed to test a selection of REST API calls. |
+
| + | Contains some notes on staging synchronization. |
+
Provides operational details, specifically for each release.
+| Document Link | +Description | +
|---|---|
| + | This section covers what contributors should know about the preview generation on cppalliance.github.io and other github repositories. |
+
| + | Provides notes on the operation of Jenkins. A Jenkins build server is hosted on AWS at https://jenkins.cppalliance.org:8443. The server builds and publishes previews of the documentation for a number of GitHub repositories when pull requests are submitted. |
+
| + | This section goes into exhaustive detail on Jenkins output that can be used as a reference for any job. |
+
| + | The current Jenkins job inventory. |
+
| + | Provides operational details on the CPPAlliance Drone CI implementation. |
+
| + | There are four subdomains which redirect to The C++ Alliance. The redirects act as shortcuts to quickly reach the Slack invitations page, or the main homepage. This section provides implementation details. |
+
There are two main areas of documentation that are built for this website:
+Antora Docs on the website : including this Contributor Guide, along with the User Guide and Formal Review Guide.
+Boost Docs on the website : covering the documentation for each individual library.
+In addition to these there are also Boost Archives created by Python scripts, and automated Doc Previews that are built by Jenkins.
+These build processes are outlined in Doc Builds, which includes links to more details of the build processes.
+The purpose of a Boost Release Report is to inspire and credit all of the volunteer contributors who put the work in for the release. A significant audience for the report are these contributors. It is a "feel-good" document by design.
+The introductory text for each release will depend on the priorities and new content for the release. For example, refer to: Boost 1.87 Release Report.
+Leave it to the technical documentation for each library, tool or process to go into such things as limitations, known issues, performance considerations, and so on.
+Write this report in such a way as to inspire our contributors to re-engage for a future release!
+The Fiscal Sponsorship Committee plays a custodial and oversight role rather than being actively involved in guiding the technical direction of the Boost C++ Libraries. Its primary function is to look after Boost’s organizational, legal, and logistical assets, ensuring the community and its resources are well-supported for the long-term sustainability of the project.
+Boost operates with a decentralized and community-driven development model, where individual contributors and maintainers have the autonomy to drive their libraries' progress. The committee supports this by ensuring the ecosystem remains healthy and sustainable for current and future contributors.
+When referring to Boost "assets", these assets fall into five broad categories:
+The Boost name and logo are recognized worldwide within the C++ development community. The committee ensures that these are protected and used appropriately to maintain the reputation of the project. The Boost website, its domain (boost.org), and associated online content are managed to ensure accessibility and credibility.
+The libraries themselves represent a significant intellectual asset, as they include contributions from many developers under the Boost Software License (BSL), or a similar open-source license. The committee ensures the letter and spirit of the license terms are upheld and remains compatible with Boost’s goals of being free and open-source. The extensive and detailed documentation accompanying Boost libraries is an important part of the codebase, ensuring that developers can effectively use the libraries.
+Boost’s GitHub repositories are a critical part of its infrastructure. The committee helps ensure these resources are properly maintained and accessible to contributors. Boost’s Test Matrix and automated build systems, particularly the Continuous Integration (CI) systems, are essential for quality assurance. The committee supports the infrastructure that allows library authors to ensure compatibility across compilers and platforms, and oversees the mailing lists and communication channels used by the Boost community for development discussions.
+Boost’s community of contributors and users is one of its most valuable assets. The committee provides governance to maintain the open, collaborative, and meritocratic culture of the project. This includes a role in the C++ community, including participation in conferences like C++ Now, and other events.
+Boost operates as part of the C++ Alliance, a non-profit organization that ensures proper legal and financial management of Boost resources. The committee collaborates with the Alliance on these matters. If funding or sponsorships are received (for example, for infrastructure or events), the committee helps ensure these are used appropriately.
+Currently, to contact the committee either post on the #boost Slack channel, or email the Boost Developers Mailing List, clearly mentioning the "Fiscal Sponsorship Committee" as the intended recipient.
Writing release notes is crucial for informing users about the changes made in the latest version of your library.
+Two versions of your release notes need to be prepared: one for your library’s own documentation, one for a Boost history that covers all libraries added or updated in a release.
+For the former, the Boost community does not have a strict format for your libraries' version of release notes (and some libraries refer to release notes as a change log or history). Follow the Checklist.
+For the latter, you will need to update a Boost history file with a succinct version of your completed release notes. These files do have a specified format, described in Update Boost History.
+When writing release notes for your library remember that the main goal is to effectively communicate the changes to the end-users. It’s always a good practice to put yourself in the mindset of your developers, and think about what information they would need and appreciate. Be brief, but not so brief that only a seasoned user of your library will understand the context.
+If this release addresses particular topics, provide a brief overview of the main changes, enhancements, or the theme of this release if there’s one.
+Before detailing other updates, immediately inform users of any backward-incompatible changes. This is crucial as these changes can directly impact existing code. The process for breaking changes is covered in detail in Version Control.
+If you have added additional parameters (or changed a return value, or similar API change) to a function that are not breaking changes, then list them separately. If an API change might break an existing application, then ensure to list it under [_breaking_changes].
+Describe any new features or major enhancements. For complex features, consider adding a brief example or pointing to updated documentation. If the feature is referenced in an Issue, then consider adding a link to that issue.
+List the fixed bugs, ideally with a brief description. If you’re using a bug tracking system (like GitHub issues), provide links to the bug reports described in the issues. Include the identifier or number of the bug if available.
+Mention any functions, classes, or features that are deprecated and might be removed in future releases.
+Detail any optimizations or performance-related changes, ideally with metrics or benchmarks if they are significant.
+Highlight and link to any significant updates in documentation, new examples, or tutorials.
+Credit contributors or those who reported crucial bugs. Recognize any person or organization that played a special role in this release.
+Use clear and concise language. Experience has shown that short release notes are read, long files much less so. Do not add into release notes extensive text that should be in your main library documentation.
+Be consistent in the formatting. If you’re using bullet points for one section, maintain that for others.
+Use headers and sub-headers to clearly differentiate sections.
+Provide links to:
+Your main library documentation, if there is updated text, examples or tutorials
+Relevant issues, discussions or threads
+Various macros are available to library authors and maintainers to help keep release notes consistent between libraries, and easier to both write and read. Absolute links can be added to release notes, if necessary, though use of the following macros is encouraged and recommended.
+| Macro | +Description | +Examples | +
|---|---|---|
|
+Shows the library title in a consistent format, and links to the library on Git. |
+
|
+
|
+The specified "title" becomes a link to the file. |
+
|
+
|
+Provides a link to a GitHub PR for the specified library. |
+
|
+
|
+Provides a link to the GitHub Issue for the specified library. |
+
|
+
Checking out the release notes from fellow library authors and maintainers is good practice:
+When you have completed the library release notes, add the required information to the [section New Libraries] or [section Updated Libraries] of the Boostorg History. Copy the formatting of the examples below, which for reference is Quickbook format.
The examples below come from the boost_1_83_0.qbk file:
+[section New Libraries]
+
+[/ Example:
+* [phrase library..[@/libs/accumulators/ Accumulators]:] Framework for
+ incremental calculation, and collection of statistical accumulators, from Eric Niebler.
+]
+
+* [phrase library..[@/libs/compat/ Compat]:] A repository of C++11 implementations of
+ standard components added in later C++ standards, from Peter Dimov and contributors.
+
+[endsect]
+
+[section Updated Libraries]
+
+[/ Example:
+* [phrase library..[@/libs/interprocess/ Interprocess]:]
+ * Added anonymous shared memory for UNIX systems.
+ * Move semantics for shared objects ([ticket 1932]).
+ * Conform to `std::pointer_traits` requirements ([github_pr interprocess 32])
+ * Fixed `named_condition_any` fails to notify ([github interprocess 62])
+]
+
+* [phrase library..[@/libs/any/ Any]:]
+ * New `boost::anys::unique_any` C++11 class - an alternative to
+ `boost::any` (or to `std::any`) that does not require copy
+ or move construction from the held type.
+ * Ported documentation to Quickbook, Doxygen comments now used in code.
+
+ ....
+
+ [endsect]
+The macros used in Quickbook format for the history sections differ in detail from the macros available for the Release Notes.
+Experience has shown the following layout works well when writing release notes and having your users actually read them.
+The first three (or less) bullet points should briefly explain the most impactful changes to your library. If new functionality has been added, this is the place for it.
+The next bullet points, at most seven, but perhaps five is better, contain concise changes that affect the use of your library. This will include functions that have been improved, functions removed or deprecated, and other pertinent changes say to an API’s parameters, errors, return values and similar.
+If documentation has been updated, comments cleaned up, examples improved or removed, a single bullet point describing this has occurred is enough.
+The rule of thumb here is to keep the number of bullet points to ten or less, as adding more is a good way of ensuring your users will skip or skim your release notes - a classic example of information "hiding in plain sight"!
+The Boost libraries are released publicly three times per year:
+Second week of April
+Second week of August
+Second week of December
+Each release will contain updates to existing libraries, and some releases will contain new libraries. The release is built from the master branch of Boost’s GitHub site: https://github.com/boostorg/boost.
+The release managers set the release timeline, which involves planning deadlines for library submissions, reviews, and incorporation into the repository.
+Once the release timeline is set, library maintainers work to prepare their libraries for the release. This involves updating documentation, fixing bugs, and addressing any compatibility issues. Additionally, library maintainers ensure that their libraries pass the Boost regression tests, which help identify potential problems before the release.
+Boost regression testing is an essential part of the release process, ensuring the quality and compatibility of the libraries. The Boost community maintains a set of regression tests, which are run on a diverse range of platforms and compilers. The tests are performed by volunteers who contribute their computing resources to the project.
+The results of the regression tests are published on the Boost website, providing library maintainers and users with up-to-date information about the library’s compatibility and performance. Library maintainers use this information to identify and fix any issues before the release.
+There is a strict countdown to a public release.
+The master branch is closed to all check ins, except bug fixes and quality checks.
+The master branch is closed to major code changes. There can be no rewrites of code, even to fix issues.
+The master branch is closed to all check ins, except with permission from the release committee.
+The master branch is closed. Beta release candidates are built.
+The Beta release is published to the Boost site. The master branch is opened to small bug fixes and documentation changes. Permission from the release committee is required for larger changes.
+The master branch is closed to all check ins, except high-priority fixes.
+The master branch is closed. Release candidates are built.
+The release candidate is published to the Boost site. The master branch is opened for all check ins.
+If issues are found with a release candidate that are important enough to address quickly (that is, before the next full public release), then a point release will be built when fixes are available and tested. This will not typically result in the master branch being closed to other check ins.
+For details of the Release Process that are pertinent to users, refer to the User Guide Release Process.
+The sources for each library, or possibly a number of related libraries, is contained in a sub-module of the Boost super-project.
+Each sub-module must contain a file which describes the libraries that it contains. This is located at meta/libraries.json.
If the sub-module contains a single library, libraries.json should contain a single object, for example:
{
+ "key": "unordered",
+ "name": "Unordered",
+ "authors": [
+ "Daniel James"
+ ],
+ "maintainers": [
+ "Daniel James <dnljms -at- gmail.com>"
+ ],
+ "description": "Unordered associative containers.",
+ "std": [
+ "tr1"
+ ],
+ "category": [
+ "Containers"
+ ],
+ "cxxstd": "03"
+}
+Or view: unordered/meta/libraries.json.
+If the sub-module contains multiple libraries, libraries.json should contain a list of objects, for example:
[
+ {
+ "key": "algorithm",
+ "name": "Algorithm",
+ "authors": [
+ "Marshall Clow"
+ ],
+ "description": "A collection of useful generic algorithms.",
+ "category": [
+ "Algorithms"
+ ],
+ "maintainers": [
+ "Marshall Clow <marshall -at- idio.com>"
+ ],
+ "cxxstd": "03"
+ },
+ {
+ "key": "algorithm/minmax",
+ "name": "Min-Max",
+ "authors": [
+ "Hervé Brönnimann"
+ ],
+ "description": "Standard library extensions for simultaneous min/max and min/max element computations.",
+ "documentation": "minmax/",
+ "category": [
+ "Algorithms"
+ ],
+ "maintainers": [
+ "Marshall Clow <marshall -at- idio.com>"
+ ],
+ "cxxstd": "03"
+ },
+ {
+ "key": "algorithm/string",
+ "name": "String Algo",
+ "authors": [
+ "Pavol Droba"
+ ],
+ "description": "String algorithms library.",
+ "documentation": "string/",
+ "category": [
+ "Algorithms",
+ "String"
+ ],
+ "maintainers": [
+ "Marshall Clow <marshall -at- idio.com>"
+ ],
+ "cxxstd": "03"
+ }
+]
+Or view: algorithm/meta/libraries.json.
+All meta files should contain key, name, description, authors, maintainers, and category. The other fields are optional.
keyThis is a unique identifier for the library, typically the path to it from the libs directory.
boost-versionThe Boost version where the library was first added, such as: "boost-version": "1.33.0".
statusUsed for libraries with special statuses, currently can have the value deprecated for deprecated libraries, and hidden for libraries which shouldn’t be displayed to the user. Hidden libraries include detail and winapi, both of which are hidden because they are used as components of other libraries, and not intended as stand-alone libraries themselves.
The library coroutine is marked as "deprecated", though this does not mean full deprecation as this library is part of the Boost super-project, and will be tested each time the super-project is tested.
+maintainersOne, or more, strings containing both the names and, usually, email addresses of the maintainers.
+categoryA list of one or more of the Categories that the library belongs to.
+cxxstdThe minimum C++ standard compilation level at which all, or the large majority, of the functionality in the library is usable. The possible values are:
+98 = C++98
+03 = C++03
+11 = C++11
+14 = C++14
+17 = C++17
+20 = C++20
+23 = C++23
+The level only indicates the minimum level, which means that the functionality of the library can be used when compiling at that level or at a higher level. There may be some functionality in the library which will need a higher C++ standard compilation level than is indicated by this value, but the information about that specific functionality will be provided for the end-user within the documentation for that library. If a library does not have this field it indicates that the end-user will have to read the library documentation to understand what C++ standard compilation level is needed to use the library.
+Note that 11 and 14 are commonly set minimum levels.
A library can be in one or more categories. The string is not case-sensitive. In some cases, the string used to describe the category on our website is slightly more descriptive than the string used in the category field.
| Metadata Category | +Website Category | +
|---|---|
|
+Algorithms |
+
|
+Concurrent Programming |
+
|
+Containers |
+
|
+Correctness and testing |
+
|
+Data structures |
+
|
+Domain Specific |
+
|
+Language Features Emulation |
+
|
+Error handling and recovery |
+
|
+Function objects and higher-order programming |
+
|
+Generic Programming |
+
|
+Image processing |
+
|
+Input/Output |
+
|
+Inter-language support |
+
|
+Iterators |
+
|
+Math and numerics |
+
|
+Memory |
+
|
+Template Metaprogramming |
+
|
+Miscellaneous |
+
|
+Parsing |
+
|
+Patterns and Idioms |
+
|
+Preprocessor Metaprogramming |
+
|
+Programming Interfaces |
+
|
+State Machines |
+
|
+String and text processing |
+
|
+System |
+
|
+Broken compiler workarounds |
+
This section describes the requirements and guidelines for the content of a library submitted to Boost.
+Boost developers constitute a wide array of people throughout much of the world. Over the years much work has gone into the quantity and quality of the C++ libraries and tools that make up Boost. There are many ways to become part of the Boost developer community, all starting with getting involved in the development discussion. But if you are looking for an easier place to start than developing a library, consider volunteering as a tester.
+As a first step to developing a library, read the Requirements Overview for Boost.
+To avoid a proposed library being rejected, it must meet these requirements:
+The license must meet the License Requirements. Restricted licenses like the GPL and LGPL are not acceptable.
+The Ownership (copyright) must be clear.
+The library should be useful to a general audience.
+The library must meet the Portability Requirements.
+The library should preferably meet the Organization Requirements. But is only required to meet them after acceptance.
+The library must come reasonably close to meeting the Design Best Practices.
+The library must be well documented, refer to Documentation Guidelines.
+The author must be willing to participate in discussions on the mailing list, and to refine the library accordingly.
+There’s no requirement that an author read the mailing list for a time before making a submission. It has been noted, however, that submissions which begin "I just started to read this mailing list …" seem to fail, often embarrassingly.
+Before proceeding, are you sure you own the library you are thinking of submitting? The book "How to Copyright Software" by MJ Salone, Nolo Press, 1990, says:
+"Doing work on your own time that is very similar to programming you do for your employer on company time can raise nasty legal problems. In this situation, it’s best to get a written release from your employer in advance."
+Place a copyright notice in all the important files you submit. Boost won’t accept libraries without clear copyright information.
+The preferred way to meet the license requirements is to use The Boost Software License. See license information. If for any reason you do not intend to use the Boost Software License, please discuss the issues on the Boost developers mailing list first.
+The license requirements:
+Must be simple to read and understand.
+Must grant permission without fee to copy, use and modify the software for any use (commercial and non-commercial).
+Must require that the license appear on all copies of the software source code.
+Must not require that the license appear with executables or other binary uses of the library.
+Must not require that the source code be available for execution or other binary uses of the library.
+May restrict the use of the name and description of the library to the standard version found on the Boost web site.
+The quality of the Boost libraries is not just about the APIs and code design, but also about presenting a consistent view to users of the libraries as a whole. Upon acceptance libraries should adhere to this directory and file structure.
+Referring to accepted libraries is a great way to educate yourself on the library directory structure. In the examples given below, Boost.Atomic, Boost.Json, and Boost.Serialization are examples of libraries that require building, and Boost.Asio, Boost.Geometry, and Boost.Hana are examples of libraries that are header-only.
+The following directories should appear, along with others specific to the library, as top-level sub-directories for the library.
+| Sub-Directory | +Contents | +Required | +Examples | +
|---|---|---|---|
build |
+Library build files such as a Jamfile, IDE projects, Makefiles, Cmake files, etc. |
+Required if the library has sources to build. |
+
+
|
+
config |
+Files used for build-time configuration checks. This directory may contain source files and build system scripts to be used when building the library, tests or examples to check if the target system satisfies certain conditions. For example, a check may test if the compiler implements a certain feature, or if the target system supports a certain API. |
+Optional. |
+
+
|
+
doc |
+Sources to build with and built documentation for the library. If the library needs to build documentation from non-HTML files this location must be build-able with Boost Build. |
+Required for all libraries. |
+
+
|
+
doc/html |
+Documentation (HTML) files. |
+Required for all libraries with pregenerated documentation. And generated documentation must be generated here. |
+
+
|
+
example |
+Sample program files. |
+Required if library has sample files. Which is highly recommended. |
+
+
|
+
include/boost/<library-name> |
+Header files for the library. |
+Required for all libraries. |
+
+
+ |
+
meta |
+Meta-data about the library. |
+Required for all libraries. |
+
+
|
+
src |
+Source files which must be compiled to build the library. |
+Required if the library has source files to build. |
+
+
|
+
test |
+Regression or other test programs or scripts. This is the only location considered for automated testing. If you have additional locations that need to be part of automated testing it is required that this location refer to the additional test locations. |
+Required for all libraries. |
+
+
|
+
test/cmake_test |
+CMake sub-folders can be named for the type of Continuous Integration tests they contain ( |
+Not required, though most new libraries include these tests. |
+
+
+ |
+
tools |
+Tools used, or offered, by the library. The structure within this is up to the library, but it’s recommended to use similar structure as a regular Boost library or tool. |
+Required for libraries that have run-able tools. |
+
+
|
+
To reference the existing master repo, refer to boost/libs/.
+For a list of libraries that require building, refer to Required Compiled Binaries.
+There are some individual files that are also required, or are optional and recognized by the build system.
+| File | +Contents | +Required | +Examples | +
|---|---|---|---|
build.jam |
+Top level jamfile. |
+Required if the library has sources to build. |
+
+
|
+
CMakeLists.txt |
+Refer to CMake for Boost Developers. |
+Required for all libraries. |
+
+
|
+
index.html |
+Redirection to HTML documentation. Refer to Design Guide/Redirection for a template for this file. |
+Required for all libraries. |
+
+
|
+
meta/libraries.json |
+A JSON file containing information about the library, which is used to generate website and documentation for the Boost Libraries collection. Refer to Library Metadata. |
+Required for all libraries. |
+
+
|
+
meta/explicit-failures-markup.xml |
+XML file describing expected test failures, used to generate the test report. |
+Optional |
+
+
+ |
+
After a library is accepted as part of the Boost Libraries it is required that it integrate properly into the development, testing, documentation, and release processes. This integration increases the eventual quality of all the libraries and is integral to the expected quality of the whole of the Boost C++ Libraries from users. In addition to the organization requirements above the following integration is required:
+The library needs to provide a Boost Build project that the user, and the top level Boost project, can use to build the library if it has sources to build. The Jamfile for the source build needs to minimally declare the project, the library targets, and register the targets for installation. For example:
+project boost/my_lib ;
+
+lib boost_my_lib : a.cpp ;
+
+boost-install boost_my_lib ;
+The library needs to provide a Boost Build project that the user, and the root Boost test script, can use to build and run the tests for the library. The testing build project must reside in the project-root/test directory and must be build-able from this or another directory, for example, b2 libs/library/test from the Boost root must work.
An example test/Jamfile is given below:
+import testing ;
+
+run default_constructor.cpp ;
+run copy_constructor.cpp ;
+compile nested_value_type.cpp ;
+compile-fail invalid_conversion_1.cpp ;
+This is the only location considered for testing by the top level testing script. If you want to test additional locations you must declare such that they are built as dependencies or by using build-project.
+If the library requires a level of C++ conformance that precludes certain compilers or configurations from working, it’s recommended to declare these requirements in the test Jamfile. This ensures that unnecessary tests aren’t run, to conserve test resources, as given in the example below:
+import testing ;
+import ../../config/checks/config : requires ;
+
+project : requirements [ requires cxx11_variadic_templates cxx11_template_aliases ] ;
+
+run cpp11_test.cpp ;
+For more information, see the documentation for Boost.Config.
+The library needs to provide a Boost Build project for building the documentation for the library. The project-root/doc project is the only location referred to by the top level documentation build scripts and the release building scripts. The documentation build project must have the following two features:
+Define a boostrelease target. This target should likely be an alias that looks roughly like:
alias boostrelease : my_boostbook_target
+ : : : <implicit-dependency>my_boostbook_target ;
+But if your project doesn’t integrate into the global documentation book you can use an empty alias like:
+alias boostrelease ;
+The project must default to building standalone documentation if it has any. The release scripts build this default so as to guarantee all projects have up to date documentation.
+Integrated documentation, using the boostdoc target (instead of the boostrelease target) is now considered legacy, and should be avoided for new library documentation.
A library’s interface must be portable and not restricted to a particular compiler or operating system.
+A library’s implementation must if possible be portable and not restricted to a particular compiler or operating system. If a portable implementation is not possible, non-portable constructions are acceptable if reasonably easy to port to other environments, and implementations are provided for at least two popular operating systems (such as UNIX and Windows).
+A library runs on at least two C++ compilers implementing the latest ISO standard.
+There is no requirement that a library run on C++ compilers which do not conform to the ISO standard.
+There is no requirement that a library run on any particular C++ compiler. Boost contributors often try to ensure their libraries work with popular compilers. The boost/config.hpp configuration header is the preferred mechanism for working around compiler deficiencies.
Since there is no absolute way to prove portability, many boost submissions demonstrate practical portability by compiling and executing correctly with two different C++ compilers, often under different operating systems. Otherwise reviewers may disbelieve that porting is in fact practical.
+This document outlines the style guidelines for the site-docs. Adhering to these guidelines will help ensure consistency and cohesion across all pages of the website.
+Boost Mustard
+only use for one or two key top level headings
+never on inactive UI controls
+OK to use for hover state
+never use large areas of this color in illustrations
+never used as an endpoint of a color blend
+The word "Boost" should occur no more than once per page, not counting logo, but logo should not be visible in two places.
+user generated content excluded
+The word "C++" should occur no more than once on a page
+user-generated content excluded
+We want to minimize the number of times we use the word "Boost" on a page. Since the logo in the header has the word "Boost" in it, sometimes we will have no choice but to use the word at least twice on a page. But we should try to minimize the repetitions.
+Per-library links (e.g. GitHub or docs) should use a schemeless URL:
+
+User avatars should never be displayed squared, always with rounded corners
+Boost libraries reside in subdirectories under the libs directory. For example, the contents of the Boost.Filesystem library are in libs/filesystem. This includes the build scripts (in libs/filesystem/build), the source files (in libs/filesystem/src), the tests (in libs/filesystem/test), the documentation (in libs/filesystem/doc), and so on.
In the past, when Boost used SVN as its version control system, the header files were an exception. The header files of all libraries resided in the boost subdirectory, and it wasn’t possible to accurately determine which header belonged to which library.
When Boost moved to Git for version control, header files were moved to their corresponding libraries, into an include subdirectory. The header files of Boost.Filesystem are now in libs/filesystem/include.
For compatibility, boost is now a "virtual" directory, containing links to the headers. It’s maintained automatically by B2. The command b2 creates or recreates the contents of the boost directory.
This structure allows us to determine that, when faced with an #include <boost/filesystem.hpp> directive, that this header is part of Boost.Filesystem, and that therefore, the current library being scanned depends on Boost.Filesystem.
Unfortunately, Boost releases do not have this structure. For backward compatibility, they have an old-style boost directory containing all header files, whereas the per-library include subdirectories are missing.
+To work within the Super-project, you will have to clone the Boost Git repository. To do that, execute the following command:
+git clone https://github.com/boostorg/boost.git boost
+This will download the Super-project (the master project, without any libraries) and place it into the subdirectory boost of the current directory. To override the directory name, pass it as a second argument instead of boost:
git clone https://github.com/boostorg/boost.git mydir
+You can now cd into the newly created directory with cd mydir. This directory is called the "Boost root". All of the commands below assume that it is the current directory.
The above git clone commands download the default branch of the Boost Git repository, which is master. This is the current stable version of Boost. To verify this, issue the command git status from the Boost root. This will output
# On branch master
+nothing to commit, working directory clean
+To download a specific release instead, such as 1.58.0, issue the following command after git clone, from the Boost root:
git checkout boost-1.58.0
+git status will now say:
# HEAD detached at boost-1.58.0
+nothing to commit, working directory clean
+Then, download all the libraries:
+git submodule update --init
+This step will take a while.
+If all goes well, you will now have the complete contents of Boost’s latest master branch (if you didn’t checkout a specific release by name) or the corresponding Boost release (if you did).
You can switch between the master branch, the develop branch, and a release, by issuing the following commands:
For the master branch:
git checkout master
+git pull
+git submodule update --init
+git pull updates your local copy of the master branch from the server, in case it has changed since your initial checkout.
For the develop branch:
git checkout develop
+git pull
+git submodule update --init
+For the boost-1.58.0 release:
+git checkout boost-1.58.0
+git submodule update --init
+For the boost-1.57.0 release:
+git checkout boost-1.57.0
+git submodule update --init
+While the initial git submodule update is quite slow, as it needs to download all the libraries, the subsequent invocations are a lot faster.
Also note that if a new Boost library (libs/convert, for example) is present in, say, master, and you have it checked out, when you later switch to boost-1.58.0, where this library doesn’t exist, Git will not delete libs/convert. In this case, git status will output
# HEAD detached at boost-1.58.0
+# Untracked files:
+# (use "git add <file>..." to include in what will be committed)
+#
+# libs/convert/
+nothing added to commit but untracked files present (use "git add" to track)
+and you will have to remove libs/convert by hand.
Now, you have successfully installed the Super-project.
+This page briefly sketches the mechanics of maintaining a Boost library using Git. The intended audience is developers getting started with the maintenance of an existing Super-project library.
+This page is intended to get you started only; it does not provide in-depth coverage. See links below for that.
+Illustrations of how to perform various actions are given using the Git command line client.
+Library maintenance occurs in the context of how Boost repositories are organized. Study the Super-Project Layout before continuing, since a Boost developer needs to be familiar with how Boost organizes its repositories.
+The examples given on this page follow Boost recommended workflow practices, but keep workflow discussion simple for this introduction. To better understand workflow recommendations and rationale before continuing, read Super-Project Library Workflow.
+A recent release of the Git command line client installed.
+A GitHub account.
+A C++ compiler and development environment installed and working smoothly.
+The Super-project installed, as described in Getting Started with the Super-Project.
+b2 in your path. That allows the command line examples given here to work as shown on both Windows and POSIX-like systems.
The preferred environment for library maintenance is to check out the library’s develop branch, or some other development branch, while other Boost libraries are as defined by the Super-project master branch. This causes local tests of your library to run against master for other Boost libraries at the point in time referenced by the Super-project.
This is a more realistic test environment than testing against the possibly unstable develop branch of other Boost libraries, or against the master branch of other libraries at a different point in time than that referenced by the Super-project. Robert Ramey has advocated this approach to testing for years, and the Super-project makes this approach relatively easy and fast.
The following examples assume you have installed the Super-project to a folder called boost-root.
cd boost-root
+git checkout master
+git pull
+git submodule update
+The git submodule update will fail if it would result in uncommitted changes being overwritten.
The git submodule update may switch submodules back to the detached state, depending on the working copy’s exact situation.
See Effects of git submodule update for details.
To get more information about a submodule:
+cd boost-root
+git submodule summary
+If for some reason you wanted to test against the current head of master for all libraries, disregarding the Super-project state, the git submodule update command would be changed to:
git submodule foreach --recursive "git checkout master; git pull"
+If modules are added, these should be added to your project too, which is not done by the commands above. Run:
+git submodule update --init
+Note that if you use the --init option, the already-initialized submodules will not be updated. You might have to run the command without --init afterwards.
You can see what branch mylib is currently on like this:
cd boost-root/libs/mylib
+git branch
+Then if you need to change the branch to a development branch such as develop, do this:
cd boost-root/libs/mylib
+git checkout develop
+You only have to do that once; your local repo working copy will sit on the branch until it is explicitly changed by a command you give.
+Of course, you don’t have to change the directory before every command, and from here on this tutorial will assume the directory has not been changed since the prior example.
+If there is any possibility the branch head content in the public upstream repo has changed, you also will want to update content:
+cd boost-root/libs/mylib
+git pull
+From this point on, it is assumed you have already done a cd boost-root/libs/mylib.
Unless you are 100% sure of the state of your library’s regression tests, it is a good idea to run the regression tests before making any changes to the library:
+pushd test
+b2
+popd
+Before making changes, it is a good idea to check status. Here is what that looks like on Windows; the message you get may vary somewhat:
+>git status
+# On branch develop
+nothing to commit, working directory clean
+For simple bugs, particularly in projects with a single maintainer, it is common practice to fix bugs directly in the develop branch. Creating a test case with your favorite editor, testing the test case, fixing the bug, testing the fix, and then iterating if necessary is no different than with any programming environment.
Once the fix is complete, you then commit the fix locally and push from your local repo up to your public boostorg repo on GitHub. These same commands would be used for any Git project, so hopefully you are already somewhat familiar with them:
cd boost-root/libs/mylib
+git commit -a -m "my bug fix"
+git push
+There are some significant disadvantages to this simple approach:
+The fix is now made to develop but you must remember to merge it to a release branch or directly to master. It is very easy to forget to do that merge, particularly if this is a mature library you are not working with very often.
Users who need the bug fix right away are forced to jump through hoops to retrieve the fix from develop.
+Putting out a point release solves both of those problems. Read on…
Fixing a bug directly on the develop branch is fine, if that’s the library’s policy, but if the bug is messy, multiple maintainers are involved, interruptions are expected, or other complexities are present, then it is better practice to work on the bug in a separate branch. And doing that on a hot-fix branch solves the problems mentioned at the end of the prior section.
The operational distinction between a bug-fix branch and a hot-fix branch is that a bug-fix branch is branched from develop and then at completion merged back to develop, while a hot-fix branch is branched from master and then at completion is merged to both master and develop. With either approach, the branch is deleted after it has been merged.
git checkout master
+git checkout -b hotfix/complex-boo-boo
+This creates the branch hotfix/complex-boo-boo, and switches to it. Incidentally, hotfix/ is part of the name, not a directory specifier. The new branch is based on branch master because the working copy was on branch master at the time of the branch.
Since the bug is complex, it may take some time to fix and may go through several cycles of fixes, tests, and commits.
+Once the bug is fixed and a final commit is done, then it is time to merge the hotfix/complex-boo-boo branch into master and develop:
git checkout master
+git merge hotfix/complex-boo-boo
+git push
+git checkout develop
+git merge hotfix/complex-boo-boo
+git push
+git branch -d hotfix/complex-boo-boo
+Developers are encouraged to create a (possibly private) branch to work on new features, even simple ones, since development of new features on the develop branch might leave it unstable for longer than expected. Using the Git Flow convention, the branch will be named feature/add-checksum-option.
git checkout develop
+git checkout -b feature/add-checksum-option
+When you create the branch, or perhaps later, you may decide the branch should be public (i.e. be present in the library’s public boostorg repo) so that you can share the branch with others or just to back it up. If so, set that up by running:
git push --set-upstream origin feature/add-checksum-option
+Whether or not --set-upstream origin feature/add-checksum-option is actually needed depends on the branch.autosetupmerge configuration variable that isn’t discussed here. If you don’t supply --set-upstream origin feature/add-checksum-option on your first push and it turns out to be needed, you will get an error message explaining that.
+The usual cycle of coding, testing, commits, and pushes (if public) then begins. If other work needs to be done, a stash or commit may be done to save work-in-progress, and the working copy switched to another branch for a while. If significant fixes or other enhancements have been made to develop over time, it may be useful to merge develop into the feature branch so that the eventual merge back to develop has fewer conflicts. Here is how to merge from develop to feature/add-checksum-option:
git checkout feature/add-checksum-option
+git merge develop
+Small, simple libraries and simple releases just merge the development branch, such as develop, into master, and test like this:
git checkout master
+git merge --no-ff develop
+pushd test
+b2
+popd
+If there are any test failures, correct the problem, retest, and commit the fixes before proceeding with the release.
+If there are no test failures, tag for release and declare victory:
+git push # push merge results
+git tag -a -m "tag for release" mylib-2014-06-02
+git push origin mylib-2014-06-02 # push specific tag to avoid pushing all local tags
+See Super-Project Library Workflow for release tag naming conventions.
+Large, complex libraries, particularly those with multiple developers working in parallel, need to use a release procedure that scales up better than the lightweight procedure. The Git Flow approach is recommended. Find out more at Super-Project Library Workflow and be sure to study the examples given in Vincent Driessen’s original blog post.
+Effects of git submodule update: the following table, based on actual tests run using git version 1.8.4.msysgit.0, shows the effects on a submodule of:
cd boost-root
+git checkout master
+git pull
+git submodule update
+| Submodule Branch | +Submodule Contents | +Submodule relative to Super-project | +Effects on Submodule | +
|---|---|---|---|
Detached |
+Unmodified |
+Up-to-date |
+None |
+
Detached |
+Unmodified |
+Behind |
+
|
+
Detached |
+Uncommitted change |
+Up-to-date |
+error: Your local changes to the following files would be overwritten by checkout: … Please, commit your changes or stash them before you can switch branches. Aborting Unable to checkout (SHA…) in submodule path '…' |
+
Detached |
+Uncommitted change |
+Behind |
+error: Your local changes to the following files would be overwritten by checkout: … Please, commit your changes or stash them before you can switch branches. Aborting Unable to checkout (SHA …) in submodule path '…' |
+
Detached |
+Committed change |
+Up-to-date |
+
|
+
Detached |
+Committed change |
+Behind |
+
|
+
|
+Unmodified |
+Up-to-date |
+
|
+
|
+Unmodified |
+Behind |
+
|
+
|
+Uncommitted change |
+Up-to-date |
+error: Your local changes to the following files would be overwritten by checkout: … Please, commit your changes or stash them before you can switch branches. Aborting Unable to checkout (SHA…) in submodule path '…' |
+
|
+Uncommitted change |
+Behind |
+error: Your local changes to the following files would be overwritten by checkout: … Please, commit your changes or stash them before you can switch branches. Aborting Unable to checkout (SHA…) in submodule path '…' |
+
|
+Committed change |
+Up-to-date |
+
|
+
|
+Committed change |
+Behind |
+
|
+
|
+Unmodified |
+Up-to-date |
+
|
+
|
+Unmodified |
+Behind |
+
|
+
|
+Uncommitted change |
+Up-to-date |
+error: Your local changes to the following files would be overwritten by checkout: … Please, commit your changes or stash them before you can switch branches. Aborting Unable to checkout (SHA…) in submodule path '…' |
+
|
+Uncommitted change |
+Behind |
+error: Your local changes to the following files would be overwritten by checkout: … Please, commit your changes or stash them before you can switch branches. Aborting Unable to checkout (SHA…) in submodule path '…' |
+
|
+Committed change |
+Up-to-date |
+
|
+
|
+Committed change |
+Behind |
+
|
+
Be aware that git checkout --detach and git pull are used as shorthand to describe the apparent effects. The actual git implementation may differ.
Beman Dawes created and maintained this page. The content has been revised many times based on comments and list postings from Andrey Semashev, John Maddock, Daniel James, Michael Cox, Pete Dimov, Edward Diener, Bjørn Roald, Klaim - Joël Lamotte, Peter A. Bigot, and others.
+Workflow is the term used to describe the steps a Boost library developer follows to create, maintain, and release a library. The workflow presented here is designed for Boost’s individual libraries. The workflow for the Super-project may differ.
+The workflow model Boost recommends is called Git Flow. It was introduced as a simple blog posting by Vincent Driessen on January 5th, 2010, that went viral and has become a widely used software engineering practice.
+This workflow has arguably become so successful because it scales well from very small to very large projects, and that’s one of the reasons it is recommended (but not required) for Boost libraries.
+An unusually simple, single developer library would have only the permanent develop and master branches that are required for all Boost libraries.
A more typical library would occasionally add temporary feature branches, either private or public. Feature branch names follow the feature/x model, where x names the specific feature being developed.
A larger library, particularly if it has multiple developers, would always have some active public feature branches, and at least occasionally employ release staging branches and hotfix branches. Individual developers would often use private branches.
+Many Git Flow model diagrams are available online - print one out and hang it on your wall!
+For those who use Git from the command line, git-flow command line tools are available to automate common operations. See git-flow wiki for more information, including installation instructions for various platforms.
+All Boost libraries are required to have two branches:
+master is always the library’s latest release. It should always be stable.
develop is always the main development branch. Whether it is always stable or not is up to the individual library.
These branches are required so that Boost release management and other scripts know the branch names.
+While Boost libraries are not required to use the following branches, these naming conventions are recommended if the branches are present.
+feature/descriptive-name for feature branches. For example, feature/add-roman-numeral-math.
bugfix/descriptive-name for problem fix branches of develop that will be merged back to develop after the fix. For example, bugfix/ticket-1234-error-msg-not-clear.
hotfix/descriptive-name for problem fix branches of master that will be merged back to master and also to develop after the fix. For example, hotfix/ticket-5678-crash-if-result-negative.
release/n.n.n for release staging branches. For example, release/1.56.2.
Individual Boost libraries are free to choose their own release numbers, and these library release numbers are normally unrelated to the release numbers for the Super-project. The recommended release naming convention is the traditional three unsigned integers separated by periods (for example: 1.2.3) where:
+The first integer is the major version number, with 0 being used for initial development and 1 for the first production-usable version. A change in the major version number is recommended when there are breaking changes.
+The middle integer is the release number, reset to 0 with each version update and otherwise increasing monotonically.
+The last integer is the patch level, reset to 0 with each revision and otherwise increasing monotonically. A patch level greater than 1 indicates a so-called point release, normally containing bug fixes but not new features.
+A release tag is usually the library name, a hyphen, the release number, and then possibly "-beta#" or "-rc#" if applicable. Thus the second release candidate for Boost.Timer release 1.2.3 would be "timer-1.2.3-rc2".
+Peter A. Bigot suggested library name prefixes for tags to avoid tag namespace pollution. Without the prefix, local tags could be overwritten.
+Libraries choose their own release numbers. A simple library that does not require a complex release numbering convention might just use the date, such as "system-2014-06-02".
+Git Flow scales well from very small to very large projects. The same overall workflow model serves the whole spectrum of Boost libraries, even though the details differ. +Git Flow has become widely known and widely used. Boost doesn’t have to pioneer a new workflow.
+The usual culture with Git is to delete feature branches as soon as they are merged to some other branch, a practice that Git Flow also follows. This approach is also recommended for Boost developers. After all, the merged-to branch keeps the commit history alive and there’s no longer any need to keep the old label around. If you delete a branch without merging it, of course, any content and commit history exclusive to that branch is lost.
+This section covers the organization of the Boost C++ Libraries into separate, independently developable modules, each residing in its own repository, to facilitate development, integration, and selective inclusion of individual libraries.
+This section provides an overview of how the Super-project is organized.
+The Boost Super-project consists of separate projects for each individual library. In terms of Git, the Super-project treats the individual libraries as submodules.
+All public repositories are hosted at GitHub boostorg.
+Releases of individual libraries occur asynchronously from releases of the Super-project.
+The Super-project has its own public repository within boostorg. It treats each individual library as a submodule, i.e. a link to a particular release in the library’s public GitHub repository. The Super-project is maintained by the Boost release managers, and most library developers do not have write access.
Each individual library has its own public repository within boostorg. For example, Boost.Config has a public repository here. The maintainer of a library decides who has write access to the public repository. Release managers and their helpers also have write access for administrative purposes.
As with any Git project, a library’s developers do day-to-day work using private repositories on their local machines. When they push changes from these local private repositories up to the library’s public repository is up to them, as is when the library issues releases. As usual with Git, the local repositories may have private branches that are never pushed to the public repository.
+Libraries are required to follow certain naming conventions for branches and directories, so that both humans and automatic test and management tools know where to find commonly referenced components. But beyond those requirements, each library is free to use whatever branch and directory strategies they wish.
+Boost requires all libraries have at least the two branches master and develop.
Releases for both the Super-project and individual libraries are always on branch master. master in the library’s boostorg public repo should be stable at all times, so should only be pushed to the library’s public repository when it is stable and ready to ship.
Branch develop is the main development branch. Whether or not develop is stable in the library’s public boostorg repository is up to the library maintainer.
Additional branches, if any, are up to the library maintainer. See Git Flow for Workflow.
+Your library’s directory structure conforms to Boost directory structure conventions, so both users and automatic processes can find header files, test files, build configurations, and the like. Beyond the conventions, your library’s directory structure is up to you.
+The Super-project header files are placed in an include/boost directory hierarchy within the library’s top-level directory. Here is what a very simple header-only library named simple would look like:
simple
+ include
+ boost
+ simple
+ twice.hpp
+ test
+ twice_test.cpp
+ Jamfile.v2
+ index.html
+The sub-directory hierarchy include/boost/… (where … represents the library’s directories and header files) ensures that the library is entirely self-contained within the top-level directory.
A real library would also have additional sub-directories such as doc, example, and src, as described in the directory conventions, but they are left out of simple for the sake of brevity.
Boost recommends, but does not require, the approach to library workflow that has come to be known as Git Flow. For more about how this applies to Boost libraries, see Super-Project Library Workflow.
+Refer also to Best Practices: GitHub Strategies.
+The Boost Test Matrix is an automated testing system that runs tests on Boost libraries across a wide range of platforms, compiler versions, and configurations. Its primary purpose is to ensure that the libraries work correctly under various conditions and to identify any compatibility issues.
+The Test Matrix includes tests run on different operating systems (Windows, Linux, macOS) and with various compilers (such as GCC, Clang, MSVC). This diversity helps in catching issues that might only appear in specific environments.
+For information on running regression tests locally, refer to Local Regression Tests.
+The results of library tests are published on the +Boost Regression Testing Dashboard:
+| Version | +Results | +Issues | +
|---|---|---|
Develop branch |
++ | + |
Master branch |
++ | + |
This dashboard is publicly accessible and provides detailed information about the test results for most libraries.
+If your contribution causes test failures, it’s expected that you take responsibility for fixing them. This might involve making code adjustments or collaborating with others if the issue is complex, or affects multiple parts of Boost.
+On the dashboard, you will find a matrix of test results. The columns represent different test runners (which correspond to different platforms, compiler versions, etc.), and the rows represent individual libraries.
+When you click on the Dashboard URL, you will see the test Summary.
+
+The table is large, and you will need to scroll up and down and left and right to view all relevant results. Note that some entries are underlined (even OK entries) which indicates they are links to further information.
+
+The dashboard uses different colors or labels to indicate the status of the tests (such as 'pass', 'fail', or 'unresolved'). Understanding these labels is key to interpreting the test results correctly. Note that some tests are expected to fail.
+
+By clicking on specific cells in the matrix, you can view detailed results for a particular library and test runner. This includes information about which tests passed, which failed, and often detailed logs of the test runs.
+
+For example, click on the Boost.Asio entry in the left-hand column, and you will bring up the test matrix for the Asio/Master library.
+
+Go deeper by clicking on the entries in the left-hand column, which are now individual tests, to bring up the test code. For example, click on any_completion_handler to view its source.
+
+Selecting the column headers will bring up the configuration information that is being specified before each test in the column is run.
+
+The Test Matrix uses B2 as its build system. Understanding how to write and modify Jamfiles (Boost.Build scripts) can be helpful for integrating your tests into the matrix.
+If you encounter issues with the testing infrastructure or have suggestions for improvement, engage with the Boost community (refer to Getting Involved). The testing process is continually evolving, and contributions to the testing infrastructure itself are valuable.
+The Boost project uses Continuous Integration (CI) to ensure the quality and integrity of its code. CI is the practice of merging all developers' working copies into a shared mainline several times a day. The main aim is to prevent integration issues, which can be identified and addressed as early as possible.
+Boost uses several CI services for testing on different platforms and compilers. Many libraries use two or three of the systems described here, as does the Super-project itself.
+It is a requirement for a new library submission to Boost to include an appropriate CI system. Refer to the examples for each CI system to better understand what is involved.
+Currently, Boost does not use Continuous Deployment (CD) - the release schedule is fixed and libraries are not continuously updated. This is to ensure complete testing and code reliability before any release for public use.
+A recommended process to start a new Boost library is to clone the contents of the boost-ci repo. This repo contains the basic CI framework for a new library. Clone it, then adjust and edit it appropriately.
+This repository contains scripts that enable CI with:
+For step-by-step processes, and tables of supported compilers, refer to the boost-ci README.md.
+There are a few areas of CI testing that are fairly unique to Boost, and can present difficulties to newcomers:
+You test with the Super-project, cloning it and placing your library inside a submodule. This is unique to Boost, you might expect to consume Boost as a dependency, but this is not how it works.
+You test with the develop or master branches of the Super-project Repository, not with the latest stable release.
+The CMake workflows are not trivial, nor are they the usual CMake steps.
+Refer to the sections:
+When tackling CI issues, it is expected that you will ask questions on the Boost developers mailing list.
+Boost has been incorporating GitHub Actions into its testing workflows. This is a CI/CD system platform provided directly by GitHub. It can run tests on a variety of platforms and configurations. Here’s a basic outline of how GitHub Actions works for Boost:
+GitHub Actions uses YAML files stored in a directory called .github/workflows/ at the root of the repository to define the build environment and steps. For instance, a workflow file might specify which operating systems and compilers to use, any dependencies to install, and the commands to run for building and testing the code.
When changes are pushed to the repository, or at scheduled intervals, GitHub Actions automatically initiates the actions defined in the workflow file. This might include building the project and running the test suite.
+After the workflow runs, GitHub Actions reports the result. If any step in the workflow fails, the failure is reported, which helps developers to quickly identify and address issues. The status of each workflow run is displayed on the GitHub interface, allowing anyone to quickly check the status of the project.
+Boost also uses GitHub Actions support for matrix builds (allowing Boost to run the same build steps on multiple combinations of operating systems, compilers, etc.), caching of dependencies to speed up builds, and the ability to create custom actions.
+As there are a lot of libraries under boostorg, the quota of GitHub Actions can be reached, and they can take some time to complete.
Refer to:
+URL library ci.yml - contains an extensive test matrix
+Drone is an open-source CI system built on container technology. Each step in a Drone build usually runs in its own Docker container, allowing it to use any language, tool, or service that can run inside a Docker container. This offers excellent environment consistency and isolation between steps. You can run your Drone pipelines locally on your machine for testing purposes before pushing changes to your repository.
+Drone can use a simple YAML configuration file, .drone.yml, placed at the root of your git repository. However, as pipelines grow in complexity, managing them with YAML can become challenging. This is where .drone.jsonnet and .drone.star files come in, which are associated with the Jsonnet and Starlark scripting languages respectively. They both serve the purpose of creating more dynamic, reusable, and maintainable pipeline configurations.
Jsonnet is a data templating language for app configuration. It is a superset of JSON and allows for custom scripting to dynamically produce JSON (and by extension, YAML). With .drone.jsonnet, you can create advanced pipeline configurations that aren’t feasible with static YAML files.
Starlark is a Python-inspired language that was created by Google and is used for configuring Bazel build systems. Similar to Jsonnet, it allows you to create more complex and maintainable pipeline configurations.
+Both Jsonnet and Starlark allow you to use logic like conditionals and loops in your configuration files, making them a powerful tool for complex CI/CD pipelines. If you have solid Python experience, for example, Starlark’s Python-like syntax might be a better choice. If you’re working in a JSON-heavy environment, Jsonnet might be more suitable.
+Drone uses a file named .drone.yml, .drone.jsonnet or .drone.star, and a .drone folder, at the root of the repository to define the build pipeline, including the environment, build steps, and notification settings. The environment is typically a Docker container, allowing for a high degree of flexibility and customization.
When changes are pushed to the repository, Drone automatically runs the build pipeline defined in the .drone file. This involves building the software and running a suite of tests.
After the pipeline finishes, Drone reports the results. If any step fails, developers can be notified immediately, helping to catch and fix issues early. The status of each pipeline run can also be seen on the Drone dashboard and optionally on the GitHub interface.
+Drone also includes support for matrix builds, secrets management (for handling sensitive data), and plugins (for extending functionality).
+Drone will not be available to a new library developer until the project is whitelisted. This whitelisting is not required for GitHub Actions and for the other CI systems listed in this section. Refer to Drone-ci for specific details.
+Travis CI is used for testing on Linux and macOS environments. It is a hosted, distributed continuous integration service used to build and test software projects hosted at GitHub. Here’s the overall process:
+Travis CI uses a file named .travis.yml in the root of the repository to define the build environment and the build steps. This file lists the operating systems and compilers to use, any dependencies to install, and the commands to run for building and testing the code.
Whenever changes are pushed to the repository on GitHub, Travis CI automatically initiates a build and runs the tests according to the instructions in .travis.yml. Boost libraries usually have extensive test suites, and Travis CI helps ensure that changes do not break existing functionality.
After each build, Travis CI reports the results. If the build or any tests fail, it can notify the developers so that they can fix the issue. On GitHub, the status of the latest build is shown next to each commit, so anyone can quickly see whether the current version of the code is passing all tests.
+Boost also uses Travis CI’s features for more complex workflows, using the matrix feature to test code with multiple versions of compilers or dependencies, and uses stages to structure their build pipeline into phases like build, test, and deploy.
+Although a fair number of libraries use Travis CI, it is not recommended for new libraries, due to some instances of stalling test builds.
+Appveyor is used for testing on Windows. It is a continuous integration service which can be configured to build projects for various systems, including MSVC, MinGW, and Cygwin. The overall process is:
+AppVeyor uses a file named appveyor.yml in the root of the repository to define the build environment and the steps for building and testing. This file describes which Windows images to use, any dependencies that need to be installed, and the commands to run for building and testing the code.
When changes are pushed to the GitHub repository, AppVeyor automatically initiates a build and runs the tests according to the instructions in appveyor.yml. The goal of this is to catch and fix any failures or issues that occur in the Windows environment.
After each build, AppVeyor reports the result. If the build or any tests fail, it notifies the developers, allowing them to address the issues. The status of the latest build can also be seen on GitHub, providing an at-a-glance view of the code’s health.
+AppVeyor also supports parallel testing, a build cache to speed up builds, and the ability to deploy built artifacts.
+CircleCI is a CI/CD platform that supports a wide range of languages, tools, and services, making it flexible for different testing requirements. It is less commonly used than GitHub Actions or AppVeyor, but is used by the Super-project Repository and a few libraries.
+CircleCI uses a file named config.yml stored in a directory called .circleci at the root of the repository. This file defines the build environment and steps, such as which Docker images to use, dependencies to install, and the commands for building and testing.
Upon changes being pushed to the repository or on a schedule, CircleCI automatically executes the instructions in the config.yml file. This usually includes building the project and running the test suite.
After the workflow completes, CircleCI reports the results. If any part of the workflow fails, developers are notified, which allows them to address the issues swiftly. The status of the workflow run is visible on the GitHub interface, providing at-a-glance insights into the project’s health.
+CircleCI also supports parallel testing, caching of dependencies, and matrix builds.
+Azure Pipelines is a cloud service provided by Microsoft to automatically build, test, and deploy applications. Here’s how it generally works:
+Azure Pipelines uses a file named azure-pipelines.yml at the root of the repository to define the build environment and steps. This file specifies the operating systems and compilers to use, any dependencies to install, and the commands to run for building and testing the code.
When changes are pushed to the repository, Azure Pipelines automatically triggers a build and runs the tests according to the instructions in the azure-pipelines.yml file. This helps ensure that changes do not break existing functionality.
After each build, Azure Pipelines reports the results. If the build or any tests fail, it notifies the developers, allowing them to address the issues. The status of the latest build can also be seen on GitHub, providing an at-a-glance view of the code’s health.
+Azure Pipelines provides several additional features, such as support for parallel testing, a build cache to speed up builds, and the ability to deploy built artifacts.
+Code coverage is a measure used to describe the degree to which the source code of a program is executed when a particular test suite runs. A program with high code coverage, measured as a percentage, has had more of its source code executed during testing, which generally means it has a lower chance of containing undetected bugs compared to a program with low code coverage.
+Code coverage analysis can uncover areas of a library not covered by existing tests, so developers can write new tests to cover these blind spots. It can also highlight areas of over-testing, where the same code is tested redundantly, which could lead to slower test times without providing extra benefit.
+There are several types of code coverage, including:
+Function Coverage measures if each function or method in the codebase has been called.
+Statement Coverage measures if each statement or instruction in the codebase has been executed.
+Branch Coverage measures if each possible branch from each decision point has been executed. For example, both true and false branches from an if statement.
Condition Coverage measures if each boolean sub-expression has been evaluated to both true and false. For example, given if (A==B || C==D), there are two boolean sub-expressions to evaluate.
Path Coverage measures if all possible paths (sequence of statements, branches) have been covered. This is generally considered the most comprehensive, but also the most challenging to achieve, especially in complex programs.
+In a CI pipeline, code coverage is typically measured (by Codecov.io or Coverity Scan) after each change to the codebase. Coverage reports can be generated and reviewed to spot areas of the code that are not well-tested.
+Codecov.io is a tool that provides insights about code coverage in a software project. Code coverage is a measure of how much of your code is actually executed when your test suite runs. By highlighting parts of your code that aren’t tested, code coverage tools like Codecov help you write better tests and thus improve the quality of your software.
+Here’s an overview of how Codecov works in the context of a CI pipeline:
+Codecov integrates with GitHub. When you push code to your repository or create a pull request, it triggers your CI pipeline. Codecov uses a .codecov.yml (or codecov.yml) file to manage its settings. It’s placed at the root of your repository.
You can set minimum coverage thresholds that must be met, and configure how Codecov should behave if the thresholds aren’t met. For example, you might want Codecov to fail the status checks if the coverage drops by a certain percentage.
+You can specify files or directories that should be ignored by Codecov. And you can customize the comments that Codecov makes on your pull requests. For example, you can change the layout of the comment, or disable comments entirely.
+Codecov flags allow you to segregate coverage reports for different parts of your project or for different types of tests. Flags can be useful for projects that have multiple test suites or modules. Carryforward Flags help to handle reports for parts of the project that are not included in every CI run.
In your CI pipeline, after your tests run, you’ll generate a coverage report. The report is in XML or JSON format.
+The generated coverage report is then uploaded to Codecov. This is usually done by a command-line tool provided by Codecov, which you’ll add as a step in your CI pipeline. The tool takes care of finding the report, compressing it, and sending it to Codecov’s servers.
+Codecov processes the uploaded report and provides detailed coverage information on its dashboard. It shows overall project coverage, coverage changes over time, coverage for individual files, and more. Codecov can also comment on pull requests, showing how the changes would affect overall coverage.
+Codecov also provides a browser extension that overlays coverage data directly on GitHub, so you can see coverage information as you browse your code.
+Coverity Scan is a static analysis tool that detects defects and vulnerabilities in your source code. It is provided as a free service for open source projects, but there’s also a commercial version for private projects. Here’s a general workflow of how you can use Coverity Scan:
+First, you need to register your project with Coverity Scan. This involves providing some basic information about your project and agreeing to their terms of service.
+The next step is to build your code and upload it to the Coverity Scan servers. This is typically done in your development environment, and there are a few steps involved:
+Install the Coverity Scan Tool. This tool is used to "build" your code and analyze it for defects.
+Instead of building with your usual build tool (like CMake or Maven), you build with the Coverity tool. This produces a file that contains all the information Coverity needs to analyze your code.
+You then upload this file to the Coverity servers. You can automate this step as part of your CI pipeline.
+Once your code is uploaded, Coverity analyzes it for defects and vulnerabilities. This process can take some time, depending on the size of your codebase.
+Once the analysis is complete, you can review the results on the Coverity Scan website. Defects are categorized by type and severity, and you can drill down to see the exact lines of code that are affected.
+Based on the results, you can then fix the defects in your code. After making changes, you’ll typically run the Coverity Scan process again to verify the fixes and find any new defects.
+Coverity Scan is a powerful tool that can help improve the quality of your code. It’s particularly good at finding complex defects that are hard to catch with regular testing. However, it does require some setup and learning to use effectively, particularly when integrating it with a CI pipeline.
+Coverity Scan does not directly use a .yml or .yaml file for configuration like the other CI tools discussed here. Instead, Coverity Scan primarily relies on the build commands and Coverity Scan command-line tools to analyze the source code. You include the necessary Coverity Scan commands within the .yml files of your other CI tools, for example:
+Beast library .drone.star line 25, and Beast library drone.sh line 134
+Json library .drone.star line 58, and Json library drone.sh line 110
+Your CI test matrix should include one or more of the most popular compilers for each supported OS.
+Microsoft Visual C++ (MSVC): This is Microsoft’s own compiler that comes with Visual Studio. It has excellent support for Windows-specific development and great debugging tools.
+MinGW - Minimalist GNU for Windows: MinGW includes a port of the GCC (GNU Compiler Collection), which includes a C++ compiler. It’s useful for open-source projects and cross-platform development.
+Clang: Clang is a compiler front end for the C, C++, and Objective-C programming languages. It uses LLVM as its back end and has been part of the LLVM release cycle since LLVM 2.6.
+GCC, the GNU Compiler Collection: GCC is one of the most popular compilers for Linux. It supports multiple programming languages but is most often used as a C++ compiler. It’s open-source and is the default compiler on most Linux distributions.
+Clang: Clang, part of the LLVM project, is a C++ compiler that provides a number of advantages over GCC, such as faster compile times and improved performance. It’s also known for providing more understandable compile errors.
+Intel Compiler: While not as common for general use as GCC or Clang, the Intel C++ Compiler can produce highly optimized code, especially for parallel computation and vector operations. It’s often used in high-performance computing scenarios.
+Clang is the default compiler for macOS and is provided with Xcode, Apple’s integrated development environment. It’s known for providing more understandable compile errors and faster compile times compared to GCC.
+GCC, the GNU Compiler Collection: While not the default, GCC can also be used on macOS. It’s typically installed via a package manager like Homebrew. However, it’s worth noting that when you install GCC on a Mac, the default "gcc" command often still points to Clang for compatibility reasons, so you might need to use a version-specific command like "gcc-9" to use the real GCC.
+Intel Compiler: The Intel C++ Compiler is also available on macOS and can produce highly optimized code, especially for parallel computation and vector operations. Like on Linux, it’s often used in high-performance computing scenarios.
+Docker can be used to provide isolation, which can be very useful with certain development environments. For example, when there is a need to replicate an environment which could not be replicated otherwise. For example, we use Ubuntu 16 frequently, but there’s no GitHub image for it. As a workaround, the Ubuntu 22 image is used, and a Ubuntu 16 Docker container is run on it.
+What is fuzz testing? Fuzzing is a testing technique that injects random pieces of data to a software function to uncover crashes and vulnerabilities. It helps improve code security and reliability, since it can trigger edge cases that went unnoticed during unit testing.
+How does it work? Fuzz testing relies on a fuzzing engine, a library that runs your code in a loop, injecting different inputs at each iteration. The fuzzing engine will instrument your code to measure coverage, and use this information to drive the generation of samples. Most of the samples will contain malformed input, and will test your code’s tolerance to ill-formed inputs.
+Which kind of errors does fuzzing detect? The fuzzing engine will monitor your code for crashes. Fuzzing is often used with the address and undefined sanitizers. In short, fuzzing will make sure that your code doesn’t crash, leak, or incur undefined behavior, regardless of how malformed the input is. A lot of vulnerabilities in C++ code are related to these kinds of errors, so fuzzing can make your code more secure.
+Should I use it? Fuzz testing is especially relevant for libraries that process potentially untrusted, user-controlled input, like network data. Libraries that implement parsers, decoders or network protocols usually benefit from fuzz testing.
+Which Boost libraries use it? Libraries like Boost.Json, Boost.URL and Boost.Mysql use this technique - if you’re about to implement it in your library, have a look at what these libraries do.
+Should I still write unit tests? Yes. Absolutely. Fuzzing does not replace unit tests, but complements them. Unit tests verify that your code produces the intended results by providing known inputs and running assertions on the outputs. In fuzz testing, inputs are generated randomly by the fuzzing engine, so no assertions are usually run on the outputs - fuzzing will only monitor for crashes and memory errors.
+How can I add fuzzing to my library? We recommend using LibFuzzer, since it’s the easiest fuzzing engine to use, and the one that other Boost libraries use. You can use other fuzzing engines if you prefer.
+Quoting documentation, "LibFuzzer is an in-process, coverage-guided, evolutionary fuzzing engine". LibFuzzer will run your code multiple times with different, random inputs. It will instrument your code to measure coverage, and will attempt to generate inputs that maximize it, effectively trying to discover new paths in your code.
+LibFuzzer is included in clang, so you don’t need to install anything to get started.
Let’s say we want to fuzz a function that parses JSON data, like parse_json(string_view input). We will create a source file with the following code:
#include <string_view>
+#include <your/parsing/function.hpp>
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size)
+{
+ // The range [data, data+size) contains the data generated by the fuzzer
+ std::string_view input_data (reinterpret_cast<const char*>(data), size);
+ parse_json(input_data);
+ return 0;
+}
+We can build a fuzzer executable by adding -fsanitize=fuzzer to clang’s compile and link flags. This will automatically link LibFuzzer to your code. It’s advised to also enable the address and undefined sanitizers, which increases the range of errors detected by the fuzzer. We recommend building in release mode with debug symbols enabled, so crashes are symbolized correctly.
From the command line:
+clang++ -g -O3 -fsanitize=fuzzer,address,undefined -o fuzzer fuzzer.cpp
+As a Jamfile target:
exe fuzzer : fuzzer.cpp : requirements
+ <debug-symbols>on
+ <optimization>speed
+ <address-sanitizer>norecover
+ <undefined-sanitizer>norecover
+ <cxxflags>-fsanitize=fuzzer
+ <linkflags>-fsanitize=fuzzer
+;
+Or as a CMake target:
+add_executable(fuzzer fuzzer.cpp)
+target_compile_options(
+ fuzzer
+ PRIVATE
+ -fsanitize=fuzzer,address,undefined
+ -fno-sanitize-recover=address,undefined
+ -g
+ -O3
+)
+target_link_options(
+ fuzzer
+ PRIVATE
+ -fsanitize=fuzzer,address,undefined
+ -fno-sanitize-recover=address,undefined
+)
+Note that you must not define a main function - LibFuzzer will do it for you. The LLVMFuzzerTestOneInput function will be invoked repeatedly, with different input ranges.
You can run your fuzzer with no arguments, which will fuzz until you stop it with Ctrl+C. The executable will print a lot of messages to stdout. This section contains a reference to what they mean, if you’re curious.
+To run the fuzzer for a limited period of time (for example, 30 seconds), use:
+./fuzzer -max_total_time=30
+A corpus is a collection of input samples to be used by the fuzzer. LibFuzzer uses these samples to create random mutations to use as new inputs. If a newly created sample triggers extra coverage, this sample is stored in the corpus.
+Until now, we’ve been running our fuzzer without an initial corpus. The fuzzer will try random inputs, without any guidance, and will generate a corpus. Doing this is not advisable, though, since it reduces the effectiveness of your fuzzing - the fuzzer may fail to find some relevant inputs.
+We always advise providing an initial corpus (often called a seed corpus) to the fuzzer, to provide some guidance. The seed corpus should contain a variety of valid and invalid samples. You can reuse samples from your unit tests. In our JSON example, we could create a seedcorpus directory and copy all JSON files we use for unit testing.
Assuming that your seed corpus resides in your-lib/test/fuzzing/seedcorpus, we can run the fuzzer like this:
./fuzzer /tmp/corpus your-lib/test/fuzzing/seedcorpus -max_total_time=30
+The two positional arguments are understood as corpus directories. The first one is an empty directory, and the second one is our seed corpus. The fuzzer will use the first corpus directory we provide (/tmp/corpus in our case) to write all the samples it finds relevant. Using separate directories allows us to keep the seed corpus clean, since it may reside in source control.
When running your fuzzer as part of your CI builds, you’ll likely want to persist this new corpus to make the newly generated samples available to subsequent fuzzer runs. This section digs deeper on running fuzzers during CI builds.
+Once you’ve written a fuzzer and run it with an adequate corpus, you should have a look at the code coverage that your fuzzer triggered. This will help you verify that your fuzzing code is correct and that your corpus is in shape. The authors have found cases where some paths were missed due to errors in the seed corpus samples. Better check!
+We recommend using clang’s source-based coverage for this task. To get coverage info, you should build your fuzzer with the -fprofile-instr-generate and -fcoverage-mapping compile and link flags, and then run the fuzzer normally. This will create a default.profraw file in your current directory, containing raw coverage data.
To visualize your coverage, run:
+llvm-profdata merge -sparse default.profraw -o fuzzer.profdata (1)
+llvm-cov show path/to/fuzzer -instr-profile=fuzzer.profdata (2)
+| 1 | +Converts from the raw profile format emitted by the binary to something llvm-cov can understand. This command can be used to merge several coverage files from different runs, too. |
+
| 2 | +Prints a report with line coverage for your fuzzer and any headers it uses. Replace path/to/fuzzer with the path to your compiled fuzzer. llvm-cov requires it to properly understand coverage data. |
+
This may generate a lot of output. You can use the -sources argument to scope which files are presented. Pay attention to the header path printed by the above command, since Boost creates symlinks for headers. For example, if you’re in the Boost super-project root, you can scope the report to Boost.Json headers by running:
llvm-cov show path/to/fuzzer -instr-profile=fuzzer.profdata -sources=boost/json/
+As we’ve mentioned, it’s advisable to persist the corpus generated by your fuzzer between runs. However, it can become very big as new samples are added. Before saving the corpus, we recommend performing corpus minimization.
+This process is run by the same fuzzer executable we’ve been using. It will run the different samples in your corpus and discard "repeated" ones, based on the code paths they trigger.
+To run corpus minimization, use the -merge=1 flag:
./fuzzer /tmp/mincorpus /tmp/corpus -merge=1
+This will minimize the samples in /tmp/corpus, writing the results to /tmp/mincorpus. Note that no actual fuzzing is performed by this command.
If your fuzzer finds an input that makes your code crash, it will report the error and exit immediately, creating a file named crash-<id> containing the sample that caused the problem. Similarly, if an input takes too long to process, or a memory leak is found, a file timeout-<id> or leak-<id> will be written.
When a crash is detected, you should save the offending sample to source control, reproduce the crash, and fix your code. +During regression testing, you should make your fuzzer run that specific sample, to verify that the crash doesn’t happen again.
+You can make your fuzzer run a single sample by specifying it as a positional command-line argument. For example, if the sample that caused the crash is your-lib/test/fuzzing/old_crashes/crash-abc:
./fuzzer your-lib/test/fuzzing/old_crashes/crash-abc
+This will run your fuzzer only with crash-abc. It will not perform actual fuzzing.
Your fuzzer won’t be really useful unless you run it continuously. CI platforms are a good way to achieve this. We recommend using GitHub Actions for fuzzing jobs, although other platforms with similar functionality should work, too.
+Your fuzzing CI job should, at least:
+Attempt to restore corpus samples from previous runs.
+Build the fuzzers.
+Run them with any old crash samples, to prevent regressions.
+Run the actual fuzzing for some time. Most libraries run each fuzzer for 30 seconds.
+Minimize the corpus generated by the previous step.
+Persist the minimized corpus so that it can be used by subsequent CI runs.
+Archive any crashes, timeouts and leaks, so you can recover them later.
+If you’re using GitHub Actions, corpus persistence can be achieved using the cache action. Building the fuzzers should be part of your B2 or CMake builds. You can use Boost.MySQL’s Jamfile as inspiration. It’s a good practice to run the fuzzers both nightly and on push/pull request events.
It is advisable to keep your fuzzers as targeted as possible. For example, if you have functions to parse JSON and BSON (binary JSON) files, you should write two different fuzzers, instead of a single one that invokes one or the other based on the input.
+Your fuzzing code should be as efficient as possible. The faster it is, the more iterations the fuzzer will do, and the better the results. Avoid logging, cubic or greater complexity, and anything else that may slow down your code.
+Try to avoid any randomness in your code. LibFuzzer works best with deterministic functions - that is, functions that, for a certain input, always take the same code paths.
+Aside from the raw input data, you may need some extra input to configure your parsing function. For example, a JSON parser may be configured to allow comments or not. You may use part of the raw input data to configure flags like this and boost your coverage.
+Boost.Mysql fuzzes all its message deserialization routines. Fuzzers are located under test/fuzzing. The seed corpus is composed of multiple binary files, compressed and stored in the same directory. Fuzzers are built and run from test/fuzzing/Jamfile. Targets in this directory are built using b2 from the fuzz.yml GitHub Actions workflow.
Boost.Json fuzzes its JSON parsing functions. Fuzzers are stored under fuzzing/. The seed corpus is generated dynamically, by copying all JSON files used for unit testing. Fuzzers are built and run from fuzzing/Jamfile. Targets in this directory are built using b2 from the run_fuzzer.yml GitHub Actions workflow.
+Boost.URL is similar to Boost.Json, but doesn’t use a seed corpus.
+The Boost libraries are intended to be both reliable and portable. Every experienced programmer knows that means each library must be tested against a suitable number of test cases, on a range of platforms and compilers, and then tested again (regression tested) every time a change is made. The regression tests are run again before every public release.
+There are three broad dimensions to Boost library testing:
+The actual test codes - unit tests, regression tests, new feature and bug tests. Refer to Writing Tests.
+Tests that are integrated with the primary build systems, B2 and CMake.
+The new library developer needs to consider all three, however the initial focus on a new library will be the first in this list. Or, to put it another way, "Quality assurance based on a wide range of targeted tests" as one of the key answers to Professor Hoare’s question:
+"How did software get so reliable without proof?"
+When you’re looking to submit a library to the Boost collection, it’s essential to ensure broad compatibility with various compilers, platforms, and configurations. Create a test matrix of what you intend to support, and document what you do not intend to support, and consider:
+Testing with multiple versions of each compiler. Popular compilers to consider include:
+GCC (GNU Compiler Collection)
+Clang
+MSVC (Microsoft Visual C++)
+The Boost user base can be using older versions of these compilers, so strive for compatibility with a reasonable range of versions if possible.
+If your library depends on other Boost libraries or external libraries, ensure they are compatible with the compilers you are targeting. Be clear about any dependencies or prerequisites in your documentation.
+Be wary of non-standard compiler features or extensions. If you must use them, guard them with appropriate preprocessor checks. +Boost provides its own set of configuration macros to help with this.
+Compatibility with various versions of the C++ Standard: C++11, C++14, C++17, C++20, and C++23. Some Boost libraries support many of the standards, while others target only more recent ones.
+Supporting C++03 is no longer considered good practice.
+Different operating systems, including Linux, Windows, macOS, and others like various BSDs.
+Different architectures: x86, x64, ARM, MIPS. Architecture can affect word size (usually 32 or 64 bit), endianness, memory alignment, inline assembly, cache sizes, latency, and other memory and performance issues.
+When you have outlined your test matrix, study the predefined macros available to assist you, and make adjustments to your matrix appropriately.
+There are a set of macros in the Boost.Config library that can be used to identify compiler capabilities, platform specifics, and other configuration details. These macros are designed to smooth out differences between compilers and platforms, allowing for more portable code, for example:
+BOOST_COMP_GNUC: Defined if the compiler is GCC.
BOOST_COMP_MSVC: Defined if the compiler is Microsoft Visual C++.
BOOST_COMP_MSVC >= BOOST_VERSION_NUMBER(19, 0, 0): Checks if the MSVC version is 19 or greater.
BOOST_OS_LINUX: Defined if the operating system is Linux.
BOOST_ARCH_X86_64: Defined if the architecture is x86_64.
BOOST_LIB_C_GNU: Defined if the C standard library is from GNU.
BOOST_LIB_STD_DINKUMWARE: Defined if the standard library is from Dinkumware (often associated with MSVC).
BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION: Defined if the compiler does not support return type deduction introduced in C++14.
BOOST_NO_CXX11_AUTO_DECLARATIONS: Defined if the compiler does not support auto declarations from C++11. For example:
#include <boost/config.hpp>
+
+// ...
+#ifdef BOOST_NO_CXX11_AUTO_DECLARATIONS
+// Use traditional type declaration
+int x = 42;
+#else
+// Use C++11 auto
+auto x = 42;
+#endif
+// ...
+Use this same coding structure for any of the other macros.
+Boost releases are run through regression tests, which automatically generate compiler status HTML tables for various platforms. Unless otherwise indicated, the C++ Standard Library implementation is the one shipped with the compiler. Refer to Generating Library Status Tables.
+The HTML tables are not a good indication of a particular compiler’s compliance with the C++ Standard. The Boost libraries often contain workarounds which mask compiler deficiencies.
+Some regression tests are run only occasionally, and might be relatively out-of-date. Check the date and revision in the column headings.
+First, familiarize yourself with the Boost Test Policy.
+Then, read the documentation for the following libraries, which support the writing of unit, feature and regression tests:
+Start small and develop a good understanding of how these testing libraries work, before writing more than a few tests.
+When you have a good understanding of the basic testing procedures, look into more advanced techniques, such as Fuzz Testing.
+This section describes how to run regression tests on your local machine, by downloading and running a Python command-line tool.
+For information on the regression tests run on all libraries, refer to Test Matrix.
+It’s easy to run regression tests on your Boost clone.
+To run a library’s regression tests, run Boost’s b2 utility from the <boost-root>/libs/<library>/test directory. To run a single test, specify its name (as found in <boost-root>/libs/<library>/test/Jamfile.v2) on the command line.
See the Building BJam guide for help building or downloading bjam for your platform, and navigating your Boost distribution.
To run every library’s regression tests, run b2 from the <boost-root>/status directory.
To run Boost.Build’s regression tests, run python test_all.py from the <boost-root>/tools/build/v2/test directory.
This tool runs all Boost regression tests and reports the results back to the Boost community.
+Python (2.3 ≤ version < 3.0)
+Git (recent version)
+At least 5 gigabytes of disk space per compiler to be tested
+Create a new directory for the branch you want to test.
+Download the run.py script into that directory:
Open the run.py script in your browser.
Click the Raw button.
+Save as run.py in the directory you just created.
The syntax to run the tool is python run.py <options>… [<commands>] with the following three required options, plus any others you wish to employ (for a full list, refer to Commands and Options):
--runner=: Your choice of name that identifies your results in the reports.
+If you are running regressions in an interleaved manner with a different set of compilers (e.g. for Intel in the morning and GCC at the end of the day), you need to provide a different runner ID for each of these runs, e.g. "your_name-intel", and "your_name-gcc".
+The limitations of the report format impose a direct dependency between the number of compilers you are testing with and the amount of space available for your runner ID. If you are running regressions for a single compiler, make sure to choose a short enough ID that does not significantly disturb the report layout. You can also use spaces in the runner ID to allow the reports to wrap the name to fit.
+--toolsets=: The toolsets you want to test with.
If the --toolsets option is not provided, the script will try to use the platform’s default toolset (gcc for most Unix-based systems).
For supported toolsets, refer to toolset.
+--tag=: The tag you want to test. The only tags that currently make sense are develop and master.
For example:
+python run.py --runner=Metacomm --toolsets=gcc-4.2.1,msvc-8.0 --tag=develop
+If you are behind a firewall/proxy server, everything should still "just work". In the rare cases when it doesn’t, you can explicitly specify the proxy server parameters through the --proxy option. For example:
python run.py ... --proxy=http://www.someproxy.com:3128
+The following commands are available: cleanup, collect-logs, get-source, get-tools, patch, regression, setup, show-revision, test, test-boost-build, test-clean, test-process, test-run, update-source, and upload-logs.
The following options are available:
+| Option | +Description | +
|---|---|
|
+show this help message and exit |
+
|
+runner ID (e.g. 'Metacomm') |
+
|
+an HTML comment file to be inserted in the reports |
+
|
+the tag for the results |
+
|
+comma-separated list of toolsets to test with |
+
|
+comma separated list of libraries to test |
+
|
+do incremental run (do not remove previous binaries). Refer to Incremental Runs. |
+
|
+specifies the timeout, in minutes, for a single test run/compilation |
+
|
+options to pass to the regression test |
+
|
+bootstrap toolset for 'bjam' executable |
+
|
+bootstrap toolset for 'process_jam_log' executable |
+
|
++ |
|
+Boost SVN user ID |
+
|
+the name of the boost tarball |
+
|
+do an SVN update (if applicable) instead of a clean checkout, even when performing a full run |
+
|
+do neither a tarball download nor an SVN update; used primarily for testing script changes |
+
|
+FTP URL to upload results to. |
+
|
+HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128') |
+
|
+FTP proxy server (e.g. 'ftpproxy') |
+
|
+the dart server to send results to |
+
|
+debugging level; controls the amount of debugging output printed |
+
|
+send full |
+
|
+email address to send run notification to |
+
|
+SMTP server address/login information, in the following form: |
+
|
+do not run |
+
The regression run procedure will:
+Download the most recent regression scripts.
+Download the designated testing tool sources including Boost.Jam, Boost.Build, and the various regression programs.
+Download the most recent Boost sources from the Boost Git Repository into the subdirectory boost.
+Build b2 and process_jam_log if needed. (process_jam_log is a utility, which extracts the test results from the log file produced by Boost.Build).
Run regression tests, process and collect the results.
+Upload the results to a common FTP server.
+The report merger process running continuously will merge all submitted test runs and publish them at various locations.
+Once you have your regression results displayed in the Boost-wide reports, you may consider providing a bit more information about yourself and your test environment. This additional information will be presented in the reports on a page associated with your runner ID.
+By default, the page’s content is just a single line coming from the comment.html file in your run.py directory, specifying the tested platform. You can put online a more detailed description of your environment, such as your hardware configuration, compiler builds, and test schedule, by altering the file’s content. Also, consider providing your name and email address for cases where Boost developers have questions specific to your particular set of results.
+By default, the script runs in what is known as full mode: on each run.py invocation all the files that were left in place by the previous run — including the binaries for the successfully built tests and libraries — are deleted, and everything is rebuilt once again from scratch. By contrast, in incremental mode the already existing binaries are left intact, and only the tests and libraries whose source files have changed since the previous run are re-built and re-tested.
The main advantage of incremental runs is a significantly shorter turnaround time, but unfortunately incremental runs don’t always produce reliable results. Some type of changes to the codebase (changes to the b2 testing subsystem in particular) often require switching to a full mode for one cycle in order to produce trustworthy reports.
Run run.py in incremental mode by passing it the identically named command-line flag: python run.py … --incremental.
As a general guideline, if you can afford it, testing in full mode is preferable.
+You might encounter an occasional need to make local modifications to the Boost codebase before running the tests, without disturbing the automatic nature of the regression process. To implement this under regression.py:
Codify applying the desired modifications to the sources located in the ./boost_root subdirectory in a single executable script named patch_boost (patch_boost.bat on Windows).
Place the script in the run.py directory.
The driver will check for the existence of the patch_boost script, and, if found, execute it after obtaining the Boost sources.
+Send all comments/suggestions regarding this document and the testing procedure itself to the Boost developers' mailing list.
+Consider using a code sanitizer to check for some of the more mundane, but nevertheless real, bugs and inefficiencies in your library.
+All the tools listed have different strengths and are useful in different scenarios, so you might want to use several of them in combination.
+Clang AddressSanitizer (ASan) is a fast memory error detector built into LLVM/Clang, gcc and other compilers. As such, it works on Windows, Linux, and MacOS. It can detect out-of-bounds accesses to heap, stack, and globals, use-after-free and use-after-return bugs, and other memory-related errors. AddressSanitizer is generally faster than Valgrind and can be used in continuous integration without significantly slowing down the test suite.
+To use ASAN, pass the -fsanitize=address switch to Clang, or set the address-sanitizer=on B2 flag. Under CMake, you need to add the flags manually.
The LeakSanitizer (LSan) runs with ASAN, and is a memory leak detector. It’s integrated into AddressSanitizer.
+The sanitizer suite also includes:
+UndefinedBehaviorSanitizer (UBSan), which is a runtime undefined behavior detector that can catch misaligned or null pointers, integer overflows, and invalid bit shifts. Set -fsanitize=undefined in Clang, or undefined-sanitizer=on in B2. For details on the usage and output, refer to Clang UndefinedBehaviorSanitizer.
MemorySanitizer (MSan) detects uninitialized reads. This tool is similar to Valgrind, but it’s generally faster and can catch some bugs that Valgrind might miss. The New Library CI Framework doesn’t include any build for MSAN, and B2 doesn’t have a feature for it, so for either you need to manually specify the setting: -fsanitize=memory in Clang, or memory-sanitizer=on in B2. Refer to Clang MemorySanitizer.
ThreadSanitizer (TSan) detects data races. It’s available in Clang and gcc. Set -fsanitize=thread in Clang, or thread-sanitizer=on in B2. Refer to Clang ThreadSanitizer.
For Linux based systems, Valgrind is an open-source software tool suite that helps in debugging memory management and threading bugs, and profiling programs. It is often used to detect memory leaks and uninitialised memory blocks in C++ programs, among other things.
+Here’s how you can set it up for your project:
+Depending on your OS, the command will differ. For Ubuntu or Debian, you can use:
+sudo apt-get install valgrind
+For CentOS or Fedora, you can use:
+sudo yum install valgrind
+After Valgrind is installed, you can use it to run your program. Here’s an example:
+valgrind --leak-check=yes ./your_program
+The --leak-check=yes option tells Valgrind to perform memory leak checks. Your program runs as usual, but with Valgrind checking its memory usage in the background.
Setting up Valgrind in a CI environment depends on your CI system and might look something like this:
+In your CI configuration file, such as .github/workflows/workflow.yml for GitHub Actions, you would add a step to install Valgrind in your build environment.
Next, in your script steps, instead of running your test executable directly, you’d use Valgrind to run it. This will generate a report of any memory issues detected by Valgrind.
+Valgrind can significantly slow down your program, so it might not be suitable for all CI use cases, especially for large projects or tests that need to run quickly.
+There are required and optional (though highly recommended) tests for libraries.
+Boost uses an automatic regression test suite which generates HTML compiler status tables. Boost also uses Continuous Integration to ensure these tests are regularly run. Ensure your library testing includes the following:
+Every Boost library should supply one or more suitable test programs to be exercised by the Boost regression test suite. In addition to the usual compile-link-run tests expecting successful completion, compile-only or compile-and-link-only tests may be performed, and success for the test may be defined as failure of the steps.
+Test program execution must report errors by returning a non-zero value. They may also write to stdout or stderr, but that output should be relatively brief. Regardless of other output, a non-zero return value is the only way the regression test framework will recognize an error has occurred. Note that test programs to be included in the status tables must compile, link, and run quickly since the tests are executed many times.
Libraries with time consuming tests should be divided into a fast-execution basic test program for the status tables, and a separate full-coverage test program for exhaustive test cases. The basic test should concentrate on compilation issues so that the status tables accurately reflect the library’s likelihood of correct compilation on a platform.
+If for any reason the usual test policies do not apply to a particular library, an alternate test strategy must be described and implemented.
+Authors should supply a Jamfile to drive the regression tests for the library.
+Use Boost.Test to create individual test cases and group them into test suites. Then use the library’s test tools for Controlling outputs to verify the expected behavior.
+For simple cases, you can use the lightweight_test features of Boost.Core.
+Before fixing the bug, or adding the feature, add regression test cases that detect the bug or tests the feature. Sometimes adding one case suggests similar untested cases, and add those too.
+For bugs, run the regression test and verify that the bug is detected.
+Now, fix the bug or add the feature.
+Rerun the full regression tests, as sometimes the change breaks something else.
+Snapshots are used for quality control checks, and are posted to Archives. Because the snapshots represent work-in-process, they may not be suitable for production use.
+The Unix tarballs and Windows zipballs are identical except for the line endings exported from Git.
+| Version | +Download | +
|---|---|
Master branch |
++ |
Develop branch |
++ |
The Git master branch can be checked out from boostorg/boost.
+By following these guidelines and examples, contributors can write effective and maintainable unit tests that ensure the robustness and reliability of their Boost library contributions.
+Tests should be neatly organized into test suites to maintain clarity about which aspect of the library is being tested.
+When testing features that are compiler or platform-specific, use Boost.Config to ensure portability.
+Test names should be descriptive enough to understand the purpose without diving deep into the test logic.
+If a test contains non-trivial logic or checks for edge cases, include comments explaining the rationale.
+Ensure that the tests cover all corner cases, edge conditions, and typical usage scenarios.
+Each test should be independent, not relying on the state or outcome of another test.
+Well-commented unit tests not only describe the "what" but also the "why" behind certain checks, making them invaluable for both current developers and future maintainers. It’s crucial for unit tests to be self-explanatory, and while descriptive names play a significant role, comments can further elucidate complex or non-obvious logic.
+Writing good unit tests is crucial. Boost.Test provides the facilities needed to write unit tests, and Boost.Config can be used to adjust code depending on the platform and compiler features. The following are some examples to help contributors get started:
+To begin, contributors should include the necessary headers and use the right macros.
+#define BOOST_TEST_MODULE MyLibraryTest
+#include <boost/test/included/unit_test.hpp>
+
+BOOST_AUTO_TEST_CASE(test_case1)
+{
+ BOOST_TEST(2 + 2 == 4);
+}
+In this example, a test module named MyLibraryTest is defined, and a single test case (test_case1) checks a trivial arithmetic operation.
For a library with multiple functionalities, it’s a good idea to organize tests into test suites.
+#include <boost/test/included/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(MathTestSuite)
+
+BOOST_AUTO_TEST_CASE(test_addition)
+{
+ BOOST_TEST(2 + 2 == 4);
+}
+
+BOOST_AUTO_TEST_CASE(test_subtraction)
+{
+ BOOST_TEST(4 - 2 == 2);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+Let’s say a certain test is only valid for compilers supporting C++14. Boost.Config can be used to conditionally include or exclude that test.
+#include <boost/test/included/unit_test.hpp>
+#include <boost/config.hpp>
+
+BOOST_AUTO_TEST_CASE(test_cpp14_feature)
+{
+#if !defined(BOOST_NO_CXX14_GENERIC_LAMBDAS)
+ auto lambda = [](auto x) { return x * x; };
+ BOOST_TEST(lambda(3) == 9);
+#endif
+}
+In the above test, we ensure that the lambda (which uses a C++14 feature) only gets compiled if the compiler supports C++14 generic lambdas. The BOOST_NO_CXX14_GENERIC_LAMBDAS macro is provided by Boost.Config.
Boost.Test has special support for floating-point comparison to handle rounding errors.
+BOOST_AUTO_TEST_CASE(test_floating_point)
+{
+ double result = 0.1 * 0.1;
+ BOOST_TEST(result == 0.01, boost::test_tools::tolerance(1e-9));
+}
+In this test, the boost::test_tools::tolerance call specifies the allowed difference between the computed result and the expected result.
Boost.Test provides facilities to check if the right exceptions are thrown.
+#include <stdexcept>
+
+void foo() { throw std::runtime_error("Error!"); }
+
+BOOST_AUTO_TEST_CASE(test_exception)
+{
+ BOOST_CHECK_THROW(foo(), std::runtime_error);
+}
+Here is an example that demonstrates testing both the non-throwing and throwing paths of a function.
+#define BOOST_TEST_MODULE ExceptionTest
+#include <boost/test/included/unit_test.hpp>
+
+void mightThrow(bool doThrow)
+{
+ if (doThrow)
+ throw std::runtime_error("An error occurred!");
+}
+
+BOOST_AUTO_TEST_CASE(test_exception_handling)
+{
+ // This call should not throw any exceptions.
+ mightThrow(false);
+
+ // Testing if the function throws the expected exception when asked to.
+ // This is especially useful when certain conditions in the application
+ // logic are expected to trigger specific exceptions.
+ BOOST_CHECK_THROW(mightThrow(true), std::runtime_error);
+}
+Testing edge cases is crucial in ensuring the robustness and reliability of any software component. Edge cases often arise from boundary conditions, interactions of features, or uncommon input scenarios. The following examples demonstrate some common edge cases and how they can be tested using Boost.Test. In practice, understanding the problem domain and potential pitfalls of the library/component being developed is crucial in identifying and effectively testing edge cases.
+When working with arrays or data structures with a fixed size, it’s crucial to test both lower and upper boundaries.
+#include <array>
+#define BOOST_TEST_MODULE ArrayBoundaryTest
+#include <boost/test/included/unit_test.hpp>
+
+std::array<int, 5> data = {1, 2, 3, 4, 5};
+
+BOOST_AUTO_TEST_CASE(test_lower_boundary)
+{
+ BOOST_TEST(data[0] == 1);
+}
+
+BOOST_AUTO_TEST_CASE(test_upper_boundary)
+{
+ BOOST_TEST(data[4] == 5);
+}
+
+// This should fail if accessing out of bounds
+BOOST_AUTO_TEST_CASE(test_out_of_bounds)
+{
+ BOOST_CHECK_THROW(data.at(5), std::out_of_range);
+}
+Comments help identify the purpose of the tests in this example of testing list size and boundaries.
+#define BOOST_TEST_MODULE BoundaryTest
+#include <boost/test/included/unit_test.hpp>
+
+BOOST_AUTO_TEST_CASE(test_list_boundary_conditions)
+{
+ std::list<int> myList;
+
+ // Testing the lower boundary. An empty list should have a size of 0.
+ BOOST_TEST(myList.size() == 0);
+
+ myList.push_back(1);
+ myList.push_back(2);
+
+ // When two items are added, size should reflect that.
+ BOOST_TEST(myList.size() == 2);
+
+ myList.clear();
+
+ // After clearing, the list should return to its initial empty state.
+ BOOST_TEST(myList.size() == 0);
+}
+When working with numerical operations, it’s vital to test the smallest, largest, and other boundary values.
+#include <limits>
+#define BOOST_TEST_MODULE NumericLimitsTest
+#include <boost/test/included/unit_test.hpp>
+
+BOOST_AUTO_TEST_CASE(test_integer_overflow)
+{
+    // Signed integer overflow is undefined behavior in C++ and does not
+    // throw by itself, so test a helper that detects the overflow
+    // condition and throws a well-defined exception.
+    auto checked_increment = [](int value) {
+        if (value == std::numeric_limits<int>::max())
+            throw std::overflow_error("Integer overflow");
+        return value + 1;
+    };
+    BOOST_CHECK_THROW(checked_increment(std::numeric_limits<int>::max()),
+                      std::overflow_error);
+}
+Numerical algorithms often have trouble with 0!
+#define BOOST_TEST_MODULE AlgorithmTest
+#include <boost/test/included/unit_test.hpp>
+
+double divide(double a, double b)
+{
+ if (b == 0.0)
+ throw std::domain_error("Denominator cannot be zero.");
+ return a / b;
+}
+
+BOOST_AUTO_TEST_CASE(test_division)
+{
+ // Regular division scenario: 10 divided by 2 should give 5.
+ BOOST_TEST(divide(10.0, 2.0) == 5.0);
+
+ // Division by zero should throw an error. We're ensuring that our
+ // function correctly handles this edge case and provides meaningful feedback.
+ BOOST_CHECK_THROW(divide(10.0, 0.0), std::domain_error);
+}
+When working with strings, some common edge cases include empty strings, strings with special characters, and extremely long strings.
+#include <string>
+#define BOOST_TEST_MODULE StringTest
+#include <boost/test/included/unit_test.hpp>
+
+std::string concatenate(const std::string &a, const std::string &b)
+{
+ return a + b;
+}
+
+BOOST_AUTO_TEST_CASE(test_empty_string)
+{
+ BOOST_TEST(concatenate("", "world") == "world");
+}
+
+BOOST_AUTO_TEST_CASE(test_special_characters)
+{
+ BOOST_TEST(concatenate("hello", "\n\t!") == "hello\n\t!");
+}
+
+// Use this test cautiously as it can consume a lot of memory
+// BOOST_AUTO_TEST_CASE(test_extremely_long_string)
+// {
+// std::string long_string(1e7, 'a'); // 10 million 'a's
+// BOOST_TEST(concatenate(long_string, "b").back() == 'b');
+// }
+NULL or nullptr
+For libraries that might work with pointers, always check for null pointer scenarios.
+#define BOOST_TEST_MODULE PointerTest
+#include <boost/test/included/unit_test.hpp>
+
+int dereference(int* ptr)
+{
+    // Dereferencing a null pointer is undefined behavior and will not
+    // throw by itself, so check explicitly and throw a well-defined
+    // exception instead.
+    if (ptr == nullptr)
+        throw std::runtime_error("Null pointer dereference");
+    return *ptr;
+}
+
+BOOST_AUTO_TEST_CASE(test_null_pointer)
+{
+ int* null_ptr = nullptr;
+ BOOST_CHECK_THROW(dereference(null_ptr), std::runtime_error);
+}
+For recursive algorithms, consider the maximum depth and base cases.
+#define BOOST_TEST_MODULE RecursionTest
+#include <boost/test/included/unit_test.hpp>
+
+int factorial(int n)
+{
+ if (n < 0) throw std::runtime_error("Negative input not allowed");
+ if (n == 0) return 1;
+ return n * factorial(n - 1);
+}
+
+BOOST_AUTO_TEST_CASE(test_negative_input)
+{
+ BOOST_CHECK_THROW(factorial(-1), std::runtime_error);
+}
+
+BOOST_AUTO_TEST_CASE(test_base_case)
+{
+ BOOST_TEST(factorial(0) == 1);
+}
+
+BOOST_AUTO_TEST_CASE(test_general_case)
+{
+ BOOST_TEST(factorial(5) == 120);
+}
+A "mock" is a simulated object that mimics the behavior of a real dependency in a controlled way. Mocks are useful in isolating units of code and simulating external interactions without actually invoking them.
+#define BOOST_TEST_MODULE MockTest
+#include <boost/test/included/unit_test.hpp>
+#include <mock_database.hpp> // hypothetical mock database header
+
+BOOST_AUTO_TEST_CASE(test_database_read)
+{
+ MockDatabase db; // Creating a mock database instance
+
+ // Presetting the mock to return specific data when read is called.
+ db.setMockData("sample_data");
+
+ // The data returned from our mock should match the preset data.
+ BOOST_TEST(db.read() == "sample_data");
+}
+Boost.Core provides a set of core utility components intended for use by other libraries. Features include utility classes like noncopyable, type traits like is_same, and low-level functions like addressof.
In each of the following tests, the Boost.Test framework is used to verify the behavior of components based on Boost.Core:
+noncopyable
+Suppose you have a class that inherits from boost::noncopyable to ensure it can’t be copied.
#include <boost/core/noncopyable.hpp>
+
+class MyClass : private boost::noncopyable {
+ // class contents
+};
+To test this:
+#define BOOST_TEST_MODULE NonCopyableTest
+#include <boost/test/included/unit_test.hpp>
+
+BOOST_AUTO_TEST_CASE(test_noncopyable)
+{
+ MyClass instance1;
+
+ // The following lines should result in compile-time errors because copy
+ // constructor and assignment operator are deleted for noncopyable.
+ // Uncommenting these lines will cause the test to fail at compilation.
+ //
+ // MyClass instance2(instance1); // Copy construction
+ // instance1 = instance2; // Copy assignment
+
+ BOOST_TEST(true); // If we reach here, it means the class is noncopyable
+}
+is_same
+Using the boost::is_same type trait:
#include <boost/type_traits/is_same.hpp>
+
+template <typename T, typename U>
+bool are_same_type() {
+ return boost::is_same<T, U>::value;
+}
+To test this:
+#define BOOST_TEST_MODULE IsSameTest
+#include <boost/test/included/unit_test.hpp>
+
+BOOST_AUTO_TEST_CASE(test_is_same)
+{
+ BOOST_TEST(are_same_type<int, int>());
+ BOOST_TEST(!are_same_type<int, double>());
+}
+boost::addressof
+This function obtains the memory address of an object, even if its operator& is overloaded.
struct OverloadedAddress {
+ OverloadedAddress* operator&() {
+ return nullptr;
+ }
+};
+Testing it:
+#define BOOST_TEST_MODULE AddressOfTest
+#include <boost/test/included/unit_test.hpp>
+#include <boost/core/addressof.hpp>
+
+BOOST_AUTO_TEST_CASE(test_addressof)
+{
+ OverloadedAddress obj;
+ BOOST_TEST(boost::addressof(obj) != nullptr);
+}
+Descriptive test names are crucial for several reasons:
+When a test fails, a good test name instantly conveys what was expected and what aspect of the system was being tested.
+As the software evolves, descriptive test names make it easier for developers to update tests or understand the impact of a code change.
+Tests often serve as a form of living documentation for a system. Good test names provide an outline of the system’s behavior.
+Test names should often start with a verb to indicate the action or condition being tested.
+It’s usually better to have a longer, descriptive name than a short, vague one.
+If you’re using a test framework that already prefixes methods with test_, you don’t need to start every test name with test_. Consider using a more descriptive prefix.
If there’s a naming convention in the existing test suite, stick to it.
+The name should describe the expected behavior or outcome, not just the input conditions. For instance, test_negative_balance doesn’t tell us what to expect, while test_withdrawing_more_than_balance_throws_error is much clearer.
Let’s delve into some more examples:
+| Name | +Description | +
|---|---|
|
+This name is clear about the context (empty list) and the expectation (size is zero). |
+
|
+Clear and specific about the business rule being enforced. |
+
|
+Indicates that a connection should time out, and also specifies the expected time frame. |
+
|
+Describes a specific characteristic (stability) of a sorting function. |
+
|
+Clear about the rule being checked. |
+
| Name | +Description | +
|---|---|
|
+Vague. Does not tell anything about the purpose or expected outcome. |
+
|
+Too broad. What kind of errors? Under what conditions? |
+
|
+Ambiguous. What specific logic? Why is it being tested? |
+
|
+What spec? How? This name doesn’t give a clear picture of what’s being tested or what to expect. |
+
|
+Too vague. What about the flag? Are we testing its default value, its behavior when set, or something else? |
+
|
+Will a future maintainer know how to access Issue 576? |
+
The collection is represented on social media by the @BoostLibraries +X (formerly Twitter) account, where news and information about our libraries in particular and C++ in general are shared +with the community.
+The account publishes two types of posts:
+Official posts: These inform the community about new releases and libraries, library proposals, +review schedules and results, and generally all the events associated to the evolution and deployment of the project. +Official posts have a Boost brand imagery with watermark, and are produced by volunteers.
+Non-official posts: Everything else. Imagery for these posts is not branded so that they can be told from +the former. Anyone from the community can propose a non-official post for publication; if you’re interested in +doing so, follow these simple guidelines:
+Upcoming C++ events, meetups, conferences, etc.
+Talks about some particular library, or where such library is put to good effect.
+Boost-related articles, blogposts, C++ Committee papers.
+Interesting discussions ongoing on Reddit or some other online forums.
+The list is not exhaustive: if in doubt, just go with your submission or ask +for guidance using the same submission channel.
+
+Text: Make it short and attractive. Posts in X are currently capped at +280 characters, but you should strive for even less than that: one or two sentences +should suffice. Go straight to the point and phrase your sentences with a +clear call to action ("learn about", "watch", "contribute", you get the idea).
+Link(s): After the text, the post must include a link (or maybe more) to the +piece of news, talk, etc. you’re posting about. Links can be provided as-is +or with a short introductory text (like "Check it out: link"). There’s no +need to pass the links through an URL shortener: X does it automatically +with its own shortening service.
+Hashtags: The post should end with the following:
+#boost #boostcpp #cpp #cplusplus
plus other hashtags specific to the post you’re proposing. Experiment with +different hashtags to see what X search brings on them.
+Image: Always provide an image to go with the text —this dramatically +increases the visibility and reach of your post. The image should be +as large as possible. X accepts any aspect ratio, but given that +the majority of accesses will be through a mobile device, consider +ratios close to 1:1 or even taller than wider. +In our era of constant fight for attention and +information overload, the actual content of the image is less important +than how it will stand out amidst X infinite scroll: so, favor +colorful, original images that will make readers pause for a moment +and read the post (which is where the real info is shared).
+Making the perfect post is not trivial: if you need inspiration, +go to @BoostLibraries for actual +post examples.
+Log into the C++ Language Slack Workspace
+(or join it if you’re not a member yet)
+and post a message in the
+#boost channel, including:
Main text
+Link(s), with or without introductory texts
+Hashtags
+Attached image (or link to it)
+The team in charge of @BoostLibraries +may make some editorial adjustments +to your submission or get back to you to help them polish the final post. +Also, they will schedule the publication date and time for +maximum impact (typically, on weekday mornings with an eye to +catching readers both sides of the Atlantic, but this may be adjusted +for events specific to some country or geographical area).
+Boost uses the Git version control system for its development. This allows multiple contributors to work on the libraries concurrently, while keeping track of the changes that each person makes. Git is a distributed version control system, which means each contributor has their own local copy of the entire project, including its history.
+This section covers:
+The Boost libraries are modular, and each library has its own separate repository. This makes it easier to work with the entire collection of Boost libraries.
+The official Boost repositories are hosted in GitHub under the https://github.com/boostorg/ account. Each library has its own repository under this organization. For example, https://github.com/boostorg/json for Boost.Json, or https://github.com/boostorg/date_time/ for Boost.DateTime.
+As with any Git-based project, you can clone the repositories, make changes, commit those changes to your local repository, and push your changes back to the server. If you want to contribute changes to the official Boost libraries, refer to Contribute to an Existing Library.
+The Boost Super-project repository includes all the libraries as submodules.
+In addition to the libraries, you’ll find many other repositories under boostorg that handle other tasks, for example:
boost/tools includes B2 sources in build, and CMake support
+The Super-project has both master and develop branches, which operate somewhat differently than for individual libraries.
+New features are added and bugs are fixed in library develop branches. When the develop branch passes its merging criteria (Continuous Integration, projects are built correctly, other test processes run without errors, etc.), it is merged with the library master branch.
+For the Super-project, the develop and master branches are independent. Both branches track the latest changes in the corresponding library branch, for all libraries. The Super-project branches are never merged into each other. In other words, the Super-project master branch is created from the library master branches, not from the Super-project develop branch (which is primarily used for testing).
+When there is a public release of Boost, it is built from the master branch after that branch has been closed to updates (refer to the Release Process).
+Usually, a submodule in the Super-project will be created for a library developer by a staff member of Boost.
+Recall that, in Git, when a submodule is added to a project, the submodule references a particular commit. This means that, when developers update their sub-projects, the Super-project doesn’t get immediately updated. For this reason, there is a commit bot that runs every 15 minutes and updates the commit that each submodule references in the Super-project. This happens for both the develop and master branches.
+As you update your library, and the Super-project develop branch is updated by the commit bot, you may affect other developers. For example, if a mistake is made to the MySQL library develop branch, then it will affect the CMake module, because this module runs tests to verify that you can use MySQL via CMake.
+As part of Boost testing, Continuous Integration (CI) usually clones the Super-project and any required submodules.
+When testing any branch that is not master, the develop branch of the Super-project is cloned.
+When you’re testing master (which could be for a release), the master branch of the Super-project is cloned.
+While this cloning process is not a requirement, most libraries follow it.
+If you need to look at the code for the 1.82.0 release, navigate to the relevant repo, and enter:
+git checkout boost-1.82.0
+Developers don’t directly create Tags under repositories, when working with Boost. Tags are created by the release scripts, both in the Super-project and the individual repos, so do not add any of your own. This differs from the usual workflow in other non-Boost projects.
+Boost maintains its release versions with Tags, where the tag is always the boost- prefix followed by a major.minor.patch number string that matches the Semantic Versioning string format. The string format is useful because it’s something many tools understand, and for most users who just want to find a certain Boost version, that’s enough information. However, there is a unique Boost interpretation of the string.
Boost has not to date increased its major version number. This is reserved for when something "big" happens, not for feature improvements nor API breaks.
+While library maintainers try hard not to break anything, the minor version is increased when:
+Three months have passed since the last release.
+New features have been added because they always are.
+A few things might have been broken. Details on new features and discontinued features have to be consulted in the individual libraries' Release Notes.
+The patch number is rarely used, but will be incremented from zero if there is a need for a quick update following a scheduled release. For example, boost-1.65.1 followed less than one month after boost-1.65.0.
One thing library maintainers do to mitigate problems is announce their intention to break something two releases (or six months) in advance. Some maintainers keep a parallel versioning system for their library, for example:
+Ultimately though, even after checking both a library readme file, and for library announcements, some testing may be necessary to be certain of whether a breaking change occurred, or not.
+The repositories use the built-in GitHub issue tracker. With Boost, users are encouraged to use the issue tracker for discussions and feature requests, as well as to report bugs and other issues.
+Consider creating custom templates for your library. The goal of these templates is to ensure that contributors provide enough context and information that you, and the other library authors and maintainers, can understand and reproduce the issue, or fully understand what is being discussed.
+Currently, the Boost Super-project does not use the GitHub Discussion feature. If filing an issue does not seem appropriate, users are encouraged to post on the Boost developers mailing list.
+Creating an issue template in GitHub can help guide contributors to provide the necessary information when they create new issues with your library.
+Here are the steps to create an issue template:
+Navigate to the main page of your repository.
+In the menu bar (Code, Pull Requests, etc.), click on Settings.
In the Features section, ensure that the Issues checkbox is selected.
In the section Get organized with issue templates, click on Set up templates.
Click on the down arrow of Add template: select, then select Custom template.
Click on Preview and edit for your custom template. Then select the pen icon to bring up the template fields.
Give your template a descriptive name, perhaps the name of your library followed by "feature request", "performance issue", "bug report" or "discussion". Remember you can enter as many templates as you think appropriate.
+Give the template a full description in the About box.
Then add the meat of the template to the Template content. Consider adding the following, in the form of Markdown syntax and example text, to ask your users to enter:
Boost version number
+The OS, compiler, hardware they are using
+A brief summary of the issue/request/discussion topic
+In the case of a bug or issue:
+Steps to reproduce the issue
+Expected behavior
+Actual behavior
+Screenshots, error messages, output
+In the case of a feature request or discussion:
+Accurately describe the purpose of the request (the use case, not the implementation)
+Describe what they are currently doing to address the issue
+Any other relevant context or information
+Add the Optional additional items if they fit the purpose of the template, and perhaps add yourself as one of the Assignees.
When you’re done editing, at the top right of the page, click Propose changes.
Click Commit changes and create a Pull Request to update your repo.
Once the template is added, users who create new issues in your repository can choose to use one of your templates.
+For the user’s perspective on issues, refer to Reporting Issues.
+Before submitting a library for review, work through this checklist to help polish the library to a high standard.
+Examining your code, can you verify:
+[] Consistent Coding Style - naming conventions, indentation, spacing. Consider using clang-format for consistency.
+[] Modular Design - the code should be broken down into small, manageable, and reusable components, and fit into the structure of the Boost Super-Project.
+[] Avoid Code Smells - eliminate dead code, redundant logic, overly complex functions, and other common red-flag issues.
+[] Adherence to C++ Standards - ensure the library is compatible with a current standard (for example, C++17 or later).
+[] Template Usage - are templates appropriately used, and consider the potential impact on compile times.
+[] Exception Safety - make sure the library has well-defined behavior when exceptions are thrown. Aim for strong or basic exception safety guarantees.
+[] Thread Safety - if relevant, ensure thread safety or clearly document any multi-threading limitations.
+[] Minimize Dependencies - use other Boost components when appropriate - ideally the latest incarnation, and avoid unnecessary external and circular dependencies.
+Refer to Design Best Practices for considerably more detail.
+Documentation should be on a website (rather than, say, a downloadable pdf file) so that it can be regularly and easily updated. It should also be a single document as many reviewers will not explore beyond one link.
+[] The Introduction or Overview should compel an interested developer to read further. The purpose of the library - in not-too-technical terms - should be clear as daylight. What kind of task this library is the solution for should be well understood by reading the introduction completely, but no further. Remember that it is not experts, but developers struggling, who most often turn to documentation for help.
+[] Have a developer unfamiliar with the library read the introduction. If they struggle to understand it, revise accordingly.
+[] Include a sub-section on testimonials, if you have any that are noteworthy.
+[] Include information on how long the library has been stable, especially if it has been used for a significant period before the submission to Boost.
+Divide the rest of the documentation into sections:
+[] how to install the library - mentioning B2 and CMake as appropriate
+[] the dependencies
+[] the requirements - compilers, operating systems, hardware, etc.
+[] a "hello world" example
+[] architecture and rationale - include historical information and timelines if they impacted the design
+[] license
+[] references
+[] acknowledgements
+[] how to get started
+[] common use cases
+[] advanced use cases
+[] corner cases
+[] common patterns - for example, how to handle common errors
+Provide detailed descriptions of all:
+[] classes
+[] interfaces
+[] functions
+[] properties
+[] macros
+[] types
+[] constants
+[] error codes - ensure that error messages are clear and helpful for debugging
+[] exceptions
+It can be helpful to use tools like Doxygen or QuickBook.
+Ensure comprehensive test coverage for all functionalities.
+[] Unit Tests - use Boost.Test or another test framework.
+[] Edge Cases and Boundary Conditions - especially for algorithms and data structures.
+[] Cross-Platform Compatibility - verify that the library works on different platforms (Windows, Linux, macOS). Consider using CI tools like GitHub Actions or Travis CI to automate this process.
+[] Build Configurations - test with different compilers (GCC, Clang, MSVC) and optimization levels.
+[] Stress Tests - add stress tests (low memory, high CPU usage, etc.) to see how the library performs under heavy loads.
+As a minimum for validating your library performance, consider:
+[] Including benchmarks to demonstrate the library’s performance. Compare against existing solutions if possible.
+[] Optimizing for memory usage and consider using std::move and std::unique_ptr where appropriate to minimize allocations.
[] Avoiding unnecessary copies by using const &, std::move, and std::forward properly to avoid unnecessary data copying.
Boost library technical requirements are laid out in detail:
+For usability, verify that you have:
+[] A Simple and Intuitive Interface - avoid unnecessarily complex APIs.
+[] A Consistent API - consistent naming conventions, argument orders, and return types across the library.
+[] Clear Error Handling - clearly define and handle error cases. Use exceptions where appropriate and document expected exceptions.
+[] Template Type Deduction - ensure templates are designed to support type deduction and intuitive usage.
+Before submitting for a formal review, have you:
+[] Sought feedback from a smaller group of developers. Consider hosting the code on GitHub to get initial feedback from your community.
+[] Addressed all feedback from the pre-submission review.
+Consider a brief study of some existing popular Boost libraries and use them as benchmarks for quality and completeness. Recommended libraries for study include:
+[] Boost.Asio
+[] Boost.Json
+[] Boost.Mp11
+[] Boost.Spirit
+The Boost library collection is known for the formal review process. A process that needs to be carefully navigated before a new library is accepted for inclusion.
+This guide provides detailed information on the main roles in library submission:
+The Authors who wish to submit the library to the Boost collection: refer to Library Submission Process, and Contributor Checklist.
+The Review Manager whose job it is to lead and coordinate the review process: refer to Managing Reviews.
+The Reviewers who test the submission, or elements of it, and write up their findings: refer to Writing Reviews.
+This guide also includes a comprehensive history of submissions, most with links to the rationale and announcements made during the evaluation process: refer to the Formal Review Schedule.
+Feedback on any aspect of this documentation is encouraged, and is available by creating a New Issue.
+Before a library can be scheduled for formal review, an active Boost member (not connected with the library submission) must volunteer to be the Review Manager for the library. Members may contact a library author on- or off-list to express interest in managing the review. The library author has to accept a person as a review manager.
+Before submitting a library, it will help to understand the role of the review manager.
+The review manager works through the following process:
+Checks the submission to make sure it really is complete enough to warrant formal review. For full requirements, refer to the Library Requirements. If necessary, work with the submitter to verify the code compiles and runs correctly on several compilers and platforms.
+Finalizes the schedule with the Review Wizards and the submitter.
+Posts a notice of the review schedule on both the Boost developers' mailing list and the Boost-announce mailing list.
+The notice should include a brief description of the library and what it does, to let readers know if the library is one they are interested in reviewing.
+If the library is known to fail with certain compilers, mention them in the review notice so reviewers with those compilers won’t waste time diagnosing known problems.
+It is advised to send the notice to each mailing list in a separate e-mail, otherwise online e-mail-to-news gateways could get confused.
+Inspects the Boost library catalogue for libraries which may interact with the new submission. These potential interactions should be pointed out in the review announcement, and the authors of these libraries should be privately notified and urged to participate in the review.
+Urges people to do reviews if they aren’t forthcoming.
+Follows review discussions regarding the library, moderating or answering questions as needed. It is the review manager’s job to give reviewers the benefit of the doubt and to try to coax a serious review out of participants, by asking pertinent questions.
+Asks the Review Wizards for permission to extend the review schedule if it appears that too few reviews will be submitted during the review period.
+Decides if there is consensus to accept the library and if there are any conditions attached. Consensus is not the same as a vote. The review manager has discretion to weigh opinions based on authority or thoughtfulness.
+Posts a notice of the review results on the Boost users mailing list as well as the Boost developers' mailing list and Boost-announce mailing list. A rationale is also helpful, but its extent is up to the review manager. If there are suggestions, or conditions that must be met before final inclusion, they should be stated. Concerns about the timeliness or quality of the review report should be brought to the Review Wizards off-list.
+Ideally, the review summary should contain all the information the manager has taken into account when coming to a decision. If there were discussions out of band, they need to be summarized. If there were discussions on the list that haven’t made their way into the formal reviews, they should be summarized too. Refer to the Best Practices section on Writing Reviews for some issues to look out for.
+The review manager is not supposed to impartially reflect the community opinion, as expressed in the form of formal reviews. Instead, the review manager’s role is to decide whether the library should be accepted, and the reviews help them with this, rather than decide for them. Simply put, the review manager does not impartially tally votes - reviews are not votes.
+A review manager can write a review themselves, though this process is independent of their role of review manager.
+The review process is more like a court - with the judge (the review manager) asking "Does the library meet the required standards for acceptance as a Boost library?". This question is resolved by various advocates presenting their cases, criticizing other advocates' cases, presenting facts, logical arguments, their own experiences, and so on. The judge’s job is to weigh all this and reach a decision. And they may add conditions on acceptance, or not. If consensus between judge and authors cannot be reached on meeting any conditions - the process ends without a resolution (the mistrial).
+The review manager is - for better or worse - personally responsible for making the decision, defending the results, and dealing with future criticism. Their name will be public. This is not a job for everyone.
+Sometimes the judging process is going to produce irreconcilable differences, and continued discussions about the "verdict" are a fact of life. With any process there will be "winners" and "losers". So it’s not about everyone being happy at the end of the day.
+If, years from now, someone would like to know why a library was accepted or rejected, they only need to read the court documents (the review posts and the review summary), and should come away with the correct understanding of what happened and why.
+One of the challenging tasks a review manager might have to take on is to write rejection rationale for a submitted library. For reference, here are links to the rationale of several libraries that were rejected in recent years.
+| Submission | +Review Dates | +Result Rationale | +
|---|---|---|
Mustache |
+February 5, 2023 - February 14, 2023 |
++ |
Text |
+June 11, 2020 - June 20, 2020 |
++ |
out_ptr |
+June 16, 2019 - July 10, 2019 |
++ |
Timsort |
+June 3, 2017 - June 12, 2017 |
++ |
Synapse |
+December 2, 2016 - December 11, 2016 |
++ |
For links to all available acceptance and rejection rationales, refer to Past Review Results and Milestones.
+To manage a review, you should have experience with the review process and ideally expert knowledge of the library’s domain. To volunteer to become a review manager, contact the current Review Wizards.
+Currently the review wizards are: Matt Borland (matt@mattborland.com), Mateusz Łoskot (mateusz@loskot.net) and John Phillips (johnphillipsithaca@gmail.com).
+Reviews are scheduled when the Review Wizards approve a review manager and agree with the manager and author on dates. See Library Submission Process for more information.
+The review results include recent completed reviews - where the library may have been accepted but is not yet part of Boost. There is often a lag between acceptance and site posting as authors address issues raised in the formal review.
+| Submission | +Submitter | +Links | +Review Manager | +Review Dates | +Result | +
|---|---|---|---|---|---|
| + | Alfredo Correa |
++ | Matt Borland |
+March 5, 2025 - March 15, 2025 |
+TBD |
+
| + | Joaquín M López Muñoz |
++ | Ion Gaztañaga |
+April 16, 2025 - April 26, 2025 |
+TBD |
+
In order for a review to proceed, a Boost member must volunteer to manage the review. This should be someone with experience with the review process and knowledge of the library’s domain. If you would like to volunteer to become a review manager, refer to Managing Reviews.
+| Submission | +Submitter | +Review Manager | +Review/Release Dates | +Result | +
|---|---|---|---|---|
Boost 1.90.0 Released |
+- |
+Marshall Clow |
+December 10, 2025 |
++ |
Decimal Re-Review |
+Matt Borland and Chris Kormanyos |
+John Maddock |
+October 6, 2025 - October 15, 2025 |
++ |
SQLite Re-Review |
+Klemens Morgenstern |
+Mohammad Nejati |
+August 22, 2025 - August 31, 2025 |
++ |
Boost 1.89.0 Released |
+- |
+Marshall Clow |
+August 14, 2025 |
++ |
| + | Joaquin M Lopez Munoz |
+Arnaud Becheler |
+May 13, 2025 - May 22, 2025 |
+Accepted Added in 1.89.0 |
+
| + | Jean-Louis Leroy |
+Dmitry Arkhipov |
+April 28, 2025 - May 07, 2025 |
+Accepted Added in 1.90.0 |
+
Boost 1.88.0 Released |
+- |
+Marshall Clow |
+April 10, 2025 |
++ |
Decimal |
+Matt Borland and Chris Kormanyos |
+John Maddock |
+January 15, 2025 - January 22, 2025 |
++ |
Hash2 |
+Peter Dimov and Christian Mazakas |
+Matt Borland |
+December 7, 2024 - December 15, 2024 |
+Accepted Added in 1.88.0 |
+
Boost 1.87.0 Released |
+- |
+Marshall Clow |
+December 11, 2024 |
++ |
SQLite |
+Klemens Morgenstern |
+Richard Hodges |
+November 13, 2024 - November 22, 2024 |
++ |
MQTT5 |
+Ivica Siladic |
+Klemens Morgenstern |
+October 16, 2024 - October 25, 2024 |
+Conditionally Accepted Added in 1.88.0 |
+
Boost Fiscal Sponsorship |
+Vinnie Falco |
+Glen Fernandes |
+September 3, 2024 - September 22, 2024 |
++ |
Boost 1.86.0 Released |
+- |
+Marshall Clow |
+August 14, 2024 |
++ |
Boost 1.85.0 Released |
+- |
+Marshall Clow |
+April 15, 2024 |
++ |
Parser |
+Zach Laine |
+Marshall Clow |
+February 19, 2024 - February 28, 2024 |
+Pending Conditionally Accepted Added in 1.87.0 |
+
CharConv |
+Matt Borland |
+Christopher Kormanyos |
+January 15, 2024 - January 25, 2024 |
++ |
Boost 1.84.0 Released |
+- |
+Marshall Clow |
+December 13, 2023 |
++ |
Cobalt (fka Async) |
+Klemens Morgenstern |
+Niall Douglas |
+September 22, 2023 - October 2, 2023 repeated due to lack of reviews |
+Pending Conditionally Accepted — Added in 1.84 |
+
Scope |
+Andrey Semashev |
+Dmitry Arkhipov |
+November 26, 2023 - December 5, 2023 |
+Pending Conditionally Accepted — Added in 1.85 |
+
Boost 1.83.0 Released |
+- |
+Marshall Clow |
+August 11, 2023 |
++ |
Boost 1.82.0 Released |
+- |
+Marshall Clow |
+April 14, 2023 |
++ |
Mustache |
+Peter Dimov |
+Klemens Morgenstern |
+February 5, 2023 - February 14, 2023 |
++ |
Redis (fka Aedis) |
+Marcelo Zimbres Silva |
+Klemens Morgenstern |
+January 15, 2023 - January 24, 2023 |
+Pending Conditionally Accepted — Added in 1.84 |
+
Boost 1.81.0 Released |
+- |
+Marshall Clow |
+December 14, 2022 |
++ |
URL |
+Vinnie Falco, Alan de Freitas |
+Klemens Morgenstern |
+August 13, 2022 - August 22, 2022 |
++ |
Boost 1.80.0 Released |
+- |
+Marshall Clow |
+August 10, 2022 |
++ |
MySQL |
+Ruben Perez |
+Richard Hodges |
+May 9, 2022 - May 18, 2022 |
++ |
Boost 1.79.0 Released |
+- |
+Marshall Clow |
+April 13, 2022 |
++ |
Boost 1.78.0 Released |
+- |
+Marshall Clow |
+December 8, 2021 |
++ |
Boost 1.77.0 Released |
+- |
+Marshall Clow |
+August 11, 2021 |
++ |
Boost 1.76.0 Released |
+- |
+Marshall Clow |
+April 16, 2021 |
++ |
Lambda2 |
+Peter Dimov |
+Joel de Guzman |
+March 22, 2021 - March 31, 2021 |
++ |
Describe |
+Peter Dimov |
+Richard Hodges |
+March 1, 2021 - March 10, 2021 |
++ |
Boost 1.75.0 Released |
+- |
+Marshall Clow |
+December 11, 2020 |
++ |
PFR(Precise and Flat Reflection) |
+Antony Polukhin |
+Benedek Thaler |
+September 28, 2020 - October 7, 2020 |
++ |
JSON |
+Vinnie Falco, Krystian Stasiowski |
+Pranam Lashkari |
+September 14, 2020 - September 23, 2020 |
++ |
Boost 1.74.0 Released |
+- |
+Marshall Clow |
+August 14, 2020 |
++ |
LEAF(Lightweight Error Augmentation Framework) |
+Emil Dotchevski |
+Michael Caisse |
+May 22, 2020 - May 31, 2020 |
++ |
Text |
+Zach Laine |
+Glen Fernandes |
+June 11, 2020 - June 20, 2020 |
++ |
Review Wizard Status Report |
++ | Mateusz Loskot |
+May 20, 2020 |
++ |
Boost 1.73.0 Released |
+- |
+Marshall Clow |
+April 28, 2020 |
++ |
Boost 1.72.0 Released |
+- |
+Marshall Clow |
+December 11, 2019 |
++ |
STLInterfaces |
+Zach Laine |
+Barrett Adair |
+December 10, 2019 - December 19, 2019 |
+Pending + Conditionally Accepted — Added in 1.74 |
+
StaticString (was FixedString) |
+Krystian Stasiowski and Vinnie Falco |
+Joaquin M López Muñoz |
+November 25, 2019 - December 4, 2019 |
++ |
Boost 1.71.0 Released |
+- |
+Marshall Clow |
+August 19, 2019 |
++ |
out_ptr |
+JeanHeyd Meneide |
+Zach Laine |
+June 16, 2019 - July 10, 2019 |
++ | +
Boost 1.70.0 Released |
+- |
+Marshall Clow |
+April 12, 2019 |
++ |
Variant2 |
+Peter Dimov |
+Michael Caisse |
+April 1, 2019 - April 15, 2019 |
++ |
Boost 1.69.0 Released |
+- |
+Marshall Clow |
+December 11, 2018 |
++ |
Histogram |
+Hans Dembinski |
+Mateusz Loskot |
+September 17, 2018 - September 26, 2018 |
++ |
Boost 1.68.0 Released |
+- |
+Marshall Clow |
+August 09, 2018 |
++ |
Boost 1.67.0 Released |
+- |
+Daniel James |
+April 16, 2018 |
++ |
YAP |
+Zach Laine |
+Louis Dionne |
+February 5, 2018 - February 14, 2018 |
+Ongoing Conditionally Accepted Accepted Added in 1.70 |
+
Outcome |
+Niall Douglas |
+Charley Bay |
+January 19, 2018 - January 28, 2018 |
++ |
Boost 1.66.0 Released |
+- |
+Daniel James |
+December 19, 2017 |
++ |
Double-Ended |
+Benedek Thaler |
+Thorsten Ottosen |
+September 21, 2017 - October 7, 2017 |
++ |
Fit (now HOF) |
+Paul Fultz |
+Matt Calabrese |
+September 8, 2017 - September 20, 2017 |
++ |
Boost 1.65.1 Released |
+- |
+Daniel James |
+September 7, 2017 |
++ |
Boost 1.65.0 Released |
+- |
+Daniel James |
+August 21, 2017 |
++ |
mp11 |
+Peter Dimov |
+Bjorn Reese |
+July 15, 2017 - July 24, 2017 |
++ |
Beast |
+Vinnie Falco |
+Michael Caisse |
+July 1, 2017 - July 10, 2017 |
++ |
pdqsort |
+Orson Peters |
+Steven Ross |
+June 21, 2017 - June 30, 2017 |
++ |
Nowide |
+Artyom Beilis |
+Frédéric Bron |
+June 12, 2017 - June 21, 2017 |
++ |
Timsort |
+Alexander Zaitsev |
+Steven Ross |
+June 3, 2017 - June 12, 2017 |
++ |
Outcome |
+Niall Douglas |
+Charley Bay |
+May 19, 2017 - May 28, 2017 |
++ |
PolyCollection |
+Joaquín Mª López Muñoz |
+Ion Gaztañaga |
+May 3, 2017 - May 12, 2017 |
++ |
Boost 1.64.0 Released |
+- |
+Rene Rivera |
+April 19, 2017 |
++ |
CallableTraits |
+Barrett Adair |
+Louis Dionne |
+April 3, 2017 - April 12, 2017 |
+Ongoing + Conditionally Accepted — Added in 1.66 |
+
Stacktrace |
+Antony Polukhin |
+Niall Douglas |
+March 17, 2017 - March 26, 2017 |
++ |
Safe Numerics |
+Robert Ramey |
+Andrzej Krzemienski |
+March 2, 2017 - March 16, 2017 |
+Ongoing + Conditionally Accepted — Added in 1.69 |
+
Boost 1.63.0 Released |
+- |
+Marshall Clow |
+December 26, 2016 |
++ |
Stacktrace |
+Antony Polukhin |
+Niall Douglas |
+December 14, 2016 - December 23, 2016 |
++ |
Synapse |
+Emil Dotchevski |
+Edward Diener |
+December 2, 2016 - December 11, 2016 |
++ |
Parallel Sorting Sub-library |
+Francisco José Tapia |
+Steven Ross |
+November 11, 2016 - November 20, 2016 |
++ |
Process |
+Klemens Morgenstern |
+Antony Polukhin |
+October 27, 2016 - November 5, 2016 |
++ |
Boost 1.62.0 Released |
+- |
+Rene Rivera |
+September 28, 2016 |
++ |
Fiber (mini-review) |
+Oliver Kowalke |
+Nat Goodspeed |
+May 23, 2016 - June 2, 2016 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+May 19, 2016 |
++ |
Boost 1.61.0 Released |
+- |
+Rene Rivera |
+May 13, 2016 |
++ |
Fit |
+Paul Fultz |
+Vicente Botet |
+March 2, 2016 - March 13, 2016 |
++ |
Quaternions, Vectors, Matrices (QVM) |
+Emil Dotchevski |
+Adam Wulkiewicz |
+December 7, 2015 - December 23, 2015 |
++ |
Boost 1.60.0 Released |
+- |
+Marshall Clow |
+December 17, 2015 |
++ |
Fiber (mini-review) |
+Oliver Kowalke |
+Nat Goodspeed |
+September 4, 2015 - September 13, 2015 |
++ |
Asynchronous File I/O |
+Niall Douglas and + Paul Kirth |
+Ahmed Charles |
+August 21, 2015 - August 31, 2015 |
+Pending + Rejected (no result posted) |
+
Boost 1.59.0 Released |
+- |
+Marshall Clow |
+August 13, 2015 |
++ |
Http |
+Vinícius dos Santos Oliveira |
+Bjorn Reese |
+August 7, 2015 - August 16, 2015 |
++ |
DLL |
+Antony Polukhin |
+Vladimir Prus |
+June 29, 2015 - July 12, 2015 |
++ |
Hana |
+Louis Dionne |
+Glen Fernandes |
+June 10, 2015 - June 24, 2015 |
+Accepted Added in 1.61 |
+
Metaparse |
+Abel Sinkovics |
+Christophe Henry |
+May 25, 2015 - June 7, 2015 |
++ |
Boost 1.58.0 Released |
+- |
+Marshall Clow |
+April 17, 2015 |
++ |
Endian Mini-Review |
+Beman Dawes |
+Joel Falcou |
+January 23, 2015 - February 1, 2015 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+January 22, 2015 |
++ |
Compute |
+Kyle Lutz |
+Antony Polukhin |
+December 15, 2014 - December 30, 2014 |
++ |
Sort |
+Steven Ross |
+Edward Diener |
+November 10, 2014 - November 19, 2014 |
++ |
Boost 1.57.0 Released |
+- |
+Marshall Clow |
+November 3, 2014 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+August 30, 2014 |
++ |
Variadic Macro Data |
+Edward Diener |
+Steven Watanabe |
+August 21, 2014 - August 30, 2014 |
++ |
Boost 1.56.0 Released |
+- |
+Marshall Clow |
+August 7, 2014 |
++ |
Convert |
+Vladimir Batov |
+Edward Diener |
+May 12, 2014 - May 25, 2014 |
++ |
TypeIndex Mini-Review |
+Antony Polukhin |
+Niall Douglas |
+April 21, 2014 - April 30, 2014 |
++ |
Align |
+Glen Fernandes |
+Ahmed Charles |
+April 11, 2014 - April 20, 2014 |
++ |
Fiber |
+Oliver Kowalke |
+Nat Goodspeed |
+January 6, 2014 - January 15, 2014 |
++ |
TypeIndex |
+Antony Polukhin |
+Niall Douglas |
+November 11, 2013 - November 20, 2013 |
++ |
Boost 1.55.0 Released |
+- |
+Marshall Clow |
+November 11, 2013 |
++ |
Boost 1.54.0 Released |
+- |
+Marshall Clow |
+July 1, 2013 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+March 14, 2013 |
++ |
Boost 1.53.0 Released |
+- |
+Marshall Clow |
+February 4, 2013 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+November 10, 2012 |
++ |
Boost 1.52.0 Released |
+- |
+Marshall Clow |
+October 5, 2012 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+September 27, 2012 |
++ |
ODEint |
+Karsten Ahnert and + Mario Mulansky |
+Steven Watanabe |
+September 19, 2012 - September 28, 2012 |
++ |
Coroutine |
+Oliver Kowalke |
+Hartmut Kaiser |
+September 3, 2012 - September 12, 2012 |
++ |
Contract |
+Lorenzo Caminiti |
+Dave Abrahams |
+August 22, 2012 - August 31, 2012 |
++ |
Boost 1.51.0 Released |
+- |
+Marshall Clow |
+August 20, 2012 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+August 15, 2012 |
++ |
Type Erasure |
+Steven Watanabe |
+Lorenzo Caminiti |
+July 18, 2012 - July 27, 2012 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+July 13, 2012 |
++ |
Boost 1.50.0 Released |
+- |
+Beman Dawes |
+June 28, 2012 |
++ |
Multiprecision Arithmetic |
+John Maddock |
+Jeffrey Hellrung |
+June 8, 2012 - June 17, 2012 |
++ |
Boost 1.49.0 Released |
+- |
+Beman Dawes |
+February 24, 2012 |
++ |
Predef |
+Rene Rivera |
+Joel Falcou |
+February 20, 2012 - February 29, 2012 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+January 10, 2012 |
++ |
Context (mini-review) |
+Oliver Kowalke |
+Giovanni Deretta |
+January 2, 2012 - January 11, 2012 |
++ |
Boost 1.48.0 Released |
+- |
+Beman Dawes |
+November 16, 2011 |
++ |
Local |
+Lorenzo Caminiti |
+Jeffrey Hellrung |
+November 10, 2011 - November 19, 2011 |
++ |
Atomic |
+Helge Bahmann |
+Tim Blechmann |
+October 17, 2011 - October 26, 2011 |
+Accepted — Added in 1.53 |
+
Algorithm |
+Marshall Clow |
+Dave Abrahams |
+September 22, 2011 - October 1, 2011 |
++ |
Endian |
+Beman Dawes |
+Joel Falcou |
+September 5, 2011 - September 14, 2011 |
++ |
Conversion |
+Vicente Botet |
+Gordon Woodhull |
+August 20, 2011 - August 29, 2011 |
++ |
Containers |
+Ion Gaztañaga |
+John Maddock |
+August 3, 2011 - August 12, 2011 |
++ |
Lockfree |
+Tim Blechmann |
+Hartmut Kaiser |
+July 18, 2011 - July 27, 2011 |
++ |
Boost 1.47.0 Released |
+- |
+Beman Dawes |
+July 12, 2011 |
++ |
Type Traits Introspection |
+Edward Diener |
+Joel Falcou |
+July 1, 2011 - July 10, 2011 |
++ |
Assign v2 |
+Erwann Rogard, + Thorsten Ottosen |
+John Bytheway |
+June 15, 2011 - June 24, 2011 |
++ |
Heaps |
+Tim Blechmann |
+Andrew Sutton |
+May 30, 2011 - June 8, 2011 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+May 23, 2011 |
++ |
AutoIndex (Tool) |
+John Maddock |
+Daniel James |
+May 5, 2011 - May 14, 2011 |
++ |
Convert |
+Vladimir Batov |
+Edward Diener |
+April 23, 2011 - May 2, 2011 |
++ |
Locale |
+Artyom Beilis |
+Chad Nelson |
+April 7, 2011 - April 16, 2011 |
++ |
Context |
+Oliver Kowalke |
+Vicente Botet |
+March 21, 2011 - March 30, 2011 |
+Pending +Accepted Provisionally Accepted — Added in 1.51.0 |
+
Type Traits Extensions (Fast-Track) |
+Frédéric Bron |
+Joel Falcou |
+March 14, 2011 - March 18, 2011 |
+Accepted — Added in 1.48.0 |
+
Boost 1.46.1 Released |
+- |
+Beman Dawes |
+March 21, 2011 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+March 4, 2011 |
++ |
XInt |
+Chad Nelson |
+Vladimir Prus |
+March 2, 2011 - March 12, 2011 |
++ |
Boost 1.46 Released |
+- |
+Beman Dawes |
+February 21, 2011 |
++ |
Phoenix (mini-review) |
+Joel de Guzman |
+Hartmut Kaiser |
+February 20, 2011 - March 2, 2011 |
+Accepted — Added in 1.47.0 |
+
Process |
+Boris Schaeling |
+Marshall Clow |
+February 7, 2011 - February 16, 2011 |
++ |
GIL.IO |
+Christian Henning |
+Mateusz Loskot |
+December 1, 2010 - December 10, 2010 |
+Accepted — Added in 1.68.0 |
+
Boost 1.45 Released |
+- |
+Beman Dawes |
+November 20, 2010 |
++ |
Chrono |
+Vicente Botet |
+Anthony Williams |
+November 6, 2010 - November 15, 2010 |
+Accepted — Added in 1.47.0 |
+
Ratio |
+Vicente Botet |
+Anthony Williams |
+October 2, 2010 - October 11, 2010 |
+Accepted — Added in 1.47.0 |
+
Boost 1.44 Released |
+- |
+Beman Dawes |
+August 17, 2010 |
++ |
Boost.Assign Extensions (Mini-Review) |
+Erwann Rogard |
+Thorsten Ottosen |
+June 13, 2010 - June 19, 2010 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+May 26, 2010 |
++ |
Move |
+Ion Gaztañaga |
+OvermindDL1 Michael Caisse |
+May 10, 2010 - May 24, 2010 |
+Accepted — Added in 1.48.0 |
+
Boost 1.43 Released |
+- |
+Beman Dawes |
+May 6, 2010 |
++ |
Log |
+Andrey Semashev |
+Vladimir Prus |
+March 8, 2010 - March 17, 2010 |
+Accepted Provisionally Accepted — Added in 1.54 |
+
Interval Containers |
+Joachim Faulhaber |
+Hartmut Kaiser |
+February 18, 2010 - February 27, 2010 |
+Accepted — Added in 1.46 |
+
Boost 1.42 Released |
+- |
+Beman Dawes |
+February 2, 2010 |
++ |
Review Wizard Status Report |
+- |
+John Phillips |
+December 7, 2009 |
++ |
Meta State Machine (MSM) |
+Christophe Henry |
+Dave Abrahams |
+November 23, 2009 - December 13, 2009 |
+Accepted — Added in 1.44 |
+
Boost 1.41 Released |
+- |
+Beman Dawes |
+November 18, 2009 |
++ |
Geometry |
+Barend Gehrels, Bruno Lalande, and Mateusz Loskot |
+Hartmut Kaiser |
+November 5, 2009 - November 22, 2009 |
+Accepted — Added in 1.47.0 |
+
Boost 1.40 Released |
+- |
+Beman Dawes |
+August 27, 2009 |
++ |
Polygon |
+Lucanus Simonson |
+Fernando Cacciola |
+August 24, 2009 - September 2, 2009 |
+Accepted — Added in 1.44 |
+
Review Wizard Status Report |
+- |
+Ronald Garcia |
+June 4, 2009 |
++ |
Boost 1.39 Released |
+- |
+Beman Dawes |
+May 3, 2009 |
++ |
Polynomial |
+Paweł Kieliszczyk |
+John Maddock |
+March 10, 2009 - March 19, 2009 |
++ |
Boost.Range (Update) |
+Neil Groves |
+Thorsten Ottosen |
+February 20, 2009 - March 3, 2009 |
+Accepted — Added in 1.43 |
+
Boost 1.38 Released |
+- |
+Beman Dawes |
+February 9, 2009 |
++ |
Futures (1st candidate) |
+Anthony Williams |
+Tom Brinkman |
+January 5, 2009 - January 20, 2009 |
+Accepted — Added in 1.41 |
+
Futures (2nd candidate) |
+Braddock Gaskill |
+Tom Brinkman |
+January 5, 2009 - January 20, 2009 |
+Rejected |
+
Constrained Value |
+Robert Kawulak |
+Jeff Garland Gordon Woodhull |
+December 1, 2008 - December 10, 2008 |
+Accepted Orphaned |
+
Review Wizard Status Report |
+- |
+John Phillips |
+November 25, 2008 |
++ |
Globally Unique Identifier (mini-review) (now UUID) |
+Andy Tompkins |
+Hartmut Kaiser |
+November 23, 2008 - November 29, 2008 |
+Accepted — Added in 1.42 |
+
Boost 1.37 Released |
+- |
+Beman Dawes |
+November 3, 2008 |
++ |
Thread-Safe Signals (now Signals2) |
+Frank Hess |
+Stjepan Rajko |
+November 1, 2008 - November 10, 2008 |
+Accepted — Added in 1.39 |
+
Phoenix |
+Joel de Guzman |
+Hartmut Kaiser |
+September 21, 2008 - September 30, 2008 |
+Accepted Conditionally Under Development Accepted — Added in 1.47.0 |
+
DataFlow Signals |
+Stjepan Rajko |
+Jaakko Järvi |
+September 1, 2008 - September 10, 2008 |
++ |
Boost 1.36 Released |
+- |
+Beman Dawes |
+August 14, 2008 |
++ |
Finite State Machines |
+Andrey Semashev |
+Martin Vuille |
+August 11, 2008 - August 27, 2008 |
++ |
Review Wizard Status Report |
+- |
+John Phillips |
+2008 May 16 |
++ |
Egg |
+Shunsuke Sogame |
+Dan Marsden |
+March 31, 2008 - April 13, 2008 |
++ |
Boost 1.35 Released |
+- |
+Beman Dawes |
+March 29, 2008 |
++ |
Proto |
+Eric Niebler |
+Hartmut Kaiser |
+March 1, 2008 - March 14, 2008 |
+Accepted — Added in 1.37 |
+
Floating Point Utilities |
+Johan Råde |
+John Maddock |
+February 18, 2008 - February 27, 2008 |
+Accepted — Added in 1.?? |
+
Logging |
+John Torjo |
+Gennadiy Rozental |
+February 4, 2008 - February 13, 2008 |
++ |
Flyweight |
+Joaquín Mª López Muñoz |
+Ion Gaztañaga |
+January 21, 2008 - January 30, 2008 |
+Accepted — Added in 1.38 |
+
Singleton (fast-track) |
+Tobias Schwinger |
+John Torjo |
+January 14, 2008 - January 18, 2008 |
++ |
Switch |
+Steven Watanabe |
+Stjepan Rajko |
+January 5, 2008 - January 13, 2008 |
+Accepted Provisionally Orphaned |
+
Factory (fast-track) |
+Tobias Schwinger |
+John Torjo |
+December 17, 2007 - December 21, 2007 |
+Accepted — Added in 1.43 |
+
Unordered Containers |
+Daniel James |
+Ion Gaztañaga |
+December 7, 2007 - December 16, 2007 |
+Accepted — Added in 1.36 |
+
Forward (fast-track) |
+Tobias Schwinger |
+John Torjo |
+December 3, 2007 - December 7, 2007 |
+Accepted — Added in 1.43 |
+
Review Wizard Status Report |
+- |
+Ronald Garcia |
+2007 November 16 |
++ |
Exception |
+Emil Dotchevski |
+Tobias Schwinger |
+September 27, 2007 - October 7, 2007 |
+Accepted — Added in 1.36 |
+
Review Wizard Status Report |
+- |
+Ronald Garcia |
+2007 September 14 |
++ |
Scope Exit |
+Alexander Nasonov |
+Jody Hagins John R. Phillips |
+August 13, 2007 - August 22, 2007 |
+Accepted — Added in 1.38 |
+
Time Series |
+Eric Niebler |
+John R. Phillips |
+July 30, 2007 - August 13, 2007 |
++ |
Boost 1.34.1 Released |
+- |
+Thomas Witt |
+July 24, 2007 |
++ |
Boost 1.34.0 Released |
+- |
+Thomas Witt |
+May 12, 2007 |
++ |
Globally Unique Identifier |
+Andy Tompkins |
+Hartmut Kaiser |
+April 30, 2007 - May 10, 2007 |
+Accepted Provisionally Accepted — Added in 1.42 |
+
Math Toolkit |
+John Maddock |
+Matthias Schabel |
+April 11, 2007 - April 27, 2007 |
+Accepted — Added in 1.35 |
+
Quantitative Units |
+Matthias Schabel |
+John R. Phillips |
+March 26, 2007 - April 4, 2007 |
+Accepted — Added in 1.36 |
+
Intrusive Containers |
+Ion Gaztañaga |
+Joaquín Mª López Muñoz |
+March 12, 2007 - March 21, 2007 |
+Accepted — Added in 1.35 |
+
Bimap |
+Matias Capeletto |
+Ion Gaztañaga |
+February 15 2007- March 2, 2007 |
+Accepted — Added in 1.35 |
+
Accumulators |
+Eric Niebler |
+John R. Phillips |
+January 29, 2007 - February 7, 2007 |
+Accepted — Added in 1.36 |
+
Function Types (Re-review) |
+Tobias Schwinger |
+Tom Brinkman |
+2006 November 6 - 2006 November 17 |
+Accepted — Added in 1.35 |
+
Generic Image Library |
+Lubomir Bourdev |
+Tom Brinkman |
+2006 October 5 - 2006 October 25 |
+Accepted — Added in 1.35 |
+
Message Passing |
+Doug Gregor |
+Jeremy Siek |
+2006 September 6 - 2006 September 15 |
+Accepted — Added in 1.35 |
+
Physical Quantities System |
+Andy Little |
+Fred Bertsch |
+2006 May 31 - 2006 June 9 |
++ |
Pimpl Pointer |
+Asger Mangaard |
+Rene Rivera |
+2006 May 15 - 2006 May 24 |
++ |
Fusion |
+Joel de Guzman |
+Ronald Garcia |
+2006 May 1 - 2006 May 10 |
+Accepted — Added in 1.35 |
+
Property Tree |
+Marcin Kalicinski |
+Thorsten Ottosen |
+2006 April 18 - 2006 April 30 |
+Accepted — Added in 1.41 |
+
Promotion Traits (fast-track) |
+Alexander Nasonov |
+Tobias Schwinger |
+2006 April 1 - 2006 April 9 |
+Accepted — Added in 1.35 |
+
Review Wizard Status Report |
+- |
+Tom Brinkman |
+2006 March 30 |
++ |
Shmem (now Interprocess) |
+Ion Gaztañaga |
+Fred Bertsch |
+2006 February 6 - 2006 February 15 |
+Accepted — Added in 1.35 |
+
Fixed Strings |
+Reece Dunn |
+Hartmut Kaiser |
+2006 January 19 - 2006 February 5 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+2006 January 19 |
++ |
asio |
+Christopher Kohlhoff |
+Jeff Garland |
+2005 December 10 - 2005 December 30 |
+Accepted — Added in 1.35 |
+
Boost 1.33.1 Released |
+- |
+Doug Gregor |
+2005 December 5 |
++ |
Review Wizard Status Report |
+- |
+Ronald Garcia |
+2005 December 1 |
++ |
Logging Library |
+John Torjo |
+Hartmut Kaiser |
+2005 November 7 - 2005 November 16th |
++ |
Boost 1.33.1 Beta Released |
+- |
+Doug Gregor |
+2005 November 9 |
++ |
binary_int |
+Scott Schurr and Matt Calabrese |
+Pavel Vozenilek |
+2005 October 13 - 2005 October 20 |
+Accepted — Added in 1.37. |
+
TR1 |
+John Maddock |
+Beman Dawes |
+2005 September 24 - 2005 October 5 |
+Accepted — Added in 1.34 |
+
Xpressive |
+Eric Niebler |
+Thomas Witt |
+2005 September 8 - 2005 September 18 |
+Accepted — Added in 1.34 |
+
Boost 1.33.0 Released |
+- |
+Doug Gregor |
+17 August 2005 |
++ |
Function Types |
+Tobias Schwinger |
+John Maddock |
+2005-Jun-6 to 2005-June-16 |
+Accepted Provisionally, Accepted — Added in 1.35 |
+
Typeof |
+Arkadiy Vertleyb and + Peder Holt |
+Andy Little |
+2005 May 20 - 2005 May 30 |
+Accepted — Added in 1.34 |
+
Singleton |
+Jason Hise |
+Pavel Vozenilek |
+2005 May 5 - 2005 May 15 |
++ |
FOREACH Macro |
+Eric Niebler |
+Gennadiy Rozental |
+2005 April 25 - 2005 May 1 |
+Accepted — Added in 1.34 |
+
Hash |
+Daniel James |
+Thorsten Ottosen |
+2005 Mar 21 - 2005 March 12 |
+Accepted — Added in 1.33 |
+
State Chart |
+Andreas Huber |
+Pavel Vozenilek |
+2005 Feb 23 - 2005 March 9 |
+Accepted — Added in 1.34 |
+
Wave |
+Hartmut Kaiser |
+Tom Brinkman |
+2005 Feb 7 - 2005 Feb 20 |
+Accepted — Added in 1.33 |
+
Pointer Containers |
+Thorsten Ottosen |
+Pavol Droba |
+2004 Sept 26 - Oct 5 |
+Accepted — Added in 1.33 |
+
Named Params |
+David Abrahams & + Daniel Wallin |
+Doug Gregor |
+2004 Nov 1 - 2004 Nov 20 |
+Accepted — Added in 1.33 |
+
Output Formatters |
+Reece Dunn |
+John Torjo |
+2004 Sept 11 - Sept 25 |
++ |
Iostreams |
+Jonathan Turkanis |
+Jeff Garland |
+2004 Aug 28 - Sep 11 |
+Accepted — Added in 1.33 |
+
More IO |
+Daryle Walker |
+Tom Brinkman |
+2004 Aug 21 - 28 |
+Rejected |
+
Tribool |
+Douglas Gregor |
+Thomas Witt |
+2004 May 19-29 |
+Accepted — Added in 1.32 |
+
Assignment |
+Thorsten Ottosen |
+Tom Brinkman |
+2004 Apr 1 - 11 |
+Accepted — Added in 1.32 |
+
Serialization (re-review) |
+Robert Ramey |
+Jeff Garland |
+2004 Apr 13 - 26 |
+Accepted — Added in 1.32 |
+
Container Traits (now Range) |
+Thorsten Ottosen |
+Hartmut Kaiser |
+2004 Apr 28 - May 7 |
+Accepted — Added in 1.32 |
+
Indexed Set (now MultiIndex) |
+Joaquín Mª López Muñoz |
+Pavel Vozenilek |
+2004 Mar 20 - 30 |
+Accepted — Added in 1.32 |
+
Circular Buffer |
+Jan Gaspar |
+Pavel Vozenilek |
+2004 Mar 5 - 15 |
+Accepted — Added in 1.35 |
+
enable_if |
+Jaakko Järvi & Jeremiah Willcock & Andrew Lumsdaine |
+(fasttrack) |
+Dec 2003 |
+Accepted — added in 1.31 |
+
FC++ |
+Brian McNamara & Yannis Smaragdakis |
+Mat Marcus |
+2004 Feb 14 - Mar 1 |
+Rejected |
+
Numeric Conversions Library |
+Fernando Cacciola |
+Thorsten Ottosen |
+8 - 22 Dec 2003 |
+Accepted — added in 1.32 |
+
String Algorithm Library |
+Pavol Droba |
+Thorsten Ottosen |
+17 - 30 Oct 2003 |
+Accepted — added in 1.32 |
+
Shifted Pointer |
+Philippe A. Bouchard |
+Doug Gregor |
+24 - 30 Sep 2003 |
+Rejected |
+
Fixed-Point Decimal |
+Bill Seymour |
+Jens Maurer |
+11 - 21 Jul 2003 |
+Rejected |
+
Math Constants |
+Paul A. Bristow |
+Jaap Suter |
+06 - 15 Jun 2003 |
+Rejected |
+
Command Line & Config |
+Vladimir Prus |
+Aleksey Gurtovoy |
+21 May - 03 Jun 2003 |
+Accepted — added in 1.32 |
+
I/O Manipulators and Adaptors |
+Daryle Walker |
+Ed Brey |
+27 Feb - 11 Mar 2003 |
+- |
+
Variant |
+Eric Friedman & Itay Maman |
+Jeff Garland |
+16 - 25 Feb 2003 |
+Accepted — added in 1.31 |
+
Optional |
+Fernando Cacciola |
+Douglas Gregor |
+09 - 18 Dec 2002 |
+Accepted — added in 1.30 |
+
Serialization |
+Robert Ramey |
+Dave Abrahams |
+02 - 11 Nov 2002 |
+Rejected |
+
Spirit |
+Joel de Guzman |
+John Maddock |
+11 - 20 Oct 2002 |
+Accepted — added in 1.30 |
+
Minmax |
+Hervé Bronnimann |
+Thomas Witt |
+28 Sep - 07 Oct 2002 |
+Accepted — added in 1.32 |
+
Filesystem |
+Beman Dawes |
+William Kempf |
+14 - 23 Sep 2002 |
+Accepted — added in 1.30 |
+
Interval Arithmetic Library |
+Hervé Bronnimann & Guillaume Melquiond & Sylvain Pion |
+Beman Dawes |
+31 Aug - 09 Sep 2002 |
+Accepted — added in 1.30 |
+
Template Meta Programming Library MPL |
+Aleksey Gurtovoy |
+Douglas Gregor |
+15 - 29 Jul 2002 |
+Accepted — added in 1.30 |
+
uBLAS |
+Joerg Walter & Mathias Koch |
+Ed Brey |
+21 Jun - 01 Jul 2002 |
+Accepted — added in 1.29 |
+
Dynamic Bitset |
+Chuck Alison & Jeremy Siek |
+Mat Marcus |
+08 - 17 Jun 2002 |
+Accepted — added in 1.29 |
+
Date / Time |
+Jeff Garland |
+Darin Adler |
+15 - 24 Apr 2002 |
+Accepted — added in 1.29 |
+
Lambda |
+Jaakko Järvi & Gary Powell |
+Aleksey Gurtovoy |
+08 - 20 Mar 2002 |
+Accepted and added |
+
Signals |
+Douglas Gregor |
+William Kempf |
+18 - 27 Feb 2002 |
+Accepted — added in 1.29 |
+
I/O State Saver |
+Daryle Walker |
+Beman Dawes |
+06 - 16 Feb 2002 |
+Accepted and added |
+
printf-like formatting for iostreams |
+Samuel Krempp |
+Jens Maurer |
+13 - 23 Jan 2002 |
+Accepted — added in 1.29 |
+
Multi-array |
+Ron Garcia |
+John Maddock |
+02 - 12 Jan 2002 |
+Accepted — added in 1.29 |
+
Unit Test Library |
+Gennadiy Rozental |
+Jeremy Siek |
+01 - 13 Dec 2001 |
+Accepted and added |
+
GCD Library plus integer additions |
+Daryle Walker |
+Dave Abrahams |
+17 - 26 Sep 2001 |
+- |
+
Thread Library |
+Bill Kempf |
+Ed Brey |
+Aug 30 - Sep 8 |
+Accepted and added |
+
Config System |
+John Maddock |
+Doug Gregor |
+Aug 20 - 29 |
+Accepted and added |
+
Bind Library |
+Peter Dimov |
+Darin Adler |
+Aug 10 - 19 |
+Accepted and added |
+
Base from Member Library |
+Daryle Walker |
+Beman Dawes |
+Jul 30 - Aug 9 |
+- |
+
Coding Guidelines |
+Dave Abrahams |
+Aleksey Gurtovoy |
+Jul 20 - 29 |
+- |
+
Preprocessor Library |
+Vesa Karvonen |
+Jeremy Siek |
+Jun 28 - Jul 9 |
+Accepted and added |
+
Tuples Library |
+Jaakko Järvi |
+Beman Dawes |
+Jun 17 - 26 |
+Accepted and added |
+
Function Library |
+Doug Gregor |
+John Maddock |
+Jun 6 - 16 |
+Accepted and added |
+
Tokenizer |
+John Bandela |
+Doug Gregor |
+May 28 - Jun 6 |
+Accepted and added |
+
Special Functions |
+Hubert Holin |
+Jens Maurer |
+May 18 - 27 |
+Accepted and added |
+
This page describes the process library developers go through to get a library accepted into Boost.
+Steps for Getting a Library Accepted by Boost
+Follow posts on the Boost developers' mailing list for a while, or look through the message archives. Explore this website. Learn the Library Requirements. Read the rest of this page to learn about the process. Search the web to get an idea of the commitment required to get a library into Boost.
+There is a culture associated with Boost, aimed at encouraging high quality libraries by a process of discussion and refinement. Some libraries get past community review in less than two years from first concept, but most take longer, sometimes a lot longer. Five to ten years to get a library past review and into Boost is not unheard of, and you should prepare yourself for the personal investment required.
+While participation in reviews for other submissions is not a prerequisite for submitting a library to Boost, it is highly recommended; it will acquaint you with the process and the emotional demands of a formal review. There’s nothing that quite deflates the ego like having brilliant members of the C++ community critiquing your work, but, alas, it’s worth it!
+Potential library submitters should be careful to research the prior art before beginning to design a new library. Unfortunately, now and then folks arrive at Boost with a new library into which they have invested many hours, only to find that Boost already has that functionality, and sometimes has had it for years. Candidates should also research libraries being developed by others intended for Boost - if you have an itch to scratch, often so have had others and collaboration developing their library is usually considerably more efficient than going at it alone.
+Potential library submitters should also be careful to publicize, canvass for, and gauge interest in their library, ideally before beginning it, but certainly before submitting it for review. Even a superbly designed library can fail review if there isn’t enough interest in the subject matter; we can only review libraries with enough appeal to form a viable peer review. Ensuring that enough people are interested in your potential library goes a long way to ensure that.
+There are many places to publicize and canvass for a library. The Boost developers' mailing list ought to be your first stop in gauging interest in a possible new C++ library. Be prepared to pivot your design and focus until your proposed library finds traction. Other places useful for gauging interest in a library might be Reddit/r/cpp.
+A message to the Boost developers mailing list might be as simple as "Is there any interest in a library which solves Traveling Salesperson problems in linear time?"
+A bit of further description or snippet of code may be helpful. By the way, the preferred format for messages on the mailing list is plain text; not rich text, HTML, etc.
+Avoid posting lengthy descriptions, documentation, or code to the mailing list, and, please, no attachments. The best place to provide lengthy material is via a web link. Project hosting services such as sourceforge, github, google code, and bitbucket serve well for this purpose.
+If response to an initial query indicates interest, then by all means make your library publicly available if you haven’t already done so.
+Please commit your code to a version control system such as Git, and make your documentation available in HTML format on a public website such as Github. An issue tracker such as the one provided by Github is also highly recommended.
+Your library should contain material as if it were on the boost.org web site. The closer your library reflects the final directory structure and format of the web site, the better. This makes it possible for reviewers to simply copy your code into the Boost distribution for testing.
+Please verify that your library compiles and runs under at least two compilers. This flushes out obvious portability problems.
+It is recommended that you release your code under the Boost Software License.
+Discuss, refine, rewrite. Repeat until satisfied.
+The exact details of this process vary a lot. Usually it is public, on the mailing list, but frequently discussion happens in private emails. For some libraries the process is over quickly, but for others it goes on for months. It’s often challenging, and sometimes veers in completely unexpected directions.
+The mailing list archives of past messages are one way to see how this process worked for other Boost libraries.
+Alternatively, follow the status links in the previously submitted libraries listed in Past Review Results and Milestones.
+When you feel that your library is ready for entry into Boost, you need to find at least one member (but preferably several) of the Boost community who is willing to publicly endorse your library for entry into Boost. A simple method of achieving this is to post to the Boost developers' mailing list a short description of your library, links to its github and documentation, and a request for endorsements.
+It is expected that those who endorse a library for review will have performed at least a cursory check of the library’s suitability for Boost in terms of documentation, fit with the rest of Boost and usefulness. A public endorsement of a library for review means that from an initial glance, they believe that the library has a reasonable chance to be accepted during a formal review. The expectation is that these endorsers will themselves review the library during the formal review period, though this is not binding.
+Once you have a list of people who have publicly endorsed your library for review, email the Boost developers' mailing list to request that your library be added to the Current Schedule where the following information will be shown:
+Submission (Library name)
+Submitter (author or authors)
+Review Manager
+Review Dates (start and end dates of the review period)
+Links to the status of the review, given as announcements.
+In order to schedule a formal review, the author must find a capable volunteer to manage the review. This should be someone with knowledge of the library domain, and experience with the review process. See Managing Reviews for the responsibilities of the review manager.
+Authors can find community members interested in managing reviews through discussion of the library on the developer list. If no one steps forward to volunteer to manage the review, it is appropriate to contact an experienced Boost member who showed interest in the library. Be considerate that managing a review is a serious commitment; for this reason, it’s better to contact the member off-list.
+If you cannot find a review manager after three weeks using the means above, and your submission is targeting eventual standardization, there is a list of Boost regulars who are also WG21 committee members who have volunteered to act as review managers in such cases. Try them in the order listed. They are: Zach Laine, Michael Caisse, Matt Calabrese, Edward Diener, Louis Dionne, Vinnie Falco, Glen Fernandes, and David Sankel.
+Once a potential review manager has been identified, contact the Review Wizards for approval. The wizards approve review managers based on their level of participation in the Boost community.
+The review wizards will coordinate with both the author and review manager to schedule a date convenient for both.
+Before your formal review begins, double-, triple-, and quadruple-check your library. Verify that every code example works, that all unit tests pass on at least two compilers on at least two major operating systems, and run your documentation through a spelling and grammar checker.
+Please do not modify your library on its master branch during a review. Instead, modify a separate develop branch in response to feedback and reviews. For bigger ticket items of work, open issues on your issue tracker so interested people can track the fixing of specific issues raised.
+The review manager will consider all the reviews made by members of the community and arrive at a decision on whether your library is rejected, conditionally accepted or unconditionally accepted. They will post a report summarizing the decision publicly. If conditions are attached to acceptance, you will need to implement those conditions or else undergo an additional formal review.
+To qualify for a fast track review:
+The component must be small.
+The technique must be already in use in Boost libraries and the new component provides a common implementation.
+A full Boost-conformant implementation is available in the sandbox.
+The review wizard determines that the proposal qualifies for fast track review.
+The Boost review wizard posts a review announcement to the main Boost developer’s list. The fast track review period will normally last for 5 days. No two fast-track reviews will run in parallel. Fast track reviews may run during full reviews, though generally, this is to be avoided.
+After the review period ends, the submitter will post a review summary containing proposed changes to the reviewed implementation.
+The review wizard will accept or reject the proposed library and proposed changes.
+After applying the proposed changes, the component is checked into the repository like any other library.
+It is possible that in the review process some issues might need to be fixed as a requirement for acceptance. If a review does result in conditions on acceptance, the review manager may request a Mini-Review, at a later date, to determine if the conditions have been met. The Mini-Review is usually conducted by the same review manager.
+Once an accepted library is ready for inclusion on the Boost web site, the submitter is typically given Boost repository write access, and expected to check-in and maintain the library there. Contact the moderators if you need write access or direct use of the repository isn’t possible for you.
+If the boost.org web site doesn’t already have your capsule biography and picture (optional, with not-too-serious pictures preferred!), please send them to the Boost webmaster. It is up to you as to whether or not the biography includes your email address or other contact information. The preferred picture format is .jpg, but other common formats are acceptable. The preferred image size is 500x375 but the webmaster has photo editing software and can do the image preparation if necessary.
+Libraries are software; they lose their value over time if not maintained. Postings on the Boost developers or users mailing lists can alert you to potential maintenance needs; please plan to maintain your library over time. If you no longer can or wish to maintain your library, please post a message on the Boost developers mailing list asking for a new maintainer to volunteer and then spend the time to help them take over.
+Orphaned libraries will be put in the care of a maintenance team, pending a search for a new maintainer.
+By submitting a library to Boost, you accept responsibility for maintaining your library, or finding a qualified volunteer to serve as maintainer. You must be willing to put your library and documentation under a Boost-compatible license.
+You will be expected to respond to reasonable bug reports and questions on time and to participate as needed in discussions of your library on the Boost mailing lists.
+You are free to change your library in any way you wish, and you are encouraged to actively make improvements. However, peer review is an important part of the Boost process and as such you are also encouraged to get feedback from the Boost community before making substantial changes to the interface of an accepted library.
+If at some point you no longer wish to serve as maintainer of your library, it is your responsibility to make this known to the Boost community, and to find another individual to take your place.
+Libraries which have been abandoned will be put in care of a maintenance team.
+The goal of a Boost library review is to improve a candidate library through constructive criticism. At the end a decision must be made: is the candidate library good enough at this point to accept into Boost? If not, we hope to have provided enough constructive criticism for it to be improved and accepted at a later time. The Boost.Serialization library is a good example of a library that was originally rejected, but constructive criticism led to revisions resulting in an excellent library that was accepted in its second review.
+Your comments may be brief or lengthy. The review manager needs your evaluation of the library. If you identify problems during your evaluation, try to categorize them as minor, serious, or showstoppers.
+Ask questions of the library authors, on the Boost Developers Mailing List, so all interested parties can see them. Authors are interested in defending their library. If you don’t get a response to your question quickly, be patient; if it takes too long or you don’t get an answer you feel is sufficient, ask again or try to rephrase the question. Clarity is important, as English is not the native language of many Boost developers. Try to get your questions answered before you submit your review.
+Here’s a structured approach to writing a review of a library, covering all the bases:
+Introduce yourself, and your knowledge of the problem domain. Remember that input from a newcomer to a domain is valuable. Briefly introduce the library and its purpose as you see it.
+Describe the scope of your review, such as a focus on any particular area of the API, or any particular type of problem within the domain. Be clear on the level of commitment you made to the evaluation (a quick reading, in-depth testing of the API or a subset of it, focus on any particular area such as documentation, tutorials, portability, or similar).
+Evaluate the design principles and architectural decisions of the library. Discuss the modularity, extensibility, and flexibility of the library’s design. Assess how well the design aligns with Boost’s guidelines and best practices.
+Provide an overview of the key functionality and features offered by the library. Evaluate the completeness and robustness of the feature set. Discuss any unique or innovative features that differentiate this library from existing alternatives.
+Evaluate the usability of the library from your developer’s perspective. Discuss the ease of integration, API usability, and critically the learning curve.
+If you wrote some test code, which is highly recommended, assess the performance characteristics of the library, including not only runtime efficiency but resource usage. Include code snippets from your test code in your evaluation, in particular highlighting the code you wrote to prepare for an API call into the library, and the code following the call to handle the result/data/errors appropriately.
+If possible, benchmark the library against similar solutions or existing libraries in terms of speed, memory footprint, and scalability.
+Assess the reliability and stability of the library, including error handling and corner case handling. Discuss any known issues, limitations, or areas for improvement in terms of reliability. It is key to address the clarity and usefulness of error messages, and how they helped or hindered you in locating any particular problem.
+If you are able, evaluate the portability of the library across different platforms, compilers, and environments. Give precise details on the tools or environment you used. Assess the library’s compatibility with relevant C++ standards and other Boost libraries. Discuss any platform-specific issues, considerations or dependencies that you encountered.
+Assess the quality and comprehensiveness of the documentation, including tutorials, examples, and API reference materials. Include comments on the ease of navigation within the documentation, the structure of the documentation, and how easy or difficult it was to find what you were looking for.
+Briefly summarize the strengths and weaknesses of the candidate library. Critically, provide a recommendation regarding the suitability of the library for inclusion in Boost. If the library falls short in some way, offer suggestions for improvements or areas of focus that would bring the library up to the standard for a positive recommendation for inclusion.
+If it’s helpful, cut and paste the following template into your email text when you have completed your review:
+1. Introduce yourself
+* Are you knowledgeable about the problem domain?
+* How much effort did you put into your evaluation? A glance? A quick reading? In-depth study?
+* Do you have an affiliation to disclose?
+
+2. What is your evaluation of the design and architecture?
+* What is your evaluation of the potential usefulness of the library?
+* Is the feature set, or subset, of the library available in another Boost or Standard library?
+
+3. What is your evaluation of the implementation?
+* Did you try to use the library? With what compiler? Did you have any problems (minor, serious, showstoppers)?
+* Can you highlight any performance issues?
+* Any feedback on testing and reliability?
+* Did you investigate portability and compatibility?
+
+4. What is your evaluation of the documentation?
+* Any thoughts on the introduction, tutorials, API reference, completeness, helpfulness?
+
+5. My conclusion
+* How do you think the community will react?
+* What is your verdict: Yes, No, or Yes with changes?
+From experience, there are a number of common pitfalls when writing reviews and engaging in review feedback discussions, so keep the following in mind:
+Literally, keep on the same page. Submissions, discussions, and comments should always be on the Boost Developers Mailing List and not on Slack, Reddit, in personal email, or any other media.
+Elaborate in full. If you have serious technical feedback on a library, then explain yourself in detail. Try to avoid "teaser" type comments where other developers might feel you are onto something, but they are just not sure what.
+Reviews need to be self-contained. A review is not a starting point for a discussion. Nobody is obligated to ask you clarifying questions, and there should be no missing parts that you fill in later in subsequent posts. This means that if you have questions about the library that you feel need to be answered by the author or review manager, you should ask these questions before you submit your review.
+Do not expect complete agreement. Too much compromise and consensus in engineering endeavors leads to poorer design.
+Refer to the Reality Check section in the topic on Review Managers for clarity on the role of your review.
+Capy abstracts away sockets, files, and asynchrony with type-erased streams and buffer sequences—code compiles fast because the implementation is hidden. It provides the framework for concurrent algorithms that transact in buffers of memory: networking, serial ports, console, timers, and any platform I/O. This is only possible because Capy is coroutine-only, enabling optimizations and ergonomics that hybrid approaches must sacrifice.
-Lazy coroutine tasks — task<T> with forward-propagating stop tokens and automatic cancellation
Buffer sequences — taken straight from Asio and improved
-Stream concepts — ReadStream, WriteStream, ReadSource, WriteSink, BufferSource, BufferSink
Type-erased streams — any_stream, any_read_stream, any_write_stream for fast compilation
Concurrency facilities — executors, strands, thread pools, when_all, when_any
Test utilities — mock streams, mock sources/sinks, error injection
-Networking — no sockets, acceptors, or DNS; that’s what Corosio provides
-Protocols — no HTTP, WebSocket, or TLS; see the Http and Beast2 libraries
-Platform event loops — no io_uring, IOCP, epoll, or kqueue; Capy is the layer above
-Callbacks or futures — coroutine-only means no other continuation styles
-Sender/receiver — Capy uses the IoAwaitable protocol, not std::execution
Users of Corosio — portable coroutine networking
-Users of Http — sans-I/O HTTP/1.1 clients and servers
-Users of Websocket — sans-I/O WebSocket
-Users of Beast2 — high-level HTTP/WebSocket servers
-Users of Burl — high-level HTTP client
-All of these are built on Capy. Understanding its concepts—tasks, buffer sequences, streams, executors—unlocks the full power of the stack.
-Use case first. Buffer sequences, stream concepts, executor affinity—these exist because I/O code needs them, not because they’re theoretically elegant.
-Coroutines-only. No callbacks, futures, or sender/receiver. Hybrid support forces compromises; full commitment unlocks optimizations that adapted models cannot achieve.
-Address the complaints of C++. Type erasure at boundaries, minimal dependencies, and hidden implementations keep builds fast and templates manageable.
-| - - | -
-
-
-Unless otherwise specified, all code examples in this documentation assume the following: -
-
-
-
-
- |
-
This example demonstrates a minimal coroutine that reads from a stream and echoes the data back:
-#include <boost/capy.hpp>
-
-using namespace boost::capy;
-
-task<> echo(any_stream& stream)
-{
- char buf[1024];
- for(;;)
- {
- auto [ec, n] = co_await stream.read_some(mutable_buffer(buf));
- if(ec.failed())
- co_return;
- auto [wec, wn] = co_await write(stream, const_buffer(buf, n));
- if(wec.failed())
- co_return;
}
-}
+ html.light {
+ --bg: #f9fafb; --fg: #1a1a1a; --muted: #6b7280;
+ --link: #1a6fb5; --border: #e5e7eb; --card-bg: #ffffff;
+ }
+ html.dark {
+ --bg: #111214; --fg: #e5e7eb; --muted: #9ca3af;
+ --link: #60a5fa; --border: #2a2d32; --card-bg: #1a1c20;
+ }
+ * { margin: 0; padding: 0; box-sizing: border-box; }
+ body {
+ font-family: system-ui, -apple-system, sans-serif;
+ background: var(--bg); color: var(--fg);
+ max-width: 640px; margin: 0 auto;
+ padding: 3rem 1.5rem;
+ }
+ header { display: flex; justify-content: space-between; align-items: baseline; margin-bottom: 0.5rem; }
+ h1 { font-size: 1.375rem; font-weight: 600; }
+ .theme-btn {
+ background: none; border: none; cursor: pointer;
+ padding: 0.25rem; color: var(--muted); display: flex; align-items: center;
+ }
+ .theme-btn:hover { color: var(--fg); }
+ .theme-btn svg { width: 18px; height: 18px; fill: currentColor; }
+ .icon-sun { display: none; }
+ .icon-moon { display: block; }
+ html.dark .icon-sun { display: block; }
+ html.dark .icon-moon { display: none; }
+ @media (prefers-color-scheme: dark) {
+ .icon-sun { display: block; }
+ .icon-moon { display: none; }
+ }
+ html.light .icon-sun { display: none; }
+ html.light .icon-moon { display: block; }
+ .subtitle { color: var(--muted); margin-bottom: 2.5rem; font-size: 0.9375rem; }
+ h2 { font-size: 0.75rem; font-weight: 600; text-transform: uppercase; letter-spacing: 0.05em; color: var(--muted); margin-bottom: 0.5rem; margin-top: 1.75rem; }
+ .links { display: flex; flex-direction: column; gap: 0.25rem; margin-bottom: 0.5rem; }
+ .link-row { display: flex; align-items: baseline; gap: 0.5rem; padding: 0.375rem 0; }
+ a { color: var(--link); text-decoration: none; }
+ a:hover { text-decoration: underline; }
+ .tag { font-size: 0.75rem; color: var(--muted); }
+ .desc { font-size: 0.8125rem; color: var(--muted); }
+ hr { border: none; border-top: 1px solid var(--border); margin: 2rem 0; }
+ footer { font-size: 0.8125rem; color: var(--muted); }
+ footer a { color: var(--muted); }
+
+
+
+
+ Boostlook v3
+
+
+ Preview builds for the new Boost C++ Libraries site design, in collaboration with MetaLab.
-int main()
-{
- thread_pool pool;
- // In a real application, you would obtain a stream from Corosio
- // and call: run_async(pool.get_executor())(echo(stream));
- return 0;
-}
-The echo function accepts an any_stream&—a type-erased wrapper that works with any concrete stream implementation. The function reads data into a buffer, then writes it back. Both operations use co_await to suspend until the I/O completes.
The task<> return type (equivalent to task<void>) creates a lazy coroutine that does not start executing until awaited or launched with run_async.
Quick Start — Set up your first Capy project
-C++20 Coroutines Tutorial — Learn coroutines from the ground up
-Concurrency Tutorial — Understand threads, mutexes, and synchronization
-Coroutines in Capy — Deep dive into task<T> and the IoAwaitable protocol
Buffer Sequences — Master the concept-driven buffer model
-Stream Concepts — Understand the six stream concepts
-