2
0
mirror of https://github.com/boostorg/math.git synced 2026-02-24 16:12:15 +00:00

Merge branch 'boostorg:develop' into non-central-f-inverse

This commit is contained in:
Jacob Hass
2026-02-12 10:32:10 -08:00
committed by GitHub
556 changed files with 24983 additions and 2722 deletions

View File

@@ -1368,6 +1368,13 @@ test-suite test_reverse_mode_autodiff
[ run test_reverse_mode_autodiff_basic_math_ops.cpp /boost/test//boost_unit_test_framework : : : <toolset>gcc-mingw:<cxxflags>-Wa,-mbig-obj <debug-symbols>off <toolset>msvc:<cxxflags>/bigobj release ]
[ run test_reverse_mode_autodiff_error_functions.cpp /boost/test//boost_unit_test_framework : : : <toolset>gcc-mingw:<cxxflags>-Wa,-mbig-obj <debug-symbols>off <toolset>msvc:<cxxflags>/bigobj release ]
;
test-suite gradient_based_optimizers
:
[ run test_gradient_descent_optimizer.cpp /boost/test//boost_unit_test_framework : : : <toolset>gcc-mingw:<cxxflags>-Wa,-mbig-obj <debug-symbols>off <toolset>msvc:<cxxflags>/bigobj release ]
[ run test_nesterov_optimizer.cpp /boost/test//boost_unit_test_framework : : : <toolset>gcc-mingw:<cxxflags>-Wa,-mbig-obj <debug-symbols>off <toolset>msvc:<cxxflags>/bigobj release ]
[ run test_lbfgs.cpp /boost/test//boost_unit_test_framework : : : <toolset>gcc-mingw:<cxxflags>-Wa,-mbig-obj <debug-symbols>off <toolset>msvc:<cxxflags>/bigobj release ]
;
# BEGIN AUTODIFF LONG RUNNING TESTS
test-suite autodiff-long-running-tests
:
@@ -2355,6 +2362,6 @@ explicit no_eh_tests ;
# Some aliases which group blocks of tests for CI testing:
alias github_ci_block_1 : special_fun float128_tests distribution_tests mp misc concepts ;
alias github_ci_block_2 : quadrature interpolators autodiff test_reverse_mode_autodiff ../example//examples ../tools ;
alias github_ci_block_2 : quadrature interpolators autodiff test_reverse_mode_autodiff gradient_based_optimizers ../example//examples ../tools ;
explicit github_ci_block_1 ;
explicit github_ci_block_2 ;

View File

@@ -264,6 +264,7 @@ void instantiate(RealType)
boost::math::gamma_p(v1, v2);
boost::math::gamma_q(v1, v2);
boost::math::lgamma_q(v1, v2);
boost::math::lgamma_p(v1, v2);
boost::math::gamma_p_inv(v1, v2);
boost::math::gamma_q_inv(v1, v2);
boost::math::gamma_p_inva(v1, v2);
@@ -544,6 +545,7 @@ void instantiate(RealType)
boost::math::gamma_p(v1 * 1, v2 + 0);
boost::math::gamma_q(v1 * 1, v2 + 0);
boost::math::lgamma_q(v1 * 1, v2 + 0);
boost::math::lgamma_p(v1 * 1, v2 + 0);
boost::math::gamma_p_inv(v1 * 1, v2 + 0);
boost::math::gamma_q_inv(v1 * 1, v2 + 0);
boost::math::gamma_p_inva(v1 * 1, v2 + 0);
@@ -796,6 +798,7 @@ void instantiate(RealType)
boost::math::gamma_p(v1, v2, pol);
boost::math::gamma_q(v1, v2, pol);
boost::math::lgamma_q(v1, v2, pol);
boost::math::lgamma_p(v1, v2, pol);
boost::math::gamma_p_inv(v1, v2, pol);
boost::math::gamma_q_inv(v1, v2, pol);
boost::math::gamma_p_inva(v1, v2, pol);
@@ -1074,6 +1077,7 @@ void instantiate(RealType)
test::gamma_p(v1, v2);
test::gamma_q(v1, v2);
test::lgamma_q(v1, v2);
test::lgamma_p(v1, v2);
test::gamma_p_inv(v1, v2);
test::gamma_q_inv(v1, v2);
test::gamma_p_inva(v1, v2);
@@ -1356,6 +1360,7 @@ void instantiate_mixed(RealType)
boost::math::gamma_p(fr, lr);
boost::math::gamma_q(i, s);
boost::math::lgamma_q(i, s);
boost::math::lgamma_p(i, s);
boost::math::gamma_q(fr, lr);
boost::math::gamma_p_inv(i, fr);
boost::math::gamma_q_inv(s, fr);
@@ -1572,6 +1577,7 @@ void instantiate_mixed(RealType)
boost::math::gamma_p(fr, lr, pol);
boost::math::gamma_q(i, s, pol);
boost::math::lgamma_q(i, s, pol);
boost::math::lgamma_p(i, s, pol);
boost::math::gamma_q(fr, lr, pol);
boost::math::gamma_p_inv(i, fr, pol);
boost::math::gamma_q_inv(s, fr, pol);
@@ -1784,8 +1790,10 @@ void instantiate_mixed(RealType)
test::gamma_p(fr, lr);
test::gamma_q(i, s);
test::lgamma_q(i, s);
test::lgamma_p(i, s);
test::gamma_q(fr, lr);
test::lgamma_q(fr, lr);
test::lgamma_p(fr, lr);
test::gamma_p_inv(i, fr);
test::gamma_q_inv(s, fr);
test::gamma_p_inva(i, lr);

View File

@@ -45,6 +45,12 @@ void compile_and_link_test()
check_result<long double>(boost::math::lgamma_q<long double>(l, l));
#endif
check_result<float>(boost::math::lgamma_p<float>(f, f));
check_result<double>(boost::math::lgamma_p<double>(d, d));
#ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
check_result<long double>(boost::math::lgamma_p<long double>(l, l));
#endif
check_result<float>(boost::math::gamma_p_inv<float>(f, f));
check_result<double>(boost::math::gamma_p_inv<double>(d, d));
#ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS

View File

@@ -373,6 +373,8 @@ run test_gamma_p_inv_double.cu ;
run test_gamma_p_inv_float.cu ;
run test_lgamma_q_double.cu ;
run test_lgamma_q_float.cu ;
run test_lgamma_p_double.cu ;
run test_lgamma_p_float.cu ;
run test_log1p_double.cu ;
run test_log1p_float.cu ;

View File

@@ -12,13 +12,35 @@
#include <array>
#include <vector>
/* simple n-d quadratic function */
/* Sum-of-squares objective f(x) = sum_i x_i^2; global minimum at the origin. */
template<typename RealType>
RealType
quadratic(std::vector<RealType>& x)
{
    RealType total{ 0.0 };
    for (typename std::vector<RealType>::size_type i = 0; i < x.size(); ++i) {
        total += x[i] * x[i];
    }
    return total;
}
/* Badly conditioned 2-D quadratic f(x, y) = 1000*x^2 + y^2 (condition number ~1000). */
template<typename RealType>
RealType
quadratic_high_cond_2D(std::vector<RealType>& x)
{
    const RealType a = x[0];
    const RealType b = x[1];
    return 1000 * a * a + b * b;
}
// Taken from: https://en.wikipedia.org/wiki/Test_functions_for_optimization
template <typename Real> Real ackley(std::array<Real, 2> const &v) {
using std::sqrt;
template<typename Real>
Real
ackley(std::array<Real, 2> const& v)
{
using boost::math::constants::e;
using boost::math::constants::two_pi;
using std::cos;
using std::exp;
using boost::math::constants::two_pi;
using boost::math::constants::e;
using std::sqrt;
Real x = v[0];
Real y = v[1];
Real arg1 = -sqrt((x * x + y * y) / 2) / 5;
@@ -26,16 +48,21 @@ template <typename Real> Real ackley(std::array<Real, 2> const &v) {
return -20 * exp(arg1) - exp(arg2 / 2) + 20 + e<Real>();
}
template <typename Real> auto rosenbrock_saddle(std::array<Real, 2> const &v) -> Real {
Real x { v[0] };
Real y { v[1] };
template<typename Real>
auto
rosenbrock_saddle(std::array<Real, 2> const& v) -> Real
{
Real x{ v[0] };
Real y{ v[1] };
return static_cast<Real>(100 * (x * x - y) * (x * x - y) + (1 - x) * (1 - x));
}
template <class Real> Real rastrigin(std::vector<Real> const &v) {
using std::cos;
template<class Real>
Real
rastrigin(std::vector<Real> const& v)
{
using boost::math::constants::two_pi;
using std::cos;
auto A = static_cast<Real>(10);
auto y = static_cast<Real>(10 * v.size());
for (auto x : v) {
@@ -46,7 +73,9 @@ template <class Real> Real rastrigin(std::vector<Real> const &v) {
// Useful for testing return-type != scalar argument type,
// and robustness to NaNs:
double sphere(std::vector<float> const &v) {
double
sphere(std::vector<float> const& v)
{
double r = 0.0;
for (auto x : v) {
double x_ = static_cast<double>(x);
@@ -59,23 +88,27 @@ double sphere(std::vector<float> const &v) {
}
template<typename Real>
Real three_hump_camel(std::array<Real, 2> const & v) {
Real
three_hump_camel(std::array<Real, 2> const& v)
{
Real x = v[0];
Real y = v[1];
auto xsq = x*x;
return 2*xsq - (1 + Real(1)/Real(20))*xsq*xsq + xsq*xsq*xsq/6 + x*y + y*y;
auto xsq = x * x;
return 2 * xsq - (1 + Real(1) / Real(20)) * xsq * xsq + xsq * xsq * xsq / 6 +
x * y + y * y;
}
// Minima occurs at (3, 1/2) with value 0:
template<typename Real>
Real beale(std::array<Real, 2> const & v) {
Real
beale(std::array<Real, 2> const& v)
{
Real x = v[0];
Real y = v[1];
Real t1 = Real(3)/Real(2) -x + x*y;
Real t2 = Real(9)/Real(4) -x + x*y*y;
Real t3 = Real(21)/Real(8) -x + x*y*y*y;
return t1*t1 + t2*t2 + t3*t3;
Real t1 = Real(3) / Real(2) - x + x * y;
Real t2 = Real(9) / Real(4) - x + x * y * y;
Real t3 = Real(21) / Real(8) - x + x * y * y * y;
return t1 * t1 + t2 * t2 + t3 * t3;
}
#endif

View File

@@ -0,0 +1,339 @@
// Copyright Maksym Zhelyenzyakov 2025-2026.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
#include "test_autodiff_reverse.hpp" // reuse for some basic options
#include "test_functions_for_optimization.hpp"

#include <boost/math/differentiation/autodiff_reverse.hpp>
#include <boost/math/optimization/gradient_descent.hpp>
#include <boost/math/optimization/minimizer.hpp>

#include <array>
#include <limits>
#include <vector>
namespace rdiff = boost::math::differentiation::reverse_mode;
namespace bopt = boost::math::optimization;
BOOST_AUTO_TEST_SUITE(basic_gradient_descent)
// Plain fixed-step gradient descent on an n-dimensional quadratic bowl:
// after NITER manual step() calls every coordinate must be near 0.
BOOST_AUTO_TEST_CASE_TEMPLATE(default_gd_test, T, all_float_types)
{
    size_t NITER = 2000;
    size_t N = 15;
    T lr = T{ 1e-2 };
    // Both sampler bounds are written as T now (was a bare int literal).
    RandomSample<T> rng{ T(-100), T(100) };
    std::vector<rdiff::rvar<T, 1>> x_ad;
    T eps = T{ 1e-3 };
    for (size_t i = 0; i < N; ++i) {
        x_ad.push_back(rng.next());
    }
    auto gdopt =
      bopt::make_gradient_descent(&quadratic<rdiff::rvar<T, 1>>, x_ad, lr);
    for (size_t i = 0; i < NITER; ++i) {
        gdopt.step();
    }
    for (auto& x : x_ad) {
        BOOST_REQUIRE_SMALL(x.item(), eps);
    }
}
// Same quadratic objective as default_gd_test, but driven through the
// generic minimize() entry point instead of manual step() calls.
BOOST_AUTO_TEST_CASE_TEMPLATE(test_minimize, T, all_float_types)
{
    size_t N = 15;
    T lr = T{ 1e-2 };
    RandomSample<T> rng{ T(-100), T(100) };
    std::vector<rdiff::rvar<T, 1>> x_ad;
    T eps = T{ 1e-3 };
    for (size_t i = 0; i < N; ++i) {
        x_ad.push_back(rng.next());
    }
    auto gdopt =
      bopt::make_gradient_descent(&quadratic<rdiff::rvar<T, 1>>, x_ad, lr);
    // Only the updated parameters are checked; the result object is not
    // needed (removed the unused NITER variable and unused 'z' binding).
    minimize(gdopt);
    for (auto& x : x_ad) {
        BOOST_REQUIRE_SMALL(x.item(), eps);
    }
}
// The seeded random-uniform initializer must place every starting
// parameter inside the requested [-2, 2] interval.
BOOST_AUTO_TEST_CASE_TEMPLATE(random_initializer_test, T, all_float_types)
{
size_t N = 10;
T lr = T{ 1e-2 };
std::vector<rdiff::rvar<T, 1>> x(N);
auto gdopt =
bopt::make_gradient_descent(&quadratic<rdiff::rvar<T, 1>>,
x,
lr,
bopt::random_uniform_initializer_rvar<T>(
T(-2.0), T(2.0), 1234)); // bounds and fixed seed
for (auto& xi : x) {
T v = xi.item();
BOOST_TEST(v >= -2);
BOOST_TEST(v <= 2);
}
// One step just to confirm the initialized optimizer is usable.
gdopt.step();
}
// The constant initializer must set every parameter to the supplied value.
// NOTE(review): 'costant_initializer_rvar' (sic) appears to be the
// library's actual identifier — confirm whether the spelling is intended.
BOOST_AUTO_TEST_CASE_TEMPLATE(const_initializer_test, T, all_float_types)
{
size_t N = 10;
T lr = T{ 1e-2 };
std::vector<rdiff::rvar<T, 1>> x(N);
auto gdopt = bopt::make_gradient_descent(
&quadratic<rdiff::rvar<T, 1>>,
x,
lr,
bopt::costant_initializer_rvar<T>(T{ 5.0 })); // all initialized to 5
for (auto& xi : x) {
T v = xi.item();
BOOST_REQUIRE_CLOSE(v, T{ 5.0 }, T{ 1e-3 });
}
// One step just to confirm the initialized optimizer is usable.
gdopt.step();
}
// Box-constrained minimization: parameters start at 10, outside the
// [-1, 1] box, and must end up inside it after projection.
BOOST_AUTO_TEST_CASE_TEMPLATE(box_constraint_test, T, all_float_types)
{
    size_t N = 5;
    T lr = T{ 1e-2 };
    std::vector<rdiff::rvar<T, 1>> x(N, T{ 10 });
    auto gdopt =
      bopt::make_gradient_descent(&quadratic<rdiff::rvar<T, 1>>, x, lr);
    // Only the projected parameters are inspected; the result struct is
    // not needed (removed the unused 'res' binding).
    bopt::minimize(
      gdopt, bopt::box_constraints<std::vector<rdiff::rvar<T, 1>>, T>(-1.0, 1.0));
    for (auto& xi : x) {
        BOOST_TEST(xi.item() >= -1.0);
        BOOST_TEST(xi.item() <= 1.0);
    }
}
// With a vanishingly small learning rate the optimizer cannot reach the
// tight gradient tolerance, so the max-iteration termination policy must
// stop it after exactly 'max_iter' iterations without convergence.
BOOST_AUTO_TEST_CASE_TEMPLATE(max_iter_test, T, all_float_types)
{
    T lr = T{ 1e-6 }; // very slow learning
    std::vector<rdiff::rvar<T, 1>> x = { T{ 5 }, T{ 5 } };
    auto gdopt =
      bopt::make_gradient_descent(&quadratic<rdiff::rvar<T, 1>>, x, lr);
    size_t max_iter = 50;
    // (Removed unused 'N'; the parameter count is given by the init list.)
    auto res =
      bopt::minimize(gdopt,
                     bopt::unconstrained_policy<std::vector<rdiff::rvar<T, 1>>>{},
                     bopt::gradient_norm_convergence_policy<T>(T{ 1e-20 }),
                     bopt::max_iter_termination_policy(max_iter));
    BOOST_TEST(!res.converged); // should not converge with tiny lr
    BOOST_REQUIRE_EQUAL(res.num_iter, max_iter);
}
// With history tracking enabled, minimize() must record a non-empty
// objective trace that decreases from first to last entry.
BOOST_AUTO_TEST_CASE_TEMPLATE(history_tracking_test, T, all_float_types)
{
    T lr = T{ 1e-2 };
    std::vector<rdiff::rvar<T, 1>> x = { T{ 3 }, T{ -4 }, T{ 5 } };
    auto gdopt =
      bopt::make_gradient_descent(&quadratic<rdiff::rvar<T, 1>>, x, lr);
    // (Removed unused 'N'; the parameter count is given by the init list.)
    auto res =
      bopt::minimize(gdopt,
                     bopt::unconstrained_policy<std::vector<rdiff::rvar<T, 1>>>{},
                     bopt::gradient_norm_convergence_policy<T>(T{ 1e-6 }),
                     bopt::max_iter_termination_policy(1000),
                     true); // enable history
    BOOST_TEST(!res.objective_history.empty());
    BOOST_TEST(res.objective_history.front() > res.objective_history.back());
}
// Gradient descent on the Rosenbrock function from the classic hard
// starting point (-1.2, 1); must converge to the (1, 1) minimum within
// the BOOST_REQUIRE_CLOSE tolerance.
BOOST_AUTO_TEST_CASE_TEMPLATE(rosenbrock_test, T, all_float_types)
{
std::array<rdiff::rvar<T, 1>, 2> x = { T{ -1.2 }, T{ 1.0 } }; // bad start
T lr = T{ 1e-3 };
auto gdopt =
bopt::make_gradient_descent(&rosenbrock_saddle<rdiff::rvar<T, 1>>, x, lr);
auto res = bopt::minimize(
gdopt,
bopt::unconstrained_policy<std::array<rdiff::rvar<T, 1>, 2>>{},
bopt::gradient_norm_convergence_policy<T>(T{ 1e-4 }),
bopt::max_iter_termination_policy(50000)); // narrow valley needs many iters
BOOST_TEST(res.converged);
BOOST_REQUIRE_CLOSE(x[0].item(), T{ 1.0 }, T{ 1e-1 });
BOOST_REQUIRE_CLOSE(x[1].item(), T{ 1.0 }, T{ 1e-1 });
}
// Absolute objective-tolerance policy: reports convergence once two
// successive objective values differ by less than the tolerance.
BOOST_AUTO_TEST_CASE_TEMPLATE(objective_tol_convergence_test,
T,
all_float_types)
{
using policy_t = bopt::objective_tol_convergence_policy<T>;
policy_t pol(1e-3);
std::vector<T> dummy_grad; // gradient is ignored by this policy
BOOST_TEST(!pol(dummy_grad, T{100.0})); // first sample: nothing to compare
BOOST_TEST(!pol(dummy_grad, T{99.0})); // |99 - 100| = 1 >= 1e-3
BOOST_TEST(pol(dummy_grad, T{99.0005})); // |99.0005 - 99| = 5e-4 < 1e-3
}
// Relative objective-tolerance policy: converged when the change relative
// to the previous objective value drops below the tolerance.
BOOST_AUTO_TEST_CASE_TEMPLATE(relative_objective_tol_test, T, all_float_types)
{
using policy_t = bopt::relative_objective_tol_policy<T>;
policy_t pol(1e-3);
std::vector<T> dummy_grad; // gradient is ignored by this policy
BOOST_TEST(!pol(dummy_grad, T{1000.0})); // first sample: nothing to compare
BOOST_TEST(!pol(dummy_grad, T{1010.0})); // 10/1000 = 1e-2 >= 1e-3
BOOST_TEST(pol(dummy_grad, T{1010.5})); // 0.5/1010 ~ 5e-4 < 1e-3
}
// Combined convergence policy: the sequence below shows it reports
// convergence once either sub-policy is satisfied (third call passes on
// the relative test alone, fourth on the absolute one alone).
BOOST_AUTO_TEST_CASE_TEMPLATE(combined_policy_test, T, all_float_types)
{
using pol_abs = bopt::objective_tol_convergence_policy<T>;
using pol_rel = bopt::relative_objective_tol_policy<T>;
using pol_comb = bopt::combined_convergence_policy<pol_abs, pol_rel>;
pol_abs abs_pol(1e-6);
pol_rel rel_pol(1e-3);
pol_comb comb(abs_pol, rel_pol);
std::vector<T> dummy_grad; // gradient is ignored by objective-based policies
BOOST_TEST(!comb(dummy_grad, T{100.0})); // first sample: nothing to compare
BOOST_TEST(!comb(dummy_grad, T{110.0})); // jump of 10: neither tol met
BOOST_TEST(comb(dummy_grad, T{110.1})); // rel change ~9e-4 < 1e-3
BOOST_TEST(comb(dummy_grad, T{110.1000001})); // abs change 1e-7 < 1e-6
}
// Non-negativity projection: clamps negative entries to zero and leaves
// the remaining entries untouched.
BOOST_AUTO_TEST_CASE_TEMPLATE(nonnegativity_constraint_test, T, all_float_types)
{
std::vector<T> x = {T{1.0}, T{-2.0}, T{3.0}, T{-4.0}};
bopt::nonnegativity_constraint<std::vector<T>, T> proj;
proj(x);
for (auto& xi : x)
BOOST_TEST(xi >= 0.0);
BOOST_TEST(x == std::vector<T>({T{1.0}, T{0.0}, T{3.0}, T{0.0}}));
}
// Projection onto the L2 ball of radius 1: the point (3, 4) has norm 5
// and must land exactly on the unit circle.
BOOST_AUTO_TEST_CASE_TEMPLATE(l2_ball_constraint_test, T, all_float_types)
{
    using std::abs; // unqualified calls now resolve for builtin and UDT types
    using std::sqrt;
    std::vector<T> x = {T{3.0}, T{4.0}}; // norm = 5
    bopt::l2_ball_constraint<std::vector<T>, T> proj(1.0);
    proj(x);
    T norm = sqrt(x[0] * x[0] + x[1] * x[1]);
    // Tolerance scaled to the type: the former fixed 1e-12 is below float
    // precision and would fail for T = float.
    BOOST_TEST(abs(norm - T{1.0}) < T{100} * std::numeric_limits<T>::epsilon());
}
// Projection onto the L1 ball of radius 2: the projected point's L1 norm
// must equal the radius.
BOOST_AUTO_TEST_CASE_TEMPLATE(l1_ball_constraint_test, T, all_float_types)
{
    using std::abs; // unqualified calls now resolve for builtin and UDT types
    std::vector<T> x = {T{3.0}, T{4.0}}; // L1 norm = 7
    bopt::l1_ball_constraint<std::vector<T>, T> proj(2.0);
    proj(x);
    T norm1 = abs(x[0]) + abs(x[1]);
    // Type-scaled tolerance: a fixed 1e-12 would fail for T = float.
    BOOST_TEST(abs(norm1 - T{2.0}) < T{100} * std::numeric_limits<T>::epsilon());
}
// Projection onto the probability simplex: all entries become
// nonnegative and sum to one.
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_constraint_test, T, all_float_types)
{
    using std::abs; // unqualified call now resolves for builtin and UDT types
    std::vector<T> x = {T{-1.0}, T{2.0}, T{3.0}}; // has negative and sum != 1
    bopt::simplex_constraint<std::vector<T>, T> proj;
    proj(x);
    T sum = 0.0;
    for (auto& xi : x) {
        BOOST_TEST(xi >= 0.0); // all nonnegative
        sum += xi;
    }
    // Type-scaled tolerance: a fixed 1e-12 would fail for T = float.
    BOOST_TEST(abs(sum - T{1.0}) < T{100} * std::numeric_limits<T>::epsilon()); // normalized to sum=1
}
// Projection onto the unit sphere: a point of norm 0.5 is rescaled so its
// norm is exactly 1.
BOOST_AUTO_TEST_CASE_TEMPLATE(unit_sphere_constraint_test, T, all_float_types)
{
    using std::abs; // unqualified calls now resolve for builtin and UDT types
    using std::sqrt;
    std::vector<T> x = {T{0.3}, T{0.4}}; // norm = 0.5
    bopt::unit_sphere_constraint<std::vector<T>, T> proj;
    proj(x);
    T norm = sqrt(x[0] * x[0] + x[1] * x[1]);
    // Type-scaled tolerance: a fixed 1e-12 would fail for T = float.
    BOOST_TEST(abs(norm - T{1.0}) < T{100} * std::numeric_limits<T>::epsilon()); // always projected to sphere
}
// function_constraint wraps an arbitrary user callable as a projection;
// here the callable clips each entry to at most 0.5.
BOOST_AUTO_TEST_CASE_TEMPLATE(function_constraint_test, T, all_float_types)
{
auto clip_to_half = [](std::vector<T>& v) {
for (auto& xi : v)
if (xi > 0.5)
xi = 0.5;
};
bopt::function_constraint<std::vector<T>> proj(clip_to_half);
std::vector<T> x = {T{0.2}, T{0.7}, T{1.5}};
proj(x);
BOOST_TEST(x == std::vector<T>({T{0.2}, T{0.5}, T{0.5}}));
}
// Initialization policy that deliberately leaves the parameter vector
// untouched (used when the test supplies its own starting point).
template<typename RealType>
struct no_init_policy
{
void operator()(std::vector<RealType>& x) const noexcept {}
};
// Function-evaluation policy that calls the objective directly on the raw
// argument container and converts the result to RealType.
template<typename RealType>
struct analytic_objective_eval_pol
{
    template<typename Objective, typename ArgumentContainer>
    RealType operator()(Objective&& objective, ArgumentContainer& x)
    {
        RealType value = objective(x);
        return value;
    }
};
// Gradient-evaluation policy carrying the analytic gradient of the
// quadratic objective f(x) = sum_i x_i^2, i.e. df/dx_i = 2 * x_i.
template<typename RealType>
struct analytic_gradient_eval_pol
{
    template<class Objective,
             class ArgumentContainer,
             class FunctionEvaluationPolicy>
    void operator()(Objective&& obj_f,
                    ArgumentContainer& x,
                    FunctionEvaluationPolicy&& f_eval_pol,
                    RealType& obj_v,
                    std::vector<RealType>& grad_container)
    {
        // Evaluate the objective through the supplied evaluation policy.
        obj_v = f_eval_pol(obj_f, x);
        const size_t n = x.size();
        grad_container.resize(n);
        size_t idx = 0;
        while (idx < n) {
            grad_container[idx] = 2 * x[idx];
            ++idx;
        }
    }
};
// End-to-end check of the user-supplied policies above: plain T-valued
// parameters, no autodiff involved, analytic gradient of the quadratic.
BOOST_AUTO_TEST_CASE_TEMPLATE(analytic_derivative_policies, T, all_float_types)
{
    std::vector<T> x;
    size_t NITER = 2000;
    size_t N = 15;
    T lr = T{ 1e-2 };
    // Both sampler bounds are written as T now (was a bare int literal).
    RandomSample<T> rng{ T(-100), T(100) };
    T eps = T{ 1e-3 };
    for (size_t i = 0; i < N; ++i) {
        x.push_back(rng.next());
    }
    auto gdopt = bopt::make_gradient_descent(&quadratic<T>,
                                             x,
                                             lr,
                                             no_init_policy<T>{},
                                             analytic_objective_eval_pol<T>{},
                                             analytic_gradient_eval_pol<T>{});
    for (size_t i = 0; i < NITER; ++i) {
        gdopt.step();
    }
    for (auto& xi : x) {
        BOOST_REQUIRE_SMALL(xi, eps);
    }
}
BOOST_AUTO_TEST_SUITE_END()

View File

@@ -263,7 +263,19 @@ void test_spots(T, const char* name = nullptr)
BOOST_CHECK_CLOSE(::boost::math::lgamma_q(static_cast<T>(501.25), static_cast<T>(2000)), static_cast<T>(-810.2453406781655559126505101822969531699112391075198076300675402L), tolerance);
BOOST_CHECK_CLOSE(::boost::math::lgamma_q(static_cast<T>(20), static_cast<T>(0.25)), static_cast<T>(-2.946458104491857816330873290969917497748067639461638294404e-31L), tolerance);
BOOST_CHECK_CLOSE(::boost::math::lgamma_q(static_cast<T>(40), static_cast<T>(0.75)), static_cast<T>(-5.930604927955460343652485525435087275997461623988991819824e-54L), tolerance);
#if defined(__CYGWIN__) || defined(__MINGW32__)
//
// Check that lgamma_p returns correct values with spot values calculated via wolframalpha log(P[a, x])
// This is calculated using: N[Log[GammaRegularized[a,0, z]],64]
//
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(500), static_cast<T>(10)), static_cast<T>(-1470.017750815998931281954666549641187420649099004671023115157832L), tolerance);
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(100), static_cast<T>(0.25)), static_cast<T>(-502.6163334118978895536207514636026023439623265152862757105793000L), tolerance);
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(20), static_cast<T>(10.25)), static_cast<T>(-5.404004887981642339930593767572610169901594898478031307722239712L), tolerance);
// Small "a" produce larger errors
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(0.25), static_cast<T>(100)), static_cast<T>(-3.220751038854414755009496530271388459559061551701603447517040280e-46L), tolerance);
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(0.25), static_cast<T>(10)), static_cast<T>(-2.083032578160285760530530498275075010777428544413918699832758176e-6L), tolerance);
#if defined(__CYGWIN__) || defined(__MINGW32__)
T gcc_win_mul = 2;
#else
T gcc_win_mul = 1;
@@ -287,6 +299,24 @@ void test_spots(T, const char* name = nullptr)
BOOST_CHECK_CLOSE(::boost::math::lgamma_q(static_cast<T>(1200), static_cast<T>(1250.25)), static_cast<T>(-2.591934862117586205519309712218581885256650074210410262843591453L), tolerance * ((std::numeric_limits<T>::max_digits10 >= 36 || std::is_same<T, boost::math::concepts::real_concept>::value) ? 750 : (std::is_same<T, float>::value ? 1 : 50))); // Test fails on ARM64 and s390x long doubles and real_concept types unless tolerance is adjusted
BOOST_CHECK_CLOSE(::boost::math::lgamma_q(static_cast<T>(2200), static_cast<T>(2249.75)), static_cast<T>(-1.933779894897391651410597618307863427927461116308937004149240320L), tolerance * (std::is_floating_point<T>::value ? 1 : 10));
BOOST_CHECK_CLOSE(::boost::math::lgamma_q(static_cast<T>(2200), static_cast<T>(2250.25)), static_cast<T>(-1.950346484067948344620463026377077515919992808640737320057812268L), tolerance * (std::is_same<T, float>::value ? 1 : (std::is_floating_point<T>::value ? 100 : 200)));
// Long double and real_concept types need increased precision
T real_concept_tol = 1;
if (std::is_same<T, boost::math::concepts::real_concept>::value || std::is_same<T, long double>::value){
real_concept_tol = 3;
}
// Pair of tests that bisect the crossover condition in our code at double and then quad precision
// Oddly, the crossover condition is smaller for quad precision. This is because max_factorial is 100
// for boost::multiprecision::cpp_bin_float_quad and 170 for doubles.
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(169.75), static_cast<T>(0.75)), static_cast<T>(-754.8681912874632573100058312311927462406154378562940316233389406L), tolerance * real_concept_tol);
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(170.25), static_cast<T>(0.75)), static_cast<T>(-757.5814133895304434271729579978676692688834086380018151200693572L), tolerance * real_concept_tol);
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(99.75), static_cast<T>(0.75)), static_cast<T>(-392.0259615581237826290999388631292473247947826682978959914359465L), tolerance * real_concept_tol);
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(100.25), static_cast<T>(0.75)), static_cast<T>(-394.4749200332583219473980963811639065003421270272773619742710832L), tolerance * real_concept_tol);
// Check large a, x values. Precision just isn't great here.
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(1450.25), static_cast<T>(1500.75)), static_cast<T>(-0.09812447528127799786140178403478691390753413399549580160096975713L), tolerance * (std::is_same<T, boost::math::concepts::real_concept>::value ? 16 : 1));
BOOST_CHECK_CLOSE(::boost::math::lgamma_p(static_cast<T>(2000), static_cast<T>(1900)), static_cast<T>(-4.448523733381445722945397105917814000790587922314824687065050805L), tolerance * gcc_win_mul * (std::is_same<T, boost::math::concepts::real_concept>::value ? 8 : 1));
//
// Coverage:
//
@@ -302,6 +332,10 @@ void test_spots(T, const char* name = nullptr)
BOOST_CHECK_THROW(boost::math::lgamma_q(static_cast<T>(1), static_cast<T>(-2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::lgamma_q(static_cast<T>(0), static_cast<T>(2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::lgamma_p(static_cast<T>(-1), static_cast<T>(2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::lgamma_p(static_cast<T>(1), static_cast<T>(-2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::lgamma_p(static_cast<T>(0), static_cast<T>(2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::gamma_p_derivative(static_cast<T>(-1), static_cast<T>(2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::gamma_p_derivative(static_cast<T>(1), static_cast<T>(-2)), std::domain_error);
BOOST_CHECK_THROW(boost::math::gamma_p_derivative(static_cast<T>(0), static_cast<T>(2)), std::domain_error);
@@ -317,6 +351,10 @@ void test_spots(T, const char* name = nullptr)
BOOST_CHECK((boost::math::isnan)(boost::math::lgamma_q(static_cast<T>(1), static_cast<T>(-2))));
BOOST_CHECK((boost::math::isnan)(boost::math::lgamma_q(static_cast<T>(0), static_cast<T>(2))));
BOOST_CHECK((boost::math::isnan)(boost::math::lgamma_p(static_cast<T>(-1), static_cast<T>(2))));
BOOST_CHECK((boost::math::isnan)(boost::math::lgamma_p(static_cast<T>(1), static_cast<T>(-2))));
BOOST_CHECK((boost::math::isnan)(boost::math::lgamma_p(static_cast<T>(0), static_cast<T>(2))));
BOOST_CHECK((boost::math::isnan)(boost::math::gamma_p_derivative(static_cast<T>(-1), static_cast<T>(2))));
BOOST_CHECK((boost::math::isnan)(boost::math::gamma_p_derivative(static_cast<T>(1), static_cast<T>(-2))));
BOOST_CHECK((boost::math::isnan)(boost::math::gamma_p_derivative(static_cast<T>(0), static_cast<T>(2))));

124
test/test_lbfgs.cpp Normal file
View File

@@ -0,0 +1,124 @@
// Copyright Maksym Zhelyenzyakov 2025-2026.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
#include "test_autodiff_reverse.hpp"
#include "test_functions_for_optimization.hpp"
#include <boost/math/differentiation/autodiff_reverse.hpp>
#include <boost/math/optimization/lbfgs.hpp>
#include <boost/math/optimization/minimizer.hpp>
#include <boost/math/optimization/gradient_descent.hpp>
namespace rdiff = boost::math::differentiation::reverse_mode;
namespace bopt = boost::math::optimization;
BOOST_AUTO_TEST_SUITE(basic_lbfgs)
// L-BFGS on the 2-D Rosenbrock function from a random start: the
// minimizer at (1, 1) must be recovered to within eps.
BOOST_AUTO_TEST_CASE(default_lbfgs_test) //, T, all_float_types)
{
    using T = double;
    constexpr size_t M = 10; // L-BFGS history size
    const T eps = T{1e-5};
    RandomSample<T> rng{T(-10), T(10)};
    std::array<rdiff::rvar<T, 1>, 2> x;
    x[0] = rng.next();
    x[1] = rng.next();
    auto opt = bopt::make_lbfgs(&rosenbrock_saddle<rdiff::rvar<T, 1>>, x, M);
    auto constraint = bopt::unconstrained_policy<std::array<rdiff::rvar<T, 1>,2>>{};
    auto convergence_policy = bopt::gradient_norm_convergence_policy<T>(T{ 1e-20 });
    // Only the final parameters are checked; result binding and the unused
    // NITER constant were removed.
    bopt::minimize(opt, constraint, convergence_policy);
    for (auto& xi : x) {
        // NOTE(review): sibling suites compare xi.item(); confirm rvar
        // converts implicitly for BOOST_REQUIRE_CLOSE or use xi.item().
        BOOST_REQUIRE_CLOSE(xi, T{1.0}, eps);
    }
}
// Custom initialization policy that resets every parameter to zero before
// the optimizer starts.
template<typename RealType>
struct zero_init_policy
{
    void operator()(std::vector<RealType>& x) const noexcept
    {
        for (auto& value : x) {
            value = RealType{0};
        }
    }
};
// Evaluation policy for objectives that can be called directly on the raw
// argument container (no autodiff bookkeeping required).
template<typename RealType>
struct analytic_objective_eval_pol
{
    template<typename Objective, typename ArgumentContainer>
    RealType operator()(Objective&& objective, ArgumentContainer& x)
    {
        return static_cast<RealType>(objective(x));
    }
};
// Gradient policy for f(x) = sum_i x_i^2: writes the analytic gradient
// 2 * x_i and reports the objective value computed via f_eval_pol.
template<typename RealType>
struct analytic_gradient_eval_pol
{
    template<class Objective, class ArgumentContainer, class FunctionEvaluationPolicy>
    void operator()(Objective&& obj_f,
                    ArgumentContainer& x,
                    FunctionEvaluationPolicy&& f_eval_pol,
                    RealType& obj_v,
                    std::vector<RealType>& grad_container)
    {
        obj_v = f_eval_pol(obj_f, x);
        grad_container.assign(x.size(), RealType{0});
        size_t i = 0;
        for (const auto& xi : x) {
            grad_container[i++] = 2 * xi;
        }
    }
};
// L-BFGS with a constant (zero) initializer on Rosenbrock; the random
// draws are overwritten by the initializer, and the (1, 1) minimum must
// still be recovered.
// NOTE(review): 'costant_initializer_rvar' (sic) is the library's actual
// identifier; also sibling suites compare xi.item() rather than xi —
// confirm rvar converts implicitly for BOOST_REQUIRE_CLOSE.
BOOST_AUTO_TEST_CASE_TEMPLATE(custom_init_lbfgs_test, T, all_float_types)
{
constexpr size_t M = 8;
const T eps = T{1e-6};
RandomSample<T> rng{T(-5), T(5)};
std::array<rdiff::rvar<T, 1>, 2> x;
x[0] = rng.next();
x[1] = rng.next();
auto opt = bopt::make_lbfgs(&rosenbrock_saddle<rdiff::rvar<T, 1>>,
x,
M,
bopt::costant_initializer_rvar<T>(0.0));
auto constraint = bopt::unconstrained_policy<std::array<rdiff::rvar<T, 1>,2>>{};
auto convergence_policy = bopt::gradient_norm_convergence_policy<T>(T{ 1e-8 });
auto result = minimize(opt, constraint, convergence_policy);
for (auto& xi : x) {
BOOST_REQUIRE_CLOSE(xi, T{1.0}, eps);
}
}
// L-BFGS with fully user-supplied policies on plain scalar parameters:
// zero initialization, analytic objective/gradient evaluation, and an
// Armijo backtracking line search; the quadratic's minimum is the origin.
BOOST_AUTO_TEST_CASE_TEMPLATE(analytic_lbfgs_test, T, all_float_types)
{
constexpr size_t M = 10;
const T eps = T{1e-3};
RandomSample<T> rng{T(-5), T(5)};
std::vector<T> x(3);
for (auto& xi : x)
xi = rng.next();
auto opt = bopt::make_lbfgs(&quadratic<T>, // Objective
x, // Arguments
M, // History size
zero_init_policy<T>{}, // Initialization
analytic_objective_eval_pol<T>{}, // Function eval
analytic_gradient_eval_pol<T>{}, // Gradient eval
bopt::armijo_line_search_policy<T>{});
auto result = minimize(opt);
for (auto& xi : x) {
BOOST_REQUIRE_SMALL(xi, eps);
}
}
BOOST_AUTO_TEST_SUITE_END()

View File

@@ -0,0 +1,102 @@
// Copyright John Maddock 2016.
// Copyright Matt Borland 2024.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_MATH_PROMOTE_DOUBLE_POLICY false
#include <iostream>
#include <iomanip>
#include <vector>
#include <boost/math/special_functions.hpp>
#include "cuda_managed_ptr.hpp"
#include "stopwatch.hpp"
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
typedef double float_type;
/**
 * CUDA Kernel Device code: one thread per element, each computing
 * boost::math::lgamma_p(x, x) for its input value.
 */
__global__ void cuda_test(const float_type *in, float_type *out, int numElements)
{
    // (Removed the unused 'using std::cos;' left over from the sample this
    // file was copied from.)
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        out[i] = boost::math::lgamma_p(in[i], in[i]);
    }
}
/**
 * Host main routine: fills an input vector with uniform random values in
 * [0, 1], evaluates lgamma_p on the GPU and on the host, and verifies the
 * two result sets agree to within 10 ulp.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size
    int numElements = 50000;
    std::cout << "[Vector operation on " << numElements << " elements]" << std::endl;

    // Allocate the managed input vector A
    cuda_managed_ptr<float_type> input_vector(numElements);

    // Allocate the managed output vector C
    cuda_managed_ptr<float_type> output_vector(numElements);

    // Initialize the input vectors
    for (int i = 0; i < numElements; ++i)
    {
        input_vector[i] = rand()/(float_type)RAND_MAX;
    }

    // Launch the CUDA kernel
    int threadsPerBlock = 1024;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl;
    watch w;
    cuda_test<<<blocksPerGrid, threadsPerBlock>>>(input_vector.get(), output_vector.get(), numElements);
    cudaDeviceSynchronize();
    // Fixed typo: "kernal" -> "kernel".
    std::cout << "CUDA kernel done in: " << w.elapsed() << "s" << std::endl;

    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        // Error message now names the kernel actually launched (cuda_test,
        // not the vectorAdd sample this file was derived from).
        std::cerr << "Failed to launch cuda_test kernel (error code " << cudaGetErrorString(err) << ")!" << std::endl;
        return EXIT_FAILURE;
    }

    // Recompute the same values on the host for verification
    std::vector<float_type> results;
    results.reserve(numElements);
    w.reset();
    for(int i = 0; i < numElements; ++i)
        results.push_back(boost::math::lgamma_p(input_vector[i], input_vector[i]));
    double t = w.elapsed();

    // check the results
    for(int i = 0; i < numElements; ++i)
    {
        if (boost::math::epsilon_difference(output_vector[i], results[i]) > 10)
        {
            std::cerr << "Result verification failed at element " << i << "!" << std::endl;
            return EXIT_FAILURE;
        }
    }

    std::cout << "Test PASSED, normal calculation time: " << t << "s" << std::endl;
    std::cout << "Done\n";

    return 0;
}

102
test/test_lgamma_p_float.cu Normal file
View File

@@ -0,0 +1,102 @@
// Copyright John Maddock 2016.
// Copyright Matt Borland 2024.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_MATH_PROMOTE_DOUBLE_POLICY false
#include <iostream>
#include <iomanip>
#include <vector>
#include <boost/math/special_functions.hpp>
#include "cuda_managed_ptr.hpp"
#include "stopwatch.hpp"
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
typedef float float_type;
/**
 * CUDA Kernel Device code: one thread per element, each computing
 * boost::math::lgamma_p(x, x) for its input value (float precision).
 */
__global__ void cuda_test(const float_type *in, float_type *out, int numElements)
{
    // (Removed the unused 'using std::cos;' left over from the sample this
    // file was copied from.)
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        out[i] = boost::math::lgamma_p(in[i], in[i]);
    }
}
/**
 * Host main routine: fills an input vector with uniform random values in
 * [0, 1], evaluates lgamma_p on the GPU and on the host, and verifies the
 * two result sets agree to within 10 ulp.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size
    int numElements = 50000;
    std::cout << "[Vector operation on " << numElements << " elements]" << std::endl;

    // Allocate the managed input vector A
    cuda_managed_ptr<float_type> input_vector(numElements);

    // Allocate the managed output vector C
    cuda_managed_ptr<float_type> output_vector(numElements);

    // Initialize the input vectors
    for (int i = 0; i < numElements; ++i)
    {
        input_vector[i] = rand()/(float_type)RAND_MAX;
    }

    // Launch the CUDA kernel
    int threadsPerBlock = 1024;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl;
    watch w;
    cuda_test<<<blocksPerGrid, threadsPerBlock>>>(input_vector.get(), output_vector.get(), numElements);
    cudaDeviceSynchronize();
    // Fixed typo: "kernal" -> "kernel".
    std::cout << "CUDA kernel done in: " << w.elapsed() << "s" << std::endl;

    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        // Error message now names the kernel actually launched (cuda_test,
        // not the vectorAdd sample this file was derived from).
        std::cerr << "Failed to launch cuda_test kernel (error code " << cudaGetErrorString(err) << ")!" << std::endl;
        return EXIT_FAILURE;
    }

    // Recompute the same values on the host for verification
    std::vector<float_type> results;
    results.reserve(numElements);
    w.reset();
    for(int i = 0; i < numElements; ++i)
        results.push_back(boost::math::lgamma_p(input_vector[i], input_vector[i]));
    double t = w.elapsed();

    // check the results
    for(int i = 0; i < numElements; ++i)
    {
        if (boost::math::epsilon_difference(output_vector[i], results[i]) > 10)
        {
            std::cerr << "Result verification failed at element " << i << "!" << std::endl;
            return EXIT_FAILURE;
        }
    }

    std::cout << "Test PASSED, normal calculation time: " << t << "s" << std::endl;
    std::cout << "Done\n";

    return 0;
}

View File

@@ -0,0 +1,33 @@
// Copyright Maksym Zhelyenzyakov 2025-2026.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
#include "test_autodiff_reverse.hpp" // reuse for some basic options
#include "test_functions_for_optimization.hpp"
#include <boost/math/differentiation/autodiff_reverse.hpp>
#include <boost/math/optimization/minimizer.hpp>
#include <boost/math/optimization/nesterov.hpp>
namespace rdiff = boost::math::differentiation::reverse_mode;
namespace bopt = boost::math::optimization;
BOOST_AUTO_TEST_SUITE(nesterov_descent)
// Minimize a 2D ill-conditioned quadratic with Nesterov accelerated
// gradient (NAG) from a random start and check the minimizer is at the
// origin to within eps.
BOOST_AUTO_TEST_CASE_TEMPLATE(default_nesterov_test, T, all_float_types)
{
    // Learning rate and momentum coefficient for NAG.
    T lr = T{ 1e-5 };
    T mu = T{ 0.95 };
    // Random starting point in [-10, 10]^2.
    // Fixed: second bound was a bare int `(10)`; both bounds are now T for
    // consistency and to avoid relying on int->T conversion.
    RandomSample<T> rng{ T(-10), T(10) };
    std::vector<rdiff::rvar<T, 1>> x;
    x.push_back(rng.next());
    x.push_back(rng.next());
    // Tolerance on the recovered minimizer.
    T eps = T{ 1e-8 };
    auto nag =
        bopt::make_nag(&quadratic_high_cond_2D<rdiff::rvar<T, 1>>, x, lr, mu);
    auto constraint = bopt::unconstrained_policy<std::vector<rdiff::rvar<T, 1>>>{};
    // Stop when the gradient norm drops below 1e-8.
    auto convergence_policy = bopt::gradient_norm_convergence_policy<T>(T{ 1e-8 });
    auto z = minimize(nag, constraint, convergence_policy);
    // The test quadratic's minimum is at x = 0; x is updated in place.
    for (auto& xi : x) {
        BOOST_REQUIRE_SMALL(xi.item(), eps);
    }
}
BOOST_AUTO_TEST_SUITE_END()

View File

@@ -6,113 +6,129 @@
#include <vector>
BOOST_AUTO_TEST_SUITE(test_flat_linear_allocator)
// NOTE(review): this span was mangled diff residue - the pre- and post-commit
// lines of the test were interleaved, duplicating the macro invocation and
// every statement. Reconstructed the post-commit (NEW) version only.
//
// Exercises size/capacity bookkeeping of flat_linear_allocator across
// growth past the initial buffer, clear(), reset(), and fill().
BOOST_AUTO_TEST_CASE_TEMPLATE(flat_linear_allocator_constructors,
                              T,
                              all_float_types)
{
    size_t buffer_size = 16;
    RandomSample<T> rng{ -1, 1 };
    rdiff_detail::flat_linear_allocator<T, 16> float_allocator{};

    // Grow past the initial buffer: 1.5x the buffer size forces one
    // capacity doubling.
    for (size_t i = 0; i < 2 * buffer_size - buffer_size / 2; i++) {
        float_allocator.emplace_back(rng.next());
    }
    BOOST_CHECK_EQUAL(float_allocator.size(),
                      static_cast<size_t>(2 * buffer_size - buffer_size / 2));
    BOOST_CHECK_EQUAL(float_allocator.capacity(),
                      static_cast<size_t>(2 * buffer_size));

    // clear() drops the elements AND shrinks capacity back to one buffer.
    float_allocator.clear();
    BOOST_CHECK_EQUAL(float_allocator.size(), static_cast<size_t>(0));
    BOOST_CHECK_EQUAL(float_allocator.capacity(), buffer_size);

    // reset() drops the elements but keeps the grown capacity.
    for (size_t i = 0; i < 2 * buffer_size - buffer_size / 2; i++) {
        float_allocator.emplace_back(rng.next());
    }
    float_allocator.reset();
    BOOST_CHECK_EQUAL(float_allocator.size(), static_cast<size_t>(0));
    BOOST_CHECK_EQUAL(float_allocator.capacity(), 2 * buffer_size);

    // fill() overwrites every live element with the given value.
    for (size_t i = 0; i < 2 * buffer_size - buffer_size / 2; i++) {
        float_allocator.emplace_back(rng.next());
    }
    T fill_value = T(0.25);
    float_allocator.fill(fill_value);
    for (size_t i = 0; i < float_allocator.size(); i++) {
        BOOST_CHECK_EQUAL(float_allocator[i], fill_value);
    }
}
// NOTE(review): this span was mangled diff residue - pre- and post-commit
// lines interleaved, duplicating the macro invocation and statements.
// Reconstructed the post-commit (NEW) version only.
//
// Mirrors a std::vector alongside the allocator through emplace_back,
// compile-time emplace_back_n<4>() and runtime emplace_back_n(n), then
// verifies element-wise equality via iterators and random access.
BOOST_AUTO_TEST_CASE_TEMPLATE(flat_linear_allocator_test_emplace,
                              T,
                              all_float_types)
{
    size_t buffer_size = 16;
    RandomSample<T> rng{ -1, 1 };
    rdiff_detail::flat_linear_allocator<T, 16> float_allocator{};
    std::vector<T> test_vector;

    // Single-element emplace, crossing one buffer boundary.
    for (size_t i = 0; i < 2 * buffer_size - 1; i++) {
        test_vector.push_back(rng.next());
        float_allocator.emplace_back(test_vector[i]);
    }

    // Bulk emplace with compile-time count; returned iterator points at
    // the first of the new slots.
    auto it1 = float_allocator.template emplace_back_n<4>();
    for (size_t i = 0; i < 4; i++) {
        T literal = rng.next();
        test_vector.push_back(literal);
        *(it1 + i) = literal;
    }

    // Bulk emplace with runtime count.
    auto it2 = float_allocator.emplace_back_n(buffer_size);
    for (size_t i = 0; i < buffer_size; i++) {
        T literal = rng.next();
        test_vector.push_back(literal);
        *(it2 + i) = literal;
    }

    auto vit = test_vector.begin();
    auto alloc_it = float_allocator.begin();
    for (; vit != test_vector.end(); vit++, alloc_it++) {
        BOOST_CHECK_EQUAL(*vit,
                          *alloc_it); // should be ok to check floats like this
                                      // since they are expected to be the same.
    }

    for (size_t i = 0; i < test_vector.size(); i++) {
        BOOST_CHECK_EQUAL(test_vector[i],
                          float_allocator[i]); // check random access as well
    }
    BOOST_CHECK_EQUAL(test_vector.size(), float_allocator.size());
}
// NOTE(review): this span was mangled diff residue - the full pre-commit
// body was immediately followed by the full post-commit body, defining the
// same test case twice. Kept the post-commit (NEW) version only.
//
// Records checkpoints at given element counts, verifies each checkpoint
// iterator points one-past the element recorded at that count, then checks
// rewind_to_last_checkpoint() truncates size without shrinking capacity.
BOOST_AUTO_TEST_CASE_TEMPLATE(flat_linear_allocator_test_checkpointing,
                              T,
                              all_float_types)
{
    constexpr size_t buffer_size = 16;
    RandomSample<T> rng{ -1, 1 };
    rdiff_detail::flat_linear_allocator<T, buffer_size> float_allocator{};
    // Element counts at which a checkpoint is recorded (16 straddles the
    // first buffer boundary on purpose).
    std::vector<size_t> checkpoint_indices{ 2, 11, 15, 16, 17, 28 };
    std::vector<T> expected_value_at_checkpoint;
    size_t ckp_id = 0;
    for (size_t i = 0; i < 2 * buffer_size; ++i) {
        T literal = rng.next();
        float_allocator.emplace_back(literal);
        // Checkpoint when the allocator holds exactly checkpoint_indices[ckp_id]
        // elements, i.e. after the (i+1)-th emplace.
        if (ckp_id < checkpoint_indices.size() &&
            (i + 1) == checkpoint_indices[ckp_id]) {
            float_allocator.add_checkpoint();
            expected_value_at_checkpoint.push_back(literal);
            ++ckp_id;
        }
    }
    // Each checkpoint iterator is one-past the last element present when the
    // checkpoint was taken; step back to inspect that element.
    for (size_t i = 0; i < checkpoint_indices.size(); ++i) {
        auto it = float_allocator.checkpoint_at(i);
        BOOST_REQUIRE(it != float_allocator.begin());
        --it;
        BOOST_CHECK_EQUAL(*it, expected_value_at_checkpoint[i]);
    }
    auto first_ckp = float_allocator.first_checkpoint();
    BOOST_REQUIRE(first_ckp != float_allocator.begin());
    --first_ckp;
    BOOST_CHECK_EQUAL(*first_ckp, expected_value_at_checkpoint.front());
    auto last_ckp = float_allocator.last_checkpoint();
    BOOST_REQUIRE(last_ckp != float_allocator.begin());
    --last_ckp;
    BOOST_CHECK_EQUAL(*last_ckp, expected_value_at_checkpoint.back());
    // Rewinding truncates to the last checkpoint's size but keeps capacity.
    float_allocator.rewind_to_last_checkpoint();
    BOOST_CHECK_EQUAL(float_allocator.size(), checkpoint_indices.back());
    BOOST_CHECK_EQUAL(float_allocator.capacity(), 2 * buffer_size);
}
BOOST_AUTO_TEST_SUITE_END()