diff --git a/doc/equations/alternating_series.svg b/doc/equations/alternating_series.svg
new file mode 100644
index 000000000..b7f1e14e5
--- /dev/null
+++ b/doc/equations/alternating_series.svg
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/doc/equations/cohen_acceleration_error_model.svg b/doc/equations/cohen_acceleration_error_model.svg
new file mode 100644
index 000000000..bcdc09ef6
--- /dev/null
+++ b/doc/equations/cohen_acceleration_error_model.svg
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/doc/equations/zeta_related_alternating.svg b/doc/equations/zeta_related_alternating.svg
new file mode 100644
index 000000000..74eb55a34
--- /dev/null
+++ b/doc/equations/zeta_related_alternating.svg
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/doc/internals/cohen_acceleration.qbk b/doc/internals/cohen_acceleration.qbk
new file mode 100644
index 000000000..2b3a7e7ca
--- /dev/null
+++ b/doc/internals/cohen_acceleration.qbk
@@ -0,0 +1,106 @@
+[/
+ Copyright Nick Thompson 2020
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+[section:cohen_acceleration Cohen Acceleration]
+
+[h4 Synopsis]
+
+    #include <boost/math/tools/cohen_acceleration.hpp>
+
+ namespace boost::math::tools {
+
+    template<class G>
+ auto cohen_acceleration(G& generator, int64_t n = -1);
+
+ } // namespaces
+
+
+The function `cohen_acceleration` rapidly computes the limiting value of an alternating series via a technique developed by [@https://www.johndcook.com/blog/2020/08/06/cohen-acceleration/ Henri Cohen et al].
+To compute
+
+[$../equations/alternating_series.svg]
+
+
+we first define a callable that produces /a/[sub /k/] on the /k/th call.
+For example, suppose we wish to compute
+
+[$../equations/zeta_related_alternating.svg]
+
+First, we need to define a callable which returns the requisite terms:
+
+    template<typename Real>
+ class G {
+ public:
+ G(){
+ k_ = 0;
+ }
+
+ Real operator()() {
+ k_ += 1;
+ return 1/(k_*k_);
+ }
+
+ private:
+ Real k_;
+ };
+
+Then we pass this into the `cohen_acceleration` function:
+
+    auto gen = G<double>();
+ double computed = cohen_acceleration(gen);
+
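+For this series the limit is π²/12 ≈ 0.8224670334, so the result can be checked against `boost::math::constants::pi`, as the example file does:
+
+    using boost::math::constants::pi;
+    double expected = pi<double>()*pi<double>()/12;
+    // computed and expected should agree to within a few ulps.
+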
+See `cohen_acceleration.cpp` in the `example` directory for more.
+
+The number of terms consumed is computed from the error model
+
+[$../equations/cohen_acceleration_error_model.svg]
+
+and must be computed /a priori/.
+If we read the reference carefully, we notice that this error model is derived under the assumption that the terms /a/[sub /k/] are given as the moments of a positive measure on [0,1].
+If this assumption does not hold, then the number of terms chosen by the method is incorrect.
+Hence we permit the user to provide a second argument to specify the number of terms:
+
+ double computed = cohen_acceleration(gen, 5);
+
+/Nota bene/: When experimenting with this option, we found that adding more terms was no guarantee of additional accuracy, and we could not find an example where a user-provided number of terms outperformed the default.
+In addition, it is easy to generate intermediates that overflow if /n/ is allowed to grow too large.
+Hence we recommend using this parameter only to /decrease/ the number of terms from the default, trading a little accuracy for speed.
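+
+For reference, the default number of terms follows from this error model: the relative error decays like 2(3 + sqrt(8))[super -n], so the default is the smallest /n/ for which (3 + sqrt(8))[super -n] drops below eps/4, that is, /n/ just above ln(4/eps)/ln(3 + sqrt(8)).
+A minimal sketch of that computation (the helper name is ours, for illustration only; `cohen_acceleration` performs the equivalent calculation internally):
+
+    // Requires <cmath>, <cstdint>, and <limits>.
+    template<typename Real>
+    int64_t default_term_count()
+    {
+        using std::ceil;
+        using std::log;
+        using std::sqrt;
+        // Solve (3 + sqrt(8))^(-n) < eps/4 for n:
+        Real eps = std::numeric_limits<Real>::epsilon();
+        return static_cast<int64_t>(ceil(log(4/eps)/log(Real(3) + sqrt(Real(8)))));
+    }
+
+For `double`, this yields /n/ = 22 terms.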
+
+[heading Performance]
+
+To see that Cohen acceleration is in fact faster than naive summation for the same level of relative accuracy, we can run the `reporting/performance/cohen_acceleration_performance.cpp` file.
+This benchmark computes the alternating Basel series discussed above:
+
+```
+Running ./reporting/performance/cohen_acceleration_performance.x
+Run on (16 X 2300 MHz CPU s)
+CPU Caches:
+ L1 Data 32 KiB (x8)
+ L1 Instruction 32 KiB (x8)
+ L2 Unified 256 KiB (x8)
+ L3 Unified 16384 KiB (x1)
+Load Average: 4.13, 3.71, 3.30
+-----------------------------------------------------------------
+Benchmark Time
+-----------------------------------------------------------------
CohenAcceleration<float>            20.7 ns
CohenAcceleration<double>           64.6 ns
CohenAcceleration<long double>       115 ns
NaiveSum<float>                     4994 ns
NaiveSum<double>               112803698 ns
NaiveSum<long double>         5009564877 ns
+```
+
+In fact, not only does the naive sum take orders of magnitude longer to compute, it is also less accurate: the terms must be summed until they fall below half the machine epsilon, which in double precision requires tens of millions of additions, and the rounding error of this long, ill-conditioned summation accumulates.
+
+
+[heading References]
+
+* Cohen, Henri, Fernando Rodriguez Villegas, and Don Zagier. ['Convergence acceleration of alternating series.] Experimental Mathematics 9.1 (2000): 3-12.
+
+
+[endsect]
diff --git a/doc/math.qbk b/doc/math.qbk
index 8fbbcb750..fe20b81fd 100644
--- a/doc/math.qbk
+++ b/doc/math.qbk
@@ -756,6 +756,7 @@ and as a CD ISBN 0-9504833-2-X 978-0-9504833-2-0, Classification 519.2-dc22.
[include internals/luroth_expansion.qbk]
[include internals/engel_expansion.qbk]
[include internals/recurrence.qbk]
+[include internals/cohen_acceleration.qbk]
[/include internals/rational.qbk] [/moved to tools]
[include internals/tuple.qbk]
[/include internals/polynomial.qbk] [/moved to tools]
diff --git a/example/cohen_acceleration.cpp b/example/cohen_acceleration.cpp
new file mode 100644
index 000000000..e16357d13
--- /dev/null
+++ b/example/cohen_acceleration.cpp
@@ -0,0 +1,46 @@
+// (C) Copyright Nick Thompson 2020.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+#include <iostream>
+#include <iomanip>
+#include <boost/math/tools/cohen_acceleration.hpp>
+#include <boost/math/constants/constants.hpp>
+
+using boost::math::tools::cohen_acceleration;
+using boost::math::constants::pi;
+
+template<typename Real>
+class G {
+public:
+ G(){
+ k_ = 0;
+ }
+
+ Real operator()() {
+ k_ += 1;
+ return 1/(k_*k_);
+ }
+
+private:
+ Real k_;
+};
+
+int main() {
+ using Real = float;
+    auto g = G<Real>();
+    Real computed = cohen_acceleration(g);
+    Real expected = pi<Real>()*pi<Real>()/12;
+    std::cout << std::setprecision(std::numeric_limits<Real>::max_digits10);
+
+ std::cout << "Computed = " << computed << " = " << std::hexfloat << computed << "\n";
+ std::cout << std::defaultfloat;
+ std::cout << "Expected = " << expected << " = " << std::hexfloat << expected << "\n";
+
+ // Compute with a specified number of terms:
+ // Make sure to reset g:
+ g = G();
+ computed = cohen_acceleration(g, 5);
+ std::cout << std::defaultfloat;
+ std::cout << "Computed = " << computed << " = " << std::hexfloat << computed << " using 5 terms.\n";
+}
diff --git a/include/boost/math/tools/cohen_acceleration.hpp b/include/boost/math/tools/cohen_acceleration.hpp
new file mode 100644
index 000000000..397fda13f
--- /dev/null
+++ b/include/boost/math/tools/cohen_acceleration.hpp
@@ -0,0 +1,49 @@
+// (C) Copyright Nick Thompson 2020.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_MATH_TOOLS_COHEN_ACCELERATION_HPP
+#define BOOST_MATH_TOOLS_COHEN_ACCELERATION_HPP
+#include <cmath>
+#include <limits>
+
+namespace boost::math::tools {
+
+// Algorithm 1 of https://people.mpim-bonn.mpg.de/zagier/files/exp-math-9/fulltext.pdf
+// Convergence Acceleration of Alternating Series: Henri Cohen, Fernando Rodriguez Villegas, and Don Zagier
+template<class G>
+auto cohen_acceleration(G& generator, int64_t n = -1)
+{
+ using Real = decltype(generator());
+ // This test doesn't pass for float128, sad!
+    //static_assert(std::is_floating_point_v<Real>, "Real must be a floating point type.");
+ using std::log;
+ using std::pow;
+ using std::ceil;
+ using std::sqrt;
+ Real n_ = n;
+ if (n < 0)
+ {
+        // The relative error scales as 2*5.828^-n; take 5.828^-n < eps/4 => -n*ln(5.828) < ln(eps/4) => n > ln(4/eps)/ln(5.828).
+        // Is there a way to do it rapidly with std::log2? (Yes, of course; but for primitive types it's computed at compile-time anyway.)
+        n_ = ceil(log(4/std::numeric_limits<Real>::epsilon())*0.5672963285532555);
+        n = static_cast<int64_t>(n_);
+ }
+ // d can get huge and overflow if you pick n too large:
+ Real d = pow(3 + sqrt(Real(8)), n);
+ d = (d + 1/d)/2;
+ Real b = -1;
+ Real c = -d;
+ Real s = 0;
+ for (Real k = 0; k < n_; ++k) {
+ c = b - c;
+ s += c*generator();
+ b = (k+n_)*(k-n_)*b/((k+Real(1)/Real(2))*(k+1));
+ }
+
+ return s/d;
+}
+
+}
+#endif
diff --git a/reporting/performance/cohen_acceleration_performance.cpp b/reporting/performance/cohen_acceleration_performance.cpp
new file mode 100644
index 000000000..7a9b6ca25
--- /dev/null
+++ b/reporting/performance/cohen_acceleration_performance.cpp
@@ -0,0 +1,117 @@
+// (C) Copyright Nick Thompson 2020.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include <iostream>
+#include <iomanip>
+#include <benchmark/benchmark.h>
+#include <boost/math/tools/cohen_acceleration.hpp>
+#include <boost/math/constants/constants.hpp>
+#include <boost/core/demangle.hpp>
+#include <boost/multiprecision/cpp_bin_float.hpp>
+#include <boost/multiprecision/float128.hpp>
+#include <boost/multiprecision/mpfr.hpp>
+
+using boost::multiprecision::number;
+using boost::multiprecision::mpfr_float_backend;
+using boost::multiprecision::float128;
+using boost::multiprecision::cpp_bin_float_50;
+using boost::multiprecision::cpp_bin_float_100;
+using boost::math::tools::cohen_acceleration;
+using boost::math::constants::pi;
+
+template<typename Real>
+class G {
+public:
+ G(){
+ k_ = 0;
+ }
+
+ Real operator()() {
+ k_ += 1;
+ return 1/(k_*k_);
+ }
+
+private:
+ Real k_;
+};
+
+
+template<typename Real>
+void CohenAcceleration(benchmark::State& state)
+{
+    using std::abs;
+    Real x = pi<Real>()*pi<Real>()/12;
+ for (auto _ : state)
+ {
+        auto g = G<Real>();
+ x = cohen_acceleration(g);
+ benchmark::DoNotOptimize(x);
+ }
+    if (abs(x - pi<Real>()*pi<Real>()/12) > 16*std::numeric_limits<Real>::epsilon())
+    {
+        std::cerr << std::setprecision(std::numeric_limits<Real>::max_digits10);
+        std::cerr << "Cohen acceleration computed " << x << " on type " << boost::core::demangle(typeid(Real).name()) << "\n";
+        std::cerr << "But expected value is " << pi<Real>()*pi<Real>()/12 << "\n";
+ }
+}
+
+BENCHMARK_TEMPLATE(CohenAcceleration, float);
+BENCHMARK_TEMPLATE(CohenAcceleration, double);
+BENCHMARK_TEMPLATE(CohenAcceleration, long double);
+BENCHMARK_TEMPLATE(CohenAcceleration, float128);
+BENCHMARK_TEMPLATE(CohenAcceleration, cpp_bin_float_50);
+BENCHMARK_TEMPLATE(CohenAcceleration, cpp_bin_float_100);
+BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<100>>);
+BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<200>>);
+BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<300>>);
+BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<400>>);
+BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<1000>>);
+
+
+template<typename Real>
+void NaiveSum(benchmark::State& state)
+{
+    using std::abs;
+    Real x = pi<Real>()*pi<Real>()/12;
+ for (auto _ : state)
+ {
+        auto g = G<Real>();
+        Real term = g();
+        x = term;
+        bool even = false;
+        while (term > std::numeric_limits<Real>::epsilon()/2) {
+ term = g();
+ if (even) {
+ x += term;
+ even = false;
+ } else {
+ x -= term;
+ even = true;
+ }
+ }
+ benchmark::DoNotOptimize(x);
+ }
+ // The accuracy tests don't pass because the sum is ill-conditioned:
+    /*if (abs(x - pi<Real>()*pi<Real>()/12) > 16*std::numeric_limits<Real>::epsilon())
+    {
+        std::cerr << std::setprecision(std::numeric_limits<Real>::max_digits10);
+        std::cerr << "Naive sum computed " << x << " on type " << boost::core::demangle(typeid(Real).name()) << "\n";
+        std::cerr << "But expected value is " << pi<Real>()*pi<Real>()/12 << "\n";
+    }*/
+}
+
+BENCHMARK_TEMPLATE(NaiveSum, float);
+BENCHMARK_TEMPLATE(NaiveSum, double);
+BENCHMARK_TEMPLATE(NaiveSum, long double);
+BENCHMARK_TEMPLATE(NaiveSum, float128);
+BENCHMARK_TEMPLATE(NaiveSum, cpp_bin_float_50);
+BENCHMARK_TEMPLATE(NaiveSum, cpp_bin_float_100);
+BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<100>>);
+BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<200>>);
+BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<300>>);
+BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<400>>);
+BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<1000>>);
+
+BENCHMARK_MAIN();
diff --git a/test/Jamfile.v2 b/test/Jamfile.v2
index a109f5c9b..19c18b358 100644
--- a/test/Jamfile.v2
+++ b/test/Jamfile.v2
@@ -951,6 +951,7 @@ test-suite misc :
[ run wavelet_transform_test.cpp : : : msvc:/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : -lquadmath ] ]
[ run agm_test.cpp : : : msvc:/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : -lquadmath ] ]
[ run rsqrt_test.cpp : : : msvc:/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : -lquadmath ] ]
+ [ run cohen_acceleration_test.cpp : : : msvc:/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : -lquadmath ] ]
[ compile compile_test/daubechies_filters_incl_test.cpp : [ requires cxx17_if_constexpr cxx17_std_apply ] ]
[ compile compile_test/daubechies_scaling_incl_test.cpp : [ requires cxx17_if_constexpr cxx17_std_apply ] ]
[ run whittaker_shannon_test.cpp : : : [ requires cxx11_auto_declarations cxx11_constexpr cxx11_smart_ptr cxx11_defaulted_functions ] ]
diff --git a/test/cohen_acceleration_test.cpp b/test/cohen_acceleration_test.cpp
new file mode 100644
index 000000000..291cb85b6
--- /dev/null
+++ b/test/cohen_acceleration_test.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright Nick Thompson, 2020
+ * Use, modification and distribution are subject to the
+ * Boost Software License, Version 1.0. (See accompanying file
+ * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#include "math_unit_test.hpp"
+#include <boost/math/tools/cohen_acceleration.hpp>
+#include <boost/math/constants/constants.hpp>
+#ifdef BOOST_HAS_FLOAT128
+#include <boost/multiprecision/float128.hpp>
+using boost::multiprecision::float128;
+#endif
+#include <boost/multiprecision/cpp_bin_float.hpp>
+
+using boost::math::tools::cohen_acceleration;
+using boost::multiprecision::cpp_bin_float_100;
+using boost::math::constants::pi;
+
+template<typename Real>
+class G {
+public:
+ G(){
+ k_ = 0;
+ }
+
+ Real operator()() {
+ k_ += 1;
+ return 1/(k_*k_);
+ }
+
+private:
+ Real k_;
+};
+
+template<typename Real>
+void test_pisq_div12()
+{
+    auto g = G<Real>();
+    Real x = cohen_acceleration(g);
+    CHECK_ULP_CLOSE(pi<Real>()*pi<Real>()/12, x, 3);
+}
+
+template<typename Real>
+class Divergent {
+public:
+ Divergent(){
+ k_ = 0;
+ }
+
+ // See C3 of: https://people.mpim-bonn.mpg.de/zagier/files/exp-math-9/fulltext.pdf
+ Real operator()() {
+ using std::log;
+ k_ += 1;
+ return log(k_);
+ }
+
+private:
+ Real k_;
+};
+
+template<typename Real>
+void test_divergent()
+{
+    using std::log;
+    auto g = Divergent<Real>();
+    Real x = -cohen_acceleration(g);
+    CHECK_ULP_CLOSE(log(pi<Real>()/2)/2, x, 80);
+}
+
+int main()
+{
+    test_pisq_div12<float>();
+    test_pisq_div12<double>();
+    test_pisq_div12<cpp_bin_float_100>();
+
+    test_divergent<float>();
+    test_divergent<double>();
+    test_divergent<cpp_bin_float_100>();
+
+ #ifdef BOOST_HAS_FLOAT128
+    test_pisq_div12<float128>();
+    test_divergent<float128>();
+ #endif
+ return boost::math::test::report_errors();
+}
diff --git a/test/math_unit_test.hpp b/test/math_unit_test.hpp
index 718e061af..c8a81c0dc 100644
--- a/test/math_unit_test.hpp
+++ b/test/math_unit_test.hpp
@@ -70,7 +70,7 @@ bool check_ulp_close(PreciseReal expected1, Real computed, size_t ulps, std::str
using std::max;
using std::abs;
using std::isnan;
- using boost::math::itrunc;
+ using boost::math::lltrunc;
// Of course integers can be expected values, and they are exact:
     if (!std::is_integral<PreciseReal>::value) {
BOOST_ASSERT_MSG(!isnan(expected1), "Expected value cannot be a nan.");
@@ -98,7 +98,7 @@ bool check_ulp_close(PreciseReal expected1, Real computed, size_t ulps, std::str
Real dist = abs(boost::math::float_distance(expected, computed));
if (dist > ulps)
{
-        detail::total_ulp_distance += static_cast<int64_t>(itrunc(dist));
+        detail::total_ulp_distance += static_cast<int64_t>(lltrunc(dist));
Real abs_expected = abs(expected);
Real denom = (max)(abs_expected, Real(1));
Real mollified_relative_error = abs(expected - computed)/denom;