Mirror of https://github.com/boostorg/math.git, synced 2026-01-19 04:22:09 +00:00
Cohen acceleration (#415)
* Cohen acceleration: accelerates convergence of an alternating series by a method designed by Cohen, Rodriguez Villegas, and Zagier.

doc/equations/alternating_series.svg: new file (SVG, 5.6 KiB; diff suppressed because one or more lines are too long)
doc/equations/cohen_acceleration_error_model.svg: new file (SVG, 13 KiB; diff suppressed because one or more lines are too long)
doc/equations/zeta_related_alternating.svg: new file (SVG, 7.6 KiB; diff suppressed because one or more lines are too long)

doc/internals/cohen_acceleration.qbk: new file (106 lines)
@@ -0,0 +1,106 @@
[/
Copyright Nick Thompson 2020
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt).
]

[section:cohen_acceleration Cohen Acceleration]

[h4 Synopsis]

    #include <boost/math/tools/cohen_acceleration.hpp>

    namespace boost::math::tools {

    template<class G>
    auto cohen_acceleration(G& generator, int64_t n = -1);

    } // namespaces

The function `cohen_acceleration` rapidly computes the limiting value of an alternating series via a technique developed by [@https://www.johndcook.com/blog/2020/08/06/cohen-acceleration/ Henri Cohen et al].
Rather than summing the series term by term, the method forms a single weighted combination of the first /n/ terms, with weights chosen so that the error decays geometrically in /n/.
To compute

[$../equations/alternating_series.svg]

we first define a callable that produces /a/[sub /k/] on the /k/th call.
For example, suppose we wish to compute

[$../equations/zeta_related_alternating.svg]

First, we need to define a callable which returns the requisite terms:

    template<typename Real>
    class G {
    public:
        G(){
            k_ = 0;
        }

        Real operator()() {
            k_ += 1;
            return 1/(k_*k_);
        }

    private:
        Real k_;
    };

Then we pass this into the `cohen_acceleration` function:

    auto gen = G<double>();
    double computed = cohen_acceleration(gen);

See `cohen_acceleration.cpp` in the `examples` directory for more.
The number of terms consumed is computed from the error model

[$../equations/cohen_acceleration_error_model.svg]

and must be computed /a priori/.
If we read the reference carefully, we notice that this error model is derived under the assumption that the terms /a/[sub /k/] are given as the moments of a positive measure on [0,1].
If this assumption does not hold, then the number of terms chosen by the method is incorrect.
Hence we permit the user to provide a second argument to specify the number of terms:

    double computed = cohen_acceleration(gen, 5);

/Nota bene/: When experimenting with this option, we found that adding more terms was no guarantee of additional accuracy, and we could not find an example where a user-provided number of terms outperformed the default.
In addition, it is easy to generate intermediates which overflow if we let /n/ grow too large.
Hence we recommend using this parameter only to /decrease/ the default number of terms and so increase speed.
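
For illustration, the default number of terms follows directly from the error model above: requiring the bound 2*(3 + sqrt(8))^-n to fall below eps/2 gives n > ln(4/eps)/ln(3 + sqrt(8)). A minimal sketch of that computation (the helper `default_term_count` is ours, not part of the library):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Hypothetical helper showing how the default term count could be chosen:
    // solve 2*(3 + sqrt(8))^(-n) <= eps/2 for n.
    template<typename Real>
    int64_t default_term_count()
    {
        using std::ceil;
        using std::log;
        using std::sqrt;
        Real n = ceil(log(4/std::numeric_limits<Real>::epsilon())/log(Real(3) + sqrt(Real(8))));
        return static_cast<int64_t>(n);
    }

For `double` this yields roughly 22 terms, which is why the default is both fast and accurate.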

[heading Performance]

To see that Cohen acceleration is indeed faster than naive summation for the same level of relative accuracy, we can run the `reporting/performance/cohen_acceleration_performance.cpp` file.
This benchmark computes the alternating Basel series discussed above:

```
Running ./reporting/performance/cohen_acceleration_performance.x
Run on (16 X 2300 MHz CPU s)
CPU Caches:
  L1 Data 32 KiB (x8)
  L1 Instruction 32 KiB (x8)
  L2 Unified 256 KiB (x8)
  L3 Unified 16384 KiB (x1)
Load Average: 4.13, 3.71, 3.30
-----------------------------------------------------------------
Benchmark                                  Time
-----------------------------------------------------------------
CohenAcceleration<float>                  20.7 ns
CohenAcceleration<double>                 64.6 ns
CohenAcceleration<long double>             115 ns
NaiveSum<float>                           4994 ns
NaiveSum<double>                     112803698 ns
NaiveSum<long double>               5009564877 ns
```

In fact, not only does the naive sum take orders of magnitude longer to compute, it is also less accurate.
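
To get a rough feel for why (a back-of-the-envelope estimate of ours, not part of the benchmark): the naive loop must run until 1/k^2 drops below eps/2, i.e. roughly sqrt(2/eps) terms, whereas the accelerated sum needs only about ln(4/eps)/ln(3 + sqrt(8)) terms.

    #include <cmath>
    #include <cstdio>
    #include <limits>

    int main()
    {
        // Terms of the alternating Basel series are 1/k^2, so the naive loop
        // stops only once 1/k^2 < eps/2, i.e. after roughly sqrt(2/eps) terms.
        double eps = std::numeric_limits<double>::epsilon();
        std::printf("naive terms ~ %.2g\n", std::sqrt(2/eps));  // about 9.5e7 for double
        std::printf("Cohen terms ~ %.0f\n", std::ceil(std::log(4/eps)/std::log(3 + std::sqrt(8.0))));  // about 22
    }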

[heading References]

* Cohen, Henri, Fernando Rodriguez Villegas, and Don Zagier. ['Convergence acceleration of alternating series.] Experimental Mathematics 9.1 (2000): 3-12.

[endsect]
@@ -756,6 +756,7 @@ and as a CD ISBN 0-9504833-2-X 978-0-9504833-2-0, Classification 519.2-dc22.
 [include internals/luroth_expansion.qbk]
 [include internals/engel_expansion.qbk]
 [include internals/recurrence.qbk]
+[include internals/cohen_acceleration.qbk]
 [/include internals/rational.qbk] [/moved to tools]
 [include internals/tuple.qbk]
 [/include internals/polynomial.qbk] [/moved to tools]

example/cohen_acceleration.cpp: new file (46 lines)
@@ -0,0 +1,46 @@
// (C) Copyright Nick Thompson 2020.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <iostream>
#include <iomanip>
#include <boost/math/tools/cohen_acceleration.hpp>
#include <boost/math/constants/constants.hpp>

using boost::math::tools::cohen_acceleration;
using boost::math::constants::pi;

template<typename Real>
class G {
public:
    G(){
        k_ = 0;
    }

    Real operator()() {
        k_ += 1;
        return 1/(k_*k_);
    }

private:
    Real k_;
};

int main() {
    using Real = float;
    auto g = G<Real>();
    Real computed = cohen_acceleration(g);
    Real expected = pi<Real>()*pi<Real>()/12;
    std::cout << std::setprecision(std::numeric_limits<Real>::max_digits10);

    std::cout << "Computed = " << computed << " = " << std::hexfloat << computed << "\n";
    std::cout << std::defaultfloat;
    std::cout << "Expected = " << expected << " = " << std::hexfloat << expected << "\n";

    // Compute with a specified number of terms:
    // Make sure to reset g:
    g = G<Real>();
    computed = cohen_acceleration(g, 5);
    std::cout << std::defaultfloat;
    std::cout << "Computed = " << computed << " = " << std::hexfloat << computed << " using 5 terms.\n";
}

include/boost/math/tools/cohen_acceleration.hpp: new file (49 lines)
@@ -0,0 +1,49 @@
// (C) Copyright Nick Thompson 2020.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_MATH_TOOLS_COHEN_ACCELERATION_HPP
#define BOOST_MATH_TOOLS_COHEN_ACCELERATION_HPP
#include <limits>
#include <cmath>

namespace boost::math::tools {

// Algorithm 1 of https://people.mpim-bonn.mpg.de/zagier/files/exp-math-9/fulltext.pdf
// Convergence Acceleration of Alternating Series: Henri Cohen, Fernando Rodriguez Villegas, and Don Zagier
template<class G>
auto cohen_acceleration(G& generator, int64_t n = -1)
{
    using Real = decltype(generator());
    // This test doesn't pass for float128, sad!
    //static_assert(std::is_floating_point_v<Real>, "Real must be a floating point type.");
    using std::log;
    using std::pow;
    using std::ceil;
    using std::sqrt;
    Real n_ = n;
    if (n < 0)
    {
        // The relative error decays like 2*5.828^-n; take 5.828^-n < eps/4 => -n*ln(5.828) < ln(eps/4) => n > ln(4/eps)/ln(5.828).
        // Is there a way to do it rapidly with std::log2? (Yes, of course; but for primitive types it's computed at compile-time anyway.)
        n_ = ceil(log(4/std::numeric_limits<Real>::epsilon())*0.5672963285532555);
        n = static_cast<int64_t>(n_);
    }
    // d can get huge and overflow if you pick n too large:
    Real d = pow(3 + sqrt(Real(8)), n);
    d = (d + 1/d)/2;
    Real b = -1;
    Real c = -d;
    Real s = 0;
    for (Real k = 0; k < n_; ++k) {
        c = b - c;
        s += c*generator();
        b = (k+n_)*(k-n_)*b/((k+Real(1)/Real(2))*(k+1));
    }

    return s/d;
}

} // namespace boost::math::tools
#endif
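
As a quick sanity check of the header above, the same routine accelerates the alternating harmonic series 1 - 1/2 + 1/3 - ..., whose limit is ln 2. A standalone sketch of ours (not part of this commit), assuming the header is on the include path:

#include <cmath>
#include <iostream>
#include <boost/math/tools/cohen_acceleration.hpp>

// Generator for the alternating harmonic series: the kth call returns 1/k;
// the terms 1/k are moments of a positive measure on [0,1], so the default
// term count applies.
struct ReciprocalTerms {
    double k = 0;
    double operator()() { k += 1; return 1/k; }
};

int main() {
    ReciprocalTerms g;
    double computed = boost::math::tools::cohen_acceleration(g);
    std::cout << "computed = " << computed << "\n";
    std::cout << "ln(2)    = " << std::log(2.0) << "\n";
}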

reporting/performance/cohen_acceleration_performance.cpp: new file (117 lines)
@@ -0,0 +1,117 @@
// (C) Copyright Nick Thompson 2020.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <random>
#include <iostream>
#include <iomanip>
#include <benchmark/benchmark.h>
#include <boost/math/tools/cohen_acceleration.hpp>
#include <boost/math/constants/constants.hpp>
#include <boost/multiprecision/float128.hpp>
#include <boost/multiprecision/mpfr.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/core/demangle.hpp>

using boost::multiprecision::number;
using boost::multiprecision::mpfr_float_backend;
using boost::multiprecision::float128;
using boost::multiprecision::cpp_bin_float_50;
using boost::multiprecision::cpp_bin_float_100;
using boost::math::tools::cohen_acceleration;
using boost::math::constants::pi;

template<typename Real>
class G {
public:
    G(){
        k_ = 0;
    }

    Real operator()() {
        k_ += 1;
        return 1/(k_*k_);
    }

private:
    Real k_;
};


template<class Real>
void CohenAcceleration(benchmark::State& state)
{
    using std::abs;
    Real x = pi<Real>()*pi<Real>()/12;
    for (auto _ : state)
    {
        auto g = G<Real>();
        x = cohen_acceleration(g);
        benchmark::DoNotOptimize(x);
    }
    if (abs(x - pi<Real>()*pi<Real>()/12) > 16*std::numeric_limits<Real>::epsilon())
    {
        std::cerr << std::setprecision(std::numeric_limits<Real>::max_digits10);
        std::cerr << "Cohen acceleration computed " << x << " on type " << boost::core::demangle(typeid(Real).name()) << "\n";
        std::cerr << "But expected value is " << pi<Real>()*pi<Real>()/12 << "\n";
    }
}

BENCHMARK_TEMPLATE(CohenAcceleration, float);
BENCHMARK_TEMPLATE(CohenAcceleration, double);
BENCHMARK_TEMPLATE(CohenAcceleration, long double);
BENCHMARK_TEMPLATE(CohenAcceleration, float128);
BENCHMARK_TEMPLATE(CohenAcceleration, cpp_bin_float_50);
BENCHMARK_TEMPLATE(CohenAcceleration, cpp_bin_float_100);
BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<100>>);
BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<200>>);
BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<300>>);
BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<400>>);
BENCHMARK_TEMPLATE(CohenAcceleration, number<mpfr_float_backend<1000>>);


template<class Real>
void NaiveSum(benchmark::State& state)
{
    using std::abs;
    Real x = pi<Real>()*pi<Real>()/12;
    for (auto _ : state)
    {
        auto g = G<Real>();
        Real term = g();
        x = term;
        bool even = false;
        while (term > std::numeric_limits<Real>::epsilon()/2) {
            term = g();
            if (even) {
                x += term;
                even = false;
            } else {
                x -= term;
                even = true;
            }
        }
        benchmark::DoNotOptimize(x);
    }
    // The accuracy tests don't pass because the sum is ill-conditioned:
    /*if (abs(x - pi<Real>()*pi<Real>()/12) > 16*std::numeric_limits<Real>::epsilon())
    {
        std::cerr << std::setprecision(std::numeric_limits<Real>::max_digits10);
        std::cerr << "Naive summation computed " << x << " on type " << boost::core::demangle(typeid(Real).name()) << "\n";
        std::cerr << "But expected value is " << pi<Real>()*pi<Real>()/12 << "\n";
    }*/
}

BENCHMARK_TEMPLATE(NaiveSum, float);
BENCHMARK_TEMPLATE(NaiveSum, double);
BENCHMARK_TEMPLATE(NaiveSum, long double);
BENCHMARK_TEMPLATE(NaiveSum, float128);
BENCHMARK_TEMPLATE(NaiveSum, cpp_bin_float_50);
BENCHMARK_TEMPLATE(NaiveSum, cpp_bin_float_100);
BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<100>>);
BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<200>>);
BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<300>>);
BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<400>>);
BENCHMARK_TEMPLATE(NaiveSum, number<mpfr_float_backend<1000>>);

BENCHMARK_MAIN();

@@ -951,6 +951,7 @@ test-suite misc :
 [ run wavelet_transform_test.cpp : : : <toolset>msvc:<cxxflags>/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : <linkflags>-lquadmath ] ]
 [ run agm_test.cpp : : : <toolset>msvc:<cxxflags>/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : <linkflags>-lquadmath ] ]
 [ run rsqrt_test.cpp : : : <toolset>msvc:<cxxflags>/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : <linkflags>-lquadmath ] ]
+[ run cohen_acceleration_test.cpp : : : <toolset>msvc:<cxxflags>/bigobj [ requires cxx17_if_constexpr cxx17_std_apply ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : <linkflags>-lquadmath ] ]
 [ compile compile_test/daubechies_filters_incl_test.cpp : [ requires cxx17_if_constexpr cxx17_std_apply ] ]
 [ compile compile_test/daubechies_scaling_incl_test.cpp : [ requires cxx17_if_constexpr cxx17_std_apply ] ]
 [ run whittaker_shannon_test.cpp : : : [ requires cxx11_auto_declarations cxx11_constexpr cxx11_smart_ptr cxx11_defaulted_functions ] ]

test/cohen_acceleration_test.cpp: new file (86 lines)
@@ -0,0 +1,86 @@
/*
 * Copyright Nick Thompson, 2020
 * Use, modification and distribution are subject to the
 * Boost Software License, Version 1.0. (See accompanying file
 * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 */

#include "math_unit_test.hpp"
#include <boost/math/tools/cohen_acceleration.hpp>
#include <boost/math/constants/constants.hpp>
#ifdef BOOST_HAS_FLOAT128
#include <boost/multiprecision/float128.hpp>
using boost::multiprecision::float128;
#endif
#include <boost/multiprecision/cpp_bin_float.hpp>

using boost::math::tools::cohen_acceleration;
using boost::multiprecision::cpp_bin_float_100;
using boost::math::constants::pi;

template<typename Real>
class G {
public:
    G(){
        k_ = 0;
    }

    Real operator()() {
        k_ += 1;
        return 1/(k_*k_);
    }

private:
    Real k_;
};

template<typename Real>
void test_pisq_div12()
{
    auto g = G<Real>();
    Real x = cohen_acceleration(g);
    CHECK_ULP_CLOSE(pi<Real>()*pi<Real>()/12, x, 3);
}

template<typename Real>
class Divergent {
public:
    Divergent(){
        k_ = 0;
    }

    // See C3 of: https://people.mpim-bonn.mpg.de/zagier/files/exp-math-9/fulltext.pdf
    Real operator()() {
        using std::log;
        k_ += 1;
        return log(k_);
    }

private:
    Real k_;
};

template<typename Real>
void test_divergent()
{
    // In the regularized sense, log(1) - log(2) + log(3) - ... = -eta'(0) = -log(pi/2)/2,
    // so negating the accelerated value should recover log(pi/2)/2.
    auto g = Divergent<Real>();
    Real x = -cohen_acceleration(g);
    CHECK_ULP_CLOSE(log(pi<Real>()/2)/2, x, 80);
}

int main()
{
    test_pisq_div12<float>();
    test_pisq_div12<double>();
    test_pisq_div12<long double>();

    test_divergent<float>();
    test_divergent<double>();
    test_divergent<long double>();

#ifdef BOOST_HAS_FLOAT128
    test_pisq_div12<float128>();
    test_divergent<float128>();
#endif
    return boost::math::test::report_errors();
}

@@ -70,7 +70,7 @@ bool check_ulp_close(PreciseReal expected1, Real computed, size_t ulps, std::str
     using std::max;
     using std::abs;
     using std::isnan;
-    using boost::math::itrunc;
+    using boost::math::lltrunc;
     // Of course integers can be expected values, and they are exact:
     if (!std::is_integral<PreciseReal>::value) {
         BOOST_ASSERT_MSG(!isnan(expected1), "Expected value cannot be a nan.");
@@ -98,7 +98,7 @@ bool check_ulp_close(PreciseReal expected1, Real computed, size_t ulps, std::str
     Real dist = abs(boost::math::float_distance(expected, computed));
     if (dist > ulps)
     {
-        detail::total_ulp_distance += static_cast<int64_t>(itrunc(dist));
+        detail::total_ulp_distance += static_cast<int64_t>(lltrunc(dist));
         Real abs_expected = abs(expected);
         Real denom = (max)(abs_expected, Real(1));
         Real mollified_relative_error = abs(expected - computed)/denom;
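
The hunks above swap `itrunc` for `lltrunc` when accumulating ULP distances, presumably so that very large distances do not overflow an `int`. A standalone sketch of ours illustrating that measurement (values chosen arbitrarily):

#include <cstdint>
#include <iostream>
#include <boost/math/special_functions/next.hpp>   // boost::math::float_distance
#include <boost/math/special_functions/trunc.hpp>  // boost::math::lltrunc

int main() {
    double expected = 0.8224670334241132;  // pi^2/12
    double computed = expected + 1e-13;
    // float_distance returns the (signed) number of representable values between its arguments:
    double dist = boost::math::float_distance(expected, computed);
    // lltrunc truncates to long long, so a huge ULP distance still fits:
    std::int64_t ulps = static_cast<std::int64_t>(boost::math::lltrunc(dist));
    std::cout << "ULP distance: " << ulps << "\n";
}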