From 45226c7ef1369e728cdba4fdd2561fea7682d246 Mon Sep 17 00:00:00 2001 From: Nick Thompson Date: Tue, 2 May 2017 18:29:03 -0600 Subject: [PATCH] Numerical differentiation by finite differences and the complex step derivative. --- doc/math.qbk | 2 + doc/sf/numerical_differentiation.qbk | 99 +++++++ .../math/tools/numerical_differentiation.hpp | 234 ++++++++++++++++ test/Jamfile.v2 | 1 + test/test_numerical_differentiation.cpp | 252 ++++++++++++++++++ 5 files changed, 588 insertions(+) create mode 100644 doc/sf/numerical_differentiation.qbk create mode 100644 include/boost/math/tools/numerical_differentiation.hpp create mode 100644 test/test_numerical_differentiation.cpp diff --git a/doc/math.qbk b/doc/math.qbk index 2f4ceab3b..26d996b44 100644 --- a/doc/math.qbk +++ b/doc/math.qbk @@ -603,6 +603,7 @@ and as a CD ISBN 0-9504833-2-X 978-0-9504833-2-0, Classification 519.2-dc22. [include sf/inv_hyper.qbk] [include sf/owens_t.qbk] +[include sf/numerical_differentiation.qbk] [endmathpart] [/section:special Special Functions] @@ -660,6 +661,7 @@ and as a CD ISBN 0-9504833-2-X 978-0-9504833-2-0, Classification 519.2-dc22. [include background/remez.qbk] [include background/references.qbk] + [section:logs_and_tables Error logs and tables] [section:all_table Tables of Error Rates for all Functions] diff --git a/doc/sf/numerical_differentiation.qbk b/doc/sf/numerical_differentiation.qbk new file mode 100644 index 000000000..27cb23fe3 --- /dev/null +++ b/doc/sf/numerical_differentiation.qbk @@ -0,0 +1,99 @@ +[/ +Copyright (c) 2017 Nick Thompson +Use, modification and distribution are subject to the +Boost Software License, Version 1.0. 
(See accompanying file
+LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+]
+
+[section:diff Numerical Differentiation]
+
+[heading Synopsis]
+
+``
+#include <boost/math/tools/numerical_differentiation.hpp>
+
+
+namespace boost { namespace math { namespace tools {
+
+    template <class F, class Real>
+    Real complex_step_derivative(const F f, Real x);
+
+    template <class F, class Real, size_t order = 6>
+    Real finite_difference_derivative(const F f, Real x, Real* error);
+
+}}} // namespaces
+``
+
+[heading Description]
+
+The function `finite_difference_derivative` calculates a finite-difference approximation to the derivative of a function /f/ at point /x/.
+A basic usage is
+
+    auto f = [](double x) { return std::exp(x); };
+    double x = 1.7;
+    double dfdx = finite_difference_derivative(f, x);
+
+It is well-known that finite-difference approximations to the derivative are ill-conditioned.
+There are two sources of error: One, the truncation error arising from using a finite number of samples to cancel out higher order terms in the Taylor series.
+The second is the roundoff error involved in evaluating the function.
+The truncation error goes to zero as /h/ \u2192 0, but the roundoff error becomes unbounded.
+By balancing these two sources of error, we can choose a value of /h/ that minimizes the maximum total error.
+For this reason boost's `finite_difference_derivative` does not require the user to input a stepsize.
+For more details about the theoretical error analysis involved in finite-difference approximations to the derivative, see [@http://web.archive.org/web/20150420195907/http://www.uio.no/studier/emner/matnat/math/MAT-INF1100/h08/kompendiet/diffint.pdf here].
+
+Despite the effort that has gone into choosing a reasonable value of /h/, the problem is still fundamentally ill-conditioned, and hence an error estimate is essential.
+It can be queried as follows
+
+    double error_estimate;
+    double d = finite_difference_derivative(f, x, &error_estimate);
+
+N.B.: Producing an error estimate requires additional function evaluations and as such is slower than simple evaluation of the derivative.
+It also expands the domain over which the function must be differentiable.
+
+The default order of accuracy is 6, which reflects the fact that people tend to be interested in functions with many continuous derivatives.
+If your function does not have 7 continuous derivatives, it may be of interest to use a lower order method, which can be achieved via (say)
+
+    double d = finite_difference_derivative<decltype(f), double, 2>(f, x);
+
+This requests a second-order accurate derivative be computed.
+
+It is emphatically /not/ the case that higher order methods give higher accuracy for smooth functions.
+Higher order methods require more addition of positive and negative terms, which can lead to catastrophic cancellation.
+A function which is very good at making a mockery of finite-difference differentiation is exp(x)/(cos(x)[super 3] + sin(x)[super 3]).
+Differentiating this function by `finite_difference_derivative` in double precision at /x=5.5/ gives zero correct digits at order 4, 6, and 8, but recovers 5 correct digits at order 2.
+These are dangerous waters; use the error estimates to tread carefully.
+
+For a finite-difference method of order /k/, the error is /C/ \u03B5[super k/k+1].
+In the limit /k/ \u2192 \u221E, we see that the error tends to \u03B5, recovering the full precision for the type.
+However, this ignores the fact that higher-order methods require subtracting more nearly-equal terms, so the constant /C/ grows with /k/.
+Since /C/ grows quickly and \u03B5[super k/k+1] approaches \u03B5 slowly, we can see there is a compromise between high-order accuracy and conditioning of the difference quotient.
+In practice we have found that /k=6/ seems to be a good compromise between the two (and have made this the default), but users are encouraged to examine the error estimates to choose an optimal order of accuracy for the given problem.
+
+[table:id Cost of Finite-Difference Numerical Differentiation
+    [[Order of Accuracy] [Function Evaluations] [Error] [Continuous Derivatives Required for Error Estimate to Hold] [Additional Function Evaluations to Produce Error Estimates]]
+    [[1] [2] [\u03B5[super 1/2]] [2] [1]]
+    [[2] [2] [\u03B5[super 2/3]] [3] [2]]
+    [[4] [4] [\u03B5[super 4/5]] [5] [2]]
+    [[6] [6] [\u03B5[super 6/7]] [7] [2]]
+    [[8] [8] [\u03B5[super 8/9]] [9] [2]]
+]
+
+
+Given all the caveats which must be kept in mind for successful use of finite-difference differentiation, it is reasonable to try to avoid it if possible.
+Boost provides one such possibility: If your function is the restriction to the real line of a holomorphic function which takes real values at real argument, then the miraculous *complex step derivative* can be used.
+The idea is very simple: Since /f/ is complex-differentiable, ['f(x+\u2148 h) = f(x) + \u2148hf'(x) - h[super 2]f''(x)/2 + [bigo](h[super 3])].
+As long as /f(x)/ \u2208 \u211D, then ['f'(x) = \u2111 f(x+\u2148 h)/h + [bigo](h[super 2])].
+This method requires a single complex function evaluation and is not subject to the catastrophic subtractive cancellation that plagues finite-difference calculations.
+
+An example usage:
+
+    double x = 7.2;
+    double e_prime = complex_step_derivative(std::exp<std::complex<double>>, x);
+
+References:
+
+Squire, William, and George Trapp. ['Using complex variables to estimate derivatives of real functions.] Siam Review 40.1 (1998): 110-112.
+
+Fornberg, Bengt. ['Generation of finite difference formulas on arbitrarily spaced grids.] Mathematics of computation 51.184 (1988): 699-706.
+
+[endsect]
diff --git a/include/boost/math/tools/numerical_differentiation.hpp b/include/boost/math/tools/numerical_differentiation.hpp
new file mode 100644
index 000000000..1b055fc1e
--- /dev/null
+++ b/include/boost/math/tools/numerical_differentiation.hpp
@@ -0,0 +1,234 @@
+#ifndef BOOST_MATH_TOOLS_NUMERICAL_DIFFERENTIATION_HPP
+#define BOOST_MATH_TOOLS_NUMERICAL_DIFFERENTIATION_HPP
+
+/*
+ * Performs numerical differentiation by finite-differences.
+ *
+ * All numerical differentiation schemes using finite differences are ill-conditioned, and these routines are no exception.
+ * A simple argument demonstrates that the error is unbounded as h->0.
+ * Take the one-sided finite difference formula f'(x) = (f(x+h)-f(x))/h.
+ * The evaluation of f induces an error as well as the error from the finite-difference approximation, giving
+ * |f'(x) - (f(x+h) -f(x))/h| < h|f''(x)|/2 + (|f(x)|+|f(x+h)|)eps/h =: g(h), where eps is the unit roundoff for the type.
+ * It is reasonable to choose h in a way that minimizes the maximum error bound g(h).
+ * The value of h that minimizes g is h = sqrt(2eps(|f(x)| + |f(x+h)|)/|f''(x)|), and for this value of h the error bound is
+ * sqrt(2eps(|f(x+h)| + |f(x)|)|f''(x)|).
+ * In fact it is not necessary to compute the ratio (|f(x+h)| + |f(x)|)/|f''(x)|; the error bound of ~\sqrt{\epsilon} still holds if we set it to one.
+ *
+ * However, this analysis holds under the assumption that f can be computed within 1 ULP for any x.
+ * This is often incorrect, so in the computation of the error we assume more generously that the function can be evaluated to within \pm 3 ULP.
+ *
+ *
+ * For more details on this method of analysis, see
+ *
+ * http://www.uio.no/studier/emner/matnat/math/MAT-INF1100/h08/kompendiet/diffint.pdf
+ * http://web.archive.org/web/20150420195907/http://www.uio.no/studier/emner/matnat/math/MAT-INF1100/h08/kompendiet/diffint.pdf
+ *
+ *
+ * It can be shown on general grounds that when choosing the optimal h, the maximum error in f'(x) is ~(|f(x)|eps)^(k/(k+1))|f^(k-1)(x)|^(1/(k+1)).
+ * From this we can see that full precision can be recovered in the limit k->infinity.
+ *
+ * References:
+ *
+ * 1) Fornberg, Bengt. "Generation of finite difference formulas on arbitrarily spaced grids." Mathematics of computation 51.184 (1988): 699-706.
+ *
+ *
+ * The second algorithm is, IMO, miraculous. However, it requires that your function can be evaluated at complex arguments.
+ * The idea is that f(x+ih) = f(x) + ihf'(x) - h^2 f''(x)/2 + ... so f'(x) \approx Im[f(x+ih)]/h.
+ * No subtractive cancellation occurs. The error is ~ eps|f'(x)| + eps^2|f'''(x)|/6; hard to beat that.
+ *
+ * References:
+ *
+ * 1) Squire, William, and George Trapp. "Using complex variables to estimate derivatives of real functions." Siam Review 40.1 (1998): 110-112.
+ */
+
+#include <complex>
+#include <boost/math/constants/constants.hpp>
+
+namespace boost { namespace math { namespace tools {
+
+template <class F, class Real>
+Real complex_step_derivative(const F f, Real x)
+{
+    // Is it really this easy? Yes.
+    // Note that some authors recommend taking the stepsize h to be smaller than epsilon(), some recommending use of the min().
+    // This idea was tested over a few billion test cases and found to make the error *much* worse.
+    // Even 2eps and eps/2 made the error worse, which was surprising.
using std::complex;
+    constexpr const Real step = std::numeric_limits<Real>::epsilon();
+    constexpr const Real inv_step = 1/std::numeric_limits<Real>::epsilon();
+    return f(complex<Real>(x, step)).imag()*inv_step;
+}
+
+template <class F, class Real, size_t order = 6>
+Real finite_difference_derivative(const F f, Real x, Real* error = nullptr)
+{
+    static_assert(order == 1 || order == 2 || order == 4 || order == 6 || order == 8,
+                  "Order of accuracy must be one of 1, 2, 4, 6, or 8.\n");
+
+    using std::sqrt;
+    using std::pow;
+    using std::abs;
+    using std::max;
+    using std::nextafter;
+    using boost::math::constants::half;
+
+    const Real eps = std::numeric_limits<Real>::epsilon();
+    // TODO: static if in C++17!
+    // Error bound ~eps^1/2
+    if (order == 1)
+    {
+        // Note that this estimate of h differs from the best estimate by a factor of sqrt((|f(x)| + |f(x+h)|)/|f''(x)|).
+        // Since this factor is invariant under the scaling f -> kf, then we are somewhat justified in approximating it by 1.
+        // This approximation will get better as we move to higher orders of accuracy.
+        Real h = 2*sqrt(eps);
+
+        // Redefine h so that x + h is representable. Not using this trick leads to large error.
+        // The compiler flag -ffast-math evaporates these operations . . .
+        Real temp = x + h;
+        h = temp - x;
+        // Handle the case x + h == x:
+        if (h == 0)
+        {
+            h = nextafter(x, std::numeric_limits<Real>::max()) - x;
+        }
+        Real yh = f(x+h);
+        Real y0 = f(x);
+        Real diff = yh - y0;
+        if (error)
+        {
+            Real ym = f(x-h);
+            Real ypph = abs(yh - 2*y0 + ym)/h;
+            // h*|f''(x)|*0.5 + (|f(x+h)| + |f(x)|)*3*eps/h, where 3 allows for the function to be evaluated to \pm 3 ULP.
+            *error = ypph*half<Real>() + (abs(yh) + abs(y0))*3*eps/h;
+        }
+        return diff/h;
+    }
+
+    // Error bound ~eps^2/3
+    if (order == 2)
+    {
+        // See the previous discussion to understand determination of h and the error bound.
// Series[(f[x+h] - f[x-h])/(2*h), {h, 0, 4}]
+        Real h = pow(3*eps, boost::math::constants::third<Real>());
+        Real temp = x + h;
+        h = temp - x;
+        if (h == 0)
+        {
+            h = nextafter(x, std::numeric_limits<Real>::max()) - x;
+        }
+        Real yh = f(x+h);
+        Real ymh = f(x-h);
+        Real diff = yh - ymh;
+        if (error)
+        {
+            Real yth = f(x+2*h);
+            Real ymth = f(x-2*h);
+            *error = 3*eps*(abs(yh) + abs(ymh))/(2*h) + abs((yth - ymth)*half<Real>() - diff)/(6*h);
+        }
+
+        return diff/(2*h);
+    }
+
+    // Error bound ~eps^4/5
+    if (order == 4)
+    {
+        Real h = pow(11.25*eps, (Real) 1/ (Real) 5);
+        Real temp = x + h;
+        h = temp - x;
+        if (h == 0)
+        {
+            h = nextafter(x, std::numeric_limits<Real>::max()) - x;
+        }
+        Real ymth = f(x-2*h);
+        Real yth = f(x+2*h);
+        Real yh = f(x+h);
+        Real ymh = f(x-h);
+        Real y2 = ymth - yth;
+        Real y1 = yh - ymh;
+        if (error)
+        {
+            // Mathematica code to extract the remainder:
+            // Series[(f[x-2*h]+ 8*f[x+h] - 8*f[x-h] - f[x+2*h])/(12*h), {h, 0, 7}]
+            Real y_three_h = f(x+3*h);
+            Real y_m_three_h = f(x-3*h);
+            // Error from fifth derivative:
+            *error = abs(half<Real>()*(y_three_h - y_m_three_h) + 2*(ymth - yth) + 5*half<Real>()*(yh - ymh) )/(30*h);
+            // Error from function evaluation, assuming 3ULP:
+            *error += 3*eps*(abs(yth) + abs(ymth) + 8*(abs(ymh) + abs(yh)))/(12*h);
+        }
+        return (y2+8*y1)/(12*h);
+    }
+
+    // Error bound ~eps^6/7
+    if (order == 6)
+    {
+        // Error: h^6 f^(7)(x)/140 + 5|f(x)|eps/h
+        Real h = pow(eps/168, (Real) 1/ (Real) 7);
+        Real temp = x + h;
+        h = temp - x;
+        if (h == 0)
+        {
+            h = nextafter(x, std::numeric_limits<Real>::max()) - x;
+        }
+
+        Real yh = f(x+h);
+        Real ymh = f(x-h);
+        Real y1 = yh - ymh;
+        Real y2 = f(x-2*h) - f(x + 2*h);
+        Real y3 = f(x+3*h) - f(x - 3*h);
+
+        if (error)
+        {
+            // Mathematica code to generate fd scheme for 7th derivative:
+            // Sum[(-1)^i*Binomial[7, i]*(f[x+(3-i)*h] + f[x+(4-i)*h])/2, {i, 0, 7}]
+            // Mathematica to demonstrate that this is a finite difference formula for 7th derivative:
+            // Series[(f[x+4*h]-f[x-4*h] + 6*(f[x-3*h] - f[x+3*h]) + 14*(f[x-h] - 
f[x+h] + f[x+2*h] - f[x-2*h]))/2, {h, 0, 15}] + Real y7 = half()*(f(x+4*h) - f(x-4*h) - 6*y3 - 14*y1 - 14*y2); + *error = abs(y7)/(140*h) + 5*(abs(yh) + abs(ymh))*eps/h; + } + return (y3 + 9*y2 + 45*y1)/(60*h); + } + + // Error bound ~eps^8/9. + // In double precision, we only expect to lose two digits of precision while using this formula, at the cost of 8 function evaluations. + if (order == 8) + { + // Error: h^8|f^(9)(x)|/630 + 7|f(x)|eps/h assuming 7 unstabilized additions. + // Mathematica code to get the error: + // Series[(f[x+h]-f[x-h])*(4/5) + (1/5)*(f[x-2*h] - f[x+2*h]) + (4/105)*(f[x+3*h] - f[x-3*h]) + (1/280)*(f[x-4*h] - f[x+4*h]), {h, 0, 9}] + // If we used Kahan summation, we could get the max error down to h^8|f^(9)(x)|/630 + |f(x)|eps/h. + Real h = pow(551.25*eps, (Real)1 / (Real) 9); + Real temp = x + h; + h = temp - x; + if (h == 0) + { + h = nextafter(x, std::numeric_limits::max()) - x; + } + + Real yh = f(x+h); + Real ymh = f(x-h); + Real y1 = yh - ymh; + Real y2 = f(x-2*h) - f(x + 2*h); + Real y3 = f(x+3*h) - f(x - 3*h); + Real y4 = f(x-4*h) - f(x + 4*h); + + Real tmp1 = 3*y4/8 + 4*y3; + Real tmp2 = 21*y2 + 84*y1; + + if (error) + { + // Mathematica code to generate fd scheme for 7th derivative: + // Sum[(-1)^i*Binomial[9, i]*(f[x+(4-i)*h] + f[x+(5-i)*h])/2, {i, 0, 9}] + // Mathematica to demonstrate that this is a finite difference formula for 7th derivative: + // Series[(f[x+5*h]-f[x- 5*h])/2 + 4*(f[x-4*h] - f[x+4*h]) + 27*(f[x+3*h] - f[x-3*h])/2 + 24*(f[x-2*h] - f[x+2*h]) + 21*(f[x+h] - f[x-h]), {h, 0, 15}] + Real f9 = (f(x+5*h) - f(x-5*h))*half() + 4*y4 + 27*half()*y3 + 24*y2 + 21*y1; + *error = abs(f9)/(630*h) + 7*(abs(yh)+abs(ymh))*eps/h; + } + return (tmp1 + tmp2)/(105*h); + } + + throw std::logic_error("Order higher than 8 is not implemented.\n"); +} + +}}} +#endif diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 index 29c694143..5883ee27e 100644 --- a/test/Jamfile.v2 +++ b/test/Jamfile.v2 @@ -689,6 +689,7 @@ run test_nc_t.cpp pch 
../../test/build//boost_unit_test_framework intel:off : test_nc_t_real_concept ; run test_normal.cpp pch ../../test/build//boost_unit_test_framework ; +run test_numerical_differentiation.cpp ../../test/build//boost_unit_test_framework ; run test_owens_t.cpp ../../test/build//boost_unit_test_framework ; run test_pareto.cpp ../../test/build//boost_unit_test_framework ; run test_polygamma.cpp test_instances//test_instances pch_light ../../test/build//boost_unit_test_framework ; diff --git a/test/test_numerical_differentiation.cpp b/test/test_numerical_differentiation.cpp new file mode 100644 index 000000000..d99b41b47 --- /dev/null +++ b/test/test_numerical_differentiation.cpp @@ -0,0 +1,252 @@ +#define BOOST_TEST_MODULE numerical_differentiation_test + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using std::abs; +using std::pow; +using boost::math::tools::finite_difference_derivative; +using boost::math::tools::complex_step_derivative; +using boost::math::cyl_bessel_j; +using boost::math::cyl_bessel_j_prime; +using boost::math::constants::half; +using boost::multiprecision::cpp_dec_float_100; +using boost::multiprecision::cpp_bin_float_50; +using boost::multiprecision::cpp_bin_float_100; +using boost::multiprecision::cpp_bin_float_quad; + + +template +void test_order(size_t points_to_test) +{ + std::cout << "Testing order " << order << " derivative error estimate on type " << boost::typeindex::type_id().pretty_name() << "\n"; + std::cout << std::setprecision(std::numeric_limits::digits10); + //std::cout << std::fixed << std::scientific; + auto f = [](Real t) { return cyl_bessel_j(1, t); }; + Real min = -100000.0; + Real max = -min; + Real x = min; + Real max_error = 0; + Real max_relative_error_in_error = 0; + size_t j = 0; + size_t failures = 0; + while (j < points_to_test) + { + x = min + (Real) 2*j*max/ (Real) points_to_test; + Real error_estimate; + Real computed = 
finite_difference_derivative(f, x, &error_estimate); + Real expected = (Real) cyl_bessel_j_prime(1, x); + Real error = abs(computed - expected); + if (error > error_estimate) + { + ++failures; + Real relative_error_in_error = abs(error - error_estimate)/ error; + if (relative_error_in_error > max_relative_error_in_error) + { + max_relative_error_in_error = relative_error_in_error; + } + if (relative_error_in_error > 2) + { + throw std::logic_error("Relative error in error is too high!"); + } + } + if (error > max_error) + { + max_error = error; + } + ++j; + } + //std::cout << "Maximum error :" << max_error << "\n"; + //std::cout << "Error estimate failed " << failures << " times out of " << points_to_test << "\n"; + //std::cout << "Failure rate: " << (double) failures / (double) points_to_test << "\n"; + //std::cout << "Maximum error in estimated error = " << max_relative_error_in_error << "\n"; + //Real convergence_rate = (Real) order/ (Real) (order + 1); + //std::cout << "eps^(order/order+1) = " << pow(std::numeric_limits::epsilon(), convergence_rate) << "\n\n\n"; + + bool max_error_good = max_error < 2*sqrt(std::numeric_limits::epsilon()); + BOOST_TEST(max_error_good); + + bool error_estimate_good = max_relative_error_in_error < (Real) 2; + BOOST_TEST(error_estimate_good); + + double failure_rate = (double) failures / (double) points_to_test; + BOOST_CHECK_SMALL(failure_rate, 0.05); +} + +template +void test_bessel() +{ + std::cout << "Testing numerical derivatives of Bessel's function on type " << boost::typeindex::type_id().pretty_name() << "\n"; + std::cout << std::setprecision(std::numeric_limits::digits10); + + Real eps = std::numeric_limits::epsilon(); + Real x = 25.1; + auto f = [](Real t) { return cyl_bessel_j(12, t); }; + + Real computed = finite_difference_derivative(f, x); + Real expected = cyl_bessel_j_prime(12, x); + Real error_estimate = 4*abs(f(x))*sqrt(eps); + //std::cout << std::setprecision(std::numeric_limits::digits10); + //std::cout << 
"cyl_bessel_j_prime: " << expected << std::endl; + //std::cout << "First order fd : " << computed << std::endl; + //std::cout << "Error : " << abs(computed - expected) << std::endl; + //std::cout << "a prior error est : " << error_estimate << std::endl; + + BOOST_CHECK_CLOSE_FRACTION(expected, computed, 10*error_estimate); + + computed = finite_difference_derivative(f, x); + expected = cyl_bessel_j_prime(12, x); + error_estimate = abs(f(x))*pow(eps, boost::math::constants::two_thirds()); + //std::cout << std::setprecision(std::numeric_limits::digits10); + //std::cout << "cyl_bessel_j_prime: " << expected << std::endl; + //std::cout << "Second order fd : " << computed << std::endl; + //std::cout << "Error : " << abs(computed - expected) << std::endl; + //std::cout << "a prior error est : " << error_estimate << std::endl; + + BOOST_CHECK_CLOSE_FRACTION(expected, computed, 50*error_estimate); + + computed = finite_difference_derivative(f, x); + expected = cyl_bessel_j_prime(12, x); + error_estimate = abs(f(x))*pow(eps, (Real) 4 / (Real) 5); + //std::cout << std::setprecision(std::numeric_limits::digits10); + //std::cout << "cyl_bessel_j_prime: " << expected << std::endl; + //std::cout << "Fourth order fd : " << computed << std::endl; + //std::cout << "Error : " << abs(computed - expected) << std::endl; + //std::cout << "a prior error est : " << error_estimate << std::endl; + + BOOST_CHECK_CLOSE_FRACTION(expected, computed, 25*error_estimate); + + + computed = finite_difference_derivative(f, x); + expected = cyl_bessel_j_prime(12, x); + error_estimate = abs(f(x))*pow(eps, (Real) 6/ (Real) 7); + //std::cout << std::setprecision(std::numeric_limits::digits10); + //std::cout << "cyl_bessel_j_prime: " << expected << std::endl; + //std::cout << "Sixth order fd : " << computed << std::endl; + //std::cout << "Error : " << abs(computed - expected) << std::endl; + //std::cout << "a prior error est : " << error_estimate << std::endl; + + BOOST_CHECK_CLOSE_FRACTION(expected, 
computed, 100*error_estimate); + + computed = finite_difference_derivative(f, x); + expected = cyl_bessel_j_prime(12, x); + error_estimate = abs(f(x))*pow(eps, (Real) 8/ (Real) 9); + //std::cout << std::setprecision(std::numeric_limits::digits10); + //std::cout << "cyl_bessel_j_prime: " << expected << std::endl; + //std::cout << "Eighth order fd : " << computed << std::endl; + //std::cout << "Error : " << abs(computed - expected) << std::endl; + //std::cout << "a prior error est : " << error_estimate << std::endl; + + BOOST_CHECK_CLOSE_FRACTION(expected, computed, 25*error_estimate); +} + +// Example of a function which is subject to catastrophic cancellation using finite-differences, but is almost perfectly stable using complex step: +template +RealOrComplex moler_example(RealOrComplex x) +{ + using std::sin; + using std::cos; + using std::exp; + + RealOrComplex cosx = cos(x); + RealOrComplex sinx = sin(x); + return exp(x)/(cosx*cosx*cosx + sinx*sinx*sinx); +} + +template +RealOrComplex moler_example_derivative(RealOrComplex x) +{ + using std::sin; + using std::cos; + using std::exp; + + RealOrComplex expx = exp(x); + RealOrComplex cosx = cos(x); + RealOrComplex sinx = sin(x); + RealOrComplex coscubed_sincubed = cosx*cosx*cosx + sinx*sinx*sinx; + return (expx/coscubed_sincubed)*(1 - 3*(sinx*sinx*cosx - sinx*cosx*cosx)/ (coscubed_sincubed)); +} + + +template +void test_complex_step() +{ + using std::abs; + using std::complex; + using std::isfinite; + using std::isnormal; + std::cout << "Testing numerical derivatives of Bessel's function on type " << boost::typeindex::type_id().pretty_name() << "\n"; + std::cout << std::setprecision(std::numeric_limits::digits10); + Real x = -100; + while ( x < 100 ) + { + if (!isfinite(moler_example(x))) + { + x += 1; + continue; + } + Real expected = moler_example_derivative(x); + Real computed = complex_step_derivative(moler_example>, x); + if (!isfinite(expected)) + { + x += 1; + continue; + } + if (abs(expected) <= 
std::numeric_limits::epsilon()) + { + bool small = computed < std::numeric_limits::epsilon(); + BOOST_TEST(small); + } + else + { + BOOST_CHECK_CLOSE_FRACTION(expected, computed, 200*std::numeric_limits::epsilon()); + } + x += 1; + } +} + + +BOOST_AUTO_TEST_CASE(numerical_differentiation_test) +{ + test_complex_step(); + test_complex_step(); + test_complex_step(); + + test_bessel(); + test_bessel(); + test_bessel(); + test_bessel(); + + size_t points_to_test = 1000; + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); + + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); + + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); + + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); + + test_order(points_to_test); + test_order(points_to_test); + test_order(points_to_test); +}