diff --git a/doc/fp_utilities/ulps_plots.qbk b/doc/fp_utilities/ulps_plots.qbk
new file mode 100644
index 000000000..b1e6e2952
--- /dev/null
+++ b/doc/fp_utilities/ulps_plots.qbk
@@ -0,0 +1,109 @@
+[/
+  Copyright Nick Thompson, John Maddock, 2020
+  Distributed under the Boost Software License, Version 1.0.
+  (See accompanying file LICENSE_1_0.txt or copy at
+  http://www.boost.org/LICENSE_1_0.txt).
+]
+
+[section:ulps_plots ULPs Plots]
+
+[h4 Synopsis]
+
+    #include <boost/math/tools/ulps_plot.hpp>
+
+    namespace boost::math::tools {
+
+    template<class F, typename PreciseReal, typename CoarseReal>
+    class ulps_plot {
+    public:
+        ulps_plot(F hi_acc_impl, CoarseReal a, CoarseReal b,
+                  size_t samples = 10000, bool perturb_abscissas = false, int random_seed = -1);
+
+        ulps_plot& clip(PreciseReal clip);
+
+        ulps_plot& width(int width);
+
+        ulps_plot& envelope_color(std::string const & color);
+
+        ulps_plot& title(std::string const & title);
+
+        ulps_plot& background_color(std::string const & background_color);
+
+        ulps_plot& font_color(std::string const & font_color);
+
+        ulps_plot& ulp_envelope(bool write_ulp);
+
+        ulps_plot& horizontal_lines(int horizontal_lines);
+
+        ulps_plot& vertical_lines(int vertical_lines);
+
+        template<class G>
+        ulps_plot& add_fn(G g, std::string const & color = "steelblue");
+
+        friend std::ostream& operator<<(std::ostream& fs, ulps_plot const & plot);
+
+        void write(std::string const & filename) const;
+    };
+
+    } // namespaces
+
+
+An ULPs plot measures the accuracy of a function implemented in floating point arithmetic.
+ULP stands for /unit in the last place/; i.e., we create a unit system in which the distance between one representable number and the next greater representable number is 1.
+
+So, for example, the ULP distance between `1.0` and `1.0 + std::numeric_limits<double>::epsilon()` is 1, and the ULP distance between `1.0` and `1.0 + 2*std::numeric_limits<double>::epsilon()` is 2.
+A key concept of this measurement is /semi-scale invariance/: the ULP distance between `2.0` and `2.0 + 2*std::numeric_limits<double>::epsilon()` is 1, not 2.
+
+An ULPs plot gives us an idea of how efficiently we are using the floating point number system we have chosen.
+To compute an ULPs plot, we need a higher precision implementation; for example, we can test a 32-bit floating point implementation against an 80-bit `long double` implementation.
+The higher precision value is computed in the template type `PreciseReal`, and the lower precision value is computed in the type `CoarseReal`.
+For simplicity, we will refer to the result of the higher precision implementation as the exact value, which we denote by /y/,
+and we denote the value computed in lower precision by ŷ.
+The ULP distance between /y/ and ŷ is then
+
+[$../graphs/ulp_distance.svg]
+
+where μ is the unit roundoff of the `CoarseReal` type.
+
+If every sample in an ULPs plot is bounded by ±½, then we have good evidence that we are using our floating point number system as efficiently as we can possibly expect:
+we are getting the correctly rounded result.
+However, the converse is not true: if there exist samples well above the unit roundoff, we do not necessarily have evidence that the function is poorly implemented.
+Considering the /y/ = 0 case makes this obvious, but another, more subtle issue is at play.
+When we evaluate a function /f/, it is rare that we actually want to evaluate /f/ at a representable x̂;
+instead, we have an infinite precision value /x/ which must be rounded to x̂ using the floating point rounding model x̂ = x(1+ε), where |ε| < μ.
+
+We can use a first order Taylor series to determine the best accuracy we can hope for under the assumption that the only source of error is rounding the abscissa to the nearest representable:
+
+[$../graphs/ulps_and_condition_number.svg]
+
+Hence the condition number of function evaluation provides a natural scale for judging the quality of an implementation of a special function.
+Since, in addition, we cannot expect better than half-ULP accuracy, we can create a /ULP envelope/ by taking, at each point, the maximum of 0.5 and half the condition number of function evaluation.
+
+[$../graphs/airy_ai_float.svg]
+
+The graph shows the ULP accuracy of Boost's implementation of the Airy Ai function.
+We see only a few evaluations outside the ULP envelope.
+Note how the condition number of function evaluation becomes unbounded at the roots.
+
+A minimal working example which reproduces the plot above is given [@../../example/airy_ulps_plot.cpp here].
+
+Note that you can compare two different implementations of a single function by calling `add_fn` multiple times.
+This gives you a graphical answer to the question of which one is more accurate.
+For example:
+
+    auto plot = ulps_plot(f, 1.0f, 2.0f);
+    plot.add_fn(impl1, "steelblue");
+    plot.add_fn(impl2, "orange");
+
+Note that the graph is written as an SVG. We use `firefox foo.svg` or `open -a Firefox foo.svg` to display these files.
+
+[heading References]
+
+* Cleve Moler. ['Ulps Plots Reveal Math Function Accuracy], https://blogs.mathworks.com/cleve/2017/01/23/ulps-plots-reveal-math-function-accurary/
+
+[endsect]
diff --git a/doc/graphs/airy_ai_float.svg b/doc/graphs/airy_ai_float.svg
new file mode 100644
index 000000000..ec9a605f4
--- /dev/null
+++ b/doc/graphs/airy_ai_float.svg
@@ -0,0 +1,9917 @@
[9,917 lines of SVG markup omitted: the rendered ULPs plot of the Airy Ai function at float precision.]
diff --git a/doc/graphs/chebyshev_convention_1.svg b/doc/graphs/chebyshev_convention_1.svg
new file mode 100644
index 000000000..e8cbefb07
--- /dev/null
+++ b/doc/graphs/chebyshev_convention_1.svg
@@ -0,0 +1,2 @@
[SVG markup omitted: rendered equation for the first Chebyshev series convention, f(x) ≈ ∑[sub n=0][super N-1] a[sub n] T[sub n](x).]
diff --git a/doc/graphs/chebyshev_convention_2.svg b/doc/graphs/chebyshev_convention_2.svg
new file mode 100644
index 000000000..5bf3f650c
--- /dev/null
+++ b/doc/graphs/chebyshev_convention_2.svg
@@ -0,0 +1,2 @@
[SVG markup omitted: rendered equation for the second convention, f(x) ≈ c[sub 0]/2 + ∑[sub n=1][super N-1] c[sub n] T[sub n](x).]
diff --git a/doc/graphs/ulp_distance.svg b/doc/graphs/ulp_distance.svg
new file mode 100644
index 000000000..99808b635
--- /dev/null
+++ b/doc/graphs/ulp_distance.svg
@@ -0,0 +1,2 @@
[SVG markup omitted: rendered equation defining the ULP distance between y and ŷ.]
diff --git a/doc/graphs/ulps_and_condition_number.svg b/doc/graphs/ulps_and_condition_number.svg
new file mode 100644
index 000000000..4db34d41e
--- /dev/null
+++ b/doc/graphs/ulps_and_condition_number.svg
@@ -0,0 +1,2 @@
[SVG markup omitted: rendered first-order bound relating ULP error to the condition number of function evaluation.]
diff --git a/doc/math.qbk b/doc/math.qbk
index dee2a0c53..89053b0ad 100644
--- a/doc/math.qbk
+++ b/doc/math.qbk
@@ -574,6 +574,7 @@ and as a CD ISBN 0-9504833-2-X 978-0-9504833-2-0, Classification 519.2-dc22.
 [include fp_utilities/float_next.qbk]
 [include fp_utilities/float_comparison.qbk]
 [include fp_utilities/condition_numbers.qbk]
+[include fp_utilities/ulps_plots.qbk]
 [endmathpart]
 
 [mathpart cstdfloat..Specified-width floating-point typedefs]
diff --git a/doc/sf/chebyshev.qbk b/doc/sf/chebyshev.qbk
index a51b30adc..1ba3d3830 100644
--- a/doc/sf/chebyshev.qbk
+++ b/doc/sf/chebyshev.qbk
@@ -34,7 +34,10 @@
     ``__sf_result`` chebyshev_t_prime(unsigned n, Real const & x);
 
     template <class Real1, class Real2>
-    ``__sf_result`` chebyshev_clenshaw_recurrence(const Real* const c, size_t length, Real2 x);
+    ``__sf_result`` chebyshev_clenshaw_recurrence(const Real1* const c, size_t length, Real2 x);
+
+    template <class Real>
+    Real chebyshev_clenshaw_recurrence(const Real* const c, size_t length, Real a, Real b, Real x);
 
 }} // namespaces
 
@@ -78,20 +81,32 @@ and is implemented in boost as
 
     double x = 0.5;
     std::vector<double> c{14.2, -13.7, 82.3, 96};
-    double f = chebyshev_clenshaw_recurrence(c.data(), c.size(), Real x);
+    double f = chebyshev_clenshaw_recurrence(c.data(), c.size(), x);
 
 N.B.: There is factor of /2/ difference in our definition of the first coefficient in the Chebyshev series from Clenshaw's original work.
 This is because two traditions exist in notation for the Chebyshev series expansion,
 
-[:/f/(/x/) \u2248 \u2211[sub n=0][super N-1] /a/[sub n]/T/[sub n](/x/)]
+[$../graphs/chebyshev_convention_1.svg]
+
 and
 
-[:/f/(/x/) \u2248 /c/[sub 0]/2 + \u2211[sub n=1][super N-1] /c/[sub n]/T/[sub n](/x/)]
+[$../graphs/chebyshev_convention_2.svg]
 
 ['*boost math always uses the second convention, with the factor of 1/2 on the first coefficient.*]
 
+Another common use case is when the polynomial must be evaluated on some interval \[/a/, /b/\].
+The translation to the interval \[-1, 1\] introduces a few accuracy problems, but it also gives us some opportunities.
+For this case, we use [@https://doi.org/10.1093/imamat/20.3.379 Reinsch's modification] to the Clenshaw recurrence, which is also discussed [@https://archive.siam.org/books/ot99/OT99SampleChapter.pdf here].
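+
+Roughly speaking (an editorial sketch of the idea, following the implementation in this patch rather than the paper's notation), for /x/ near /a/ the mapped argument is formed as
+
+    double u = 2*(x - a)/(b - a); // this is 1 + t, computed directly with good relative accuracy near x = a
+    double t = u - 1;             // the usual Clenshaw argument in [-1, 1]
+
+and the recurrence is then carried out in terms of `u` rather than `t`, so the cancellation incurred in forming `t` does not contaminate every step; the case of /x/ near /b/ uses `u = -2*(b - x)/(b - a)` and `t = u + 1` analogously.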
+The usage is as follows: + + double x = 9; + double a = 7; + double b = 12; + std::vector c{14.2, -13.7, 82.3, 96}; + double f = chebyshev_clenshaw_recurrence(c.data(), c.size(), a, b, x); + Chebyshev polynomials of the second kind can be evaluated via `chebyshev_u`: double x = -0.23; @@ -151,7 +166,7 @@ The notion of "very close" can be made rigorous; see Trefethen's "Approximation The Chebyshev transform works by creating a vector of values by evaluating the input function at the Chebyshev points, and then performing a discrete cosine transform on the resulting vector. In order to do this efficiently, we have used [@http://www.fftw.org/ FFTW3]. -So to compile, you must have `FFTW3` installed, and link with `-lfftw3` for double precision, `-lfftw3f` for float precision, `-lfftw3l` for long double precision, and -lfftwq for quad (`__float128`) precision. +So to compile, you must have `FFTW3` installed, and link with `-lfftw3` for double precision, `-lfftw3f` for float precision, `-lfftw3l` for long double precision, and `-lfftw3q` for quad (`__float128`) precision. After the coefficients of the Chebyshev series are known, the routine goes back through them and filters out all the coefficients whose absolute ratio to the largest coefficient are less than the tolerance requested in the constructor. [endsect] [/section:chebyshev Chebyshev Polynomials] diff --git a/example/Jamfile.v2 b/example/Jamfile.v2 index fd40f3982..feaf77faa 100644 --- a/example/Jamfile.v2 +++ b/example/Jamfile.v2 @@ -73,6 +73,7 @@ test-suite examples : [ run hyperexponential_more_snips.cpp ] [ run inverse_chi_squared_example.cpp ] [ run legendre_stieltjes_example.cpp : : : [ requires cxx11_auto_declarations cxx11_defaulted_functions cxx11_lambdas ] ] + [ run airy_ulps_plot.cpp : : : [ requires cxx17_std_apply cxx17_if_constexpr ] ] #[ # run inverse_chi_squared_find_df_example.cpp ] #[ run lambert_w_basic_example.cpp ] [ run lambert_w_basic_example.cpp : : : [ requires cxx11_numeric_limits ] ] diff --git a/example/airy_ulps_plot.cpp b/example/airy_ulps_plot.cpp new file mode 100644 index 000000000..695d49287 --- /dev/null +++ b/example/airy_ulps_plot.cpp @@ -0,0 +1,60 @@ +// (C) Copyright Nick Thompson 2020. +// Use, modification and distribution are subject to the +// Boost Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +#include +#include +#include +#include + +using boost::math::tools::ulps_plot; + +int main() { + using PreciseReal = long double; + using CoarseReal = float; + + typedef boost::math::policies::policy< + boost::math::policies::promote_float, + boost::math::policies::promote_double > + no_promote_policy; + + auto ai_coarse = [](CoarseReal x) { + return boost::math::airy_ai(x, no_promote_policy()); + }; + auto ai_precise = [](PreciseReal x) { + return boost::math::airy_ai(x, no_promote_policy()); + }; + + std::string filename = "airy_ai_" + boost::core::demangle(typeid(CoarseReal).name()) + ".svg"; + int samples = 10000; + // How many pixels wide do you want your .svg? + int width = 700; + // Near a root, we have unbounded relative error. So for functions with roots, we define an ULP clip: + PreciseReal clip = 2.5; + // Should we perturb the abscissas? i.e., should we compute the high precision function f at x, + // and the low precision function at the nearest representable x̂ to x? + // Or should we compute both the high precision and low precision function at a low precision representable x̂? 
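+    // (With perturb_abscissas = false, both the high and low precision implementations are evaluated at the
+    // same representable x̂, so rounding of the abscissa does not contribute to the plotted error; with true,
+    // the high precision implementation is evaluated at the unrounded x. See the ulps_plot constructor.)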
+ bool perturb_abscissas = false; + auto plot = ulps_plot(ai_precise, CoarseReal(-3), CoarseReal(3), samples, perturb_abscissas); + // Note the argument chaining: + plot.clip(clip).width(width); + // Sometimes it's useful to set a title, but in many cases it's more useful to just use a caption. + //std::string title = "Airy Ai ULP plot at " + boost::core::demangle(typeid(CoarseReal).name()) + " precision"; + //plot.title(title); + plot.vertical_lines(6); + plot.add_fn(ai_coarse); + // You can write the plot to a stream: + //std::cout << plot; + // Or to a file: + plot.write(filename); + + // Don't like the default dark theme? + plot.background_color("white").font_color("black"); + filename = "airy_ai_" + boost::core::demangle(typeid(CoarseReal).name()) + "_white.svg"; + plot.write(filename); + + // Don't like the envelope? + plot.ulp_envelope(false); + filename = "airy_ai_" + boost::core::demangle(typeid(CoarseReal).name()) + "_white_no_envelope.svg"; + plot.write(filename); +} diff --git a/example/daubechies_wavelets/daubechies_plots.cpp b/example/daubechies_wavelets/daubechies_plots.cpp index 70172c98e..3c4e81936 100644 --- a/example/daubechies_wavelets/daubechies_plots.cpp +++ b/example/daubechies_wavelets/daubechies_plots.cpp @@ -11,8 +11,8 @@ #include #include +#include #include -#include using boost::multiprecision::float128; @@ -135,11 +135,16 @@ void do_ulp(int coarse_refinements, PhiPrecise phi_precise) title = ""; std::string filename = "daubechies_" + std::to_string(p) + "_" + boost::core::demangle(typeid(CoarseReal).name()) + "_" + std::to_string(coarse_refinements) + "_refinements.svg"; - int samples = 20000; + int samples = 1000000; int clip = 20; int horizontal_lines = 8; int vertical_lines = 2*p - 1; - quicksvg::ulp_plot(phi_coarse, phi_precise, CoarseReal(0), phi_coarse.support().second, title, filename, samples, GRAPH_WIDTH, clip, horizontal_lines, vertical_lines); + auto [a, b] = phi_coarse.support(); + auto plot = boost::math::tools::ulp_plot(phi_precise, a, b, samples); + plot.set_clip(clip); + plot.set_width(GRAPH_WIDTH); + plot.add_fn(phi_coarse); + plot.write(filename, true, title, horizontal_lines, vertical_lines); } @@ -151,9 +156,9 @@ int main() boost::hana::for_each(std::make_index_sequence<18>(), [&](auto i){ plot_convergence(); }); using PreciseReal = float128; - using CoarseReal = double; + using CoarseReal = float; int precise_refinements = 22; - constexpr const int p = 8; + constexpr const int p = 14; std::cout << "Computing precise scaling function in " << boost::core::demangle(typeid(PreciseReal).name()) << " precision.\n"; auto phi_precise = boost::math::daubechies_scaling(precise_refinements); std::cout << "Beginning comparison with functions computed in " << boost::core::demangle(typeid(CoarseReal).name()) << " precision.\n"; diff --git a/include/boost/math/special_functions/chebyshev.hpp b/include/boost/math/special_functions/chebyshev.hpp index 0f4d9f2da..9dcab5138 100644 --- a/include/boost/math/special_functions/chebyshev.hpp +++ b/include/boost/math/special_functions/chebyshev.hpp @@ -165,5 +165,100 @@ inline Real chebyshev_clenshaw_recurrence(const Real* const c, size_t length, co } + +namespace detail { +template +inline Real unchecked_chebyshev_clenshaw_recurrence(const Real* const c, size_t length, const Real & a, const Real & b, const Real& x) +{ + Real t; + Real u; + // This cutoff is not super well defined, but it's a good estimate. 
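+  // Roughly: when the mapped argument t lies within the cutoff of zero, the ordinary Clenshaw recurrence in t
+  // is used below; closer to an endpoint, the recurrence is run in terms of u (equal to t + 1 near a, and
+  // t - 1 near b), which is computed without cancellation, following Reinsch's modification.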
+ // See "An Error Analysis of the Modified Clenshaw Method for Evaluating Chebyshev and Fourier Series" + // J. OLIVER, IMA Journal of Applied Mathematics, Volume 20, Issue 3, November 1977, Pages 379–391 + // https://doi.org/10.1093/imamat/20.3.379 + const Real cutoff = 0.6; + if (x - a < b - x) + { + u = 2*(x-a)/(b-a); + t = u - 1; + if (t > -cutoff) + { + Real b2 = 0; + Real b1 = c[length -1]; + for(size_t j = length - 2; j >= 1; --j) + { + Real tmp = 2*t*b1 - b2 + c[j]; + b2 = b1; + b1 = tmp; + } + return t*b1 - b2 + c[0]/2; + } + else + { + Real b = c[length -1]; + Real d = b; + Real b2 = 0; + for (size_t r = length - 2; r >= 1; --r) + { + d = 2*u*b - d + c[r]; + b2 = b; + b = d - b; + } + return t*b - b2 + c[0]/2; + } + } + else + { + u = -2*(b-x)/(b-a); + t = u + 1; + if (t < cutoff) + { + Real b2 = 0; + Real b1 = c[length -1]; + for(size_t j = length - 2; j >= 1; --j) + { + Real tmp = 2*t*b1 - b2 + c[j]; + b2 = b1; + b1 = tmp; + } + return t*b1 - b2 + c[0]/2; + } + else + { + Real b = c[length -1]; + Real d = b; + Real b2 = 0; + for (size_t r = length - 2; r >= 1; --r) + { + d = 2*u*b + d + c[r]; + b2 = b; + b = d + b; + } + return t*b - b2 + c[0]/2; + } + } +} + +} // namespace detail + +template +inline Real chebyshev_clenshaw_recurrence(const Real* const c, size_t length, const Real & a, const Real & b, const Real& x) +{ + if (x < a || x > b) + { + throw std::domain_error("x in [a, b] is required."); + } + if (length < 2) + { + if (length == 0) + { + return 0; + } + return c[0]/2; + } + return detail::unchecked_chebyshev_clenshaw_recurrence(c, length, a, b, x); +} + + }} #endif diff --git a/include/boost/math/special_functions/chebyshev_transform.hpp b/include/boost/math/special_functions/chebyshev_transform.hpp index e50c40bd9..81543c6f4 100644 --- a/include/boost/math/special_functions/chebyshev_transform.hpp +++ b/include/boost/math/special_functions/chebyshev_transform.hpp @@ -116,7 +116,7 @@ public: template chebyshev_transform(const F& f, Real a, Real b, Real tol = 500 * std::numeric_limits::epsilon(), - size_t max_refinements = 15) : m_a(a), m_b(b) + size_t max_refinements = 16) : m_a(a), m_b(b) { if (a >= b) { @@ -174,16 +174,9 @@ public: } } - Real operator()(Real x) const + inline Real operator()(Real x) const { - using boost::math::constants::half; - if (x > m_b || x < m_a) - { - throw std::domain_error("x not in [a, b]\n"); - } - - Real z = (2*x - m_a - m_b)/(m_b - m_a); - return chebyshev_clenshaw_recurrence(m_coeffs.data(), m_coeffs.size(), z); + return chebyshev_clenshaw_recurrence(m_coeffs.data(), m_coeffs.size(), m_a, m_b, x); } // Integral over entire domain [a, b] diff --git a/include/boost/math/tools/ulps_plot.hpp b/include/boost/math/tools/ulps_plot.hpp new file mode 100644 index 000000000..16570299d --- /dev/null +++ b/include/boost/math/tools/ulps_plot.hpp @@ -0,0 +1,541 @@ +// (C) Copyright Nick Thompson 2020. +// Use, modification and distribution are subject to the +// Boost Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +#ifndef BOOST_MATH_TOOLS_ULP_PLOT_HPP +#define BOOST_MATH_TOOLS_ULP_PLOT_HPP +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// Design of this function comes from: +// https://blogs.mathworks.com/cleve/2017/01/23/ulps-plots-reveal-math-function-accurary/ + +// The envelope is the maximum of 1/2 and half the condition number of function evaluation. 
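+// In symbols, envelope(x) = max(1/2, |x f'(x)/f(x)|/2); this is the cond_ member computed in the ulps_plot constructor below.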
+ +namespace boost::math::tools { + +namespace detail { +template +void write_gridlines(std::ostream& fs, int horizontal_lines, int vertical_lines, + F1 x_scale, F2 y_scale, Real min_x, Real max_x, Real min_y, Real max_y, + int graph_width, int graph_height, int margin_left, std::string const & font_color) +{ + // Make a grid: + for (int i = 1; i <= horizontal_lines; ++i) { + Real y_cord_dataspace = min_y + ((max_y - min_y)*i)/horizontal_lines; + auto y = y_scale(y_cord_dataspace); + fs << "\n"; + + fs << "" + << std::setprecision(4) << y_cord_dataspace << "\n"; + } + + for (int i = 1; i <= vertical_lines; ++i) { + Real x_cord_dataspace = min_x + ((max_x - min_x)*i)/vertical_lines; + Real x = x_scale(x_cord_dataspace); + fs << "\n"; + + fs << "" + << std::setprecision(4) << x_cord_dataspace << "\n"; + } +} +} + +template +class ulps_plot { +public: + ulps_plot(F hi_acc_impl, CoarseReal a, CoarseReal b, + size_t samples = 10000, bool perturb_abscissas = false, int random_seed = -1); + + ulps_plot& clip(PreciseReal clip); + + ulps_plot& width(int width); + + ulps_plot& envelope_color(std::string const & color); + + ulps_plot& title(std::string const & title); + + ulps_plot& background_color(std::string const & background_color); + + ulps_plot& font_color(std::string const & font_color); + + ulps_plot& ulp_envelope(bool write_ulp); + + template + ulps_plot& add_fn(G g, std::string const & color = "steelblue"); + + ulps_plot& horizontal_lines(int horizontal_lines); + + ulps_plot& vertical_lines(int vertical_lines); + + void write(std::string const & filename) const; + + friend std::ostream& operator<<(std::ostream& fs, ulps_plot const & plot) + { + using std::abs; + using std::floor; + using std::isnan; + if (plot.ulp_list_.size() == 0) + { + throw std::domain_error("No functions added for comparison."); + } + if (plot.width_ <= 1) + { + throw std::domain_error("Width = " + std::to_string(plot.width_) + ", which is too small."); + } + + PreciseReal worst_ulp_distance = 0; + PreciseReal min_y = std::numeric_limits::max(); + PreciseReal max_y = std::numeric_limits::lowest(); + for (auto const & ulp_vec : plot.ulp_list_) + { + for (auto const & ulp : ulp_vec) + { + if (abs(ulp) > worst_ulp_distance) + { + worst_ulp_distance = abs(ulp); + } + if (ulp < min_y) + { + min_y = ulp; + } + if (ulp > max_y) + { + max_y = ulp; + } + } + } + + // half-ulp accuracy is the best that can be expected; sometimes we can get less, but barely less. + // then the axes don't show up; painful! 
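+        // Force the plotted y-range to contain [-0.5, 0.5], so the half-ULP band is always visible: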
+ if (max_y < 0.5) { + max_y = 0.5; + } + if (min_y > -0.5) { + min_y = -0.5; + } + + if (plot.clip_ > 0) + { + if (max_y > plot.clip_) + { + max_y = plot.clip_; + } + if (min_y < -plot.clip_) + { + min_y = -plot.clip_; + } + } + + int height = floor(double(plot.width_)/1.61803); + int margin_top = 40; + int margin_left = 25; + if (plot.title_.size() == 0) + { + margin_top = 10; + margin_left = 15; + } + int margin_bottom = 20; + int margin_right = 20; + int graph_height = height - margin_bottom - margin_top; + int graph_width = plot.width_ - margin_left - margin_right; + + // Maps [a,b] to [0, graph_width] + auto x_scale = [&](CoarseReal x)->CoarseReal + { + return ((x-plot.a_)/(plot.b_ - plot.a_))*static_cast(graph_width); + }; + + auto y_scale = [&](PreciseReal y)->PreciseReal + { + return ((max_y - y)/(max_y - min_y) )*static_cast(graph_height); + }; + + fs << "\n" + << "\n" + << "\n"; + if (plot.title_.size() > 0) + { + fs << "" + << plot.title_ + << "\n"; + } + + // Construct SVG group to simplify the calculations slightly: + fs << "\n"; + // y-axis: + fs << "\n"; + PreciseReal x_axis_loc = y_scale(static_cast(0)); + fs << "\n"; + + if (worst_ulp_distance > 3) + { + detail::write_gridlines(fs, plot.horizontal_lines_, plot.vertical_lines_, x_scale, y_scale, plot.a_, plot.b_, + static_cast(min_y), static_cast(max_y), graph_width, graph_height, margin_left, plot.font_color_); + } + else + { + std::vector ys{-3.0, -2.5, -2.0, -1.5, -1.0, -0.5, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; + for (size_t i = 0; i < ys.size(); ++i) + { + if (min_y <= ys[i] && ys[i] <= max_y) + { + PreciseReal y_cord_dataspace = ys[i]; + PreciseReal y = y_scale(y_cord_dataspace); + fs << "\n"; + + fs << "" + << std::setprecision(4) << y_cord_dataspace << "\n"; + } + } + for (int i = 1; i <= plot.vertical_lines_; ++i) + { + CoarseReal x_cord_dataspace = plot.a_ + ((plot.b_ - plot.a_)*i)/plot.vertical_lines_; + CoarseReal x = x_scale(x_cord_dataspace); + fs << "\n"; + + fs << "" + << std::setprecision(4) << x_cord_dataspace << "\n"; + } + } + + int color_idx = 0; + for (auto const & ulp : plot.ulp_list_) + { + std::string color = plot.colors_[color_idx++]; + for (size_t j = 0; j < ulp.size(); ++j) + { + if (isnan(ulp[j])) + { + continue; + } + if (plot.clip_ > 0 && abs(ulp[j]) > plot.clip_) + { + continue; + } + CoarseReal x = x_scale(plot.coarse_abscissas_[j]); + PreciseReal y = y_scale(ulp[j]); + fs << "\n"; + } + } + + if (plot.ulp_envelope_) + { + std::string close_path = "' stroke='" + plot.envelope_color_ + "' stroke-width='1' fill='none'>\n"; + size_t jstart = 0; + while (plot.cond_[jstart] > max_y) + { + ++jstart; + if (jstart >= plot.cond_.size()) + { + goto done; + } + } + + size_t jmin = jstart; + new_top_path: + if (jmin >= plot.cond_.size()) + { + goto start_bottom_paths; + } + fs << "\n" + << "\n"; + return fs; + } + +private: + std::vector precise_abscissas_; + std::vector coarse_abscissas_; + std::vector precise_ordinates_; + std::vector cond_; + std::list> ulp_list_; + std::vector colors_; + CoarseReal a_; + CoarseReal b_; + PreciseReal clip_; + int width_; + std::string envelope_color_; + bool ulp_envelope_; + int horizontal_lines_; + int vertical_lines_; + std::string title_; + std::string background_color_; + std::string font_color_; +}; + +template +ulps_plot& ulps_plot::envelope_color(std::string const & color) +{ + envelope_color_ = color; + return *this; +} + +template +ulps_plot& ulps_plot::clip(PreciseReal clip) +{ + clip_ = clip; + return *this; +} + +template +ulps_plot& ulps_plot::width(int 
width) +{ + width_ = width; + return *this; +} + +template +ulps_plot& ulps_plot::horizontal_lines(int horizontal_lines) +{ + horizontal_lines_ = horizontal_lines; + return *this; +} + +template +ulps_plot& ulps_plot::vertical_lines(int vertical_lines) +{ + vertical_lines_ = vertical_lines; + return *this; +} + +template +ulps_plot& ulps_plot::title(std::string const & title) +{ + title_ = title; + return *this; +} + +template +ulps_plot& ulps_plot::background_color(std::string const & background_color) +{ + background_color_ = background_color; + return *this; +} + +template +ulps_plot& ulps_plot::font_color(std::string const & font_color) +{ + font_color_ = font_color; + return *this; +} + +template +ulps_plot& ulps_plot::ulp_envelope(bool write_ulp_envelope) +{ + ulp_envelope_ = write_ulp_envelope; + return *this; +} + +template +void ulps_plot::write(std::string const & filename) const +{ + if (!boost::algorithm::ends_with(filename, ".svg")) + { + throw std::logic_error("Only svg files are supported at this time."); + } + std::ofstream fs(filename); + fs << *this; + fs.close(); +} + + +template +ulps_plot::ulps_plot(F hi_acc_impl, CoarseReal a, CoarseReal b, + size_t samples, bool perturb_abscissas, int random_seed) +{ + static_assert(sizeof(PreciseReal) >= sizeof(CoarseReal), "PreciseReal must have larger size than CoarseReal"); + if (samples < 10) + { + throw std::domain_error("Must have at least 10 samples, samples = " + std::to_string(samples)); + } + if (b <= a) + { + throw std::domain_error("On interval [a,b], b > a is required."); + } + a_ = a; + b_ = b; + + std::mt19937_64 gen; + if (random_seed == -1) + { + std::random_device rd; + gen.seed(rd()); + } + // Boost's uniform_real_distribution can generate quad and multiprecision random numbers; std's cannot: + boost::random::uniform_real_distribution dis(a, b); + precise_abscissas_.resize(samples); + coarse_abscissas_.resize(samples); + + if (perturb_abscissas) + { + for(size_t i = 0; i < samples; ++i) + { + precise_abscissas_[i] = dis(gen); + } + std::sort(precise_abscissas_.begin(), precise_abscissas_.end()); + for (size_t i = 0; i < samples; ++i) + { + coarse_abscissas_[i] = static_cast(precise_abscissas_[i]); + } + } + else + { + for(size_t i = 0; i < samples; ++i) + { + coarse_abscissas_[i] = static_cast(dis(gen)); + } + std::sort(coarse_abscissas_.begin(), coarse_abscissas_.end()); + for (size_t i = 0; i < samples; ++i) + { + precise_abscissas_[i] = coarse_abscissas_[i]; + } + } + + precise_ordinates_.resize(samples); + for (size_t i = 0; i < samples; ++i) + { + precise_ordinates_[i] = hi_acc_impl(precise_abscissas_[i]); + } + + cond_.resize(samples, std::numeric_limits::quiet_NaN()); + for (size_t i = 0 ; i < samples; ++i) + { + PreciseReal y = precise_ordinates_[i]; + if (y != 0) + { + // Maybe cond_ is badly names; should it be half_cond_? + cond_[i] = boost::math::tools::evaluation_condition_number(hi_acc_impl, precise_abscissas_[i])/2; + // Half-ULP accuracy is the correctly rounded result, so make sure the envelop doesn't go below this: + if (cond_[i] < 0.5) + { + cond_[i] = 0.5; + } + } + // else leave it as nan. 
+ } + clip_ = -1; + width_ = 1100; + envelope_color_ = "chartreuse"; + ulp_envelope_ = true; + horizontal_lines_ = 8; + vertical_lines_ = 10; + title_ = ""; + background_color_ = "black"; + font_color_ = "white"; +} + +template +template +ulps_plot& ulps_plot::add_fn(G g, std::string const & color) +{ + using std::abs; + size_t samples = precise_abscissas_.size(); + std::vector ulps(samples); + for (size_t i = 0; i < samples; ++i) + { + PreciseReal y_hi_acc = precise_ordinates_[i]; + PreciseReal y_lo_acc = g(coarse_abscissas_[i]); + PreciseReal absy = abs(y_hi_acc); + PreciseReal dist = nextafter(static_cast(absy), std::numeric_limits::max()) - static_cast(absy); + ulps[i] = static_cast((y_lo_acc - y_hi_acc)/dist); + } + ulp_list_.emplace_back(ulps); + colors_.emplace_back(color); + return *this; +} + + + + +} // namespace boost::math::tools +#endif diff --git a/reporting/performance/chebyshev_clenshaw.cpp b/reporting/performance/chebyshev_clenshaw.cpp new file mode 100644 index 000000000..170c10c05 --- /dev/null +++ b/reporting/performance/chebyshev_clenshaw.cpp @@ -0,0 +1,57 @@ +#include +#include +#include +#include + + +template +void ChebyshevClenshaw(benchmark::State& state) +{ + std::vector v(state.range(0)); + std::random_device rd; + std::mt19937_64 mt(rd()); + std::uniform_real_distribution unif(-1,1); + for (size_t i = 0; i < v.size(); ++i) + { + v[i] = unif(mt); + } + + using boost::math::chebyshev_clenshaw_recurrence; + Real x = unif(mt); + for (auto _ : state) + { + benchmark::DoNotOptimize(chebyshev_clenshaw_recurrence(v.data(), v.size(), x)); + } + state.SetComplexityN(state.range(0)); +} + +template +void TranslatedChebyshevClenshaw(benchmark::State& state) +{ + std::vector v(state.range(0)); + std::random_device rd; + std::mt19937_64 mt(rd()); + std::uniform_real_distribution unif(-1,1); + for (size_t i = 0; i < v.size(); ++i) + { + v[i] = unif(mt); + } + + using boost::math::detail::unchecked_chebyshev_clenshaw_recurrence; + Real x = unif(mt); + Real a = -2; + Real b = 5; + for (auto _ : state) + { + benchmark::DoNotOptimize(unchecked_chebyshev_clenshaw_recurrence(v.data(), v.size(), a, b, x)); + } + state.SetComplexityN(state.range(0)); +} + + +BENCHMARK_TEMPLATE(TranslatedChebyshevClenshaw, double)->RangeMultiplier(2)->Range(1<<1, 1<<22)->Complexity(benchmark::oN); +BENCHMARK_TEMPLATE(ChebyshevClenshaw, double)->RangeMultiplier(2)->Range(1<<1, 1<<22)->Complexity(benchmark::oN); + + + +BENCHMARK_MAIN(); \ No newline at end of file diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 index 1b35f7c39..d3f7e68c0 100644 --- a/test/Jamfile.v2 +++ b/test/Jamfile.v2 @@ -464,11 +464,11 @@ test-suite special_fun : [ run test_lambert_w_derivative.cpp ../../test/build//boost_unit_test_framework : : : BOOST_MATH_TEST_MULTIPRECISION [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : BOOST_MATH_TEST_FLOAT128 "-Bstatic -lquadmath -Bdynamic" ] ] [ run test_legendre.cpp test_instances//test_instances pch_light ../../test/build//boost_unit_test_framework : : : [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : "-Bstatic -lquadmath -Bdynamic" ] ] - [ run chebyshev_test.cpp : : : [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : "-Bstatic -lquadmath -Bdynamic" ] ] - [ run chebyshev_transform_test.cpp ../config//fftw3f : : : TEST1 [ requires cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : chebyshev_transform_test_1 ] - 
[ run chebyshev_transform_test.cpp ../config//fftw3 : : : TEST2 [ requires cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : chebyshev_transform_test_2 ] - [ run chebyshev_transform_test.cpp ../config//fftw3l : : : TEST3 [ requires cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : chebyshev_transform_test_3 ] - [ run chebyshev_transform_test.cpp ../config//fftw3q ../config//quadmath : : : TEST4 [ requires cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] [ check-target-builds ../config//has_float128 "__float128" : : no ] : chebyshev_transform_test_4 ] + [ run chebyshev_test.cpp : : : [ requires cxx11_inline_namespaces cxx11_unified_initialization_syntax cxx11_hdr_tuple cxx11_smart_ptr cxx11_defaulted_functions cxx11_auto_declarations cxx11_range_based_for cxx11_constexpr ] [ check-target-builds ../config//has_float128 "GCC libquadmath and __float128 support" : "-Bstatic -lquadmath -Bdynamic" ] ] + [ run chebyshev_transform_test.cpp ../config//fftw3f : : : TEST1 [ requires cxx11_smart_ptr cxx11_defaulted_functions cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : chebyshev_transform_test_1 ] + [ run chebyshev_transform_test.cpp ../config//fftw3 : : : TEST2 [ requires cxx11_smart_ptr cxx11_defaulted_functions cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : chebyshev_transform_test_2 ] + [ run chebyshev_transform_test.cpp ../config//fftw3l : : : TEST3 [ requires cxx11_smart_ptr cxx11_defaulted_functions cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : chebyshev_transform_test_3 ] + [ run chebyshev_transform_test.cpp ../config//fftw3q ../config//quadmath : : : TEST4 [ requires cxx11_smart_ptr cxx11_defaulted_functions cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] [ check-target-builds ../config//has_float128 "__float128" : : no ] : chebyshev_transform_test_4 ] [ run cardinal_trigonometric_test.cpp ../config//fftw3f : : : TEST1 [ requires cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : cardinal_trigonometric_test_1 ] [ run cardinal_trigonometric_test.cpp ../config//fftw3 : : : TEST2 [ requires cxx11_auto_declarations cxx11_range_based_for ] [ check-target-builds ../config//has_fftw3 "libfftw3" : : no ] : cardinal_trigonometric_test_2 ] diff --git a/test/chebyshev_test.cpp b/test/chebyshev_test.cpp index b5dca51bc..90982d68a 100644 --- a/test/chebyshev_test.cpp +++ b/test/chebyshev_test.cpp @@ -5,20 +5,16 @@ * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ -#define BOOST_TEST_MODULE chebyshev_test - +#include +#include #include -#include -#include #include #include #include -#include #include +#include "math_unit_test.hpp" using boost::multiprecision::cpp_bin_float_quad; -using boost::multiprecision::cpp_bin_float_50; -using boost::multiprecision::cpp_bin_float_100; using boost::math::chebyshev_t; using boost::math::chebyshev_t_prime; using boost::math::chebyshev_u; @@ -29,28 +25,25 @@ void test_polynomials() std::cout << "Testing explicit polynomial representations of the Chebyshev polynomials on type " << boost::typeindex::type_id().pretty_name() << "\n"; Real x = -2; - 
Real tol = 400*std::numeric_limits::epsilon(); - if (tol > std::numeric_limits::epsilon()) - tol *= 10; // float results have much larger error rates. + Real tol = 32*std::numeric_limits::epsilon(); while (x < 2) { - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t(0, x), Real(1), tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t(1, x), x, tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t(2, x), 2*x*x - 1, tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t(3, x), x*(4*x*x-3), tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t(4, x), 8*x*x*(x*x - 1) + 1, tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t(5, x), x*(16*x*x*x*x - 20*x*x + 5), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t(0, x), Real(1), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t(1, x), x, tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t(2, x), 2*x*x - 1, tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t(3, x), x*(4*x*x-3), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t(4, x), 8*x*x*(x*x - 1) + 1, tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t(5, x), x*(16*x*x*x*x - 20*x*x + 5), tol); x += 1/static_cast(1<<7); } x = -2; - tol = 10*tol; while (x < 2) { - BOOST_CHECK_CLOSE_FRACTION(chebyshev_u(0, x), Real(1), tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_u(1, x), 2*x, tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_u(2, x), 4*x*x - 1, tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_u(3, x), 4*x*(2*x*x - 1), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_u(0, x), Real(1), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_u(1, x), 2*x, tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_u(2, x), 4*x*x - 1, tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_u(3, x), 4*x*(2*x*x - 1), tol); x += 1/static_cast(1<<7); } } @@ -62,14 +55,14 @@ void test_derivatives() std::cout << "Testing explicit polynomial representations of the Chebyshev polynomial derivatives on type " << boost::typeindex::type_id().pretty_name() << "\n"; Real x = -2; - Real tol = 1000*std::numeric_limits::epsilon(); + Real tol = 4*std::numeric_limits::epsilon(); while (x < 2) { - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t_prime(0, x), Real(0), tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t_prime(1, x), Real(1), tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t_prime(2, x), 4*x, tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t_prime(3, x), 3*(4*x*x - 1), tol); - BOOST_CHECK_CLOSE_FRACTION(chebyshev_t_prime(4, x), 16*x*(2*x*x - 1), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t_prime(0, x), Real(0), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t_prime(1, x), Real(1), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t_prime(2, x), 4*x, tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t_prime(3, x), 3*(4*x*x - 1), tol); + CHECK_MOLLIFIED_CLOSE(chebyshev_t_prime(4, x), 16*x*(2*x*x - 1), tol); // This one makes the tolerance have to grow too large; the Chebyshev recurrence is more stable than naive polynomial evaluation anyway. //BOOST_CHECK_CLOSE_FRACTION(chebyshev_t_prime(5, x), 5*(4*x*x*(4*x*x - 3) + 1), tol); x += 1/static_cast(1<<7); @@ -93,48 +86,93 @@ void test_clenshaw_recurrence() boost::array c6 = { {0, 0, 0, 0, 0, 0, 1} }; Real x = -1; - Real tol = 10*std::numeric_limits::epsilon(); - if (tol > std::numeric_limits::epsilon()) - tol *= 100; // float results have much larger error rates. 
+ // It's not clear from this test which one is more accurate; higher precision cast testing is required, and is done elsewhere: + int ulps = 50; while (x <= 1) { Real y = chebyshev_clenshaw_recurrence(c0.data(), c0.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(0, x), tol); + Real expected = chebyshev_t(0, x); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c0.data(), c0.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); y = chebyshev_clenshaw_recurrence(c01.data(), c01.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(0, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c01.data(), c01.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); y = chebyshev_clenshaw_recurrence(c02.data(), c02.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(0, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c02.data(), c02.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); + expected = chebyshev_t(1,x); y = chebyshev_clenshaw_recurrence(c1.data(), c1.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(1, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c1.data(), c1.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); + expected = chebyshev_t(2, x); y = chebyshev_clenshaw_recurrence(c2.data(), c2.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(2, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c2.data(), c2.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); + expected = chebyshev_t(3, x); y = chebyshev_clenshaw_recurrence(c3.data(), c3.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(3, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c3.data(), c3.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); + expected = chebyshev_t(4, x); y = chebyshev_clenshaw_recurrence(c4.data(), c4.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(4, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c4.data(), c4.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); + expected = chebyshev_t(5, x); y = chebyshev_clenshaw_recurrence(c5.data(), c5.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(5, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c5.data(), c5.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); + expected = chebyshev_t(6, x); y = chebyshev_clenshaw_recurrence(c6.data(), c6.size(), x); - BOOST_CHECK_CLOSE_FRACTION(y, chebyshev_t(6, x), tol); + CHECK_ULP_CLOSE(expected, y, ulps); + y = chebyshev_clenshaw_recurrence(c6.data(), c6.size(), Real(-1), Real(1), x); + CHECK_ULP_CLOSE(expected, y, ulps); x += static_cast(1)/static_cast(1 << 7); } } -BOOST_AUTO_TEST_CASE(chebyshev_test) +template +void test_translated_clenshaw_recurrence() { - test_clenshaw_recurrence(); - test_clenshaw_recurrence(); - test_clenshaw_recurrence(); + using boost::math::chebyshev_clenshaw_recurrence; + std::mt19937_64 mt(123242); + std::uniform_real_distribution dis(-1,1); + std::vector c(32); + for (auto & d : c) { + d = dis(mt); + } + int samples = 0; + while (samples++ < 250) { + Real x = dis(mt); + Real expected = chebyshev_clenshaw_recurrence(c.data(), c.size(), x); + // The computed value, in this case, should actually be better, since it has more information to use. 
+ // In any case, this test doesn't show Reinsch's modification to the Clenshaw recurrence is better; + // it shows they are doing roughly the same thing. + Real computed = chebyshev_clenshaw_recurrence(c.data(), c.size(), Real(-1), Real(1), x); + if (!CHECK_ULP_CLOSE(expected, computed, 1000)) { + std::cerr << " Problem occurred at x = " << x << "\n"; + } + } +} + +int main() +{ test_polynomials(); test_polynomials(); test_polynomials(); @@ -144,4 +182,11 @@ BOOST_AUTO_TEST_CASE(chebyshev_test) test_derivatives(); test_derivatives(); test_derivatives(); + + test_clenshaw_recurrence(); + test_clenshaw_recurrence(); + test_clenshaw_recurrence(); + + test_translated_clenshaw_recurrence(); + return boost::math::test::report_errors(); } diff --git a/test/chebyshev_transform_test.cpp b/test/chebyshev_transform_test.cpp index fa6171be2..cae79cf6b 100644 --- a/test/chebyshev_transform_test.cpp +++ b/test/chebyshev_transform_test.cpp @@ -4,17 +4,11 @@ * Boost Software License, Version 1.0. (See accompanying file * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ -#define BOOST_TEST_MODULE chebyshev_transform_test - -#include +#include "math_unit_test.hpp" #include -#include -#include #include #include #include -#include -#include #if !defined(TEST1) && !defined(TEST2) && !defined(TEST3) && !defined(TEST4) # define TEST1 @@ -23,9 +17,6 @@ # define TEST4 #endif -using boost::multiprecision::cpp_bin_float_quad; -using boost::multiprecision::cpp_bin_float_50; -using boost::multiprecision::cpp_bin_float_100; using boost::math::chebyshev_t; using boost::math::chebyshev_t_prime; using boost::math::chebyshev_u; @@ -41,8 +32,8 @@ void test_sin_chebyshev_transform() using std::cos; using std::abs; - Real tol = 10*std::numeric_limits::epsilon(); - auto f = [](Real x) { return sin(x); }; + Real tol = std::numeric_limits::epsilon(); + auto f = [](Real x)->Real { return sin(x); }; Real a = 0; Real b = 1; chebyshev_transform cheb(f, a, b, tol); @@ -52,29 +43,14 @@ { Real s = sin(x); Real c = cos(x); - if (abs(s) < tol) - { - BOOST_CHECK_SMALL(cheb(x), 100*tol); - BOOST_CHECK_CLOSE_FRACTION(c, cheb.prime(x), 100*tol); - } - else - { - BOOST_CHECK_CLOSE_FRACTION(s, cheb(x), 100*tol); - if (abs(c) < tol) - { - BOOST_CHECK_SMALL(cheb.prime(x), 100*tol); - } - else - { - BOOST_CHECK_CLOSE_FRACTION(c, cheb.prime(x), 100*tol); - } - } + CHECK_ABSOLUTE_ERROR(s, cheb(x), tol); + CHECK_ABSOLUTE_ERROR(c, cheb.prime(x), 150*tol); x += static_cast(1)/static_cast(1 << 7); } Real Q = cheb.integrate(); - BOOST_CHECK_CLOSE_FRACTION(1 - cos(static_cast(1)), Q, 100*tol); + CHECK_ABSOLUTE_ERROR(1 - cos(static_cast(1)), Q, 100*tol); } @@ -88,7 +64,7 @@ void test_sinc_chebyshev_transform() using boost::math::chebyshev_transform; using boost::math::constants::half_pi; - Real tol = 500*std::numeric_limits::epsilon(); + Real tol = 100*std::numeric_limits::epsilon(); auto f = [](Real x) { return boost::math::sinc_pi(x); }; Real a = 0; Real b = 1; @@ -100,30 +76,16 @@ { Real s = sinc_pi(x); Real ds = (cos(x)-sinc_pi(x))/x; if (x == 0) { ds = 0; } - if (s < tol) - { - BOOST_CHECK_SMALL(cheb(x), tol); - } - else - { - BOOST_CHECK_CLOSE_FRACTION(s, cheb(x), tol); - } - if (abs(ds) < tol) - { - BOOST_CHECK_SMALL(cheb.prime(x), 5 * tol); - } - else - { - BOOST_CHECK_CLOSE_FRACTION(ds, cheb.prime(x), 300*tol); - } + CHECK_ABSOLUTE_ERROR(s, cheb(x), tol); + CHECK_ABSOLUTE_ERROR(ds, cheb.prime(x), 10*tol); x += static_cast(1)/static_cast(1 << 7); } Real Q =
cheb.integrate(); //NIntegrate[Sinc[x], {x, 0, 1}, WorkingPrecision -> 200, AccuracyGoal -> 150, PrecisionGoal -> 150, MaxRecursion -> 150] Real Q_exp = boost::lexical_cast("0.94608307036718301494135331382317965781233795473811179047145477356668"); - BOOST_CHECK_CLOSE_FRACTION(Q_exp, Q, tol); + CHECK_ABSOLUTE_ERROR(Q_exp, Q, tol); } @@ -133,6 +95,8 @@ template void test_atap_examples() { using std::sin; + using std::exp; + using std::sqrt; using boost::math::constants::half; using boost::math::sinc_pi; using boost::math::chebyshev_transform; @@ -144,33 +108,37 @@ void test_atap_examples() Real u = (0 < s) - (s < 0); return t + u; }; - auto f3 = [](Real x) { return sin(6*x) + sin(60*exp(x)); }; - - auto f4 = [](Real x) { return 1/(1+1000*(x+half())*(x+half())) + 1/sqrt(1+1000*(x-.5)*(x-0.5));}; + //auto f3 = [](Real x) { return sin(6*x) + sin(60*exp(x)); }; + //auto f4 = [](Real x) { return 1/(1+1000*(x+half())*(x+half())) + 1/sqrt(1+1000*(x-Real(1)/Real(2))*(x-Real(1)/Real(2)));}; Real a = -1; Real b = 1; - chebyshev_transform cheb1(f1, a, b); + chebyshev_transform cheb1(f1, a, b, tol); chebyshev_transform cheb2(f2, a, b, tol); //chebyshev_transform cheb3(f3, a, b, tol); Real x = a; while (x < b) { - //Real s = f1(x); - if (sizeof(Real) == sizeof(float)) + // f1 and f2 are not differentiable; standard convergence rate theorems don't apply. + // Basically, the max refinements are always hit; so the error is not related to the precision of the type. + Real acceptable_error = sqrt(tol); + Real acceptable_error_2 = 9e-4; + if (std::is_same::value) { - BOOST_CHECK_CLOSE_FRACTION(f1(x), cheb1(x), 4e-3); + acceptable_error = 1.6e-5; } - else + if (std::is_same::value) { - BOOST_CHECK_CLOSE_FRACTION(f1(x), cheb1(x), 1.3e-5); + acceptable_error *= 500; } - BOOST_CHECK_CLOSE_FRACTION(f2(x), cheb2(x), 6e-3); - //BOOST_CHECK_CLOSE_FRACTION(f3(x), cheb3(x), 100*tol); + CHECK_ABSOLUTE_ERROR(f1(x), cheb1(x), acceptable_error); + + CHECK_ABSOLUTE_ERROR(f2(x), cheb2(x), acceptable_error_2); x += static_cast(1)/static_cast(1 << 7); } } + //Validate that the Chebyshev polynomials are well approximated by the Chebyshev transform. 
template void test_chebyshev_chebyshev_transform() @@ -179,58 +147,44 @@ void test_chebyshev_chebyshev_transform() // T_0 = 1: auto t0 = [](Real) { return 1; }; chebyshev_transform cheb0(t0, -1, 1); - BOOST_CHECK_CLOSE_FRACTION(cheb0.coefficients()[0], 2, tol); + CHECK_ABSOLUTE_ERROR(2, cheb0.coefficients()[0], tol); Real x = -1; while (x < 1) { - BOOST_CHECK_CLOSE_FRACTION(cheb0(x), 1, tol); - BOOST_CHECK_SMALL(cheb0.prime(x), tol); + CHECK_ABSOLUTE_ERROR(1, cheb0(x), tol); + CHECK_ABSOLUTE_ERROR(Real(0), cheb0.prime(x), tol); x += static_cast(1)/static_cast(1 << 7); } // T_1 = x: auto t1 = [](Real x) { return x; }; chebyshev_transform cheb1(t1, -1, 1); - BOOST_CHECK_CLOSE_FRACTION(cheb1.coefficients()[1], 1, tol); + CHECK_ABSOLUTE_ERROR(Real(1), cheb1.coefficients()[1], tol); x = -1; while (x < 1) { - if (x == 0) - { - BOOST_CHECK_SMALL(cheb1(x), tol); - } - else - { - BOOST_CHECK_CLOSE_FRACTION(cheb1(x), x, tol); - } - BOOST_CHECK_CLOSE_FRACTION(cheb1.prime(x), 1, tol); + CHECK_ABSOLUTE_ERROR(x, cheb1(x), tol); + CHECK_ABSOLUTE_ERROR(Real(1), cheb1.prime(x), tol); x += static_cast(1)/static_cast(1 << 7); } auto t2 = [](Real x) { return 2*x*x-1; }; chebyshev_transform cheb2(t2, -1, 1); - BOOST_CHECK_CLOSE_FRACTION(cheb2.coefficients()[2], 1, tol); + CHECK_ABSOLUTE_ERROR(Real(1), cheb2.coefficients()[2], tol); x = -1; while (x < 1) { - BOOST_CHECK_CLOSE_FRACTION(cheb2(x), t2(x), tol); - if (x != 0) - { - BOOST_CHECK_CLOSE_FRACTION(cheb2.prime(x), 4*x, tol); - } - else - { - BOOST_CHECK_SMALL(cheb2.prime(x), tol); - } + CHECK_ABSOLUTE_ERROR(t2(x), cheb2(x), tol); + CHECK_ABSOLUTE_ERROR(4*x, cheb2.prime(x), tol); x += static_cast(1)/static_cast(1 << 7); } } -BOOST_AUTO_TEST_CASE(chebyshev_transform_test) +int main() { #ifdef TEST1 test_chebyshev_chebyshev_transform(); @@ -258,6 +212,8 @@ BOOST_AUTO_TEST_CASE(chebyshev_transform_test) test_sinc_chebyshev_transform<__float128>(); #endif #endif + + return boost::math::test::report_errors(); } diff --git a/test/math_unit_test.hpp b/test/math_unit_test.hpp index 7a2613784..2d2feedd0 100644 --- a/test/math_unit_test.hpp +++ b/test/math_unit_test.hpp @@ -236,7 +236,7 @@ bool check_conditioned_error(Real abscissa, PreciseReal expected1, PreciseReal e << " This exceeds the tolerance " << tol << "\n" << std::showpos << " Expected: " << std::defaultfloat << std::fixed << expected << " = " << std::scientific << expected << std::hexfloat << " = " << expected << "\n" - << " Computed: " << std::defaultfloat << std::fixed << computed << " = " << std::scientific << expected << std::hexfloat << " = " << computed << "\n" + << " Computed: " << std::defaultfloat << std::fixed << computed << " = " << std::scientific << computed << std::hexfloat << " = " << computed << "\n" << " Condition number of function evaluation: " << std::noshowpos << std::defaultfloat << std::scientific << cond << " = " << std::fixed << cond << "\n" << " Badness scale required to make this message go away: " << std::defaultfloat << relative_error/(cond*mu) << "\n"; std::cerr.flags(f); @@ -247,6 +247,53 @@ bool check_conditioned_error(Real abscissa, PreciseReal expected1, PreciseReal e } +template +bool check_absolute_error(PreciseReal expected1, Real computed, Real acceptable_error, std::string const & filename, std::string const & function, int line) +{ + using std::max; + using std::abs; + using std::isnan; + // Of course integers can be expected values, and they are exact: + if (!std::is_integral::value) { + BOOST_ASSERT_MSG(sizeof(PreciseReal) >= sizeof(Real), + "The expected number 
must be computed in higher (or equal) precision than the number being tested."); + BOOST_ASSERT_MSG(!isnan(expected1), "Expected value cannot be a nan."); + } + BOOST_ASSERT_MSG(acceptable_error > 0, "Error must be > 0."); + + if (isnan(computed)) + { + std::ios_base::fmtflags f(std::cerr.flags()); + std::cerr << std::setprecision(3); + std::cerr << "\033[0;31mError at " << filename << ":" << function << ":" << line << ":\n" + << " \033[0m Computed value is a nan\n"; + std::cerr.flags(f); + ++detail::global_error_count; + return false; + } + + Real expected = Real(expected1); + Real error = abs(expected - computed); + if (error > acceptable_error) + { + std::ios_base::fmtflags f( std::cerr.flags() ); + std::cerr << std::setprecision(3); + std::cerr << "\033[0;31mError at " << filename << ":" << function << ":" << line << "\n"; + std::cerr << std::setprecision(std::numeric_limits::max_digits10); + std::cerr << "\033[0m The absolute error in " << boost::core::demangle(typeid(Real).name()) << " precision is " << std::scientific << error << "\n" + << " This exceeds the acceptable error " << acceptable_error << "\n" + << std::showpos + << " Expected: " << std::defaultfloat << std::fixed << expected << " = " << std::scientific << expected << std::hexfloat << " = " << expected << "\n" + << " Computed: " << std::defaultfloat << std::fixed << computed << " = " << std::scientific << computed<< std::hexfloat << " = " << computed << "\n" + << " Error/Acceptable error: " << std::defaultfloat << error/acceptable_error << "\n"; + std::cerr.flags(f); + ++detail::global_error_count; + return false; + } + return true; +} + + int report_errors() { if (detail::global_error_count > 0) @@ -277,4 +324,7 @@ int report_errors() #define CHECK_LE(X, Y) boost::math::test::check_le((X), (Y), __FILE__, __func__, __LINE__) #define CHECK_CONDITIONED_ERROR(V, W, X, Y, Z) boost::math::test::check_conditioned_error((V), (W), (X), (Y), (Z), __FILE__, __func__, __LINE__) + +#define CHECK_ABSOLUTE_ERROR(X, Y, Z) boost::math::test::check_absolute_error((X), (Y), (Z), __FILE__, __func__, __LINE__) + #endif
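A minimal usage sketch of the new `CHECK_ABSOLUTE_ERROR` macro added above (illustrative only, not part of the patch: the `test_exp_identity` function, the tested identity, and the tolerance are assumptions; only the (expected, computed, acceptable_error) argument order and `report_errors()` come from the diff):

    #include <cmath>
    #include <limits>
    #include "math_unit_test.hpp"

    // Hypothetical test: exp(x)*exp(x) should agree with exp(2x) to within a few epsilon.
    template<typename Real>
    void test_exp_identity()
    {
        using std::exp;
        Real acceptable_error = 8*std::numeric_limits<Real>::epsilon();
        Real x = Real(1)/Real(4);
        // Arguments are (expected, computed, acceptable_error); the expected value
        // may be supplied in a higher (or equal) precision type than the computed value.
        CHECK_ABSOLUTE_ERROR(exp(2*x), exp(x)*exp(x), acceptable_error);
    }

    int main()
    {
        test_exp_identity<float>();
        test_exp_identity<double>();
        // Nonzero exit code if any check failed.
        return boost::math::test::report_errors();
    }

On failure, the check prints the expected and computed values in fixed, scientific, and hex-float form, together with the ratio of the observed error to the acceptable error.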