From 6981dc12f937079dd6b5c74be5902b3b133b21ec Mon Sep 17 00:00:00 2001 From: pabristow Date: Fri, 9 Aug 2019 13:22:33 +0100 Subject: [PATCH] [CI SKIP]Editorial work using changes in math.css, part one. --- doc/distributions/arcsine.qbk | 8 +- doc/distributions/background.qbk | 6 +- doc/distributions/bernoulli.qbk | 5 +- doc/distributions/beta.qbk | 22 ++---- doc/distributions/binomial.qbk | 6 +- doc/distributions/binomial_example.qbk | 2 +- doc/distributions/chi_squared.qbk | 2 +- doc/distributions/dist_tutorial.qbk | 2 +- doc/distributions/exponential.qbk | 2 +- doc/distributions/extreme_value.qbk | 6 +- doc/distributions/f_dist_example.qbk | 6 +- doc/distributions/find_location_and_scale.qbk | 2 - doc/distributions/fisher.qbk | 28 +++---- doc/distributions/gamma.qbk | 4 +- doc/distributions/geometric.qbk | 13 ++-- doc/distributions/inverse_chi_squared.qbk | 12 +-- doc/distributions/inverse_gamma.qbk | 9 +-- doc/distributions/inverse_gaussian.qbk | 4 +- doc/distributions/laplace.qbk | 2 +- doc/distributions/logistic.qbk | 3 +- doc/distributions/lognormal.qbk | 2 +- doc/distributions/nc_beta.qbk | 9 ++- doc/distributions/nc_chi_squared.qbk | 14 ++-- doc/distributions/nc_f.qbk | 40 +++++----- doc/distributions/nc_t.qbk | 11 ++- doc/distributions/normal.qbk | 2 +- doc/distributions/pareto.qbk | 10 +-- doc/distributions/poisson.qbk | 3 +- doc/distributions/rayleigh.qbk | 8 +- doc/distributions/students_t.qbk | 17 ++--- doc/distributions/triangular.qbk | 6 +- doc/distributions/uniform.qbk | 10 +-- doc/distributions/weibull.qbk | 25 +++---- doc/html/index.html | 2 +- doc/html/indexes/s01.html | 9 +-- doc/html/indexes/s02.html | 6 +- doc/html/indexes/s03.html | 4 +- doc/html/indexes/s04.html | 2 +- doc/html/indexes/s05.html | 53 ++++++++------ doc/html/internals.html | 2 +- doc/html/math.css | 32 ++++++-- doc/html/math_toolkit/barycentric.html | 22 +++--- doc/html/math_toolkit/building.html | 22 +++--- .../math_toolkit/cardinal_quadratic_b.html | 4 +- 
doc/html/math_toolkit/catmull_rom.html | 11 +-- doc/html/math_toolkit/contact.html | 12 ++- doc/html/math_toolkit/conventions.html | 2 +- doc/html/math_toolkit/cubic_b.html | 11 +-- doc/html/math_toolkit/directories.html | 3 +- .../dist_ref/dists/arcine_dist.html | 21 +++--- .../dist_ref/dists/bernoulli_dist.html | 13 +++- .../dist_ref/dists/beta_dist.html | 24 +++--- .../dist_ref/dists/binomial_dist.html | 6 +- .../dist_ref/dists/extreme_dist.html | 4 +- .../math_toolkit/dist_ref/dists/f_dist.html | 42 +++++------ .../dist_ref/dists/gamma_dist.html | 2 +- .../dist_ref/dists/geometric_dist.html | 16 ++-- .../dists/inverse_chi_squared_dist.html | 36 ++++----- .../dist_ref/dists/inverse_gamma_dist.html | 14 ++-- .../dist_ref/dists/inverse_gaussian_dist.html | 13 ++-- .../dist_ref/dists/nc_beta_dist.html | 15 ++-- .../dist_ref/dists/nc_chi_squared_dist.html | 14 ++-- .../dist_ref/dists/nc_f_dist.html | 49 +++++++------ .../dist_ref/dists/nc_t_dist.html | 11 +-- .../dist_ref/dists/normal_dist.html | 2 +- .../math_toolkit/dist_ref/dists/pareto.html | 4 +- .../math_toolkit/dist_ref/dists/rayleigh.html | 8 +- .../dist_ref/dists/students_t_dist.html | 27 +++---- .../dist_ref/dists/triangular_dist.html | 36 +++++---- .../dist_ref/dists/uniform_dist.html | 18 +++-- .../dist_ref/dists/weibull_dist.html | 33 +++++---- .../double_exponential/de_caveats.html | 25 ++++--- .../double_exponential/de_exp_sinh.html | 7 +- .../double_exponential/de_overview.html | 10 +-- .../double_exponential/de_sinh_sinh.html | 2 +- .../double_exponential/de_tanh_sinh.html | 43 ++++++----- .../de_tanh_sinh_2_arg.html | 2 +- doc/html/math_toolkit/error_handling.html | 32 +++----- doc/html/math_toolkit/hints.html | 2 +- doc/html/math_toolkit/history1.html | 26 ++++--- doc/html/math_toolkit/history2.html | 26 ++++--- doc/html/math_toolkit/internals.html | 2 +- doc/html/math_toolkit/internals/cf.html | 2 +- .../math_toolkit/internals/error_test.html | 11 +-- doc/html/math_toolkit/internals/minimax.html 
| 9 ++- .../internals/series_evaluation.html | 5 +- .../math_toolkit/internals/test_data.html | 2 +- doc/html/math_toolkit/internals/tuples.html | 2 +- doc/html/math_toolkit/main_intro.html | 21 +++--- doc/html/math_toolkit/namespaces.html | 14 +++- doc/html/math_toolkit/navigation.html | 2 +- doc/html/math_toolkit/oct_todo.html | 2 +- doc/html/math_toolkit/oct_typedefs.html | 2 +- doc/html/math_toolkit/result_type.html | 33 ++++++++- .../stat_tut/overview/generic.html | 7 +- doc/html/math_toolkit/stat_tut/weg/f_eg.html | 10 +-- doc/html/math_toolkit/vector_barycentric.html | 2 +- doc/html/math_toolkit/whittaker_shannon.html | 11 ++- doc/internals/fraction.qbk | 2 +- doc/internals/minimax.qbk | 15 ++-- doc/internals/recurrence.qbk | 8 +- doc/internals/relative_error.qbk | 12 +-- doc/internals/series.qbk | 4 +- doc/internals/test_data.qbk | 5 +- .../barycentric_rational_interpolation.qbk | 16 ++-- .../cardinal_quadratic_b_spline.qbk | 10 +-- doc/interpolators/catmull_rom.qbk | 9 +-- doc/interpolators/cubic_b_spline.qbk | 16 ++-- .../vector_barycentric_rational.qbk | 2 +- doc/interpolators/whittaker_shannon.qbk | 13 ++-- doc/math.css | 32 ++++++-- doc/math.qbk | 14 +++- doc/octonion/math-octonion.qbk | 35 +++++---- doc/overview/building.qbk | 20 ++--- doc/overview/common_overviews.qbk | 9 ++- doc/overview/contact_info.qbk | 15 ++-- doc/overview/error_handling.qbk | 22 +++--- doc/overview/overview.qbk | 14 ++-- doc/overview/result_type_calc.qbk | 22 +++++- doc/overview/roadmap.qbk | 25 ++++--- doc/overview/structure.qbk | 9 ++- doc/overview/tr1.qbk | 1 - doc/performance/performance.qbk | 4 +- doc/policies/policy.qbk | 18 ++--- doc/policies/policy_tutorial.qbk | 5 +- doc/quadrature/double_exponential.qbk | 65 +++++++++-------- doc/sf/hypergeometric.qbk | 73 ++++++++++--------- example/barycentric_interpolation_example.cpp | 2 +- .../barycentric_interpolation_example_2.cpp | 5 +- 129 files changed, 945 insertions(+), 775 deletions(-) diff --git 
a/doc/distributions/arcsine.qbk b/doc/distributions/arcsine.qbk index 53e794bfa..85755e857 100644 --- a/doc/distributions/arcsine.qbk +++ b/doc/distributions/arcsine.qbk @@ -52,7 +52,7 @@ The [@http://en.wikipedia.org/wiki/Probability_density_function probability dens for the [@http://en.wikipedia.org/wiki/arcsine_distribution arcsine distribution] defined on the interval \[['x_min, x_max]\] is given by: -[figspace] [figspace] f(x; x_min, x_max) = 1 /([pi][sdot][sqrt]((x - x_min)[sdot](x_max - x_min)) +[expression f(x; x_min, x_max) = 1 /([pi][sdot][sqrt]((x - x_min)[sdot](x_max - x_min))] For example, __WolframAlpha arcsine distribution, from input of @@ -77,7 +77,7 @@ and some generalized examples with other ['x] ranges. The Cumulative Distribution Function CDF is defined as -[:F(x) = 2[sdot]arcsin([sqrt]((x-x_min)/(x_max - x))) / [pi]] +[expression F(x) = 2[sdot]arcsin([sqrt]((x-x_min)/(x_max - x))) / [pi]] [graph arcsine_cdf] @@ -137,7 +137,7 @@ and their appplication to solve stock market and other The random variate ['x] is constrained to ['x_min] and ['x_max], (for our 'standard' distribution, 0 and 1), and is usually some fraction. For any other ['x_min] and ['x_max] a fraction can be obtained from ['x] using -[sixemspace] fraction = (x - x_min) / (x_max - x_min) +[expression fraction = (x - x_min) / (x_max - x_min)] The simplest example is tossing heads and tails with a fair coin and modelling the risk of losing, or winning. Walkers (molecules, drunks...) moving left or right of a centre line are another common example. @@ -258,7 +258,7 @@ which was interpreted as and produced the resulting expression - x = -a sin^2((pi p)/2)+a+b sin^2((pi p)/2) +[expression x = -a sin^2((pi p)/2)+a+b sin^2((pi p)/2)] Thanks to Wolfram for providing this facility. 
diff --git a/doc/distributions/background.qbk b/doc/distributions/background.qbk index 09cb043bb..a3036cdb8 100644 --- a/doc/distributions/background.qbk +++ b/doc/distributions/background.qbk @@ -14,14 +14,14 @@ This means the probability density\/mass function (pdf) is written as ['f(k; n, Translating this into code the `binomial_distribution` constructor therefore has two parameters: - binomial_distribution(RealType n, RealType p); + binomial_distribution(RealType n, RealType p); While the function `pdf` has one argument specifying the distribution type (which includes its parameters, if any), and a second argument for the __random_variate. So taking our binomial distribution example, we would write: - pdf(binomial_distribution(n, p), k); + pdf(binomial_distribution(n, p), k); [endsect] @@ -72,7 +72,7 @@ describe how to change the rounding policy for these distributions. ] -[endsect] +[endsect] [/section:variates Random Variates and Distribution Parameters] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/bernoulli.qbk b/doc/distributions/bernoulli.qbk index ae9c577bf..f73208132 100644 --- a/doc/distributions/bernoulli.qbk +++ b/doc/distributions/bernoulli.qbk @@ -34,9 +34,10 @@ sequences of independent Bernoulli trials can be based. The Bernoulli is the binomial distribution (k = 1, p) with only one trial. [@http://en.wikipedia.org/wiki/Probability_density_function probability density function pdf] -f(0) = 1 - p, f(1) = p. +[expression f(0) = 1 - p, f(1) = p] + [@http://en.wikipedia.org/wiki/Cumulative_Distribution_Function Cumulative distribution function] -D(k) = if (k == 0) 1 - p else 1. 
+[expression D(k) = if (k == 0) 1 - p else 1] The following graph illustrates how the [@http://en.wikipedia.org/wiki/Probability_density_function probability density function pdf] diff --git a/doc/distributions/beta.qbk b/doc/distributions/beta.qbk index 149bef772..507642521 100644 --- a/doc/distributions/beta.qbk +++ b/doc/distributions/beta.qbk @@ -72,15 +72,15 @@ The [@http://en.wikipedia.org/wiki/Probability_density_function probability dens for the [@http://en.wikipedia.org/wiki/Beta_distribution beta distribution] defined on the interval \[0,1\] is given by: -f(x;[alpha],[beta]) = x[super[alpha] - 1] (1 - x)[super[beta] -1] / B([alpha], [beta]) +[expression f(x;[alpha],[beta]) = x[super[alpha] - 1] (1 - x)[super[beta] -1] / B([alpha], [beta])] -where B([alpha], [beta]) is the +where [role serif_italic B([alpha], [beta])] is the [@http://en.wikipedia.org/wiki/Beta_function beta function], implemented in this library as __beta. Division by the beta function ensures that the pdf is normalized to the range zero to unity. The following graph illustrates examples of the pdf for various values -of the shape parameters. Note the [alpha] = [beta] = 2 (blue line) +of the shape parameters. Note the ['[alpha] = [beta] = 2] (blue line) is dome-shaped, and might be approximated by a symmetrical triangular distribution. 
@@ -228,11 +228,9 @@ In the following table /a/ and /b/ are the parameters [alpha] and [beta], [table [[Function][Implementation Notes]] -[[pdf] - [f(x;[alpha],[beta]) = x[super[alpha] - 1] (1 - x)[super[beta] -1] / B([alpha], [beta]) +[[pdf][[role serif_italic f(x;[alpha],[beta]) = x[super[alpha] - 1] (1 - x)[super[beta] -1] / B([alpha], [beta])] Implemented using __ibeta_derivative(a, b, x).]] - [[cdf][Using the incomplete beta function __ibeta(a, b, x)]] [[cdf complement][__ibetac(a, b, x)]] [[quantile][Using the inverse incomplete beta function __ibeta_inv(a, b, p)]] @@ -244,12 +242,8 @@ In the following table /a/ and /b/ are the parameters [alpha] and [beta], [[kurtosis excess][ [equation beta_dist_kurtosis] ]] [[kurtosis][`kurtosis + 3`]] [[parameter estimation][ ]] -[[alpha - - from mean and variance][`mean * (( (mean * (1 - mean)) / variance)- 1)`]] -[[beta - - from mean and variance][`(1 - mean) * (((mean * (1 - mean)) /variance)-1)`]] +[[alpha (from mean and variance)][`mean * (( (mean * (1 - mean)) / variance)- 1)`]] +[[beta (from mean and variance)][`(1 - mean) * (((mean * (1 - mean)) /variance)-1)`]] [[The member functions `find_alpha` and `find_beta` from cdf and probability x @@ -260,7 +254,7 @@ In the following table /a/ and /b/ are the parameters [alpha] and [beta], __ibeta_inva, and __ibeta_invb respectively.]] [[`find_alpha`][`ibeta_inva(beta, x, probability)`]] [[`find_beta`][`ibeta_invb(alpha, x, probability)`]] -] +] [/table] [h4 References] @@ -270,7 +264,7 @@ __ibeta_inva, and __ibeta_invb respectively.]] [@http://mathworld.wolfram.com/BetaDistribution.html Wolfram MathWorld] -[endsect][/section:beta_dist beta] +[endsect] [/section:beta_dist beta] [/ beta.qbk Copyright 2006 John Maddock and Paul A. Bristow. 
diff --git a/doc/distributions/binomial.qbk b/doc/distributions/binomial.qbk index bb9c4bfd8..cb61db52e 100644 --- a/doc/distributions/binomial.qbk +++ b/doc/distributions/binomial.qbk @@ -130,8 +130,7 @@ best estimate for the success fraction is simply ['k/n], but if you want to be 95% sure that the true value is [*greater than] some value, ['p[sub min]], then: - p``[sub min]`` = binomial_distribution::find_lower_bound_on_p( - n, k, 0.05); + p``[sub min]`` = binomial_distribution::find_lower_bound_on_p(n, k, 0.05); [link math_toolkit.stat_tut.weg.binom_eg.binom_conf See worked example.] @@ -210,8 +209,7 @@ best estimate for the success fraction is simply ['k/n], but if you want to be 95% sure that the true value is [*less than] some value, ['p[sub max]], then: - p``[sub max]`` = binomial_distribution::find_upper_bound_on_p( - n, k, 0.05); + p``[sub max]`` = binomial_distribution::find_upper_bound_on_p(n, k, 0.05); [link math_toolkit.stat_tut.weg.binom_eg.binom_conf See worked example.] diff --git a/doc/distributions/binomial_example.qbk b/doc/distributions/binomial_example.qbk index 7f03e518b..ff96adffd 100644 --- a/doc/distributions/binomial_example.qbk +++ b/doc/distributions/binomial_example.qbk @@ -321,7 +321,7 @@ less than 1 in 1000 chance of observing a component failure [endsect] [/section:binom_size_eg Estimating Sample Sizes for a Binomial Distribution.] -[endsect][/section:binom_eg Binomial Distribution] +[endsect] [/section:binom_eg Binomial Distribution] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/chi_squared.qbk b/doc/distributions/chi_squared.qbk index 77237e04c..55c1a7fbd 100644 --- a/doc/distributions/chi_squared.qbk +++ b/doc/distributions/chi_squared.qbk @@ -150,7 +150,7 @@ In the following table /v/ is the number of degrees of freedom of the distributi * [@http://mathworld.wolfram.com/Chi-SquaredDistribution.html Weisstein, Eric W. "Chi-Squared Distribution." From MathWorld--A Wolfram Web Resource.] 
-[endsect][/section:chi_squared_dist Chi Squared] +[endsect] [/section:chi_squared_dist Chi Squared] [/ chi_squared.qbk Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/dist_tutorial.qbk b/doc/distributions/dist_tutorial.qbk index 14b3cf627..10037e581 100644 --- a/doc/distributions/dist_tutorial.qbk +++ b/doc/distributions/dist_tutorial.qbk @@ -171,7 +171,7 @@ by placing a semi-colon or vertical bar) to separate the variate from the parameter(s) that defines the shape of the distribution. For example, the binomial distribution probability distribution function (PDF) is written as -['f(k| n, p)] = Pr(K = k|n, p) = probability of observing k successes out of n trials. +[role serif_italic ['f(k| n, p)] = Pr(K = k|n, p) = ] probability of observing k successes out of n trials. K is the __random_variable, k is the __random_variate, the parameters are n (trials) and p (probability). ] [/tip Random Variates and Distribution Parameters] diff --git a/doc/distributions/exponential.qbk b/doc/distributions/exponential.qbk index af83a2970..e8b61de7b 100644 --- a/doc/distributions/exponential.qbk +++ b/doc/distributions/exponential.qbk @@ -97,7 +97,7 @@ In the following table [lambda] is the parameter lambda of the distribution, Samuel Kotz & Saralees Nadarajah] discuss the relationship of the types of extreme value distributions. -[endsect][/section:exp_dist Exponential] +[endsect] [/section:exp_dist Exponential] [/ exponential.qbk Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/extreme_value.qbk b/doc/distributions/extreme_value.qbk index 6658386f2..26b110a67 100644 --- a/doc/distributions/extreme_value.qbk +++ b/doc/distributions/extreme_value.qbk @@ -43,11 +43,11 @@ Samuel Kotz & Saralees Nadarajah]. 
The distribution has a PDF given by: -[:f(x) = (1/scale) e[super -(x-location)/scale] e[super -e[super -(x-location)/scale]]] +[expression f(x) = (1/scale) e[super -(x-location)/scale] e[super -e[super -(x-location)/scale]]] which in the standard case (scale = 1, location = 0) reduces to: -[:f(x) = e[super -x]e[super -e[super -x]]] +[expression f(x) = e[super -x]e[super -e[super -x]]] The following graph illustrates how the PDF varies with the location parameter: @@ -108,7 +108,7 @@ In the following table: [[kurtosis excess][kurtosis - 3 or 12 / 5]] ] -[endsect][/section:extreme_dist Extreme Value] +[endsect] [/section:extreme_dist Extreme Value] [/ extreme_value.qbk Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/f_dist_example.qbk b/doc/distributions/f_dist_example.qbk index 30e9cfe79..77d323c33 100644 --- a/doc/distributions/f_dist_example.qbk +++ b/doc/distributions/f_dist_example.qbk @@ -50,7 +50,7 @@ The procedure begins by printing out a summary of our input data: The test statistic for an F-test is simply the ratio of the square of the two standard deviations: -[:F = s[sub 1][super 2] / s[sub 2][super 2]] +[expression F = s[sub 1][super 2] / s[sub 2][super 2]] where s[sub 1] is the standard deviation of the first sample and s[sub 2] is the standard deviation of the second sample. Or in code: @@ -83,9 +83,9 @@ critical value of the F distribution with degrees of freedom N1-1 and N2-1. 
The upper and lower critical values can be computed using the quantile function: -[:F[sub (1-alpha; N1-1, N2-1)] = `quantile(fisher_f(N1-1, N2-1), alpha)`] +[expression F[sub (1-alpha; N1-1, N2-1)] = `quantile(fisher_f(N1-1, N2-1), alpha)`] -[:F[sub (alpha; N1-1, N2-1)] = `quantile(complement(fisher_f(N1-1, N2-1), alpha))`] +[expression F[sub (alpha; N1-1, N2-1)] = `quantile(complement(fisher_f(N1-1, N2-1), alpha))`] In our example program we need both upper and lower critical values for alpha and for alpha/2: diff --git a/doc/distributions/find_location_and_scale.qbk b/doc/distributions/find_location_and_scale.qbk index b722aec11..f994738ed 100644 --- a/doc/distributions/find_location_and_scale.qbk +++ b/doc/distributions/find_location_and_scale.qbk @@ -28,8 +28,6 @@ for full source code & appended program output. [endsect] [/section:find_eg Find Location and Scale Examples] - - [/ Copyright 2006 John Maddock and Paul A. Bristow. Distributed under the Boost Software License, Version 1.0. diff --git a/doc/distributions/fisher.qbk b/doc/distributions/fisher.qbk index bb175d35d..89b56774c 100644 --- a/doc/distributions/fisher.qbk +++ b/doc/distributions/fisher.qbk @@ -31,7 +31,7 @@ whether two samples have the same variance. 
If [chi][super 2][sub m][space] and [chi][super 2][sub n][space] are independent variates each distributed as Chi-Squared with /m/ and /n/ degrees of freedom, then the test statistic: -[:F[sub n,m][space] = ([chi][super 2][sub n][space] / n) / ([chi][super 2][sub m][space] / m)] +[expression F[sub n,m][space] = ([chi][super 2][sub n][space] / n) / ([chi][super 2][sub m][space] / m)] Is distributed over the range \[0, [infin]\] with an F distribution, and has the PDF: @@ -100,13 +100,13 @@ Direct differentiation of the CDF expressed in terms of the incomplete beta func led to the following two formulas: -[:f[sub v1,v2](x) = y * __ibeta_derivative(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))] +[expression f[sub v1,v2](x) = y * __ibeta_derivative(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))] with y = (v2 * v1) \/ ((v2 + v1 * x) * (v2 + v1 * x)) and -[:f[sub v1,v2](x) = y * __ibeta_derivative(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))] +[expression f[sub v1,v2](x) = y * __ibeta_derivative(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))] with y = (z * v1 - x * v1 * v1) \/ z[super 2] @@ -118,11 +118,11 @@ The aim is to keep the /x/ argument to __ibeta_derivative away from 1 to avoid rounding error. ]] [[cdf][Using the relations: -[:p = __ibeta(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))] +[expression p = __ibeta(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))] and -[:p = __ibetac(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))] +[expression p = __ibetac(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))] The first is used for v1 * x > v2, otherwise the second is used. @@ -131,11 +131,11 @@ avoid rounding error. ]] [[cdf complement][Using the relations: -[:p = __ibetac(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))] +[expression p = __ibetac(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))] and -[:p = __ibeta(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))] +[expression p = __ibeta(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))] The first is used for v1 * x < v2, otherwise the second is used. 
@@ -143,15 +143,15 @@ The aim is to keep the /x/ argument to __ibeta well away from 1 to avoid rounding error. ]] [[quantile][Using the relation: -[:x = v2 * a \/ (v1 * b)] +[expression x = v2 * a \/ (v1 * b)] where: -[:a = __ibeta_inv(v1 \/ 2, v2 \/ 2, p)] +[expression a = __ibeta_inv(v1 \/ 2, v2 \/ 2, p)] and -[:b = 1 - a] +[expression b = 1 - a] Quantities /a/ and /b/ are both computed by __ibeta_inv without the subtraction implied above.]] @@ -159,15 +159,15 @@ subtraction implied above.]] from the complement][Using the relation: -[:x = v2 * a \/ (v1 * b)] +[expression x = v2 * a \/ (v1 * b)] where -[:a = __ibetac_inv(v1 \/ 2, v2 \/ 2, p)] +[expression a = __ibetac_inv(v1 \/ 2, v2 \/ 2, p)] and -[:b = 1 - a] +[expression b = 1 - a] Quantities /a/ and /b/ are both computed by __ibetac_inv without the subtraction implied above.]] @@ -180,7 +180,7 @@ subtraction implied above.]] Weisstein, Eric W. "F-Distribution." From MathWorld--A Wolfram Web Resource.] ]] ] -[endsect][/section:f_dist F distribution] +[endsect] [/section:f_dist F distribution] [/ fisher.qbk Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/gamma.qbk b/doc/distributions/gamma.qbk index 9d5faabd6..4a4227411 100644 --- a/doc/distributions/gamma.qbk +++ b/doc/distributions/gamma.qbk @@ -109,7 +109,7 @@ data for those functions for more information. [h4 Implementation] In the following table /k/ is the shape parameter of the distribution, -[theta][space] is its scale parameter, /x/ is the random variate, /p/ is the probability +[theta] is its scale parameter, /x/ is the random variate, /p/ is the probability and /q = 1-p/. [table @@ -127,7 +127,7 @@ and /q = 1-p/. 
[[kurtosis excess][6 / k ]] ] -[endsect][/section:gamma_dist Gamma (and Erlang) Distribution] +[endsect] [/section:gamma_dist Gamma (and Erlang) Distribution] [/ diff --git a/doc/distributions/geometric.qbk b/doc/distributions/geometric.qbk index 5129b4516..5294ee124 100644 --- a/doc/distributions/geometric.qbk +++ b/doc/distributions/geometric.qbk @@ -61,18 +61,17 @@ before the first success. (unlike another definition where the set of trials starts at one, sometimes named /shifted/).] The geometric distribution assumes that success_fraction /p/ is fixed for all /k/ trials. -The probability that there are /k/ failures before the first success is +The probability that there are /k/ failures before the first success -__spaces Pr(Y=/k/) = (1-/p/)[super /k/]/p/ +[expression Pr(Y=/k/) = (1-/p/)[super /k/]/p/] -For example, when throwing a 6-face dice the success probability /p/ = 1/6 = 0.1666[recur][space]. +For example, when throwing a 6-face dice the success probability /p/ = 1/6 = 0.1666[recur]. Throwing repeatedly until a /three/ appears, -the probability distribution of the number of times /not-a-three/ is thrown -is geometric. +the probability distribution of the number of times /not-a-three/ is thrown is geometric. Geometric distribution has the Probability Density Function PDF: -__spaces (1-/p/)[super /k/]/p/ +[expression (1-/p/)[super /k/]/p/] The following graph illustrates how the PDF and CDF vary for three examples of the success fraction /p/, @@ -339,7 +338,7 @@ the expected number of failures using the quantile. [[`find_maximum_number_of_trials`][See __negative_binomial_distrib]] ] -[endsect][/section:geometric_dist geometric] +[endsect] [/section:geometric_dist geometric] [/ geometric.qbk Copyright 2010 John Maddock and Paul A. Bristow. 
diff --git a/doc/distributions/inverse_chi_squared.qbk b/doc/distributions/inverse_chi_squared.qbk index 4c1f9f983..ba3ce2b99 100644 --- a/doc/distributions/inverse_chi_squared.qbk +++ b/doc/distributions/inverse_chi_squared.qbk @@ -38,7 +38,7 @@ A second version has an implicit scale = 1/degrees of freedom and gives the 1st The 2nd Wikipedia inverse chi_squared distribution definition can be implemented by explicitly specifying a scale = 1. -Both definitions are also available in Wolfram Mathematica and in __R (geoR) with default scale = 1/degrees of freedom. +Both definitions are also available in __Mathematica and in __R (geoR) with default scale = 1/degrees of freedom. See @@ -60,7 +60,7 @@ See also __inverse_gamma_distrib and __chi_squared_distrib. The inverse_chi_squared distribution is a special case of a inverse_gamma distribution with [nu] (degrees_of_freedom) shape ([alpha]) and scale ([beta]) where -__spaces [alpha]= [nu] /2 and [beta] = [frac12]. +[expression [alpha]= [nu] /2 and [beta] = [frac12]] [note This distribution *does* provide the typedef: @@ -75,20 +75,20 @@ or you can write `inverse_chi_squared my_invchisqr(2, 3);`] For degrees of freedom parameter [nu], the (*unscaled*) inverse chi_squared distribution is defined by the probability density function (PDF): -__spaces f(x;[nu]) = 2[super -[nu]/2] x[super -[nu]/2-1] e[super -1/2x] / [Gamma]([nu]/2) +[expression f(x;[nu]) = 2[super -[nu]/2] x[super -[nu]/2-1] e[super -1/2x] / [Gamma]([nu]/2)] and Cumulative Density Function (CDF) -__spaces F(x;[nu]) = [Gamma]([nu]/2, 1/2x) / [Gamma]([nu]/2) +[expression F(x;[nu]) = [Gamma]([nu]/2, 1/2x) / [Gamma]([nu]/2)] For degrees of freedom parameter [nu] and scale parameter [xi], the *scaled* inverse chi_squared distribution is defined by the probability density function (PDF): -__spaces f(x;[nu], [xi]) = ([xi][nu]/2)[super [nu]/2] e[super -[nu][xi]/2x] x[super -1-[nu]/2] / [Gamma]([nu]/2) +[expression f(x;[nu], [xi]) = ([xi][nu]/2)[super [nu]/2] e[super 
-[nu][xi]/2x] x[super -1-[nu]/2] / [Gamma]([nu]/2)] and Cumulative Density Function (CDF) -__spaces F(x;[nu], [xi]) = [Gamma]([nu]/2, [nu][xi]/2x) / [Gamma]([nu]/2) +[expression F(x;[nu], [xi]) = [Gamma]([nu]/2, [nu][xi]/2x) / [Gamma]([nu]/2)] The following graphs illustrate how the PDF and CDF of the inverse chi_squared distribution varies for a few values of parameters [nu] and [xi]: diff --git a/doc/distributions/inverse_gamma.qbk b/doc/distributions/inverse_gamma.qbk index 0f8afea4d..4a812efa6 100644 --- a/doc/distributions/inverse_gamma.qbk +++ b/doc/distributions/inverse_gamma.qbk @@ -33,7 +33,6 @@ See [@http://en.wikipedia.org/wiki/Inverse-gamma_distribution inverse gamma dist See also __gamma_distrib. - [note In spite of potential confusion with the inverse gamma function, this distribution *does* provide the typedef: @@ -49,11 +48,11 @@ or you can write `inverse_gamma my_ig(2, 3);`] For shape parameter [alpha] and scale parameter [beta], it is defined by the probability density function (PDF): -__spaces f(x;[alpha], [beta]) = [beta][super [alpha]] * (1/x) [super [alpha]+1] exp(-[beta]/x) / [Gamma]([alpha]) +[expression f(x;[alpha], [beta]) = [beta][super [alpha]] * (1/x) [super [alpha]+1] exp(-[beta]/x) / [Gamma]([alpha])] and cumulative density function (CDF) -__spaces F(x;[alpha], [beta]) = [Gamma]([alpha], [beta]/x) / [Gamma]([alpha]) +[expression F(x;[alpha], [beta]) = [Gamma]([alpha], [beta]/x) / [Gamma]([alpha])] The following graphs illustrate how the PDF and CDF of the inverse gamma distribution varies as the parameters vary: @@ -100,7 +99,7 @@ But in general, inverse_gamma results are accurate to a few epsilon, [h4 Implementation] In the following table [alpha] is the shape parameter of the distribution, -[alpha][space] is its scale parameter, /x/ is the random variate, /p/ is the probability +[beta] is its scale parameter, /x/ is the random variate, /p/ is the probability and /q = 1-p/. 
[[kurtosis_excess][(30 * [alpha] - 66) / (([alpha]-3)*([alpha] - 4)) for [alpha] >4, else a __domain_error]] ] [/table] -[endsect][/section:inverse_gamma_dist Inverse Gamma Distribution] +[endsect] [/section:inverse_gamma_dist Inverse Gamma Distribution] [/ Copyright 2010 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/inverse_gaussian.qbk b/doc/distributions/inverse_gaussian.qbk index 612801fe6..5f3accb98 100644 --- a/doc/distributions/inverse_gaussian.qbk +++ b/doc/distributions/inverse_gaussian.qbk @@ -67,11 +67,11 @@ For mean parameters [mu] and scale (also called precision) parameter [lambda], and random variate x, the inverse_gaussian distribution is defined by the probability density function (PDF): -__spaces f(x;[mu], [lambda]) = [sqrt]([lambda]/2[pi]x[super 3]) e[super -[lambda](x-[mu])[sup2]/2[mu][sup2]x] +[expression f(x;[mu], [lambda]) = [sqrt]([lambda]/2[pi]x[super 3]) e[super -[lambda](x-[mu])[sup2]/2[mu][sup2]x] ] and Cumulative Density Function (CDF): -__spaces F(x;[mu], [lambda]) = [Phi]{[sqrt]([lambda]/x) (x/[mu]-1)} + e[super 2[mu]/[lambda]] [Phi]{-[sqrt]([lambda]/[mu]) (1+x/[mu])} +[expression F(x;[mu], [lambda]) = [Phi]{[sqrt]([lambda]/x) (x/[mu]-1)} + e[super 2[mu]/[lambda]] [Phi]{-[sqrt]([lambda]/[mu]) (1+x/[mu])} ] where [Phi] is the standard normal distribution CDF. diff --git a/doc/distributions/laplace.qbk b/doc/distributions/laplace.qbk index e62cc21c7..2eb9f5898 100644 --- a/doc/distributions/laplace.qbk +++ b/doc/distributions/laplace.qbk @@ -130,7 +130,7 @@ q <=0.5: x = [mu] - [sigma]*log( 2*q ) * M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions, 1972, p. 930. -[endsect][/section:laplace_dist laplace] +[endsect] [/section:laplace_dist laplace] [/ Copyright 2008, 2009 John Maddock, Paul A. Bristow and M.A. (Thijs) van den Berg. 
diff --git a/doc/distributions/logistic.qbk b/doc/distributions/logistic.qbk index c2d04a8d8..d6d21a0ec 100644 --- a/doc/distributions/logistic.qbk +++ b/doc/distributions/logistic.qbk @@ -92,7 +92,8 @@ in such cases, only a low /absolute error/ can be guaranteed. [[variance][ ([pi]*s)[super 2] / 3]] ] -[endsect] +[endsect] [/section:logistic_dist Logistic Distribution] + [/ logistic.qbk Copyright 2006, 2007 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/lognormal.qbk b/doc/distributions/lognormal.qbk index 5f19ed5ec..6e7604357 100644 --- a/doc/distributions/lognormal.qbk +++ b/doc/distributions/lognormal.qbk @@ -108,7 +108,7 @@ and /q = 1-p/. [[kurtosis excess][e[super 4s[super 2] ] + 2e[super 3s[super 2] ] + 3e[super 2s[super 2] ] - 6 ]] ] -[endsect][/section:normal_dist Normal] +[endsect] [/section:lognormal_dist Log Normal Distribution] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/nc_beta.qbk b/doc/distributions/nc_beta.qbk index d74b7167a..92008ff95 100644 --- a/doc/distributions/nc_beta.qbk +++ b/doc/distributions/nc_beta.qbk @@ -33,11 +33,12 @@ The noncentral beta distribution is a generalization of the __beta_distrib. It is defined as the ratio -X = [chi][sub m][super 2]([lambda]) \/ ([chi][sub m][super 2]([lambda]) -+ [chi][sub n][super 2]) -where [chi][sub m][super 2]([lambda]) is a noncentral [chi][super 2] +[expression X = [chi][sub m][super 2]([lambda]) \/ ([chi][sub m][super 2]([lambda]) ++ [chi][sub n][super 2])] +where [role serif_italic [chi][sub m][super 2]([lambda])] +is a noncentral [role serif_italic [chi][super 2]] random variable with /m/ degrees of freedom, and [chi][sub n][super 2] -is a central [chi][super 2] random variable with /n/ degrees of freedom. +is a central [role serif_italic [chi][super 2] ] random variable with /n/ degrees of freedom. 
This gives a PDF that can be expressed as a Poisson mixture of beta distribution PDFs: diff --git a/doc/distributions/nc_chi_squared.qbk b/doc/distributions/nc_chi_squared.qbk index a1c2a32dc..3b5d174f7 100644 --- a/doc/distributions/nc_chi_squared.qbk +++ b/doc/distributions/nc_chi_squared.qbk @@ -39,18 +39,18 @@ }} // namespaces The noncentral chi-squared distribution is a generalization of the -__chi_squared_distrib. If X[sub i] are [nu] independent, normally -distributed random variables with means [mu][sub i] and variances -[sigma][sub i][super 2], then the random variable +__chi_squared_distrib. If ['X[sub i]] are /[nu]/ independent, normally +distributed random variables with means /[mu][sub i]/ and variances +['[sigma][sub i][super 2]], then the random variable [equation nc_chi_squ_ref1] is distributed according to the noncentral chi-squared distribution. The noncentral chi-squared distribution has two parameters: -[nu] which specifies the number of degrees of freedom -(i.e. the number of X[sub i]), and [lambda] which is related to the -mean of the random variables X[sub i] by: +/[nu]/ which specifies the number of degrees of freedom +(i.e. the number of ['X[sub i])], and [lambda] which is related to the +mean of the random variables ['X[sub i]] by: [equation nc_chi_squ_ref2] @@ -205,7 +205,7 @@ This method uses the well known sum: [equation nc_chi_squ_ref5] -Where P[sub a](x) is the incomplete gamma function. +Where ['P[sub a](x)] is the incomplete gamma function. The method starts at the [lambda]th term, which is where the Poisson weighting function achieves its maximum value, although this is not necessarily diff --git a/doc/distributions/nc_f.qbk b/doc/distributions/nc_f.qbk index 535a5da45..6436c3433 100644 --- a/doc/distributions/nc_f.qbk +++ b/doc/distributions/nc_f.qbk @@ -33,7 +33,7 @@ The noncentral F distribution is a generalization of the __F_distrib. 
It is defined as the ratio - F = (X/v1) / (Y/v2) +[expression F = (X/v1) / (Y/v2)] where X is a noncentral [chi][super 2] random variable with /v1/ degrees of freedom and non-centrality parameter [lambda], @@ -43,7 +43,7 @@ This gives the following PDF: [equation nc_f_ref1] -where L[sub a][super b](c) is a generalised Laguerre polynomial and B(a,b) is the +where ['L[sub a][super b](c)] is a generalised Laguerre polynomial and ['B(a,b)] is the __beta function, or [equation nc_f_ref2] @@ -60,7 +60,7 @@ for different values of [lambda]: Constructs a non-central beta distribution with parameters /v1/ and /v2/ and non-centrality parameter /lambda/. -Requires v1 > 0, v2 > 0 and lambda >= 0, otherwise calls __domain_error. +Requires /v1/ > 0, /v2/ > 0 and lambda >= 0, otherwise calls __domain_error. RealType degrees_of_freedom1()const; @@ -104,43 +104,43 @@ is the non-centrality parameter, [[Function][Implementation Notes]] [[pdf][Implemented in terms of the non-central beta PDF using the relation: -f(x;v1,v2;[lambda]) = (v1\/v2) / ((1+y)*(1+y)) * g(y\/(1+y);v1\/2,v2\/2;[lambda]) +[role serif_italic f(x;v1,v2;[lambda]) = (v1\/v2) / ((1+y)*(1+y)) * g(y\/(1+y);v1\/2,v2\/2;[lambda])] -where g(x; a, b; [lambda]) is the non central beta PDF, and: - -y = x * v1 \/ v2 +where [role serif_italic g(x; a, b; [lambda])] is the non central beta PDF, and: + +[role serif_italic y = x * v1 \/ v2] ]] [[cdf][Using the relation: -p = B[sub y](v1\/2, v2\/2; [lambda]) +[role serif_italic p = B[sub y](v1\/2, v2\/2; [lambda])] -where B[sub x](a, b; [lambda]) is the noncentral beta distribution CDF and +where [role serif_italic B[sub x](a, b; [lambda])] is the noncentral beta distribution CDF and -y = x * v1 \/ v2 +[role serif_italic y = x * v1 \/ v2] ]] [[cdf complement][Using the relation: -q = 1 - B[sub y](v1\/2, v2\/2; [lambda]) +[role serif_italic q = 1 - B[sub y](v1\/2, v2\/2; [lambda])] -where 1 - B[sub x](a, b; [lambda]) is the complement of the +where [role serif_italic 1 - B[sub x](a, 
b; [lambda])] is the complement of the noncentral beta distribution CDF and -y = x * v1 \/ v2 +[role serif_italic y = x * v1 \/ v2] ]] [[quantile][Using the relation: -x = (bx \/ (1-bx)) * (v1 \/ v2) +[role serif_italic x = (bx \/ (1-bx)) * (v1 \/ v2)] where -bx = Q[sub p][super -1](v1\/2, v2\/2; [lambda]) +[role serif_italic bx = Q[sub p][super -1](v1\/2, v2\/2; [lambda])] and -Q[sub p][super -1](v1\/2, v2\/2; [lambda]) +[role serif_italic Q[sub p][super -1](v1\/2, v2\/2; [lambda])] is the noncentral beta quantile. @@ -150,18 +150,18 @@ is the noncentral beta quantile. from the complement][ Using the relation: -x = (bx \/ (1-bx)) * (v1 \/ v2) +[role serif_italic x = (bx \/ (1-bx)) * (v1 \/ v2)] where -bx = QC[sub q][super -1](v1\/2, v2\/2; [lambda]) +[role serif_italic bx = QC[sub q][super -1](v1\/2, v2\/2; [lambda])] and -QC[sub q][super -1](v1\/2, v2\/2; [lambda]) +[role serif_italic QC[sub q][super -1](v1\/2, v2\/2; [lambda])] is the noncentral beta quantile from the complement.]] -[[mean][v2 * (v1 + l) \/ (v1 * (v2 - 2))]] +[[mean][[role serif_italic v2 * (v1 + l) \/ (v1 * (v2 - 2))]]] [[mode][By numeric maximalisation of the PDF.]] [[variance][Refer to, [@http://mathworld.wolfram.com/NoncentralF-Distribution.html Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram Web Resource.] ]] diff --git a/doc/distributions/nc_t.qbk b/doc/distributions/nc_t.qbk index 5318d1a3d..16e84f94d 100644 --- a/doc/distributions/nc_t.qbk +++ b/doc/distributions/nc_t.qbk @@ -31,18 +31,17 @@ The noncentral T distribution is a generalization of the __students_t_distrib. Let X have a normal distribution with mean [delta] and variance 1, and let -[nu] S[super 2] have +['[nu] S[super 2]] have a chi-squared distribution with degrees of freedom [nu]. Assume that -X and S[super 2] are independent. The -distribution of t[sub [nu]]([delta])=X/S is called a -noncentral t distribution with degrees of freedom [nu] and noncentrality -parameter [delta]. 
+X and S[super 2] are independent. +The distribution of [role serif_italic t[sub [nu]]([delta])=X/S] is called a +noncentral t distribution with degrees of freedom [nu] and noncentrality parameter [delta]. This gives the following PDF: [equation nc_t_ref1] -where [sub 1]F[sub 1](a;b;x) is a confluent hypergeometric function. +where [role serif_italic [sub 1]F[sub 1](a;b;x)] is a confluent hypergeometric function. The following graph illustrates how the distribution changes for different values of [nu] and [delta]: diff --git a/doc/distributions/normal.qbk b/doc/distributions/normal.qbk index 9dd435816..a04db2024 100644 --- a/doc/distributions/normal.qbk +++ b/doc/distributions/normal.qbk @@ -33,7 +33,7 @@ distribution: it is also known as the Gaussian Distribution. A normal distribution with mean zero and standard deviation one is known as the ['Standard Normal Distribution]. -Given mean [mu][space]and standard deviation [sigma] it has the PDF: +Given mean [mu] and standard deviation [sigma] it has the PDF: [equation normal_ref1] diff --git a/doc/distributions/pareto.qbk b/doc/distributions/pareto.qbk index 57535a861..d6537f577 100644 --- a/doc/distributions/pareto.qbk +++ b/doc/distributions/pareto.qbk @@ -29,10 +29,10 @@ The [@http://en.wikipedia.org/wiki/pareto_distribution Pareto distribution] is a continuous distribution with the [@http://en.wikipedia.org/wiki/Probability_density_function probability density function (pdf)]: -[:f(x; [alpha], [beta]) = [alpha][beta][super [alpha]] / x[super [alpha]+ 1]] +[expression f(x; [alpha], [beta]) = [alpha][beta][super [alpha]] / x[super [alpha]+ 1]] -For shape parameter [alpha][space] > 0, and scale parameter [beta][space] > 0. -If x < [beta][space], the pdf is zero. +For shape parameter [alpha] > 0, and scale parameter [beta] > 0. +If x < [beta], the pdf is zero. The [@http://mathworld.wolfram.com/ParetoDistribution.html Pareto distribution] often describes the larger compared to the smaller. 
@@ -46,10 +46,8 @@ And this graph illustrates how the PDF varies with the shape parameter [alpha]: [graph pareto_pdf2] - [h4 Related distributions] - [h4 Member Functions] pareto_distribution(RealType scale = 1, RealType shape = 1); @@ -110,7 +108,7 @@ and its complement /q = 1-p/. * Handbook of Statistical Distributions with Applications, K Krishnamoorthy, ISBN 1-58488-635-8, Chapter 23, pp 257 - 267. (Note the meaning of a and b is reversed in Wolfram and Krishnamoorthy). -[endsect][/section:pareto pareto] +[endsect] [/section:pareto pareto] [/ Copyright 2006, 2009 John Maddock and Paul A. Bristow. diff --git a/doc/distributions/poisson.qbk b/doc/distributions/poisson.qbk index 432862ede..018cebc31 100644 --- a/doc/distributions/poisson.qbk +++ b/doc/distributions/poisson.qbk @@ -92,6 +92,8 @@ In the following table [lambda][space] is the mean of the distribution, [[kurtosis excess][1/[lambda]]] ] +[endsect] [/section:poisson_dist Poisson] + [/ poisson.qbk Copyright 2006 John Maddock and Paul A. Bristow. Distributed under the Boost Software License, Version 1.0. @@ -99,5 +101,4 @@ In the following table [lambda][space] is the mean of the distribution, http://www.boost.org/LICENSE_1_0.txt). ] -[endsect][/section:poisson_dist Poisson] diff --git a/doc/distributions/rayleigh.qbk b/doc/distributions/rayleigh.qbk index a0a275507..756dceb6c 100644 --- a/doc/distributions/rayleigh.qbk +++ b/doc/distributions/rayleigh.qbk @@ -29,9 +29,9 @@ The [@http://en.wikipedia.org/wiki/Rayleigh_distribution Rayleigh distribution] is a continuous distribution with the [@http://en.wikipedia.org/wiki/Probability_density_function probability density function]: -[:f(x; sigma) = x * exp(-x[super 2]/2 [sigma][super 2]) / [sigma][super 2]] +[expression f(x; sigma) = x * exp(-x[super 2]/2 [sigma][super 2]) / [sigma][super 2]] -For sigma parameter [sigma][space] > 0, and x > 0. +For sigma parameter /[sigma]/ > 0, and /x/ > 0. 
The Rayleigh distribution is often used where two orthogonal components have an absolute value, @@ -86,13 +86,13 @@ NTL RR type with 150-bit accuracy, about 50 decimal digits. [h4 Implementation] -In the following table [sigma][space] is the sigma parameter of the distribution, +In the following table [sigma] is the sigma parameter of the distribution, /x/ is the random variate, /p/ is the probability and /q = 1-p/. [table [[Function][Implementation Notes]] [[pdf][Using the relation: pdf = x * exp(-x[super 2])/2 [sigma][super 2] ]] -[[cdf][Using the relation: p = 1 - exp(-x[super 2]/2) [sigma][super 2][space] = -__expm1(-x[super 2]/2) [sigma][super 2]]] +[[cdf][Using the relation: p = 1 - exp(-x[super 2]/2) [sigma][super 2]= -__expm1(-x[super 2]/2) [sigma][super 2]]] [[cdf complement][Using the relation: q = exp(-x[super 2]/ 2) * [sigma][super 2] ]] [[quantile][Using the relation: x = sqrt(-2 * [sigma] [super 2]) * log(1 - p)) = sqrt(-2 * [sigma] [super 2]) * __log1p(-p))]] [[quantile from the complement][Using the relation: x = sqrt(-2 * [sigma] [super 2]) * log(q)) ]] diff --git a/doc/distributions/students_t.qbk b/doc/distributions/students_t.qbk index d9146e528..9701ce9fc 100644 --- a/doc/distributions/students_t.qbk +++ b/doc/distributions/students_t.qbk @@ -42,12 +42,11 @@ Given N independent measurements, let [equation students_t_dist] -where /M/ is the population mean, [' ''' μ '''] is the sample mean, and /s/ is the -sample variance. +where /M/ is the population mean, [mu] is the sample mean, and /s/ is the sample variance. [@https://en.wikipedia.org/wiki/Student%27s_t-distribution Student's t-distribution] is defined as the distribution of the random -variable t which is - very loosely - the "best" that we can do not +variable t which is - very loosely - the "best" that we can do while not knowing the true standard deviation of the sample. 
It has the PDF: [equation students_t_ref1] @@ -133,12 +132,12 @@ In the following table /v/ is the degrees of freedom of the distribution, [table [[Function][Implementation Notes]] -[[pdf][Using the relation: pdf = (v \/ (v + t[super 2]))[super (1+v)\/2 ] / (sqrt(v) * __beta(v\/2, 0.5)) ]] +[[pdf][Using the relation: [role serif_italic pdf = (v \/ (v + t[super 2]))[super (1+v)\/2 ] / (sqrt(v) * __beta(v\/2, 0.5))] ]] [[cdf][Using the relations: -p = 1 - z /iff t > 0/ +[role serif_italic p = 1 - z /iff t > 0/] -p = z /otherwise/ +[role serif_italic p = z /otherwise/] where z is given by: @@ -146,13 +145,13 @@ __ibeta(v \/ 2, 0.5, v \/ (v + t[super 2])) \/ 2 ['iff v < 2t[super 2]] __ibetac(0.5, v \/ 2, t[super 2 ] / (v + t[super 2]) \/ 2 /otherwise/]] [[cdf complement][Using the relation: q = cdf(-t) ]] -[[quantile][Using the relation: t = sign(p - 0.5) * sqrt(v * y \/ x) +[[quantile][Using the relation: [role serif_italic t = sign(p - 0.5) * sqrt(v * y \/ x)] where: -x = __ibeta_inv(v \/ 2, 0.5, 2 * min(p, q)) +[role serif_italic x = __ibeta_inv(v \/ 2, 0.5, 2 * min(p, q)) ] -y = 1 - x +[role serif_italic y = 1 - x] The quantities /x/ and /y/ are both returned by __ibeta_inv without the subtraction implied above.]] diff --git a/doc/distributions/triangular.qbk b/doc/distributions/triangular.qbk index 48daa081d..9f98668eb 100644 --- a/doc/distributions/triangular.qbk +++ b/doc/distributions/triangular.qbk @@ -45,9 +45,9 @@ The [@http://en.wikipedia.org/wiki/Triangular_distribution triangular distributi is a distribution with the [@http://en.wikipedia.org/wiki/Probability_density_function probability density function]: -[:f(x) =] -[:[:2(x-a)/(b-a) (c-a) [:for a <= x <= c]]] -[:[:2(b-x)/(b-a) (b-c) [:for c < x <= b]]] +[expression f(x) =] +[expression[:2(x-a)/(b-a) (c-a) [:for a <= x <= c]]] +[expression[:2(b-x)/(b-a) (b-c) [:for c < x <= b]]] Parameter ['a] (lower) can be any finite value. Parameter ['b] (upper) can be any finite value > a (lower). 
diff --git a/doc/distributions/uniform.qbk b/doc/distributions/uniform.qbk index 673b02720..88847c171 100644 --- a/doc/distributions/uniform.qbk +++ b/doc/distributions/uniform.qbk @@ -32,13 +32,13 @@ The [@http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 continu is a distribution with the [@http://en.wikipedia.org/wiki/Probability_density_function probability density function]: -[:f(x) =] -[:[: 1 / (upper - lower) for lower < x < upper]] -[:[:zero for x < lower or x > upper]] +[expression f(x) =] +[expression 1 / (upper - lower) for lower < x < upper] +[expression zero for x < lower or x > upper] and in this implementation: -[:1 / (upper - lower) for x = lower or x = upper] +[expression 1 / (upper - lower) for x = lower or x = upper] The choice of x = lower or x = upper is made because statistical use of this distribution judged is most likely: the method of maximum likelihood uses this definition. @@ -121,7 +121,7 @@ b is the /upper/ parameter, * [@http://mathworld.wolfram.com/UniformDistribution.html Weisstein, Weisstein, Eric W. "Uniform Distribution." From MathWorld--A Wolfram Web Resource.] * [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3662.htm] -[endsect][/section:uniform_dist Uniform] +[endsect] [/section:uniform_dist Uniform] [/ Copyright 2006 John Maddock and Paul A. Bristow. 
diff --git a/doc/distributions/weibull.qbk b/doc/distributions/weibull.qbk index 6e038cc8a..205e056be 100644 --- a/doc/distributions/weibull.qbk +++ b/doc/distributions/weibull.qbk @@ -1,6 +1,5 @@ [section:weibull_dist Weibull Distribution] - ``#include `` namespace boost{ namespace math{ @@ -31,32 +30,32 @@ is a continuous distribution with the [@http://en.wikipedia.org/wiki/Probability_density_function probability density function]: -[:f(x; [alpha], [beta]) = ([alpha]\/[beta]) * (x \/ [beta])[super [alpha] - 1] * e[super -(x\/[beta])[super [alpha]]]] +[expression f(x; [alpha], [beta]) = ([alpha]\/[beta]) * (x \/ [beta])[super [alpha] - 1] * e[super -(x\/[beta])[super [alpha]]]] -For shape parameter [alpha] > 0, and scale parameter [beta] > 0, and x > 0. +For shape parameter ['[alpha]] > 0, and scale parameter ['[beta]] > 0, and /x/ > 0. The Weibull distribution is often used in the field of failure analysis; in particular it can mimic distributions where the failure rate varies over time. If the failure rate is: -* constant over time, then [alpha][space] = 1, suggests that items are failing from random events. -* decreases over time, then [alpha][space] < 1, suggesting "infant mortality". -* increases over time, then [alpha][space] > 1, suggesting "wear out" - more likely to fail as time goes by. +* constant over time, then ['[alpha]] = 1, suggests that items are failing from random events. +* decreases over time, then ['[alpha]] < 1, suggesting "infant mortality". +* increases over time, then ['[alpha]] > 1, suggesting "wear out" - more likely to fail as time goes by. 
-The following graph illustrates how the PDF varies with the shape parameter [alpha]: +The following graph illustrates how the PDF varies with the shape parameter ['[alpha]]: [graph weibull_pdf1] -While this graph illustrates how the PDF varies with the scale parameter [beta]: +While this graph illustrates how the PDF varies with the scale parameter ['[beta]]: [graph weibull_pdf2] [h4 Related distributions] -When [alpha][space] = 3, the +When ['[alpha]] = 3, the [@http://en.wikipedia.org/wiki/Weibull_distribution Weibull distribution] appears similar to the [@http://en.wikipedia.org/wiki/Normal_distribution normal distribution]. -When [alpha][space] = 1, the Weibull distribution reduces to the +When ['[alpha]] = 1, the Weibull distribution reduces to the [@http://en.wikipedia.org/wiki/Exponential_distribution exponential distribution]. The relationship of the types of extreme value distributions, of which the Weibull is but one, is discussed by @@ -98,8 +97,8 @@ and as such should have very low error rates. [h4 Implementation] -In the following table [alpha][space] is the shape parameter of the distribution, -[beta][space] is its scale parameter, /x/ is the random variate, /p/ is the probability +In the following table ['[alpha]] is the shape parameter of the distribution, +['[beta]] is its scale parameter, /x/ is the random variate, /p/ is the probability and /q = 1-p/. [table @@ -122,7 +121,7 @@ and /q = 1-p/. * [@http://mathworld.wolfram.com/WeibullDistribution.html Weisstein, Eric W. "Weibull Distribution." From MathWorld--A Wolfram Web Resource.] * [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3668.htm Weibull in NIST Exploratory Data Analysis] -[endsect][/section:weibull Weibull] +[endsect] [/section:weibull Weibull] [/ Copyright 2006 John Maddock and Paul A. Bristow. 
diff --git a/doc/html/index.html b/doc/html/index.html index 8079aea57..2900f4564 100644 --- a/doc/html/index.html +++ b/doc/html/index.html @@ -126,7 +126,7 @@ This manual is also available in -

Last revised: August 07, 2019 at 14:09:37 GMT

+

Last revised: August 09, 2019 at 11:49:38 GMT


diff --git a/doc/html/indexes/s01.html b/doc/html/indexes/s01.html index 5cb00af04..73beb0512 100644 --- a/doc/html/indexes/s01.html +++ b/doc/html/indexes/s01.html @@ -24,7 +24,7 @@

-Function Index

+Function Index

1 2 4 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z

@@ -150,11 +150,11 @@
  • apply_recurrence_relation_backward

    - +
  • apply_recurrence_relation_forward

    - +
  • area

    @@ -835,7 +835,6 @@
  • D

    @@ -3164,7 +3163,6 @@

    support

  • @@ -3467,7 +3465,6 @@
  • variance

    -Class Index

    +Class Index
  • A B C D E F G H I L M N O P Q R S T U V W

    @@ -37,7 +37,7 @@

    -Typedef Index

    +Typedef Index

    A B C D E F G H I L N O P R S T U V W

    @@ -423,7 +423,7 @@
  • Template Class octonion

  • Template Class quaternion

  • Testing

  • -
  • Tools For 3 Term Recurrence Relations

  • +
  • Tools For 3-Term Recurrence Relations

  • Triangular Distribution

  • Uniform Distribution

  • Weibull Distribution

  • diff --git a/doc/html/indexes/s04.html b/doc/html/indexes/s04.html index fd8009ea9..a3da54ae5 100644 --- a/doc/html/indexes/s04.html +++ b/doc/html/indexes/s04.html @@ -24,7 +24,7 @@

    -Macro Index

    +Macro Index

    B F

    diff --git a/doc/html/indexes/s05.html b/doc/html/indexes/s05.html index 47cb2a796..253310760 100644 --- a/doc/html/indexes/s05.html +++ b/doc/html/indexes/s05.html @@ -23,7 +23,7 @@

    -Index

    +Index

    1 2 4 5 7 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z

    @@ -434,11 +434,11 @@
  • apply_recurrence_relation_backward

    - +
  • apply_recurrence_relation_forward

    - +
  • arcsine

    @@ -618,7 +618,7 @@
  • @@ -1195,6 +1193,7 @@
  • Additional Implementation Notes

  • Boost.Math Frequently Asked Questions (FAQs)

  • Boost.Math Macros

  • +
  • Contact Info and Support

  • Credits and Acknowledgements

  • Error Handling Policies

  • Error Logs For Error Rate Tables

  • @@ -1657,7 +1656,11 @@
  • Calculation of the Type of the Result

    - +
  • called

    @@ -2293,6 +2296,14 @@
  • +

    Contact Info and Support

    +
    +
  • +
  • Continued Fraction Evaluation

  • @@ -4304,7 +4314,7 @@
  • forward_recurrence_iterator

    - +
  • Fourier Integrals

    @@ -4601,6 +4611,7 @@

    GIT

  • -

    Tools For 3 Term Recurrence Relations

    +

    Tools For 3-Term Recurrence Relations

  • @@ -8575,6 +8586,7 @@

    Trac

    - The main support for this library is via the Boost mailing lists: + The main place to see and raise issues is now at GitHub. + Currently open bug reports can be viewed here. +

    +

    + All old bug reports, including closed ones, can be viewed on Trac (now read-only) + here + and more recent issues on GitHub here. +

    +

    + The other places for discussion about this library are via the Boost mailing + lists:

    • diff --git a/doc/html/math_toolkit/conventions.html b/doc/html/math_toolkit/conventions.html index 6b1554e9b..2628f7a95 100644 --- a/doc/html/math_toolkit/conventions.html +++ b/doc/html/math_toolkit/conventions.html @@ -27,7 +27,7 @@ Document Conventions

    - +

    This documentation aims to use of the following naming and formatting conventions. diff --git a/doc/html/math_toolkit/cubic_b.html b/doc/html/math_toolkit/cubic_b.html index 75fca263e..71c76ce82 100644 --- a/doc/html/math_toolkit/cubic_b.html +++ b/doc/html/math_toolkit/cubic_b.html @@ -91,10 +91,10 @@

    • - The start of the functions domain + The start of the function's domain,
    • - The step size + The step size.

    @@ -166,9 +166,10 @@ Testing

    - Since the interpolant obeys s(xj) = f(xj) at all interpolation - points, the tests generate random data and evaluate the interpolant at the - interpolation points, validating that equality with the data holds. + Since the interpolant obeys s(xj) = f(xj) + at all interpolation points, the tests generate random data and evaluate the + interpolant at the interpolation points, validating that equality with the + data holds.

    In addition, constant, linear, and quadratic functions are interpolated to diff --git a/doc/html/math_toolkit/directories.html b/doc/html/math_toolkit/directories.html index 11954a413..99e574ac4 100644 --- a/doc/html/math_toolkit/directories.html +++ b/doc/html/math_toolkit/directories.html @@ -44,7 +44,8 @@ to use higher precision types like NTL::RR, GNU Multiple Precision Arithmetic Library, GNU MPFR library, Boost.Multiprecision - like cpp_bin_float_50 that conform to the requirements specified by real_concept. + like cpp_bin_float_50 that conform to the requirements specified by + real_concept.

    /constants/

    diff --git a/doc/html/math_toolkit/dist_ref/dists/arcine_dist.html b/doc/html/math_toolkit/dist_ref/dists/arcine_dist.html index 48094656c..f34d2c4d4 100644 --- a/doc/html/math_toolkit/dist_ref/dists/arcine_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/arcine_dist.html @@ -82,9 +82,10 @@ distribution defined on the interval [x_min, x_max] is given by:

    -

    -     f(x; x_min, x_max) = 1 /(π⋅√((x - x_min)⋅(x_max - x_min)) -

    +

    + f(x; x_min, x_max) = 1 /(π⋅√((x - x_min)⋅(x_max + - x_min)) +

    For example, Wolfram Alpha arcsine distribution, from input of @@ -121,7 +122,8 @@ The Cumulative Distribution Function CDF is defined as

    - F(x) = 2⋅arcsin(√((x-x_min)/(x_max - x))) / π + F(x) = 2⋅arcsin(√((x-x_min)/(x_max - x))) / + π

    @@ -236,9 +238,9 @@ and x_max a fraction can be obtained from x using

    -

    -   fraction = (x - x_min) / (x_max - x_min) -

    +

    + fraction = (x - x_min) / (x_max - x_min) +

    The simplest example is tossing heads and tails with a fair coin and modelling the risk of losing, or winning. Walkers (molecules, drunks...) moving left @@ -572,8 +574,9 @@

    and produced the resulting expression

    -
    x = -a sin^2((pi p)/2)+a+b sin^2((pi p)/2)
    -
    +

    + x = -a sin^2((pi p)/2)+a+b sin^2((pi p)/2) +

    Thanks to Wolfram for providing this facility.

    diff --git a/doc/html/math_toolkit/dist_ref/dists/bernoulli_dist.html b/doc/html/math_toolkit/dist_ref/dists/bernoulli_dist.html index 0fa5cbbdb..131085fb5 100644 --- a/doc/html/math_toolkit/dist_ref/dists/bernoulli_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/bernoulli_dist.html @@ -64,9 +64,18 @@

    probability - density function pdf f(0) = 1 - p, f(1) = p. Cumulative - distribution function D(k) = if (k == 0) 1 - p else 1. + density function pdf

    +

    + f(0) = 1 - p, f(1) = p +

    +

    + Cumulative + distribution function +

    +

    + D(k) = if (k == 0) 1 - p else 1 +

    The following graph illustrates how the probability density function pdf varies with the outcome of the single trial: diff --git a/doc/html/math_toolkit/dist_ref/dists/beta_dist.html b/doc/html/math_toolkit/dist_ref/dists/beta_dist.html index 9cb1d7302..6e18971bf 100644 --- a/doc/html/math_toolkit/dist_ref/dists/beta_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/beta_dist.html @@ -101,19 +101,19 @@ density function PDF for the beta distribution defined on the interval [0,1] is given by:

    +

    + f(x;α,β) = xα - 1 (1 - x)β -1 / B(α, β) +

    - f(x;α,β) = xα - 1 (1 - x)β -1 / B(α, β) -

    -

    - where B(α, β) is the beta + where B(α, β) is the beta function, implemented in this library as beta. Division by the beta function ensures that the pdf is normalized to the range zero to unity.

    The following graph illustrates examples of the pdf for various values - of the shape parameters. Note the α = β = 2 (blue line) is dome-shaped, and - might be approximated by a symmetrical triangular distribution. + of the shape parameters. Note the α = β = 2 (blue line) + is dome-shaped, and might be approximated by a symmetrical triangular distribution.

    @@ -344,7 +344,7 @@ from presumed-known mean and variance.

    - f(x;α,β) = xα - 1 (1 - x)β -1 / B(α, β) + f(x;α,β) = xα - 1 (1 - x)β -1 / B(α, β)

    Implemented using ibeta_derivative(a, @@ -499,10 +499,7 @@ from presumed-known mean and variance.

    - alpha -

    -

    - from mean and variance + alpha (from mean and variance)

    @@ -519,10 +516,7 @@ from presumed-known mean and variance.

    - beta -

    -

    - from mean and variance + beta (from mean and variance)

    diff --git a/doc/html/math_toolkit/dist_ref/dists/binomial_dist.html b/doc/html/math_toolkit/dist_ref/dists/binomial_dist.html index f5a4722ce..fe1174ad8 100644 --- a/doc/html/math_toolkit/dist_ref/dists/binomial_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/binomial_dist.html @@ -240,8 +240,7 @@ but if you want to be 95% sure that the true value is greater than some value, pmin, then:

    -
    pmin = binomial_distribution<RealType>::find_lower_bound_on_p(
    -                    n, k, 0.05);
    +
    pmin = binomial_distribution<RealType>::find_lower_bound_on_p(n, k, 0.05);
     

    See worked @@ -346,8 +345,7 @@ but if you want to be 95% sure that the true value is less than some value, pmax, then:

    -
    pmax = binomial_distribution<RealType>::find_upper_bound_on_p(
    -                    n, k, 0.05);
    +
    pmax = binomial_distribution<RealType>::find_upper_bound_on_p(n, k, 0.05);
     

    See worked diff --git a/doc/html/math_toolkit/dist_ref/dists/extreme_dist.html b/doc/html/math_toolkit/dist_ref/dists/extreme_dist.html index 650bfbc46..a40941be3 100644 --- a/doc/html/math_toolkit/dist_ref/dists/extreme_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/extreme_dist.html @@ -73,13 +73,13 @@ The distribution has a PDF given by:

    - f(x) = (1/scale) e-(x-location)/scale e-e-(x-location)/scale + f(x) = (1/scale) e-(x-location)/scale e-e-(x-location)/scale

    which in the standard case (scale = 1, location = 0) reduces to:

    - f(x) = e-xe-e-x + f(x) = e-xe-e-x

    The following graph illustrates how the PDF varies with the location parameter: diff --git a/doc/html/math_toolkit/dist_ref/dists/f_dist.html b/doc/html/math_toolkit/dist_ref/dists/f_dist.html index c697bb4da..30aa80aa2 100644 --- a/doc/html/math_toolkit/dist_ref/dists/f_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/f_dist.html @@ -58,7 +58,7 @@ n degrees of freedom, then the test statistic:

    - Fn,m   = (χ2n   / n) / (χ2m   / m) + Fn,m   = (χ2n   / n) / (χ2m   / m)

    Is distributed over the range [0, ∞] with an F distribution, and has the @@ -192,8 +192,8 @@ led to the following two formulas:

    - fv1,v2(x) = y * ibeta_derivative(v2 - / 2, v1 / 2, v2 / (v2 + v1 * x)) + fv1,v2(x) = y * ibeta_derivative(v2 + / 2, v1 / 2, v2 / (v2 + v1 * x))

    with y = (v2 * v1) / ((v2 + v1 * x) * (v2 + v1 * x)) @@ -202,8 +202,8 @@ and

    - fv1,v2(x) = y * ibeta_derivative(v1 - / 2, v2 / 2, v1 * x / (v2 + v1 * x)) + fv1,v2(x) = y * ibeta_derivative(v1 + / 2, v2 / 2, v1 * x / (v2 + v1 * x))

    with y = (z * v1 - x * v1 * v1) / z2 @@ -232,15 +232,15 @@ Using the relations:

    - p = ibeta(v1 - / 2, v2 / 2, v1 * x / (v2 + v1 * x)) + p = ibeta(v1 + / 2, v2 / 2, v1 * x / (v2 + v1 * x))

    and

    - p = ibetac(v2 - / 2, v1 / 2, v2 / (v2 + v1 * x)) + p = ibetac(v2 + / 2, v1 / 2, v2 / (v2 + v1 * x))

    The first is used for v1 * x > v2, otherwise the second is @@ -263,15 +263,15 @@ Using the relations:

    - p = ibetac(v1 - / 2, v2 / 2, v1 * x / (v2 + v1 * x)) + p = ibetac(v1 + / 2, v2 / 2, v1 * x / (v2 + v1 * x))

    and

    - p = ibeta(v2 - / 2, v1 / 2, v2 / (v2 + v1 * x)) + p = ibeta(v2 + / 2, v1 / 2, v2 / (v2 + v1 * x))

    The first is used for v1 * x < v2, otherwise the second is @@ -294,20 +294,20 @@ Using the relation:

    - x = v2 * a / (v1 * b) + x = v2 * a / (v1 * b)

    where:

    - a = ibeta_inv(v1 - / 2, v2 / 2, p) + a = ibeta_inv(v1 + / 2, v2 / 2, p)

    and

    - b = 1 - a + b = 1 - a

    Quantities a and b @@ -330,20 +330,20 @@ Using the relation:

    - x = v2 * a / (v1 * b) + x = v2 * a / (v1 * b)

    where

    - a = ibetac_inv(v1 - / 2, v2 / 2, p) + a = ibetac_inv(v1 + / 2, v2 / 2, p)

    and

    - b = 1 - a + b = 1 - a

    Quantities a and b diff --git a/doc/html/math_toolkit/dist_ref/dists/gamma_dist.html b/doc/html/math_toolkit/dist_ref/dists/gamma_dist.html index 61fbabbf1..68baff5bc 100644 --- a/doc/html/math_toolkit/dist_ref/dists/gamma_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/gamma_dist.html @@ -187,7 +187,7 @@

    In the following table k is the shape parameter of - the distribution, θ   is its scale parameter, x is the + the distribution, θ is its scale parameter, x is the random variate, p is the probability and q = 1-p.

    diff --git a/doc/html/math_toolkit/dist_ref/dists/geometric_dist.html b/doc/html/math_toolkit/dist_ref/dists/geometric_dist.html index be6990c6f..fde1d011e 100644 --- a/doc/html/math_toolkit/dist_ref/dists/geometric_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/geometric_dist.html @@ -102,23 +102,23 @@

    The probability that there are k failures before the - first success is -

    -

    -    Pr(Y=k) = (1-p)kp + first success

    +

    + Pr(Y=k) = (1-p)kp +

    For example, when throwing a 6-face dice the success probability p - = 1/6 = 0.1666 ̇  . Throwing repeatedly until a three + = 1/6 = 0.1666 ̇. Throwing repeatedly until a three appears, the probability distribution of the number of times not-a-three is thrown is geometric.

    Geometric distribution has the Probability Density Function PDF:

    -

    -    (1-p)kp -

    +

    + (1-p)kp +

    The following graph illustrates how the PDF and CDF vary for three examples of the success fraction p, (when considering the geometric diff --git a/doc/html/math_toolkit/dist_ref/dists/inverse_chi_squared_dist.html b/doc/html/math_toolkit/dist_ref/dists/inverse_chi_squared_dist.html index eae5ed5ea..906ae83d2 100644 --- a/doc/html/math_toolkit/dist_ref/dists/inverse_chi_squared_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/inverse_chi_squared_dist.html @@ -74,8 +74,10 @@ = 1.

    - Both definitions are also available in Wolfram Mathematica and in The R Project for Statistical Computing - (geoR) with default scale = 1/degrees of freedom. + Both definitions are also available in Wolfram + Mathematica and in The R + Project for Statistical Computing (geoR) with default scale = 1/degrees + of freedom.

    See @@ -120,9 +122,9 @@ The inverse_chi_squared distribution is a special case of a inverse_gamma distribution with ν (degrees_of_freedom) shape (α) and scale (β) where

    -

    -    α= ν /2 and β = ½. -

    +

    + α= ν /2 and β = ½ +

    @@ -149,29 +151,29 @@ inverse chi_squared distribution is defined by the probability density function (PDF):

    -

    -    f(x;ν) = 2-ν/2 x-ν/2-1 e-1/2x / Γ(ν/2) -

    +

    + f(x;ν) = 2-ν/2 x-ν/2-1 e-1/2x / Γ(ν/2) +

    and Cumulative Density Function (CDF)

    -

    -    F(x;ν) = Γ(ν/2, 1/2x) / Γ(ν/2) -

    +

    + F(x;ν) = Γ(ν/2, 1/2x) / Γ(ν/2) +

    For degrees of freedom parameter ν and scale parameter ξ, the scaled inverse chi_squared distribution is defined by the probability density function (PDF):

    -

    -    f(x;ν, ξ) = (ξν/2)ν/2 e-νξ/2x x-1-ν/2 / Γ(ν/2) -

    +

    + f(x;ν, ξ) = (ξν/2)ν/2 e-νξ/2x x-1-ν/2 / Γ(ν/2) +

    and Cumulative Density Function (CDF)

    -

    -    F(x;ν, ξ) = Γ(ν/2, νξ/2x) / Γ(ν/2) -

    +

    + F(x;ν, ξ) = Γ(ν/2, νξ/2x) / Γ(ν/2) +

    The following graphs illustrate how the PDF and CDF of the inverse chi_squared distribution varies for a few values of parameters ν and ξ: diff --git a/doc/html/math_toolkit/dist_ref/dists/inverse_gamma_dist.html b/doc/html/math_toolkit/dist_ref/dists/inverse_gamma_dist.html index fa8f32dd8..bb29bc643 100644 --- a/doc/html/math_toolkit/dist_ref/dists/inverse_gamma_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/inverse_gamma_dist.html @@ -94,15 +94,15 @@ For shape parameter α and scale parameter β, it is defined by the probability density function (PDF):

    -

    -    f(x;α, β) = βα * (1/x) α+1 exp(-β/x) / Γ(α) -

    +

    + f(x;α, β) = βα * (1/x) α+1 exp(-β/x) / Γ(α) +

    and cumulative density function (CDF)

    -

    -    F(x;α, β) = Γ(α, β/x) / Γ(α) -

    +

    + F(x;α, β) = Γ(α, β/x) / Γ(α) +

    The following graphs illustrate how the PDF and CDF of the inverse gamma distribution varies as the parameters vary: @@ -188,7 +188,7 @@ Implementation

    - In the following table α is the shape parameter of the distribution, α   is its + In the following table α is the shape parameter of the distribution, α is its scale parameter, x is the random variate, p is the probability and q = 1-p.

    diff --git a/doc/html/math_toolkit/dist_ref/dists/inverse_gaussian_dist.html b/doc/html/math_toolkit/dist_ref/dists/inverse_gaussian_dist.html index af772f047..eb9482a00 100644 --- a/doc/html/math_toolkit/dist_ref/dists/inverse_gaussian_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/inverse_gaussian_dist.html @@ -106,15 +106,16 @@ variate x, the inverse_gaussian distribution is defined by the probability density function (PDF):

    -

    -    f(x;μ, λ) = √(λ/2πx3) e-λ(x-μ)²/2μ²x -

    +

    + f(x;μ, λ) = √(λ/2πx3) e-λ(x-μ)²/2μ²x +

    and Cumulative Density Function (CDF):

    -

    -    F(x;μ, λ) = Φ{√(λx) (xμ-1)} + e2μ/λ Φ{-√(λ/μ) (1+x/μ)} -

    +

    + F(x;μ, λ) = Φ{√(λx) (xμ-1)} + + e2μ/λ Φ{-√(λ/μ) (1+x/μ)} +

    where Φ is the standard normal distribution CDF.

    diff --git a/doc/html/math_toolkit/dist_ref/dists/nc_beta_dist.html b/doc/html/math_toolkit/dist_ref/dists/nc_beta_dist.html index 811c3e4e3..568e82336 100644 --- a/doc/html/math_toolkit/dist_ref/dists/nc_beta_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/nc_beta_dist.html @@ -61,11 +61,16 @@ Distribution.

    - It is defined as the ratio X = χm2(λ) / (χm2(λ) + χn2) where χm2(λ) is a noncentral - χ2 -random variable with m degrees of freedom, and χn2 -is - a central χ2 random variable with n degrees of freedom. + It is defined as the ratio +

    +

    + X = χm2(λ) / (χm2(λ) + χn2) +

    +

    + where χm2(λ) is a noncentral χ2 random variable with m + degrees of freedom, and χn2 +is a central χ2 + random variable with n degrees of freedom.

    This gives a PDF that can be expressed as a Poisson mixture of beta distribution diff --git a/doc/html/math_toolkit/dist_ref/dists/nc_chi_squared_dist.html b/doc/html/math_toolkit/dist_ref/dists/nc_chi_squared_dist.html index ed39211b9..43551803c 100644 --- a/doc/html/math_toolkit/dist_ref/dists/nc_chi_squared_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/nc_chi_squared_dist.html @@ -66,8 +66,9 @@

    The noncentral chi-squared distribution is a generalization of the Chi Squared Distribution. - If Xi are ν independent, normally distributed random variables with means - μi and variances σi2, then the random variable + If Xi are /ν/ independent, normally distributed random + variables with means /μi/ and variances σi2, then the + random variable

    @@ -77,9 +78,10 @@ is distributed according to the noncentral chi-squared distribution.

    - The noncentral chi-squared distribution has two parameters: ν which specifies - the number of degrees of freedom (i.e. the number of Xi), and λ which is - related to the mean of the random variables Xi by: + The noncentral chi-squared distribution has two parameters: /ν/ which specifies + the number of degrees of freedom (i.e. the number of Xi), + and λ which is related to the mean of the random variables Xi + by:

    @@ -489,7 +491,7 @@

    - Where Pa(x) is the incomplete gamma function. + Where Pa(x) is the incomplete gamma function.

    The method starts at the λth term, which is where the Poisson weighting diff --git a/doc/html/math_toolkit/dist_ref/dists/nc_f_dist.html b/doc/html/math_toolkit/dist_ref/dists/nc_f_dist.html index fd5fa5a1d..7149bb196 100644 --- a/doc/html/math_toolkit/dist_ref/dists/nc_f_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/nc_f_dist.html @@ -60,8 +60,9 @@ The noncentral F distribution is a generalization of the Fisher F Distribution. It is defined as the ratio

    -
    F = (X/v1) / (Y/v2)
    -
    +

    + F = (X/v1) / (Y/v2) +

    where X is a noncentral χ2 random variable with v1 degrees @@ -76,7 +77,9 @@ random variable with v1 degrees

    - where Lab(c) is a generalised Laguerre polynomial and B(a,b) is the beta function, or + where Lab(c) is a generalised Laguerre polynomial + and B(a,b) is the beta + function, or

    @@ -102,7 +105,8 @@ random variable with v1 degrees and v2 and non-centrality parameter lambda.

    - Requires v1 > 0, v2 > 0 and lambda >= 0, otherwise calls domain_error. + Requires v1 > 0, v2 > 0 + and lambda >= 0, otherwise calls domain_error.

    RealType degrees_of_freedom1()const;
     
    @@ -199,13 +203,15 @@ is the non-centrality parameter, x is the Implemented in terms of the non-central beta PDF using the relation:

    - f(x;v1,v2;λ) = (v1/v2) / ((1+y)*(1+y)) * g(y/(1+y);v1/2,v2/2;λ) + f(x;v1,v2;λ) = (v1/v2) / ((1+y)*(1+y)) + * g(y/(1+y);v1/2,v2/2;λ)

    - where g(x; a, b; λ) is the non central beta PDF, and: + where g(x; a, b; λ) is the + non central beta PDF, and:

    - y = x * v1 / v2 + y = x * v1 / v2

    @@ -220,13 +226,14 @@ is the non-centrality parameter, x is the Using the relation:

    - p = By(v1/2, v2/2; λ) + p = By(v1/2, v2/2; λ)

    - where Bx(a, b; λ) is the noncentral beta distribution CDF and + where Bx(a, b; λ) is the + noncentral beta distribution CDF and

    - y = x * v1 / v2 + y = x * v1 / v2

    @@ -241,14 +248,14 @@ is the non-centrality parameter, x is the Using the relation:

    - q = 1 - By(v1/2, v2/2; λ) + q = 1 - By(v1/2, v2/2; λ)

    - where 1 - Bx(a, b; λ) is the complement of the noncentral beta - distribution CDF and + where 1 - Bx(a, b; λ) is + the complement of the noncentral beta distribution CDF and

    - y = x * v1 / v2 + y = x * v1 / v2

    @@ -263,19 +270,19 @@ is the non-centrality parameter, x is the Using the relation:

    - x = (bx / (1-bx)) * (v1 / v2) + x = (bx / (1-bx)) * (v1 / v2)

    where

    - bx = Qp-1(v1/2, v2/2; λ) + bx = Qp-1(v1/2, v2/2; λ)

    and

    - Qp-1(v1/2, v2/2; λ) + Qp-1(v1/2, v2/2; λ)

    is the noncentral beta quantile. @@ -296,19 +303,19 @@ is the non-centrality parameter, x is the Using the relation:

    - x = (bx / (1-bx)) * (v1 / v2) + x = (bx / (1-bx)) * (v1 / v2)

    where

    - bx = QCq-1(v1/2, v2/2; λ) + bx = QCq-1(v1/2, v2/2; λ)

    and

    - QCq-1(v1/2, v2/2; λ) + QCq-1(v1/2, v2/2; λ)

    is the noncentral beta quantile from the complement. @@ -323,7 +330,7 @@ is the non-centrality parameter, x is the

    diff --git a/doc/html/math_toolkit/dist_ref/dists/nc_t_dist.html b/doc/html/math_toolkit/dist_ref/dists/nc_t_dist.html index 2ce5580cb..6bdef5ffe 100644 --- a/doc/html/math_toolkit/dist_ref/dists/nc_t_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/nc_t_dist.html @@ -58,10 +58,10 @@

    The noncentral T distribution is a generalization of the Students t Distribution. Let X have a normal distribution with mean δ and variance - 1, and let ν S2 have a chi-squared distribution with degrees of freedom ν. - Assume that X and S2 are independent. The distribution of tν(δ)=X/S is called - a noncentral t distribution with degrees of freedom ν and noncentrality parameter - δ. + 1, and let ν S2 have a chi-squared distribution with + degrees of freedom ν. Assume that X and S2 are independent. The distribution + of tν(δ)=X/S is called a noncentral + t distribution with degrees of freedom ν and noncentrality parameter δ.

    This gives the following PDF: @@ -71,7 +71,8 @@

    - where 1F1(a;b;x) is a confluent hypergeometric function. + where 1F1(a;b;x) is a confluent hypergeometric + function.

    The following graph illustrates how the distribution changes for different diff --git a/doc/html/math_toolkit/dist_ref/dists/normal_dist.html b/doc/html/math_toolkit/dist_ref/dists/normal_dist.html index 0b26b5c5c..793c41790 100644 --- a/doc/html/math_toolkit/dist_ref/dists/normal_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/normal_dist.html @@ -61,7 +61,7 @@ Normal Distribution.

    - Given mean μ  and standard deviation σ it has the PDF: + Given mean μ and standard deviation σ it has the PDF:

    diff --git a/doc/html/math_toolkit/dist_ref/dists/pareto.html b/doc/html/math_toolkit/dist_ref/dists/pareto.html index b6037e7ee..36777a4ab 100644 --- a/doc/html/math_toolkit/dist_ref/dists/pareto.html +++ b/doc/html/math_toolkit/dist_ref/dists/pareto.html @@ -55,10 +55,10 @@ density function (pdf):

    - f(x; α, β) = αβα / xα+ 1 + f(x; α, β) = αβα / xα+ 1

    - For shape parameter α   > 0, and scale parameter β   > 0. If x < β  , the + For shape parameter α > 0, and scale parameter β > 0. If x < β, the pdf is zero.

    diff --git a/doc/html/math_toolkit/dist_ref/dists/rayleigh.html b/doc/html/math_toolkit/dist_ref/dists/rayleigh.html index 4c0702e24..9955cd082 100644 --- a/doc/html/math_toolkit/dist_ref/dists/rayleigh.html +++ b/doc/html/math_toolkit/dist_ref/dists/rayleigh.html @@ -55,10 +55,10 @@ density function:

    - f(x; sigma) = x * exp(-x2/2 σ2) / σ2 + f(x; sigma) = x * exp(-x2/2 σ2) / σ2

    - For sigma parameter σ   > 0, and x > 0. + For sigma parameter /σ/ > 0, and x > 0.

    The Rayleigh distribution is often used where two orthogonal components @@ -151,7 +151,7 @@ Implementation

    - In the following table σ   is the sigma parameter of the distribution, x + In the following table σ is the sigma parameter of the distribution, x is the random variate, p is the probability and q = 1-p.

    @@ -193,7 +193,7 @@
    diff --git a/doc/html/math_toolkit/dist_ref/dists/students_t_dist.html b/doc/html/math_toolkit/dist_ref/dists/students_t_dist.html index 249041f19..b55675932 100644 --- a/doc/html/math_toolkit/dist_ref/dists/students_t_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/students_t_dist.html @@ -73,14 +73,14 @@

    - where M is the population mean, μ - is the sample mean, and s is the sample variance. + where M is the population mean, μ is the sample mean, + and s is the sample variance.

    Student's t-distribution is defined as the distribution of the random variable - t which is - very loosely - the "best" that we can do not knowing - the true standard deviation of the sample. It has the PDF: + t which is - very loosely - the "best" that we can do while not + knowing the true standard deviation of the sample. It has the PDF:

    @@ -248,9 +248,9 @@

    @@ -265,10 +265,10 @@ Using the relations:

    - p = 1 - z iff t > 0 + p = 1 - z iff t > 0

    - p = z otherwise + p = z otherwise

    where z is given by: @@ -303,17 +303,18 @@

    [Note]

    - v2 * (v1 + l) / (v1 * (v2 - 2)) + v2 * (v1 + l) / (v1 * (v2 - 2))

    - Using the relation: p = 1 - exp(-x2/2) σ2   = -expm1(-x2/2) + Using the relation: p = 1 - exp(-x2/2) σ2= -expm1(-x2/2) σ2

    - Using the relation: pdf = (v / (v + t2))(1+v)/2 / (sqrt(v) * - beta(v/2, - 0.5)) + Using the relation: pdf = (v / (v + + t2))(1+v)/2 / (sqrt(v) * beta(v/2, + 0.5))

    - Using the relation: t = sign(p - 0.5) * sqrt(v * y / x) + Using the relation: t = sign(p - + 0.5) * sqrt(v * y / x)

    where:

    - x = ibeta_inv(v - / 2, 0.5, 2 * min(p, q)) + x = ibeta_inv(v + / 2, 0.5, 2 * min(p, q))

    - y = 1 - x + y = 1 - x

    The quantities x and y diff --git a/doc/html/math_toolkit/dist_ref/dists/triangular_dist.html b/doc/html/math_toolkit/dist_ref/dists/triangular_dist.html index 6e82ee9aa..00c96ba9b 100644 --- a/doc/html/math_toolkit/dist_ref/dists/triangular_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/triangular_dist.html @@ -74,24 +74,32 @@ density function:

    - f(x) = + f(x) =

    -
    -

    - 2(x-a)/(b-a) (c-a) -

    - for a <= x <= c -

    -
    -
    + +

    - 2(b-x)/(b-a) (b-c) -

    + 2(x-a)/(b-a) (c-a) +

    - for c < x <= b -

    -
    + for a <= x <= c +

    + + +

    +

    + +

    +

    + 2(b-x)/(b-a) (b-c) +

    +

    + for c < x <= b +

    +
    + +

    Parameter a (lower) can be any finite value. Parameter b (upper) can be any finite value > a (lower). diff --git a/doc/html/math_toolkit/dist_ref/dists/uniform_dist.html b/doc/html/math_toolkit/dist_ref/dists/uniform_dist.html index 8e1cf2cb5..59f6da2ab 100644 --- a/doc/html/math_toolkit/dist_ref/dists/uniform_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/uniform_dist.html @@ -59,19 +59,21 @@ density function:

    - f(x) = + f(x) = +

    +

    + 1 / (upper - lower) for lower < x < + upper +

    +

    + zero for x < lower or x > upper

    -

    - 1 / (upper - lower) for lower < x < upper -

    -

    - zero for x < lower or x > upper -

    and in this implementation:

    - 1 / (upper - lower) for x = lower or x = upper + 1 / (upper - lower) for x = lower or x = + upper

    The choice of x = lower or x = upper is made because statistical use of diff --git a/doc/html/math_toolkit/dist_ref/dists/weibull_dist.html b/doc/html/math_toolkit/dist_ref/dists/weibull_dist.html index 86b929fd4..2bbfa1751 100644 --- a/doc/html/math_toolkit/dist_ref/dists/weibull_dist.html +++ b/doc/html/math_toolkit/dist_ref/dists/weibull_dist.html @@ -56,10 +56,11 @@ density function:

    - f(x; α, β) = (α/β) * (x / β)α - 1 * e-(x/β)α + f(x; α, β) = (α/β) * (x / β)α - 1 * e-(x/β)α

    - For shape parameter α > 0, and scale parameter β > 0, and x > 0. + For shape parameter α > 0, and scale parameter + β > 0, and x > 0.

    The Weibull distribution is often used in the field of failure analysis; @@ -68,20 +69,21 @@

    • - constant over time, then α   = 1, suggests that items are failing from - random events. + constant over time, then α = 1, suggests that items + are failing from random events.
    • - decreases over time, then α   < 1, suggesting "infant mortality". + decreases over time, then α < 1, suggesting + "infant mortality".
    • - increases over time, then α   > 1, suggesting "wear out" - - more likely to fail as time goes by. + increases over time, then α > 1, suggesting + "wear out" - more likely to fail as time goes by.

    The following graph illustrates how the PDF varies with the shape parameter - α: + α:

    @@ -89,7 +91,7 @@

    While this graph illustrates how the PDF varies with the scale parameter - β: + β:

    @@ -101,10 +103,10 @@ distributions

    - When α   = 3, the Weibull + When α = 3, the Weibull distribution appears similar to the normal - distribution. When α   = 1, the Weibull distribution reduces to the - exponential + distribution. When α = 1, the Weibull distribution + reduces to the exponential distribution. The relationship of the types of extreme value distributions, of which the Weibull is but one, is discussed by Extreme Value Distributions, Theory and Applications Samuel Kotz & Saralees @@ -171,9 +173,10 @@ Implementation

    - In the following table α   is the shape parameter of the distribution, β   is its - scale parameter, x is the random variate, p - is the probability and q = 1-p. + In the following table α is the shape parameter of + the distribution, β is its scale parameter, x + is the random variate, p is the probability and q + = 1-p.

    diff --git a/doc/html/math_toolkit/double_exponential/de_caveats.html b/doc/html/math_toolkit/double_exponential/de_caveats.html index c711d03d1..3b50e21f4 100644 --- a/doc/html/math_toolkit/double_exponential/de_caveats.html +++ b/doc/html/math_toolkit/double_exponential/de_caveats.html @@ -75,8 +75,9 @@ when the origin is neither in the center of the range, nor at an endpoint. Consider integrating:

    -
    1 / (1 +x^2)
    -
    +

    + 1 / (1 +x^2) +

    Over (a, ∞). As long as a >= 0 both the tanh_sinh and the exp_sinh integrators will handle this just fine: in @@ -92,8 +93,8 @@ each seperately using the tanh-sinh integrator, works just fine.

    - Finally, some endpoint singularities are too strong to be handled by tanh_sinh - or equivalent methods, for example consider integrating the function: + Finally, some endpoint singularities are too strong to be handled by tanh_sinh or equivalent methods, for example + consider integrating the function:

    double p = some_value;
     tanh_sinh<double> integrator;
    @@ -116,12 +117,12 @@
             over.
           

    - This actually works just fine for p < 0.95, but after that the tanh_sinh - integrator starts thrashing around and is unable to converge on the integral. - The problem is actually a lack of exponent range: if we simply swap type - double for something with a greater exponent range (an 80-bit long double - or a quad precision type), then we can get to at least p = 0.99. If we want - to go beyond that, or stick with type double, then we have to get smart. + This actually works just fine for p < 0.95, but after that the tanh_sinh integrator starts thrashing around + and is unable to converge on the integral. The problem is actually a lack + of exponent range: if we simply swap type double for something with a greater + exponent range (an 80-bit long double or a quad precision type), then we + can get to at least p = 0.99. If we want to go beyond that, or stick with + type double, then we have to get smart.

    The easiest method is to notice that for small x, then tan(x) ≅ x, @@ -164,8 +165,8 @@ };

    - This form integrates just fine over (-log(π/2), +∞) using either the tanh_sinh - or exp_sinh classes. + This form integrates just fine over (-log(π/2), +∞) using either the tanh_sinh or exp_sinh + classes.

    diff --git a/doc/html/math_toolkit/double_exponential/de_exp_sinh.html b/doc/html/math_toolkit/double_exponential/de_exp_sinh.html index 83d03e0a7..7716a7b9a 100644 --- a/doc/html/math_toolkit/double_exponential/de_exp_sinh.html +++ b/doc/html/math_toolkit/double_exponential/de_exp_sinh.html @@ -47,7 +47,8 @@ };

    - For half-infinite intervals, the exp-sinh quadrature is provided: + For half-infinite intervals, the exp-sinh quadrature + is provided:

    exp_sinh<double> integrator;
     auto f = [](double x) { return exp(-3*x); };
    @@ -58,10 +59,10 @@
     

    The native integration range of this integrator is (0, ∞), but we also support - (a, ∞), (-∞, 0) and (-∞, b) via argument transformations. + /(a, ∞), (-∞, 0)/ and /(-∞, b)/ via argument transformations.

    - Endpoint singularities and complex-valued integrands are supported by exp-sinh. + Endpoint singularities and complex-valued integrands are supported by exp-sinh.

    For example, the modified Bessel function K can be represented via: diff --git a/doc/html/math_toolkit/double_exponential/de_overview.html b/doc/html/math_toolkit/double_exponential/de_overview.html index f54f81a88..927707cb4 100644 --- a/doc/html/math_toolkit/double_exponential/de_overview.html +++ b/doc/html/math_toolkit/double_exponential/de_overview.html @@ -100,11 +100,11 @@ For example, the sinh_sinh quadrature integrates over the entire real line, the tanh_sinh over (-1, 1), and the exp_sinh - over (0, ∞). The latter integrators also have auxilliary ranges which - are handled via a change of variables on the function being integrated, so - that the tanh_sinh can handle - integration over (a, b), and exp_sinh - over (a, ∞) and(-∞, b). + over (0, ∞). The latter integrators also have auxilliary ranges which are + handled via a change of variables on the function being integrated, so that + the tanh_sinh can handle + integration over (a, b), and exp_sinh + over /(a, ∞) and(-∞, b)/.

    Like the other quadrature routines in Boost, these routines support both diff --git a/doc/html/math_toolkit/double_exponential/de_sinh_sinh.html b/doc/html/math_toolkit/double_exponential/de_sinh_sinh.html index bd689dfa1..55edf40f8 100644 --- a/doc/html/math_toolkit/double_exponential/de_sinh_sinh.html +++ b/doc/html/math_toolkit/double_exponential/de_sinh_sinh.html @@ -51,7 +51,7 @@ double Q = integrator.integrate(f, &error, &L1);

    - Note that the limits of integration are understood to be (-∞, ∞). + Note that the limits of integration are understood to be (-∞, +∞).

    Complex valued integrands are supported as well, for example the Dirichlet diff --git a/doc/html/math_toolkit/double_exponential/de_tanh_sinh.html b/doc/html/math_toolkit/double_exponential/de_tanh_sinh.html index 824df7cde..764747299 100644 --- a/doc/html/math_toolkit/double_exponential/de_tanh_sinh.html +++ b/doc/html/math_toolkit/double_exponential/de_tanh_sinh.html @@ -49,16 +49,20 @@ };

    - The tanh-sinh quadrature routine provided by boost is a rapidly convergent - numerical integration scheme for holomorphic integrands. By this we mean - that the integrand is the restriction to the real line of a complex-differentiable - function which is bounded on the interior of the unit disk |z| - < 1, so that it lies within the so-called Hardy - space. If your integrand obeys these conditions, it can be shown - that tanh-sinh integration is optimal, in the sense that it requires the - fewest function evaluations for a given accuracy of any quadrature algorithm - for a random element from the Hardy space. A basic example of how to use - the tanh-sinh quadrature is shown below: + The tanh-sinh quadrature routine provided by boost + is a rapidly convergent numerical integration scheme for holomorphic integrands. + By this we mean that the integrand is the restriction to the real line of + a complex-differentiable function which is bounded on the interior of the + unit disk |z| < 1, so that it lies within the so-called + Hardy space. + If your integrand obeys these conditions, it can be shown that tanh-sinh + integration is optimal, in the sense that it requires the fewest function + evaluations for a given accuracy of any quadrature algorithm for a random + element from the Hardy space. +

    +

    + A basic example of how to use the tanh-sinh quadrature + is shown below:

    tanh_sinh<double> integrator;
     auto f = [](double x) { return 5*x + 7; };
    @@ -68,7 +72,7 @@
     Q = integrator.integrate(f, 0.0, 1.1);
     

    - The basic idea of tanh-sinh quadrature is that a variable transformation + The basic idea of tanh-sinh quadrature is that a variable transformation can cause the endpoint derivatives to decay rapidly. When the derivatives at the endpoints decay much faster than the Bernoulli numbers grow, the Euler-Maclaurin summation formula tells us that simple trapezoidal quadrature converges faster @@ -483,11 +487,11 @@

    - Although the tanh-sinh quadrature can compute integral over infinite domains - by variable transformations, these transformations can create a very poorly - behaved integrand. For this reason, double-exponential variable transformations - have been provided that allow stable computation over infinite domains; these - being the exp-sinh and sinh-sinh quadrature. + Although the tanh-sinh quadrature can compute integral over + infinite domains by variable transformations, these transformations can create + a very poorly behaved integrand. For this reason, double-exponential variable + transformations have been provided that allow stable computation over infinite + domains; these being the exp-sinh and sinh-sinh quadrature.

    @@ -495,9 +499,10 @@ integrals

    - The tanh_sinh integrator supports integration of functions which return complex - results, for example the sine-integral Si(z) - has the integral representation: + The tanh_sinh integrator + supports integration of functions which return complex results, for example + the sine-integral Si(z) has + the integral representation:

    diff --git a/doc/html/math_toolkit/double_exponential/de_tanh_sinh_2_arg.html b/doc/html/math_toolkit/double_exponential/de_tanh_sinh_2_arg.html index 90c7638c5..9b30a2e59 100644 --- a/doc/html/math_toolkit/double_exponential/de_tanh_sinh_2_arg.html +++ b/doc/html/math_toolkit/double_exponential/de_tanh_sinh_2_arg.html @@ -80,7 +80,7 @@ double Q = integrator.integrate(f, 0.0, 1.0);

    - Not only is this form accurate to full machine precision, but it converges + Not only is this form accurate to full machine-precision, but it converges to the result faster as well.

    diff --git a/doc/html/math_toolkit/error_handling.html b/doc/html/math_toolkit/error_handling.html index f35c7c029..b19387fbb 100644 --- a/doc/html/math_toolkit/error_handling.html +++ b/doc/html/math_toolkit/error_handling.html @@ -49,7 +49,8 @@

    The default error actions are to throw an exception with an informative error - message. If you do not try to catch the exception, you will not see the message! + message. If you do not try to catch the exception, you + will not see the message!

    @@ -832,27 +833,14 @@ "Each of the functions declared above shall return a NaN (Not a Number) if any argument value is a NaN, but it shall not report a domain error. Otherwise, each of the functions declared above shall report a domain - error for just those argument values for which: -

    -
    -

    - "the function description's Returns clause explicitly specifies - a domain, and those arguments fall outside the specified domain; or -

    -

    - "the corresponding mathematical function value has a non-zero - imaginary component; or -

    -

    - "the corresponding mathematical function is not mathematically - defined. -

    -
    -

    - "Note 2: A mathematical function is mathematically defined - for a given set of argument values if it is explicitly defined for that set - of argument values or if its limiting value exists and does not depend on - the direction of approach." + error for just those argument values for which:
    the function description's + Returns clause explicitly specifies a domain, and those arguments fall outside + the specified domain; or
    the corresponding mathematical function value + has a non-zero imaginary component; or
    the corresponding mathematical + function is not mathematically defined.
    Note 2: A mathematical function + is mathematically defined for a given set of argument values if it is explicitly + defined for that set of argument values or if its limiting value exists and + does not depend on the direction of approach."

    Note that in order to support information-rich error messages when throwing diff --git a/doc/html/math_toolkit/hints.html b/doc/html/math_toolkit/hints.html index 147d24f6f..d9c117727 100644 --- a/doc/html/math_toolkit/hints.html +++ b/doc/html/math_toolkit/hints.html @@ -111,7 +111,7 @@ Warning

    - Failure to heed this will lead to incorrect, and very likely undesired, results. + Failure to heed this will lead to incorrect, and very likely undesired, results!

    diff --git a/doc/html/math_toolkit/history1.html b/doc/html/math_toolkit/history1.html index f5f267b70..80e3b3d45 100644 --- a/doc/html/math_toolkit/history1.html +++ b/doc/html/math_toolkit/history1.html @@ -28,11 +28,13 @@

    Currently open bug reports can be viewed here - on GIThub. + on GitHub.

    - All old bug reports including closed ones can be viewed on Trac here - and more recent issues on GIThub here. + All old bug reports including closed ones can be viewed on Trac here. +

    +

    + Recent issues on GitHub here.

    @@ -41,31 +43,31 @@
    • - Catmull-Rom interpolator now works in C++11 + Catmull-Rom interpolator now works in C++11.
    • - Cardinal quadratic B-spline interpolation + Cardinal quadratic B-spline interpolation.
    • - Domain of elliptic integrals extended + Domain of elliptic integrals extended.
    • - sin_pi and cos_pi performance improvements + sin_pi and cos_pi performance improvements.
    • - Forward-mode automatic differentiation + Forward-mode automatic differentiation.
    • - Vector valued barycentric rational interpolation + Vector valued barycentric rational interpolation.
    • - Ooura's method for evaluation of Fourier integrals + Ooura's method for evaluation of Fourier integrals.
    • - Multiple compatibility issues with Multiprecision fixed + Multiple compatibility issues with Multiprecision fixed.
    • - Lambert-W fixed on a rare architecture + Lambert-W fixed on a rare architecture.
    diff --git a/doc/html/math_toolkit/history2.html b/doc/html/math_toolkit/history2.html index 1626e5475..e5d2ac021 100644 --- a/doc/html/math_toolkit/history2.html +++ b/doc/html/math_toolkit/history2.html @@ -28,11 +28,13 @@

    Currently open bug reports can be viewed here - on GIThub. + on GitHub.

    - All old bug reports including closed ones can be viewed on Trac here - and more recent issues on GIThub here. + All old bug reports including closed ones can be viewed on Trac here. +

    +

    + Recent issues on GitHub here.

    @@ -41,31 +43,31 @@
    • - Catmull-Rom interpolator now works in C++11 + Catmull-Rom interpolator now works in C++11.
    • - Cardinal quadratic B-spline interpolation + Cardinal quadratic B-spline interpolation.
    • - Domain of elliptic integrals extended + Domain of elliptic integrals extended.
    • - sin_pi and cos_pi performance improvements + sin_pi and cos_pi performance improvements.
    • - Forward-mode automatic differentiation + Forward-mode automatic differentiation.
    • - Vector valued barycentric rational interpolation + Vector valued barycentric rational interpolation.
    • - Ooura's method for evaluation of Fourier integrals + Ooura's method for evaluation of Fourier integrals.
    • - Multiple compatibility issues with Multiprecision fixed + Multiple compatibility issues with Multiprecision fixed.
    • - Lambert-W fixed on a rare architecture + Lambert-W fixed on a rare architecture.
    diff --git a/doc/html/math_toolkit/internals.html b/doc/html/math_toolkit/internals.html index e375d62a7..3b8124547 100644 --- a/doc/html/math_toolkit/internals.html +++ b/doc/html/math_toolkit/internals.html @@ -29,7 +29,7 @@
    Series Evaluation
    Continued Fraction Evaluation
    -
    Tools For 3 Term Recurrence +
    Tools For 3-Term Recurrence Relations
    Tuples
    Minimax Approximations diff --git a/doc/html/math_toolkit/internals/cf.html b/doc/html/math_toolkit/internals/cf.html index 3061af7d5..8a953ade8 100644 --- a/doc/html/math_toolkit/internals/cf.html +++ b/doc/html/math_toolkit/internals/cf.html @@ -7,7 +7,7 @@ - + diff --git a/doc/html/math_toolkit/internals/error_test.html b/doc/html/math_toolkit/internals/error_test.html index a23b80164..97c38af5f 100644 --- a/doc/html/math_toolkit/internals/error_test.html +++ b/doc/html/math_toolkit/internals/error_test.html @@ -40,7 +40,7 @@ @@ -170,10 +170,11 @@ Example

    - Suppose we want to test the tgamma and lgamma functions, we can create a - two dimensional matrix of test data, each row is one test case, and contains - three elements: the input value, and the expected results for the tgamma - and lgamma functions respectively. + Suppose we want to test the tgamma + and lgamma functions, we + can create a two-dimensional matrix of test data, each row is one test case, + and contains three elements: the input value, and the expected results for + the tgamma and lgamma functions respectively.

    static const boost::array<boost::array<TestType, 3>, NumberOfTests>
        factorials = {
    diff --git a/doc/html/math_toolkit/internals/minimax.html b/doc/html/math_toolkit/internals/minimax.html
    index 8fa4e8a17..2db8e8ca9 100644
    --- a/doc/html/math_toolkit/internals/minimax.html
    +++ b/doc/html/math_toolkit/internals/minimax.html
    @@ -28,7 +28,7 @@
           and the Remez Algorithm
     
     

    - The directory libs/math/minimax contains a command line driven program for + The directory libs/math/minimax contains a command-line driven program for the generation of minimax approximations using the Remez algorithm. Both polynomial and rational approximations are supported, although the latter are tricky to converge: it is not uncommon for convergence of rational forms @@ -82,8 +82,9 @@ of the approximation: for example if you are approximating a function f(x) then it is quite common to use:

    -
    f(x) = g(x)(Y + R(x))
    -
    +

    + f(x) = g(x)(Y + R(x)) +

    where g(x) is the dominant part of f(x), Y is some constant, and R(x) is @@ -91,7 +92,7 @@ compared to |Y|.

    - In this case you would define f to return f(x)/g(x) + In this case you would define f to return f(x)/g(x) and then set the y-offset of the approximation to Y (see command line options below).

    diff --git a/doc/html/math_toolkit/internals/series_evaluation.html b/doc/html/math_toolkit/internals/series_evaluation.html index b881ae084..4abc77cc3 100644 --- a/doc/html/math_toolkit/internals/series_evaluation.html +++ b/doc/html/math_toolkit/internals/series_evaluation.html @@ -79,8 +79,9 @@

    The second argument is the precision required, summation will stop when the next term is less than tolerance times the result. The - deprecated versions of sum_series take an integer number of bits here - internally - they just convert this to a tolerance and forward the call. + deprecated versions of sum_series + take an integer number of bits here - internally they just convert this to + a tolerance and forward the call.

    The third argument max_terms sets an upper limit on diff --git a/doc/html/math_toolkit/internals/test_data.html b/doc/html/math_toolkit/internals/test_data.html index 571e95a37..ef18ff830 100644 --- a/doc/html/math_toolkit/internals/test_data.html +++ b/doc/html/math_toolkit/internals/test_data.html @@ -493,7 +493,7 @@

    So it's pretty clear that this fraction shouldn't be used for small values - of a and z. + of a and z.

    diff --git a/doc/html/math_toolkit/internals/tuples.html b/doc/html/math_toolkit/internals/tuples.html index f1ded3935..876b58f73 100644 --- a/doc/html/math_toolkit/internals/tuples.html +++ b/doc/html/math_toolkit/internals/tuples.html @@ -6,7 +6,7 @@ - + diff --git a/doc/html/math_toolkit/main_intro.html b/doc/html/math_toolkit/main_intro.html index 4e6a6b184..6c7155c0d 100644 --- a/doc/html/math_toolkit/main_intro.html +++ b/doc/html/math_toolkit/main_intro.html @@ -36,8 +36,10 @@

    Utility functions for dealing with floating-point arithmetic, includes functions - for floating point classification (fpclassify, isnan, isinf etc), sign manipulation, - rounding, comparison, and computing the distance between floating point numbers. + for floating point classification (fpclassify, + isnan, isinf + etc), sign manipulation, rounding, comparison, and computing the distance between + floating point numbers.

    @@ -45,8 +47,8 @@ Width Floating-Point Types

    - A set of typedefs similar to those provided by <cstdint> - but for floating-point types. + A set of typedefs similar to those + provided by <cstdint> but for floating-point types.

    @@ -54,8 +56,8 @@ Constants

    - A wide range of constants ranging from various multiples of π, fractions, through - to euler's constant etc. + A wide range of high-precision constants ranging from various multiples of + π, fractions, through to Euler's constant etc.

    These are of course usable from template code, or as non-templates with a simplified @@ -151,7 +153,8 @@

    A reasonably comprehensive set of routines for integration (trapezoidal, Gauss-Legendre, - Gauss-Kronrod and double-exponential) and differentiation. + Gauss-Kronrod and double-exponential) and differentiation. (See also automatic + differentiation).

    The integration routines are all usable for functions returning complex results @@ -163,7 +166,7 @@ and Octonions

    - Quaternion and Octonians as class templates similar to std::complex. + Quaternions and Octonians as class templates similar to std::complex.

    @@ -172,7 +175,7 @@

    Autodiff is a header-only C++ library that facilitates the automaticdifferentiation - (forward mode) of mathematical functions of singleand multiple variables + (forward mode) of mathematical functions of single and multiple variables.

    The header boost/math/tools/test.hpp is located under libs/math/include_private - and is not installed to the usual locations by default, you will need to + and is NOT installed to the usual locations by default; you will need to add libs/math/include_private to your compiler's include path in order to use this header.

    diff --git a/doc/html/math_toolkit/namespaces.html b/doc/html/math_toolkit/namespaces.html index ae3e99b32..8be6649f2 100644 --- a/doc/html/math_toolkit/namespaces.html +++ b/doc/html/math_toolkit/namespaces.html @@ -28,7 +28,7 @@

    All math functions and distributions are in namespace - boost::math + boost::math.

    So, for example, the Students-t distribution template in namespace @@ -64,8 +64,7 @@ min_value and epsilon are in boost::math::tools.

    - Policy and configuration information is in namespace - boost::math::policies. + Policy and configuration information is in namespace boost::math::policies.

    @@ -77,6 +76,15 @@ boost::math.

    +
    + + + + + +
    [Tip]Tip

    + Start your work from a copy of the example source code; links usually provided. +

    diff --git a/doc/html/math_toolkit/navigation.html b/doc/html/math_toolkit/navigation.html index b1e200e31..88799d994 100644 --- a/doc/html/math_toolkit/navigation.html +++ b/doc/html/math_toolkit/navigation.html @@ -27,7 +27,7 @@ Navigation

    - +

    Boost.Math documentation is provided in both HTML and PDF formats. diff --git a/doc/html/math_toolkit/oct_todo.html b/doc/html/math_toolkit/oct_todo.html index 0f6772b4b..8aca0a98c 100644 --- a/doc/html/math_toolkit/oct_todo.html +++ b/doc/html/math_toolkit/oct_todo.html @@ -31,7 +31,7 @@ Improve testing.

  • - Rewrite input operatore using Spirit (creates a dependency). + Rewrite input operators using Spirit (creates a dependency).
  • Put in place an Expression Template mechanism (perhaps borrowing from uBlas). diff --git a/doc/html/math_toolkit/oct_typedefs.html b/doc/html/math_toolkit/oct_typedefs.html index b6a85d65f..4a71c5feb 100644 --- a/doc/html/math_toolkit/oct_typedefs.html +++ b/doc/html/math_toolkit/oct_typedefs.html @@ -50,7 +50,7 @@
    typedef long double value_type;
     

    - These provide easy acces to the type the template is built upon. + These provide easy access to the type the template is built upon.

  • diff --git a/doc/html/math_toolkit/result_type.html b/doc/html/math_toolkit/result_type.html index 4261308f6..3c1d56098 100644 --- a/doc/html/math_toolkit/result_type.html +++ b/doc/html/math_toolkit/result_type.html @@ -112,7 +112,38 @@ float.

    - And for user-defined types, all of the following return an NTL::RR result: + And for user-defined types, typically Boost.Multiprecision, +

    +

    + All of the following return a boost::multiprecision::cpp_bin_quad_float + result: +

    +
    cyl_bessel_j(0, boost::multiprecision::cpp_bin_quad_float(2));
    +
    +cyl_bessel_j(boost::multiprecision::cpp_bin_quad_float(2), 3);
    +
    +cyl_bessel_j(boost::multiprecision::cpp_bin_quad_float(2), boost::multiprecision::cpp_bin_quad_float(3));
    +
    +

    + but rely on the parameters provided being exactly representable, avoiding loss + of precision from construction from double. +

    +
    + + + + + +
    [Tip]Tip

    + All new projects should use Boost.Multiprecision. +

    +

    + During development of Boost.Math, NTL + A Library for doing Number Theory was invaluable to create highly precise + tables. +

    +

    + All of the following return an NTL::RR result:

    cyl_bessel_j(0, NTL::RR(2));
     
    diff --git a/doc/html/math_toolkit/stat_tut/overview/generic.html b/doc/html/math_toolkit/stat_tut/overview/generic.html
    index 31f335080..bda1f44d3 100644
    --- a/doc/html/math_toolkit/stat_tut/overview/generic.html
    +++ b/doc/html/math_toolkit/stat_tut/overview/generic.html
    @@ -112,9 +112,10 @@
               

    For example, the binomial distribution probability distribution function - (PDF) is written as f(k| n, p) = Pr(K = k|n, p) - = probability of observing k successes out of n trials. K is the random variable, - k is the random + (PDF) is written as f(k| n, p) + = Pr(K = k|n, p) = probability of observing k successes out + of n trials. K is the random + variable, k is the random variate, the parameters are n (trials) and p (probability).

    diff --git a/doc/html/math_toolkit/stat_tut/weg/f_eg.html b/doc/html/math_toolkit/stat_tut/weg/f_eg.html index d6c02ae8b..3aa5ada26 100644 --- a/doc/html/math_toolkit/stat_tut/weg/f_eg.html +++ b/doc/html/math_toolkit/stat_tut/weg/f_eg.html @@ -80,7 +80,7 @@ two standard deviations:

    - F = s12 / s22 + F = s12 / s22

    where s1 is the standard deviation of the first sample and s2 @@ -180,12 +180,12 @@ is the standard function:

    - F(1-alpha; N1-1, N2-1) = quantile(fisher_f(N1-1, - N2-1), alpha) + F(1-alpha; N1-1, N2-1) = quantile(fisher_f(N1-1, + N2-1), alpha)

    - F(alpha; N1-1, N2-1) = quantile(complement(fisher_f(N1-1, - N2-1), alpha)) + F(alpha; N1-1, N2-1) = quantile(complement(fisher_f(N1-1, + N2-1), alpha))

    In our example program we need both upper and lower critical values for diff --git a/doc/html/math_toolkit/vector_barycentric.html b/doc/html/math_toolkit/vector_barycentric.html index 9a276a9ca..476bec94e 100644 --- a/doc/html/math_toolkit/vector_barycentric.html +++ b/doc/html/math_toolkit/vector_barycentric.html @@ -71,7 +71,7 @@

    Use of the class requires a Point-type - which has size known at compile time. These requirements are satisfied by (for + which has size known at compile-time. These requirements are satisfied by (for example) Eigen::Vector2ds and std::array<Real, N> classes. The call to the constructor computes the weights:

    diff --git a/doc/html/math_toolkit/whittaker_shannon.html b/doc/html/math_toolkit/whittaker_shannon.html index a7e106982..5d3acf310 100644 --- a/doc/html/math_toolkit/whittaker_shannon.html +++ b/doc/html/math_toolkit/whittaker_shannon.html @@ -61,10 +61,10 @@ smooth, but has linear complexity in the data, making it slow relative to compactly-supported b-splines. In addition, we cannot pass an infinite amount of data into the class, and must truncate the (perhaps) infinite sinc series to a finite number - of terms. Since the sinc function has slow 1/x decay, the truncation of the - series can incur large error. Hence this interpolator works best when operating - on samples of compactly supported functions. Here is an example of interpolating - a smooth "bump function": + of terms. Since the sinc function has slow 1/x decay, + the truncation of the series can incur large error. Hence this interpolator + works best when operating on samples of compactly-supported functions. Here + is an example of interpolating a smooth "bump function":

    auto bump = [](double x) { if (std::abs(x) >= 1) { return 0.0; } return std::exp(-1.0/(1.0-x*x)); };
     
    @@ -78,7 +78,6 @@
         v[i] = bump(t);
     }
     
    -
     auto ws = whittaker_shannon(std::move(v), t0, h);
     
     double y = ws(0.3);
    @@ -96,7 +95,7 @@
         

    The call to the constructor requires 𝑶(1) operations, simply moving data into - the class. Each call the the interpolant is 𝑶(n), where + the class. Each call to the interpolant is 𝑶(n), where n is the number of points to interpolate.

    diff --git a/doc/internals/fraction.qbk b/doc/internals/fraction.qbk index 921c81e03..b01ca8601 100644 --- a/doc/internals/fraction.qbk +++ b/doc/internals/fraction.qbk @@ -166,7 +166,7 @@ So now we can implement Q, this time using [^continued_fraction_a]: [cf_gamma_Q] -[endsect][/section:cf Continued Fraction Evaluation] +[endsect] [/section:cf Continued Fraction Evaluation] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/internals/minimax.qbk b/doc/internals/minimax.qbk index 61a14f43d..27501b7f2 100644 --- a/doc/internals/minimax.qbk +++ b/doc/internals/minimax.qbk @@ -1,6 +1,6 @@ [section:minimax Minimax Approximations and the Remez Algorithm] -The directory libs/math/minimax contains a command line driven +The directory libs/math/minimax contains a command-line driven program for the generation of minimax approximations using the Remez algorithm. Both polynomial and rational approximations are supported, although the latter are tricky to converge: it is not uncommon for @@ -12,10 +12,8 @@ is often not an easy task, and one to which many books have been devoted. To use this tool, you will need to have a reasonable grasp of what the Remez algorithm is, and the general form of the approximation you want to achieve. -Unless you already familar with the Remez method, -you should first read the [link math_toolkit.remez -brief background article explaining the principles behind the -Remez algorithm]. +Unless you already familar with the Remez method, you should first read the +[link math_toolkit.remez brief background article explaining the principles behind the Remez algorithm]. 
The program consists of two parts: @@ -41,13 +39,13 @@ Note that the function /f/ must return the rational part of the approximation: for example if you are approximating a function /f(x)/ then it is quite common to use: - f(x) = g(x)(Y + R(x)) +[expression f(x) = g(x)(Y + R(x))] where /g(x)/ is the dominant part of /f(x)/, /Y/ is some constant, and /R(x)/ is the rational approximation part, usually optimised for a low absolute error compared to |Y|. -In this case you would define /f/ to return ['f(x)/g(x)] and then set the +In this case you would define /f/ to return [role serif-italic f(x)/g(x)] and then set the y-offset of the approximation to /Y/ (see command line options below). Many other forms are possible, but in all cases the objective is to @@ -156,8 +154,7 @@ Command line options for the program are as follows: x and y offsets, and of course the coefficients of the polynomials.]] ] - -[endsect][/section:minimax Minimax Approximations and the Remez Algorithm] +[endsect] [/section:minimax Minimax Approximations and the Remez Algorithm] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/internals/recurrence.qbk b/doc/internals/recurrence.qbk index 278863b46..e8fbb1e90 100644 --- a/doc/internals/recurrence.qbk +++ b/doc/internals/recurrence.qbk @@ -1,4 +1,4 @@ -[section:recurrence Tools For 3 Term Recurrence Relations] +[section:recurrence Tools For 3-Term Recurrence Relations] [h4 Synopsis] @@ -33,13 +33,13 @@ All of the tools in this header require a description of the recurrence relation: this takes the form of a functor that returns a tuple containing the 3 coefficients, specifically, given a recurrence relation: -[/\Large $$ a_nF_{n-1} + b_nF_n + c_nF_{n+1} = 0 $$][$../equations/three_term_recurrence.svg] +[/\Large $$ a_nF_{n-1} + b_nF_n + c_nF_{n+1} = 0 $$] [equation three_term_recurrence.svg] And a functor F then the expression: F(n); -Returns a tuple containing ['{ a[sub n], b[sub n], c[sub n] }]. 
+Returns a tuple containing [role serif_italic { a[sub n], b[sub n], c[sub n] }]. For example, the recurrence relation for the Bessel J and Y functions when written in this form is: @@ -154,7 +154,7 @@ only one value is provided, then the second is computed by using the recurrence Note that /incrementing/ this iterator moves the value returned successively to F[sub n-1], F[sub n-2] etc. -[endsect] +[endsect] [/section:recurrence Tools For 3-Term Recurrence Relations] [/ Copyright 2019 John Maddock. diff --git a/doc/internals/relative_error.qbk b/doc/internals/relative_error.qbk index b5739af80..83976fba5 100644 --- a/doc/internals/relative_error.qbk +++ b/doc/internals/relative_error.qbk @@ -8,7 +8,7 @@ [important The header `boost/math/tools/test.hpp` is located under `libs/math/include_private` -and is not installed to the usual locations by default, you will need to add `libs/math/include_private` +and is NOT installed to the usual locations by default; you will need to add `libs/math/include_private` to your compiler's include path in order to use this header.] template @@ -82,10 +82,10 @@ you use Boost.Test). This is mainly a debugging/development aid [h4 Example] -Suppose we want to test the tgamma and lgamma functions, we can create a -two dimensional matrix of test data, each row is one test case, and contains -three elements: the input value, and the expected results for the tgamma and -lgamma functions respectively. +Suppose we want to test the `tgamma` and `lgamma` functions, we can create a +two-dimensional matrix of test data, each row is one test case, and contains +three elements: the input value, and the expected results for the `tgamma` and +`lgamma` functions respectively. static const boost::array, NumberOfTests> factorials = { @@ -129,7 +129,7 @@ Now we can invoke the test function to test tgamma: // etc ... 
// -[endsect][/section:error_test Relative Error and Testing] +[endsect] [/section:error_test Relative Error and Testing] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/internals/series.qbk b/doc/internals/series.qbk index 23905c66f..e9e6253cf 100644 --- a/doc/internals/series.qbk +++ b/doc/internals/series.qbk @@ -48,7 +48,7 @@ the series being summed. The second argument is the precision required, summation will stop when the next term is less than -/tolerance/ times the result. The deprecated versions of sum_series +/tolerance/ times the result. The deprecated versions of `sum_series` take an integer number of bits here - internally they just convert this to a tolerance and forward the call. @@ -108,7 +108,7 @@ definition for epsilon, and within the functor, mixed complex and integer arithm Of course with a few traits classes and a bit of meta-programming we could fold these two implementations into one, but that's beyond the scope of these examples. -[endsect][/section Series Evaluation] +[endsect] [/section Series Evaluation] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/internals/test_data.qbk b/doc/internals/test_data.qbk index 48461635f..99fa881a1 100644 --- a/doc/internals/test_data.qbk +++ b/doc/internals/test_data.qbk @@ -394,8 +394,7 @@ This time there's no need to plot a graph, the first few rows are: 4.0096, 2.39712, 21 5.0095, 0.233263, 16 -So it's pretty clear that this fraction shouldn't be used for small values -of a and z. +So it's pretty clear that this fraction shouldn't be used for small values of /a/ and /z/. [h4 reference] @@ -436,7 +435,7 @@ before passing it to the function under test, usually the functor will then return both the transformed input and the result in a tuple, so there's no need for the original pseudo-parameter to be included in program output. 
-[endsect][/section:test_data Graphing, Profiling, and Generating Test Data for Special Functions] +[endsect] [/section:test_data Graphing, Profiling, and Generating Test Data for Special Functions] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/interpolators/barycentric_rational_interpolation.qbk b/doc/interpolators/barycentric_rational_interpolation.qbk index 14f5213a4..217baca6a 100644 --- a/doc/interpolators/barycentric_rational_interpolation.qbk +++ b/doc/interpolators/barycentric_rational_interpolation.qbk @@ -43,7 +43,7 @@ namespace boost{ namespace math{ Barycentric rational interpolation is a high-accuracy interpolation method for non-uniformly spaced samples. It requires [bigo](/N/) time for construction, and [bigo](/N/) time for each evaluation. Linear time evaluation is not optimal; for instance the cubic B-spline can be evaluated in constant time. -However, using the cubic B-spline requires uniformly spaced samples, which are not always available. +However, using the cubic B-spline requires uniformly-spaced samples, which are not always available. Use of the class requires a vector of independent variables `x[0]`, `x[1]`, .... `x[n-1]` where `x[i+1] > x[i]`, and a vector of dependent variables `y[0]`, `y[1]`, ... , `y[n-1]`. @@ -80,13 +80,18 @@ Be aware that once you return your data, the interpolant is *dead*. Although this algorithm is robust, it can surprise you. The main way this occurs is if the sample spacing at the endpoints is much larger than the spacing in the center. -This is to be expected; all interpolants perform better in the opposite regime, where samples are clustered at the endpoints and somewhat uniformly spaced throughout the center. +This is to be expected; all interpolants perform better in the opposite regime, +where samples are clustered at the endpoints and somewhat uniformly spaced throughout the center. 
+ +A desirable property of any interpolator /f/ is that for all +/x/[sub min] [le] /x/ [le] /x/[sub max], also /y/[sub min] [le] /f/(/x/) [le] /y/[sub max]. -A desirable property of any interpolator /f/ is that for all /x/[sub min] \u2264 /x/ \u2264 /x/[sub max], /y/[sub min] \u2264 /f/(/x/) \u2264 /y/[sub max]. /This property does not hold for the barycentric rational interpolator./ -However, unless you deliberately try to antagonize this interpolator (by, for instance, placing the final value far from all the rest), you will probably not fall victim to this problem. +However, unless you deliberately try to antagonize this interpolator (by, for instance, placing the final value far from all the rest), +you will probably not fall victim to this problem. -The reference used for implementation of this algorithm is [@https://web.archive.org/save/_embed/http://www.mn.uio.no/math/english/people/aca/michaelf/papers/rational.pdf Barycentric rational interpolation with no poles and a high rate of interpolation], and the evaluation of the derivative is given by [@http://www.ams.org/journals/mcom/1986-47-175/S0025-5718-1986-0842136-8/S0025-5718-1986-0842136-8.pdf Some New Aspects of Rational Interpolation]. +The reference used for implementation of this algorithm is +[@https://web.archive.org/save/_embed/http://www.mn.uio.no/math/english/people/aca/michaelf/papers/rational.pdf Barycentric rational interpolation with no poles and a high rate of interpolation], and the evaluation of the derivative is given by [@http://www.ams.org/journals/mcom/1986-47-175/S0025-5718-1986-0842136-8/S0025-5718-1986-0842136-8.pdf Some New Aspects of Rational Interpolation]. 
[heading Examples] @@ -99,5 +104,4 @@ The reference used for implementation of this algorithm is [@https://web.archive [barycentric_rational_example2_out] - [endsect] [/section:barycentric Barycentric Rational Interpolation] diff --git a/doc/interpolators/cardinal_quadratic_b_spline.qbk b/doc/interpolators/cardinal_quadratic_b_spline.qbk index 219e6d208..c1162b584 100644 --- a/doc/interpolators/cardinal_quadratic_b_spline.qbk +++ b/doc/interpolators/cardinal_quadratic_b_spline.qbk @@ -18,7 +18,7 @@ LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) class cardinal_quadratic_b_spline { public: - // If you don't know the value of the derivative at the endpoints, leave them as nans and the routine will estimate them. + // If you don't know the value of the derivative at the endpoints, leave them as NaNs and the routine will estimate them. // y[0] = y(a), y[n - 1] = y(b), step_size = (b - a)/(n -1). cardinal_quadratic_b_spline(const Real* const y, size_t n, @@ -48,9 +48,9 @@ rather than three times as is done with the cubic B-spline. Since the basis functions are less smooth than the cubic B-spline, you will nearly always wish to use the cubic B-spline interpolator rather than this. However, this interpolator is occasionally useful for approximating functions of reduced smoothness, -as hence finds a uses internally in the Boost.Math library. +as hence finds use internally in the Boost.Math library. -It is reasonable to test this interpolator against the cubic b-spline interpolator when you are approximating functions which are two or three times continuously differentiable, but not three or four times differentiable. +It is reasonable to test this interpolator against the cubic b-spline interpolator when you are approximating functions +which are two or three times continuously differentiable, but not three or four times differentiable. 
-[endsect] -[/section:cardinal_quadratic_b] +[endsect] [/section:cardinal_quadratic_b] diff --git a/doc/interpolators/catmull_rom.qbk b/doc/interpolators/catmull_rom.qbk index 90f377af9..5208c303b 100644 --- a/doc/interpolators/catmull_rom.qbk +++ b/doc/interpolators/catmull_rom.qbk @@ -45,10 +45,10 @@ Catmull-Rom splines enjoy the following properties: * Local support of the basis functions: This gives stability and fast evaluation. * /C/[super 2]-smoothness * Interpolation of control points-this means the curve passes through the control points. -Many curves (such as Bezier) are /approximating/-they do not pass through their control points. +Many curves (such as B[eacute]zier) are /approximating/ - they do not pass through their control points. This makes them more difficult to use than interpolating splines. -The `catmull_rom` class provided by Boost creates a cubic Catmull-Rom spline from an array of points in any dimension. +The `catmull_rom` class provided by Boost.Math creates a cubic Catmull-Rom spline from an array of points in any dimension. Since there are numerous ways to represent a point in /n/-dimensional space, the class attempts to be flexible by templating on the point type. The requirements on the point type are discussing in more detail below, but roughly, it must have a dereference operator defined (e.g., `p[0]` is not a syntax error), @@ -215,7 +215,7 @@ These conditions are satisfied by both `std::array` and `std::vector`, but it ma The Catmull-Rom interpolator requires memory for three more points than is provided by the user. This causes the class to call a `resize()` on the input vector. If `v.capacity() >= v.size() + 3`, then no problems arise; there are no reallocs, and in practice this condition is almost always satisfied. -However, if `v.capacity() < v.size() + 3`, the realloc causes a performance penalty of roughly 20%. +However, if `v.capacity() < v.size() + 3`, the `realloc` causes a performance penalty of roughly 20%. 
[heading Generic Containers] @@ -246,5 +246,4 @@ For example, here is how to store the points in a Boost.uBLAS vector: * Cem Yuksel, Scott Schaefer, and John Keyser, ['Parameterization and applications of Catmull–Rom curves], Computer-Aided Design 43 (2011) 747–755. * Phillip J. Barry and Ronald N. Goldman, ['A Recursive Evaluation Algorithm for a Class of Catmull-Rom Splines], Computer Graphics, Volume 22, Number 4, August 1988 -[endsect] -[/section:catmull_rom Catmull-Rom Splines] +[endsect] [/section:catmull_rom Catmull-Rom Splines] diff --git a/doc/interpolators/cubic_b_spline.qbk b/doc/interpolators/cubic_b_spline.qbk index de1475ff8..759fdfd95 100644 --- a/doc/interpolators/cubic_b_spline.qbk +++ b/doc/interpolators/cubic_b_spline.qbk @@ -43,7 +43,8 @@ This is to be contrasted to traditional cubic spline interpolation which is ill- There are many use cases for interpolating a function at equally spaced points. One particularly important example is solving ODEs whose coefficients depend on data determined from experiment or numerical simulation. -Since most ODE steppers are adaptive, they must be able to sample the coefficients at arbitrary points; not just at the points we know the values of our function. +Since most ODE steppers are adaptive, they must be able to sample the coefficients at arbitrary points; +not just at the points we know the values of our function. The first two arguments to the constructor are either: @@ -52,8 +53,8 @@ The first two arguments to the constructor are either: These are then followed by: -* The start of the functions domain -* The step size +* The start of the functions domain, +* The step size. Optionally, you may provide two additional arguments to the constructor, namely the derivative of the function at the left endpoint, and the derivative at the right endpoint. If you do not provide these arguments, they will be estimated using one-sided finite-difference formulas. 
@@ -64,11 +65,11 @@ An example of a valid call to the constructor is double h = 0.01; boost::math::cubic_b_spline spline(f.begin(), f.end(), t0, h); -The endpoints are estimated using a one-sided finite-difference formula. If you know the derivative at the endpoint, you may pass it to the constructor via +The endpoints are estimated using a one-sided finite-difference formula. +If you know the derivative at the endpoint, you may pass it to the constructor via boost::math::cubic_b_spline spline(f.begin(), f.end(), t0, h, a_prime, b_prime); - To evaluate the interpolant at a point, we simply use double y = spline(x); @@ -77,7 +78,8 @@ and to evaluate the derivative of the interpolant we use double yp = spline.prime(x); -Be aware that the accuracy guarantees on the derivative of the spline are an order lower than the guarantees on the original function, see [@http://www.springer.com/us/book/9780387984087 Numerical Analysis, Graduate Texts in Mathematics, 181, Rainer Kress] for details. +Be aware that the accuracy guarantees on the derivative of the spline are an order lower than the guarantees on the original function, +see [@http://www.springer.com/us/book/9780387984087 Numerical Analysis, Graduate Texts in Mathematics, 181, Rainer Kress] for details. Finally, note that this is an interpolator, not an extrapolator. Therefore, you should strenuously avoid evaluating the spline outside the endpoints. @@ -99,7 +101,7 @@ Let /h/ be the stepsize. If /f/ is four-times continuously differentiable, then [heading Testing] -Since the interpolant obeys ['s(x[sub j]) = f(x[sub j])] at all interpolation points, +Since the interpolant obeys [role serif_italic s(x[sub j]) = f(x[sub j])] at all interpolation points, the tests generate random data and evaluate the interpolant at the interpolation points, validating that equality with the data holds. 
diff --git a/doc/interpolators/vector_barycentric_rational.qbk b/doc/interpolators/vector_barycentric_rational.qbk index 335ecf372..a0bb4d717 100644 --- a/doc/interpolators/vector_barycentric_rational.qbk +++ b/doc/interpolators/vector_barycentric_rational.qbk @@ -44,7 +44,7 @@ public: The /n/ dimensional vector-valued barycentric rational interpolator is exactly the same as /n/ scalar-valued barycentric rational interpolators. This is provided primarily for convenience and a slight improvement in efficiency over using /n/ different rational interpolators and combining their results. -Use of the class requires a `Point`-type which has size known at compile time. +Use of the class requires a `Point`-type which has size known at compile-time. These requirements are satisfied by (for example) `Eigen::Vector2d`s and `std::array` classes. The call to the constructor computes the weights: diff --git a/doc/interpolators/whittaker_shannon.qbk b/doc/interpolators/whittaker_shannon.qbk index 701651f53..6ae586cf6 100644 --- a/doc/interpolators/whittaker_shannon.qbk +++ b/doc/interpolators/whittaker_shannon.qbk @@ -34,10 +34,12 @@ LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) [heading Whittaker-Shannon Interpolation] The Whittaker-Shannon interpolator takes equispaced data and interpolates between them via a sum of sinc functions. -This interpolation is stable and infinitely smooth, but has linear complexity in the data, making it slow relative to compactly-supported b-splines. -In addition, we cannot pass an infinite amount of data into the class, and must truncate the (perhaps) infinite sinc series to a finite number of terms. -Since the sinc function has slow 1/x decay, the truncation of the series can incur large error. -Hence this interpolator works best when operating on samples of compactly supported functions. 
+This interpolation is stable and infinitely smooth, but has linear complexity in the data, +making it slow relative to compactly-supported b-splines. +In addition, we cannot pass an infinite amount of data into the class, +and must truncate the (perhaps) infinite sinc series to a finite number of terms. +Since the sinc function has slow /1/x/ decay, the truncation of the series can incur large error. +Hence this interpolator works best when operating on samples of compactly-supported functions. Here is an example of interpolating a smooth "bump function": auto bump = [](double x) { if (std::abs(x) >= 1) { return 0.0; } return std::exp(-1.0/(1.0-x*x)); }; @@ -52,7 +54,6 @@ Here is an example of interpolating a smooth "bump function": v[i] = bump(t); } - auto ws = whittaker_shannon(std::move(v), t0, h); double y = ws(0.3); @@ -64,6 +65,6 @@ The derivative of the interpolant can also be evaluated, but the accuracy is not [heading Complexity and Performance] The call to the constructor requires [bigo](1) operations, simply moving data into the class. -Each call the the interpolant is [bigo](/n/), where /n/ is the number of points to interpolate. +Each call to the interpolant is [bigo](/n/), where /n/ is the number of points to interpolate. [endsect] [/section:whittaker_shannon] diff --git a/doc/math.css b/doc/math.css index 94f4d6bdf..8e5e67eb9 100644 --- a/doc/math.css +++ b/doc/math.css @@ -1,16 +1,21 @@ @import url('../../../../doc/src/boostbook.css'); +/* Contains the basic settings for BoostBook and used by Quickbook to docbook conversion. */ /*============================================================================= -Copyright (c) 2004 Joel de Guzman +Copyright (c) 2004 Joel de Guzman http://spirit.sourceforge.net/ Copyright (c) 2014 John Maddock -http://spirit.sourceforge.net/ - Copyright 2013 Niall Douglas additions for colors and alignment. Copyright 2013 Paul A. Bristow additions for more colors and alignments. +Copyright 2019 Paul A. 
Bristow additions for more control of font etc. Distributed under the Boost Software License, Version 1.0. (See accompany- ing file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -=============================================================================*/ + + This Cascading Style Sheet is used to override and add to the standard Boost + CSS BoostBook for a particular library, for example Boost.Math and Boost.Multiprecision. + + Visual Studio is recommended for editing this file + because it checks syntax, does layout and provides help on options. */ /*============================================================================= Program listings @@ -34,9 +39,9 @@ Program listings { font-size: 10pt; display: block; - margin: 1pc 4% 0pc 4%; + /* was margin: 1pc 4% 0pc 4%; */ + margin: 1pc 2% 0pc 2%; padding: 0.5pc 0.5pc 0.5pc 0.5pc; - } @media screen { @@ -128,6 +133,21 @@ span.gold { color: gold; } span.silver { color: silver; } /* lighter gray */ span.gray { color: #808080; } /* light gray */ +/* role for inline Unicode mathematical equations, + making font an italic (as is conventional for equations) + and a serif version of font (to match those generated using .mml to SVG or PNG) + and a little bigger because the serif font appears smaller than the default sans serif fonts. + Used, for example: [role serif_italic This is in serif font and italic]. + Used in turn by template for inline expressions to match equations as SVG or PNG images. + +*/ +span.serif_italic { + font-family: serif; + font-style: italic; + font-size: 115%; + font-stretch: expanded; +} + /* Custom indent of paragraphs to make equations look nicer. https://www.w3schools.com/tags/tag_blockquote.asp says "Most browsers will display the
    element with left and right margin 40px values: " diff --git a/doc/math.qbk b/doc/math.qbk index 05efd4315..9a3f9635b 100644 --- a/doc/math.qbk +++ b/doc/math.qbk @@ -33,7 +33,7 @@ [/Used thus [equation ellint6] ] -[/insert Graph as an image, previous generated with an external tool like SVG_plot.] +[/insert Graph as an SVG or PNG image, previously generated with an external tool like SVG_plot.] [template graph[name] [: @@ -48,6 +48,14 @@ ] ] +[/insert Indented one-line expression italic and serif font probably using Unicode symbols for Greek and symbols.] +[/Example: [expression [sub 1]F[sub 0](a, z) = (1-z)[super -a]]] +[template expression[equation] +[: +[role serif_italic [equation]] +] +] + [import ../../../tools/auto_index/include/auto_index_helpers.qbk] [/ Must be first included file!] @@ -465,7 +473,9 @@ and use the function's name as the link text.] [def __rational_function [@https://en.wikipedia.org/wiki/Rational_function rational function]] [def __remez [@http://en.wikipedia.org/wiki/Remez_algorithm Remez algorithm]] [def __ULP [@https://en.wikipedia.org/wiki/Unit_in_the_last_place Unit in the Last Place]] - +[def __github [@https://github.com GitHub]] +[def __github_boost [@https://github.com/boostorg Boost GitHub]] +[def __github_math [@https://github.com/boostorg/math Boost.Math GitHub]] [/ Some composite templates] [template super[x]''''''[x]''''''] [template sub[x]''''''[x]''''''] diff --git a/doc/octonion/math-octonion.qbk b/doc/octonion/math-octonion.qbk index 40a238b9c..46d59562f 100644 --- a/doc/octonion/math-octonion.qbk +++ b/doc/octonion/math-octonion.qbk @@ -1,4 +1,4 @@ - +[/macro definitions specific to octonions] [def __R ['[*R]]] [def __C ['[*C]]] [def __H ['[*H]]] @@ -33,8 +33,7 @@ A way of remembering things is by using the following multiplication table: [$../octonion/graphics/octonion_blurb17.jpeg] Octonions (and their kin) are described in far more details in this other -[@../quaternion/TQE.pdf document] -(with [@../quaternion/TQE_EA.pdf 
errata and addenda]). +[@../quaternion/TQE.pdf document] (with [@../quaternion/TQE_EA.pdf errata and addenda]). Some traditional constructs, such as the exponential, carry over without too much change into the realms of octonions, but other, such as taking a square root, @@ -42,7 +41,7 @@ do not (the fact that the exponential has a closed form is a result of the author, but the fact that the exponential exists at all for octonions is known since quite a long time ago). -[endsect] +[endsect] [/section:oct_overview Overview] [section:oct_header Header File] @@ -148,7 +147,7 @@ The interface and implementation are both supplied by the header file } } // namespaces -[endsect] +[endsect] [/section:oct_header Header File] [section:octonion Template Class octonion] @@ -220,7 +219,7 @@ The interface and implementation are both supplied by the header file } } // namespaces -[endsect] +[endsect] [/section:octonion Template Class octonion] [section:oct_specialization Octonion Specializations] @@ -424,7 +423,7 @@ The interface and implementation are both supplied by the header file } } // namespaces -[endsect] +[endsect] [/section:oct_specialization Octonion Specializations] [section:oct_typedefs Octonion Member Typedefs] @@ -446,9 +445,9 @@ Long double specialization version: typedef long double value_type; -These provide easy acces to the type the template is built upon. +These provide easy access to the type the template is built upon. -[endsect] +[endsect] [/section:oct_typedefs Octonion Member Typedefs] [section:oct_mem_fun Octonion Member Functions] @@ -622,7 +621,7 @@ insure exception safety. For the unspecialized form, the base type's assignment operators must not throw. As for the multiplication, remember to group any two factors using parenthesis. 
-[endsect] +[endsect] [/section:oct_mem_fun Octonion Member Functions] [section:oct_non_mem Octonion Non-Member Operators] @@ -755,7 +754,7 @@ Inserts the octonion `o` onto the stream `os` as if it were implemented as follo return os << s.str(); } -[endsect] +[endsect] [/section:oct_non_mem Octonion Non-Member Operators] [section:oct_value_ops Octonion Value Operations] @@ -802,7 +801,7 @@ be confusing, as most people associate it with the Euclidian norm objects known as) octonions, the Euclidian norm (also known as magnitude) is the square root of the Cayley norm. -[endsect] +[endsect] [/section:oct_value_ops Octonion Value Operations] [section:oct_create Octonion Creation Functions] @@ -838,7 +837,7 @@ In this version of our implementation of octonions, there is no analogue of the complex value operation arg as the situation is somewhat more complicated. -[endsect] +[endsect] [/section:oct_create Octonion Creation Functions] [section:oct_trans Octonions Transcendentals] @@ -939,7 +938,7 @@ you may in addition define the symbol BOOST_INTERACTIVE_TEST_INPUT_ITERATOR to interactively test the input operator with input of your choice from the standard input (instead of hard-coding it in the test). -[endsect] +[endsect] [/section:oct_trans Octonions Transcendentals] [section:acknowledgements Acknowledgements] @@ -949,7 +948,7 @@ Jens Maurer has helped with portability and standard adherence, and was the Review Manager for this library. More acknowledgements in the History section. Thank you to all who contributed to the discussion about this library. -[endsect] +[endsect] [/section:acknowledgements Acknowledgements] [section:oct_history History] @@ -970,15 +969,15 @@ History section. Thank you to all who contributed to the discussion about this l * 1.1.0 - 23/05/2000: changed sinc into sinc_pi; added sin, cos, sinh, cosh. * 1.0.0 - 10/08/1999: first public version. -[endsect] +[endsect] [/section:oct_history History] [section:oct_todo To Do] * Improve testing. 
-* Rewrite input operatore using Spirit (creates a dependency). +* Rewrite input operators using Spirit (creates a dependency). * Put in place an Expression Template mechanism (perhaps borrowing from uBlas). -[endsect] +[endsect] [/section:oct_todo To Do] [endmathpart] diff --git a/doc/overview/building.qbk b/doc/overview/building.qbk index 05618d36a..30a121733 100644 --- a/doc/overview/building.qbk +++ b/doc/overview/building.qbk @@ -16,7 +16,7 @@ the TR1 components as opposed to the header only ones. The ['only] time you ['need] to build the library is if you want to use the `extern "C"` functions declared in ``. To build this -using Boost.Build, from a commandline boost-root directory issue a command like: +using Boost.Build, from a command-line boost-root directory issue a command like: bjam toolset=gcc --with-math install @@ -51,7 +51,7 @@ BOOST_MATH_NO_LIB or BOOST_ALL_NO_LIB at project level Optionally the sources in `libs/math/src/tr1` have support for using `libs/math/src/tr1/pch.hpp` as a precompiled header ['if your compiler supports precompiled headers.] Note that normally -this header is a do-nothing include: to activate the header so that +this header is a do-nothing `#include`: to activate the header so that it #includes everything required by all the sources you will need to define BOOST_BUILD_PCH_ENABLED on the command line, both when building the pre-compiled header and when building the sources. 
Boost.Build @@ -77,26 +77,28 @@ You will also need to build and link to the Boost.Regex library for many of the tests: this can built from the command line by following the [@http://www.boost.org/doc/libs/release/more/getting_started/index.html -getting started guide], using a command such as: +getting started guide], using commands such as: bjam toolset=gcc --with-regex install - +or + bjam toolset=clang --with-regex install +or + bjam toolset=gcc,clang --with-regex install or - bjam toolset=msvc --with-regex --build-type=complete stage depending on whether you are on Linux or Windows. Many of the tests have optional precompiled header support using the header `libs/math/test/pch.hpp`. -Note that normally -this header is a do-nothing include: to activate the header so that -it #includes everything required by all the sources you will need to +Note that normally this header is a do-nothing include: +to activate the header so that +it `#include`s everything required by all the sources you will need to define BOOST_BUILD_PCH_ENABLED on the command line, both when building the pre-compiled header and when building the sources. Boost.Build will do this automatically when appropriate. -[endsect] +[endsect] [/section:building If and How to Build a Boost.Math Library, and its Examples and Tests] [/ building.qbk Copyright 2006, 2007, 2008, 2010 John Maddock and Paul A. Bristow. diff --git a/doc/overview/common_overviews.qbk b/doc/overview/common_overviews.qbk index 857171bbf..c69fa6697 100644 --- a/doc/overview/common_overviews.qbk +++ b/doc/overview/common_overviews.qbk @@ -1,4 +1,4 @@ - +[/These are templates used in many other Quickbook files.] [template policy_overview[] @@ -48,7 +48,8 @@ by passing a specific policy to a special function], or to a [link math_toolkit.pol_tutorial.ad_hoc_dist_policies statistical distribution]. 
-] +] [/template policy_overview] + [template performance_overview[] @@ -85,7 +86,7 @@ information on the performance of this library, what you can do to fine tune it, and how this library compares to some other open source alternatives. -] +] [/template performance_overview] [template compilers_overview[] @@ -220,7 +221,7 @@ for some functions). You will need to cast an eye over the output from the failing tests and make a judgement as to whether the error rates are acceptable or not. -] +] [/template compilers_overview] [/ common_overviews.qbk Copyright 2007, 2012, 2014 John Maddock and Paul A. Bristow. diff --git a/doc/overview/contact_info.qbk b/doc/overview/contact_info.qbk index ba8c40e18..2cd4dd8fe 100644 --- a/doc/overview/contact_info.qbk +++ b/doc/overview/contact_info.qbk @@ -1,20 +1,25 @@ - [section:contact Contact Info and Support] -The main support for this library is via the Boost mailing lists: +The main place to see and raise issues is now at [@https://github.com/boostorg/math/ GitHub]. +Currently open bug reports can be viewed [@https://github.com/boostorg/math/issues here]. + +All old bug reports, including closed ones, can be viewed on Trac (now read-only) +[@https://svn.boost.org/trac/boost/query?status=assigned&status=closed&status=new&status=reopened&component=math&col=id&col=summary&col=status&col=type&col=milestone&col=component&order=priority here] +and more recent issues on GitHub [@https://github.com/boostorg/math/issues?utf8=%E2%9C%93&q=is%3Aissue here]. + +The other places for discussion about this library are via the Boost mailing lists: * Use the [@http://www.boost.org/more/mailing_lists.htm#users boost-user list] for general support questions. * Use the [@http://www.boost.org/more/mailing_lists.htm#main boost-developer list] -for discussion about implementation -and or submission of extensions. +for discussion about implementation and/or submission of extensions. 
You can also find JM at john - at - johnmaddock.co.uk and PAB at pbristow - at - hetp.u-net.com. -[endsect] +[endsect] [/section:contact Contact Info and Support] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/overview/error_handling.qbk b/doc/overview/error_handling.qbk index 07eeffa36..14bc1627e 100644 --- a/doc/overview/error_handling.qbk +++ b/doc/overview/error_handling.qbk @@ -10,7 +10,7 @@ Handling of errors by this library is split into two orthogonal parts: * What should be done when the error is raised? [warning The default error actions are to throw an exception with an informative error message. -If you do not try to catch the exception, you will not see the message!] +[role red If you do not try to catch the exception, you will not see the message!]] The kinds of errors that can be raised are: @@ -199,21 +199,17 @@ This behaviour is chosen to assist compatibility with the behaviour of and with the [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1836.pdf Draft Technical Report on C++ Library Extensions, 2005-06-24, section 5.2.1, paragraph 6]: -[:['"Each of the functions declared above shall return a NaN (Not a Number) +[:['\"Each of the functions declared above shall return a NaN (Not a Number) if any argument value is a NaN, but it shall not report a domain error. 
Otherwise, each of the functions declared above shall report a domain error -for just those argument values for which:]] - -[:['"the function description's Returns clause explicitly specifies a domain, and those arguments fall outside the specified domain; or] - -['"the corresponding mathematical function value has a non-zero imaginary component; or] - -['"the corresponding mathematical function is not mathematically defined.]] - -[:['"Note 2: A mathematical function is mathematically defined +for just those argument values for which:[br] +the function description's Returns clause explicitly specifies a domain, and those arguments fall outside the specified domain; or [br] +the corresponding mathematical function value has a non-zero imaginary component; or [br] +the corresponding mathematical function is not mathematically defined. [br] +Note 2: A mathematical function is mathematically defined for a given set of argument values if it is explicitly defined for that set of argument values or -if its limiting value exists and does not depend on the direction of approach."]] +if its limiting value exists and does not depend on the direction of approach.\"]] Note that in order to support information-rich error messages when throwing exceptions, `Message` must contain @@ -392,7 +388,7 @@ Is used to perform these conversions, and will call the error handlers listed above on [link math_toolkit.error_handling.overflow_error overflow], [link math_toolkit.error_handling.underflow_error underflow] or [link math_toolkit.error_handling.denorm_error denormalisation]. -[endsect][/section:error_handling Error Handling] +[endsect] [/section:error_handling Error Handling] [/ Copyright 2006 - 2012 John Maddock and Paul A. Bristow. 
diff --git a/doc/overview/overview.qbk b/doc/overview/overview.qbk index 25c262df4..33a37c154 100644 --- a/doc/overview/overview.qbk +++ b/doc/overview/overview.qbk @@ -5,16 +5,16 @@ This library is divided into several interconnected parts: [h4 Floating Point Utilities] Utility functions for dealing with floating-point arithmetic, includes functions -for floating point classification (fpclassify, isnan, isinf etc), sign manipulation, +for floating point classification (`fpclassify`, `isnan`, `isinf` etc), sign manipulation, rounding, comparison, and computing the distance between floating point numbers. [h4 Specific Width Floating-Point Types] -A set of typedefs similar to those provided by `` but for floating-point types. +A set of `typedef`s similar to those provided by `` but for floating-point types. [h4 Mathematical Constants] -A wide range of constants ranging from various multiples of [pi], fractions, through to euler's constant etc. +A wide range of high-precision constants ranging from various multiples of [pi], fractions, through to Euler's constant etc. These are of course usable from template code, or as non-templates with a simplified interface if that is more appropriate. @@ -81,17 +81,19 @@ Function interpolation via Barycentric or cubic B_spline approximations. Smoothi [h4 Numerical Integration (Quadrature) and Differentiation] -A reasonably comprehensive set of routines for integration (trapezoidal, Gauss-Legendre, Gauss-Kronrod and double-exponential) and differentiation. +A reasonably comprehensive set of routines for integration (trapezoidal, Gauss-Legendre, Gauss-Kronrod and double-exponential) +and differentiation. (See also automatic differentiation). The integration routines are all usable for functions returning complex results - and as a result for contour integrals as well. [h4 Quaternions and Octonions] -Quaternion and Octonians as class templates similar to `std::complex`. 
+Quaternions and Octonions as class templates similar to `std::complex`. [h4 Automatic Differentiation] -Autodiff is a header-only C++ library that facilitates the automaticdifferentiation (forward mode) of mathematical functions of singleand multiple variables +Autodiff is a header-only C++ library that facilitates the automatic differentiation (forward mode) +of mathematical functions of single and multiple variables. [endsect] [/section:main_intro About the Math Toolkit] diff --git a/doc/overview/result_type_calc.qbk b/doc/overview/result_type_calc.qbk index da50d0eda..9a98027df 100644 --- a/doc/overview/result_type_calc.qbk +++ b/doc/overview/result_type_calc.qbk @@ -1,4 +1,3 @@ - [section:result_type Calculation of the Type of the Result] The functions in this library are all overloaded to accept @@ -58,7 +57,23 @@ returns a `float`, since the first argument is not a template argument and so doesn't effect the result: without this rule functions that take explicitly integer arguments could never return `float`. -And for user-defined types, all of the following return an `NTL::RR` result: +And for user-defined types, typically __multiprecision: + +All of the following return a `boost::multiprecision::cpp_bin_float_quad` result: + + cyl_bessel_j(0, boost::multiprecision::cpp_bin_float_quad(2)); + + cyl_bessel_j(boost::multiprecision::cpp_bin_float_quad(2), 3); + + cyl_bessel_j(boost::multiprecision::cpp_bin_float_quad(2), boost::multiprecision::cpp_bin_float_quad(3)); + +but rely on the parameters provided being exactly representable, avoiding loss of precision from construction from `double`. + +[tip All new projects should use Boost.Multiprecision.] + +During development of Boost.Math, __NTL was invaluable to create highly precise tables. + +All of the following return an `NTL::RR` result: cyl_bessel_j(0, NTL::RR(2)); @@ -70,12 +85,13 @@ In the last case, `quad_float` is convertible to `RR`, but not vice-versa, so the result will be an `NTL::RR`.
Note that this assumes that you are using a [link math_toolkit.high_precision.use_ntl patched NTL library]. + These rules are chosen to be compatible with the behaviour of ['ISO/IEC 9899:1999 Programming languages - C] and with the [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1836.pdf Draft Technical Report on C++ Library Extensions, 2005-06-24, section 5.2.1, paragraph 5]. -[endsect] +[endsect] [/section:result_type Calculation of the Type of the Result] [/ Copyright 2006, 2012 John Maddock and Paul A. Bristow. diff --git a/doc/overview/roadmap.qbk b/doc/overview/roadmap.qbk index 833feb7d3..8d123ce50 100644 --- a/doc/overview/roadmap.qbk +++ b/doc/overview/roadmap.qbk @@ -1,23 +1,24 @@ [template history[] Currently open bug reports can be viewed -[@https://github.com/boostorg/math/issues here] on GIThub. +[@https://github.com/boostorg/math/issues here] on GitHub. All old bug reports including closed ones can be viewed on Trac -[@https://svn.boost.org/trac/boost/query?status=assigned&status=closed&status=new&status=reopened&component=math&col=id&col=summary&col=status&col=type&col=milestone&col=component&order=priority here] -and more recent issues on GIThub [@https://github.com/boostorg/math/issues?utf8=%E2%9C%93&q=is%3Aissue here]. +[@https://svn.boost.org/trac/boost/query?status=assigned&status=closed&status=new&status=reopened&component=math&col=id&col=summary&col=status&col=type&col=milestone&col=component&order=priority here]. + +Recent issues on GitHub [@https://github.com/boostorg/math/issues?utf8=%E2%9C%93&q=is%3Aissue here]. 
[h4 Math-2.10.0 (Boost-1.71)] -* Catmull-Rom interpolator now works in C++11 -* Cardinal quadratic B-spline interpolation -* Domain of elliptic integrals extended -* sin_pi and cos_pi performance improvements -* Forward-mode automatic differentiation -* Vector valued barycentric rational interpolation -* Ooura's method for evaluation of Fourier integrals -* Multiple compatibility issues with Multiprecision fixed -* Lambert-W fixed on a rare architecture +* Catmull-Rom interpolator now works in C++11. +* Cardinal quadratic B-spline interpolation. +* Domain of elliptic integrals extended. +* sin_pi and cos_pi performance improvements. +* Forward-mode automatic differentiation. +* Vector valued barycentric rational interpolation. +* Ooura's method for evaluation of Fourier integrals. +* Multiple compatibility issues with Multiprecision fixed. +* Lambert-W fixed on a rare architecture. [h4 Math-2.9.0 (Boost-1.70)] diff --git a/doc/overview/structure.qbk b/doc/overview/structure.qbk index 57702ced9..0c2f6e3f6 100644 --- a/doc/overview/structure.qbk +++ b/doc/overview/structure.qbk @@ -85,7 +85,7 @@ provides additional background or rationale.] Failure to follow suggestions in these blocks will probably result in undesired behavior. Read all of these you find.] -[warning Failure to heed this will lead to incorrect, and very likely undesired, results.] +[warning Failure to heed this will lead to incorrect, and very likely undesired, results!] [endsect] [/section:hints Other Hints and tips] @@ -102,7 +102,7 @@ Read all of these you find.] 
reserved for this type where possible), a few will use `float` or `long double`, but it is also possible to use higher precision types like __NTL_RR, __GMP, __MPFR, __multiprecision like cpp_bin_float_50 -that conform to the requirements specified by real_concept.]] +that conform to the requirements specified by `real_concept`.]] [[\/constants\/] [Templated definition of some highly accurate math constants ([@https://github.com/boostorg/math/blob/develop/include/boost/math/constants/constants.hpp constants.hpp]).]] @@ -154,7 +154,7 @@ that conform to the requirements specified by real_concept.]] [section:namespaces Namespaces] -All math functions and distributions are in `namespace boost::math` +All math functions and distributions are in `namespace boost::math`. So, for example, the Students-t distribution template in `namespace boost::math` is @@ -176,11 +176,12 @@ Functions that may have more general use, like `digits` (significand), `max_value`, `min_value` and `epsilon` are in `boost::math::tools`. -__Policy and configuration information is in namespace `boost::math::policies`. +__Policy and configuration information is in `namespace` `boost::math::policies`. [tip Many code snippets assume implicit namespace(s), for example, `std::` or `boost::math`.] +[tip Start your work from a copy of the example source code; links usually provided.] [endsect] [/section:namespaces Namespaces] diff --git a/doc/overview/tr1.qbk b/doc/overview/tr1.qbk index 8e4ba4c10..2e1d7c50f 100644 --- a/doc/overview/tr1.qbk +++ b/doc/overview/tr1.qbk @@ -385,7 +385,6 @@ return type determined by the __arg_promotion_rules. long double hypergl(long double a, long double b, long double c, long double x); - ] [/ diff --git a/doc/performance/performance.qbk b/doc/performance/performance.qbk index f6ee5fd30..3eb4d1d6c 100644 --- a/doc/performance/performance.qbk +++ b/doc/performance/performance.qbk @@ -31,7 +31,7 @@ all the appropriate corner cases. 
They do not necessarily represent "typical" usage: whatever that may be! ] -[endsect] +[endsect] [/section:interp Interpreting these Results] [section:getting_best Getting the Best Performance from this Library: Compiler and Compiler Options] @@ -364,7 +364,7 @@ compiler. [endsect] [/section:perf_test_app The Performance Test Applications] -[endmathpart] +[endmathpart] [/mathpart perf Performance] [/ Copyright 2006 John Maddock and Paul A. Bristow. diff --git a/doc/policies/policy.qbk b/doc/policies/policy.qbk index 1389ea9c1..dc6852e6f 100644 --- a/doc/policies/policy.qbk +++ b/doc/policies/policy.qbk @@ -309,7 +309,7 @@ For example: at the head of the file - see __policy_macros. -[endsect][/section:assert_undefined Mathematically Undefined Function Policies] +[endsect] [/section:assert_undefined Mathematically Undefined Function Policies] [section:discrete_quant_ref Discrete Quantile Policies] @@ -422,7 +422,7 @@ For example: Results in `x = 27` (rounded from 27.3898) and `y = 68` (rounded from 68.1584). -[endsect][/section:discrete_quant_ref Discrete Quantile Policies] +[endsect] [/section:discrete_quant_ref Discrete Quantile Policies] [section:precision_pol Precision Policies] @@ -468,7 +468,7 @@ And for a quantile of a distribution to approximately 25-bit precision: [policy_ref_snip11] -[endsect][/section:precision_pol Precision Policies] +[endsect] [/section:precision_pol Precision Policies] [section:iteration_pol Iteration Limits Policies] @@ -638,7 +638,7 @@ or we could place these definitions *before* in a source .cpp file. -[endsect][/section:policy_defaults Changing the Policy Defaults] +[endsect] [/section:policy_defaults Changing the Policy Defaults] [section:namespace_pol Setting Polices at Namespace Scope] @@ -691,7 +691,7 @@ declare a typedef for each distribution like this: and so on. The name given to each typedef is the name of the distribution with the "_distribution" suffix removed. 
-[endsect][/section Changing the Policy Defaults] +[endsect] [/section Changing the Policy Defaults] [section:pol_ref_ref Policy Class Reference] @@ -882,12 +882,12 @@ implementation. In this way code bloat is reduced, since the actual implementation depends only on the policy types that they actually use. -[endsect][/section:pol_ref_ref Policy Class Reference] +[endsect] [/section:pol_ref_ref Policy Class Reference] -[endsect][/section:pol_ref Policy Reference] -[endmathpart][/section:policy Policies] +[endsect] [/section:pol_ref Policy Reference] +[endmathpart] [/section:policy Policies] -[/ qbk +[/ policy.qbk Copyright 2007, 2010 John Maddock and Paul A. Bristow. Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at diff --git a/doc/policies/policy_tutorial.qbk b/doc/policies/policy_tutorial.qbk index 537734496..f44e9d6ee 100644 --- a/doc/policies/policy_tutorial.qbk +++ b/doc/policies/policy_tutorial.qbk @@ -1,4 +1,3 @@ - [section:pol_tutorial Policy Tutorial] [section:what_is_a_policy So Just What is a Policy Anyway?] 
@@ -233,7 +232,7 @@ Which outputs: [pre quantile is: 40] -[endsect][/section:ad_hoc_dist_policies Setting Policies for Distributions on an Ad Hoc Basis] +[endsect] [/section:ad_hoc_dist_policies Setting Policies for Distributions on an Ad Hoc Basis] [section:ad_hoc_sf_policies Changing the Policy on an Ad Hoc Basis for the Special Functions] @@ -521,7 +520,7 @@ to know that other rounding options are available: [policy_eg_10] -[endsect] +[endsect] [/section:understand_dis_quant Understanding Quantiles of Discrete Distributions] [endsect] [/section:pol_Tutorial Policy Tutorial] diff --git a/doc/quadrature/double_exponential.qbk b/doc/quadrature/double_exponential.qbk index fdd4a1088..cad1b98d0 100644 --- a/doc/quadrature/double_exponential.qbk +++ b/doc/quadrature/double_exponential.qbk @@ -79,9 +79,10 @@ LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) These three integration routines provide robust general purpose quadrature, each having a "native" range over which quadrature is performed. -For example, the `sinh_sinh` quadrature integrates over the entire real line, the `tanh_sinh` over (-1, 1), and the `exp_sinh` over (0, \u221E). +For example, the `sinh_sinh` quadrature integrates over the entire real line, the `tanh_sinh` over (-1, 1), +and the `exp_sinh` over (0, [infin]). The latter integrators also have auxilliary ranges which are handled via a change of variables on the function being integrated, -so that the `tanh_sinh` can handle integration over (a, b), and `exp_sinh` over (a, \u221E) and(-\u221E, b). +so that the `tanh_sinh` can handle integration over /(a, b)/, and `exp_sinh` over /(a, [infin]) and (-[infin], b)/. Like the other quadrature routines in Boost, these routines support both real and complex-valued integrands. 
@@ -100,7 +101,7 @@ the ranges supported by each method: [[sinh_sinh] [(-[infin],[infin])] [][]] ] -[endsect] +[endsect] [/section:de_overview Overview] [section:de_tanh_sinh tanh_sinh] @@ -126,12 +127,13 @@ the ranges supported by each method: }; -The tanh-sinh quadrature routine provided by boost is a rapidly convergent numerical integration scheme for holomorphic integrands. +The `tanh-sinh` quadrature routine provided by boost is a rapidly convergent numerical integration scheme for holomorphic integrands. By this we mean that the integrand is the restriction to the real line of a complex-differentiable function which is bounded on the interior of the unit disk /|z| < 1/, so that it lies within the so-called [@https://en.wikipedia.org/wiki/Hardy_space Hardy space]. -If your integrand obeys these conditions, it can be shown that tanh-sinh integration is optimal, +If your integrand obeys these conditions, it can be shown that `tanh-sinh` integration is optimal, in the sense that it requires the fewest function evaluations for a given accuracy of any quadrature algorithm for a random element from the Hardy space. -A basic example of how to use the tanh-sinh quadrature is shown below: + +A basic example of how to use the `tanh-sinh` quadrature is shown below: tanh_sinh integrator; auto f = [](double x) { return 5*x + 7; }; @@ -140,7 +142,7 @@ A basic example of how to use the tanh-sinh quadrature is shown below: // Integrate over (0,1.1) instead: Q = integrator.integrate(f, 0.0, 1.1); -The basic idea of tanh-sinh quadrature is that a variable transformation can cause the endpoint derivatives to decay rapidly. +The basic idea of `tanh-sinh` quadrature is that a variable transformation can cause the endpoint derivatives to decay rapidly. When the derivatives at the endpoints decay much faster than the Bernoulli numbers grow, the Euler-Maclaurin summation formula tells us that simple trapezoidal quadrature converges faster than any power of /h/. 
That means the number of correct digits of the result should roughly double with each new level of integration (halving of /h/), @@ -148,7 +150,6 @@ Hence the default termination condition for integration is usually set to the sq Most well-behaved integrals should converge to full machine precision with this termination condition, and in 6 or fewer levels at double precision, or 7 or fewer levels for quad precision. - One very nice property of tanh-sinh quadrature is that it can handle singularities at the endpoints of the integration domain. For instance, the following integrand, singular at both endpoints, can be efficiently evaluated to 100 binary digits: @@ -176,7 +177,6 @@ Like the trapezoidal quadrature, the tanh-sinh quadrature produces an estimate o This is to establish a scale against which to measure the tolerance, and to provide an estimate of the condition number of the summation. This can be queried as follows: - tanh_sinh integrator; auto f = [](double x) { return 5*x + 7; }; double termination = std::sqrt(std::numeric_limits::epsilon()); @@ -210,12 +210,12 @@ to the square root of epsilon, and all tests were conducted in double precision: argument ['xc] is `1-x` in this case, and we use 1-x[super 2] == (1-x)(1+x) to calculate 1-x[super 2] with greater accuracy.]] ] -Although the tanh-sinh quadrature can compute integral over infinite domains by variable transformations, these transformations can create a very poorly behaved integrand. +Although the `tanh-sinh` quadrature can compute integrals over infinite domains by variable transformations, these transformations can create a very poorly behaved integrand. For this reason, double-exponential variable transformations have been provided that allow stable computation over infinite domains; these being the exp-sinh and sinh-sinh quadrature. 
[h4 Complex integrals] -The tanh_sinh integrator supports integration of functions which return complex results, for example the sine-integral `Si(z)` has the integral representation: +The `tanh_sinh` integrator supports integration of functions which return complex results, for example the sine-integral `Si(z)` has the integral representation: [equation sine_integral] @@ -231,7 +231,7 @@ Which we can code up directly as: return integrator.integrate(f, 0, boost::math::constants::half_pi()) + boost::math::constants::half_pi(); } -[endsect] +[endsect] [/section:de_tanh_sinh tanh_sinh] [section:de_tanh_sinh_2_arg Handling functions with large features near an endpoint with tanh-sinh quadrature] @@ -263,9 +263,9 @@ Knowing this, we can rewrite our lambda expression to take advantage of this add auto f = [](double x, double xc) { return x <= 0.5 ? sqrt(x) / sqrt(1 - x * x) : sqrt(x / ((x + 1) * (xc))); }; double Q = integrator.integrate(f, 0.0, 1.0); -Not only is this form accurate to full machine precision, but it converges to the result faster as well. +Not only is this form accurate to full machine precision, but it converges to the result faster as well. -[endsect] +[endsect] [/section:de_tanh_sinh_2_arg Handling functions with large features near an endpoint with tanh-sinh quadrature] [section:de_sinh_sinh sinh_sinh] @@ -291,7 +291,7 @@ The sinh-sinh quadrature allows computation over the entire real line, and is ca double L1; double Q = integrator.integrate(f, &error, &L1); -Note that the limits of integration are understood to be (-\u221E, \u221E). +Note that the limits of integration are understood to be (-[infin], +[infin]). 
Complex valued integrands are supported as well, for example the [@https://en.wikipedia.org/wiki/Dirichlet_eta_function Dirichlet Eta function] can be represented via: @@ -313,7 +313,7 @@ which we can directly code up as: } -[endsect] +[endsect] [/section:de_sinh_sinh sinh_sinh] [section:de_exp_sinh exp_sinh] @@ -337,7 +337,7 @@ which we can directly code up as: size_t* levels = nullptr)->decltype(std::declval()(std::declval())) const; }; -For half-infinite intervals, the exp-sinh quadrature is provided: +For half-infinite intervals, the `exp-sinh` quadrature is provided: exp_sinh integrator; auto f = [](double x) { return exp(-3*x); }; @@ -347,9 +347,9 @@ For half-infinite intervals, the exp-sinh quadrature is provided: double Q = integrator.integrate(f, termination, &error, &L1); -The native integration range of this integrator is (0, [infin]), but we also support (a, [infin]), (-[infin], 0) and (-[infin], b) via argument transformations. +The native integration range of this integrator is (0, [infin]), but we also support /(a, [infin]), (-[infin], 0)/ and /(-[infin], b)/ via argument transformations. -Endpoint singularities and complex-valued integrands are supported by exp-sinh. +Endpoint singularities and complex-valued integrands are supported by `exp-sinh`. For example, the modified Bessel function K can be represented via: @@ -377,7 +377,7 @@ The only wrinkle in the above code is the need to check for large `cosh(t)` in w `exp(-x cosh(t))` tends to zero faster than `cosh(alpha x)` tends to infinity and return `0`. Without that check we end up with `0 * Infinity` as the result (a NaN). 
-[endsect] +[endsect] [/section:de_exp_sinh exp_sinh] [section:de_tol Setting the Termination Condition for Integration] @@ -390,14 +390,15 @@ It is highly recommended that the tolerance be left at the default value of [rad Since double exponential quadrature converges exponentially fast for functions in Hardy spaces, then once the routine has *proved* that the error is ~[radic][epsilon], then the error should in fact be ~[epsilon]. -If you request that the error be ~[epsilon], this tolerance might never be achieved (as the summation is not stabilized ala Kahan), and the routine will simply flounder, +If you request that the error be ~[epsilon], this tolerance might never be achieved (as the summation is not stabilized ala Kahan), +and the routine will simply flounder, dividing the interval in half in order to increase the precision of the integrand, only to be thwarted by floating point roundoff. If for some reason, the default value doesn't quite achieve full precision, then you could try something a little smaller such as [radic][epsilon]/4 or [epsilon][super 2/3]. However, more likely, you need to check that your function to be integrated is able to return accurate values, and that there are no other issues with your integration scheme. -[endsect] +[endsect] [/section:de_tol Setting the Termination Condition for Integration] [section:de_levels Setting the Maximum Interval Halvings and Memory Requirements] @@ -425,7 +426,7 @@ pairs for new levels are computed only when a new level is actually required (se you should avoid setting the maximum arbitrarily high "just in case" as the time and space requirements for a large number of levels can quickly grow out of control. -[endsect] +[endsect] [/section:de_levels Setting the Maximum Interval Halvings and Memory Requirements] [section:de_thread Thread Safety] @@ -441,7 +442,7 @@ atomic read, and only populating new levels requires a thread lock. 
In addition, the three built in types (plus `__float128` when available), have the first 7 levels pre-computed: this is generally sufficient for the vast majority of integrals - even at quad precision - and means that integrators for these types are relatively cheap to construct. -[endsect] +[endsect] [/section:de_thread Thread Safety] [section:de_caveats Caveats] @@ -474,7 +475,7 @@ Fortunately the error estimates and L1 norm are massive for these integrals, but A special mention should be made about integrating through zero: while our range adaptors preserve precision when one endpoint is zero, things get harder when the origin is neither in the center of the range, nor at an endpoint. Consider integrating: - 1 / (1 +x^2) +[expression 1 / (1 + x^2)] Over (a, [infin]). As long as `a >= 0` both the tanh_sinh and the exp_sinh integrators will handle this just fine: in fact they provide a rather efficient method for this kind of integral. However, if we have `a < 0` then we are forced to adapt the range in a way that @@ -483,7 +484,7 @@ both integrators thrash around trying to reach the target accuracy, but never ac simple expedient of breaking the integral into two domains: (a, 0) and (0, b) and integrating each seperately using the tanh-sinh integrator, works just fine. -Finally, some endpoint singularities are too strong to be handled by tanh_sinh or equivalent methods, for example consider integrating +Finally, some endpoint singularities are too strong to be handled by `tanh_sinh` or equivalent methods, for example consider integrating the function: double p = some_value; @@ -500,7 +501,7 @@ to find all the area under the function. If we recall the identity [^tan([pi]/2 And now the singularity is at the origin and we can get much closer to it when evaluating the integral: all we have done is swap the integral endpoints over. 
-This actually works just fine for p < 0.95, but after that the tanh_sinh integrator starts thrashing around and is unable to +This actually works just fine for p < 0.95, but after that the `tanh_sinh` integrator starts thrashing around and is unable to converge on the integral. The problem is actually a lack of exponent range: if we simply swap type double for something with a greater exponent range (an 80-bit long double or a quad precision type), then we can get to at least p = 0.99. If we want to go beyond that, or stick with type double, then we have to get smart. @@ -533,10 +534,9 @@ small `exp(-x)`, and therefore [^x > -log([epsilon])], we can greatly simplify t return x > crossover ? exp((p - 1) * x) : exp(-x) * pow(tan(exp(-x)), -p); }; -This form integrates just fine over (-log([pi]/2), +[infin]) using either the tanh_sinh or exp_sinh classes. +This form integrates just fine over (-log([pi]/2), +[infin]) using either the `tanh_sinh` or `exp_sinh` classes. - -[endsect] +[endsect] [/section:de_caveats Caveats] [section:de_refes References] @@ -545,5 +545,6 @@ This form integrates just fine over (-log([pi]/2), +[infin]) using either the ta * David H. Bailey, Karthik Jeyabalan and Xiaoye S. Li ['A Comparison of Three High-Precision Quadrature Schemes] Office of Scientific & Technical Information Technical Reports. * Tanaka, Ken’ichiro, et al. ['Function classes for double exponential integration formulas.] Numerische Mathematik 111.4 (2009): 631-655. -[endsect] -[endsect] +[endsect] [/section:de_refes References] + +[endsect] [/section:double_exponential Double-exponential quadrature] diff --git a/doc/sf/hypergeometric.qbk b/doc/sf/hypergeometric.qbk index 858584e9e..0c62b6c1e 100644 --- a/doc/sf/hypergeometric.qbk +++ b/doc/sf/hypergeometric.qbk @@ -1,4 +1,3 @@ - [section:hypergeometric Hypergeometric Functions] [section:hyper_geometric_1f0 Hypergeometric [sub 1]/F/[sub 0] ] @@ -33,7 +32,7 @@ undefined or complex. 
This occurs for `z == 1` or `1 - z < 0` and `a` not an in The implementation is trivial: -[:['[sub 1]F[sub 0](a, z) = (1-z)[super -a]]] +[expression [sub 1]F[sub 0](a, z) = (1-z)[super -a]] [endsect] [/section:hyper_geometric_1f0 Hypergeometric [sub 1]/F/[sub 0] ] @@ -170,11 +169,13 @@ The function `hypergeometric_1F1(a, b, z)` returns the non-singular solution to [@https://en.wikipedia.org/wiki/Confluent_hypergeometric_function Kummer's equation] [/\large $$z \frac{d^2 w}{d z^2} + (b-z) \frac{dw}{dz} - aw = 0 $$] -[$../equations/hypergeometric_1f1_2.svg] + +[equation hypergeometric_1f1_2] + which for |/z/| < 1 has the hypergeometric series expansion -[$../equations/hypergeometric_1f1_1.svg] +[equation hypergeometric_1f1_1] where (/a/)[sub /n/] denotes rising factorial. This function has the same definition as Mathematica's `Hypergeometric1F1[a, b, z]` and Maple's `KummerM(a, b, z)`. @@ -182,12 +183,12 @@ This function has the same definition as Mathematica's `Hypergeometric1F1[a, b, The "regularized" versions of the function return: [/ \Large $$ \textbf{M}(a, b; z) = \frac{{_1F_1}(a, b; z)}{\Gamma(b)} = \sum_{n=0}^{\infty} \frac{(a)_n z^n}{\Gamma(b+n) n!} $$] -[$../equations/hypergeometric_1f1_17.svg] +[equation hypergeometric_1f1_17] The "log" versions of the function return: [/ \Large $$ \ln(|_1F_1(a, b, z)|) $$ ] -[$../equations/hypergeometric_1f1_18.svg] +[equation hypergeometric_1f1_18] When the optional `sign` argument is supplied it is set on exit to either +1 or -1 depending on the sign of [sub 1]/F/[sub 1](/a/, /b/, /z/). @@ -224,7 +225,7 @@ There are 3 main groups of tests: * Spot tests for special inputs with known values. * Sanity checks which use integrals of hypergeometric functions of known value. These are particularly useful -since for fixed ['a] and ['b] they evaluate ['[sub 1]F[sub 1](a,b,z)] for all /z/. +since for fixed ['a] and ['b] they evaluate [role serif_italic [sub 1]F[sub 1](a,b,z)] for all /z/. 
* Testing against values precomputed using arbitrary precision arithmetic and the /pFq/ series. We also have a [@../../tools/hypergeometric_1F1_error_plot.cpp small program] for testing random values over a user-specified domain of /a/, /b/ and /z/, this program @@ -277,11 +278,11 @@ In almost all cases where /z < 0/ we use [@https://en.wikipedia.org/wiki/Conflue to make /z/ positive: [/\large $$_1F_1(a, b; -z) = e^{-z} {_1F_1}(a, b; z)$$] -[$../equations/hypergeometric_1f1_12.svg] +[equation hypergeometric_1f1_12] The main series representation -[$../equations/hypergeometric_1f1_1.svg] +[equation hypergeometric_1f1_1] is used only when @@ -294,24 +295,24 @@ to life again as /b/ crosses the origin. A&S 13.3.6 gives [/\large $$_1F_1(a, b; z) = e^{ \frac{z}{2} } \Gamma(b-a- \frac{1}{2} ) ( \frac{z}{4})^{a-b+ \frac{1}{2}} \sum_{n=0}^{\infty} \frac{(2b-2a-1)_n(b-2a)_n(b-a-\frac{1}{2}+n)}{n!(b)_n} (-1)^n I_{b-a-\frac{1}{2}+n}(\frac{z}{2})$$] -[$../equations/hypergeometric_1f1_3.svg] +[equation hypergeometric_1f1_3] which is particularly useful for ['a [cong] b] and ['z > 0], or /a/ \u226a 1, and ['z < 0]. 
Then we have Tricomi's approximation (given in simplified form in A&S 13.3.7) [link math_toolkit.hypergeometric.hypergeometric_refs (7)] [/\large $$_1F_1(a, b; z) = \Gamma(b) e^{ \frac{1}{2} z} \sum_{n=0}^{\infty} 2^{-n}z^n A_n(a,b) E_{b-1+n}(z(\frac{b}{2}-a)) $$] -[$../equations/hypergeometric_1f1_4.svg] +[equation hypergeometric_1f1_4] with [/\large $$A_0(a,b) = 1, A_1(a,b) = 0, A_2(a,b) = \frac{b}{2}, A_3(a,b)= - \frac{1}{3}(b-2a) $$] -[$../equations/hypergeometric_1f1_5.svg] +[equation hypergeometric_1f1_5] and [/\large $$(n+1)A_{n+1} = (n+b-1)A_{n-1} - (b-2a)A_{n-2} \quad;\quad n \geq 2 $$] -[$../equations/hypergeometric_1f1_6.svg] +[equation hypergeometric_1f1_6] With ['E[sub v]] defined as: @@ -323,33 +324,34 @@ E_v(-z) & = z^{-\frac{1}{2}v} I_v(2 \sqrt{z})\\ E_v(0) & = \frac{1}{\Gamma(v + 1)} \end{split} \end{equation*}] -[$../equations/hypergeometric_1f1_7.svg] +[equation hypergeometric_1f1_7] This approximation is especially effective when ['a < 0]. For /a/ and /b/ both large and positive and somewhat similar in size then an expansion in terms of gamma functions can be used [link math_toolkit.hypergeometric.hypergeometric_refs (6)]: [/\large $$_1F_1(a, b; x) = \frac{1}{B(a, b-a)} e^x \sum_{k=0}^{\infty} \frac{(1-a)_k}{k!} \frac{\gamma(b-a+k, x)}{x^{b-a+k}} $$] -[$../equations/hypergeometric_1f1_8.svg] +[equation hypergeometric_1f1_8] For /z/ large we have the asymptotic expansion: [/\large $$_1F_1(a, b; x) \approx \frac{e^x x^{a-b}}{\Gamma(a)} \sum_{k=0}^{\infty} \frac{(1-a)_k(b-a)_k}{k! x^k} $$] -[$../equations/hypergeometric_1f1_9.svg] +[equation hypergeometric_1f1_9] which is a special case of the gamma function expansion above. In the situation where `ab/z` is reasonably small then Luke's rational approximation is used - this is too complex to document here, refer either to the code or the original paper [link math_toolkit.hypergeometric.hypergeometric_refs (3)]. 
-The special case [sub 1]/F/[sub 1](1, /b/, -/z/) is treated via Luke's Pade approximation [link math_toolkit.hypergeometric.hypergeometric_refs (3)]. +The special case [role serif_italic [sub 1]/F/[sub 1](1, /b/, -/z/)] +is treated via Luke's Pade approximation [link math_toolkit.hypergeometric.hypergeometric_refs (3)]. That effectively completes the "direct" methods used, the remaining areas are treated indirectly via recurrence relations. These require extreme care in their use, as they often change the direction of stability, or else are not stable at all. Sometimes this is a well defined and characterized change in direction: for example with /a,b/ and /z/ all positive recurrence on /a/ via [/\large $$(b-a) _1F_1(a-1, b; z) + (2a-b+z) _1F_1(a, b; z) -a _1F_1(a+1, b; z) = 0 $$] -[$../equations/hypergeometric_1f1_10.svg] +[equation hypergeometric_1f1_10] abruptly changes from stable forwards recursion to stable backwards if /2a-b+z/ changes sign. Thus recurrence on /a/, even when [sub 1]/F/[sub 1](/a/+/N/, /b/, /z/) is strictly increasing, needs careful handling as /a/ \u2192 0. @@ -362,21 +364,24 @@ Gauchi also provides the definitive reference on 3-term recurrence relations in general in [link math_toolkit.hypergeometric.hypergeometric_refs (11)]. Unfortunately, not all recurrence stabilities are so well defined. 
-For example, when considering [sub 1]F[sub 1](/a/, -/b/, /z/) it would be convenient to use -the continued fractions associated with the recurrence relations to calculate [sub 1]F[sub 1](/a/, -/b/, /z/) / [sub 1]F[sub 1](/a/, 1-/b/, /z/) +For example, when considering [role serif_italic [sub 1]F[sub 1](/a/, -/b/, /z/)] it would be convenient to use +the continued fractions associated with the recurrence relations to calculate +[role serif_italic [sub 1]F[sub 1](/a/, -/b/, /z/) / [sub 1]F[sub 1](/a/, 1-/b/, /z/)] and then normalize either by iterating forwards on /b/ until /b > 0/ and comparison with a reference value, or by using the Wronskian [/\large $$_1F_1(a, b; z) \frac{d}{dz}z^{1-b}_1F_1(1+a-b, 2-b; z) - z^{1-b}_1F_1(1+a-b, 2-b; z)\frac{d}{dz}{_1F_1}(a, b; z) = (1-b)z^{-b}e^z,$$] -[$../equations/hypergeometric_1f1_11.svg] +[equation hypergeometric_1f1_11] which is of course the well known Miller's method [link math_toolkit.hypergeometric.hypergeometric_refs (12)]. Unfortunately, stable forwards recursion on /b/ is stable only for /|b| << |z|/, as /|b|/ increases in magnitude it passes through a region where recursion is unstable in both directions before eventually becoming stable in the backwards direction (at least for a while!). This transition is moderated not by a change of sign in the recurrence coefficients themselves, but by a change in the behaviour of the ['values] of [sub 1]F[sub 1] - -from alternating in sign when ['|b|] is small to having the same sign when /b/ is larger. During the actual transition, [sub 1]F[sub 1] will either -pass through a zero or a minima depending on whether b is even or odd (if there is a minima at [sub 1]F[sub 1](a, b, z) then there is necessarily a zero +from alternating in sign when ['|b|] is small to having the same sign when /b/ is larger. 
+During the actual transition, [role serif_italic [sub 1]F[sub 1]] will either +pass through a zero or a minima depending on whether b is even or odd +(if there is a minima at [role serif_italic [sub 1]F[sub 1](a, b, z)] then there is necessarily a zero at [sub 1]F[sub 1](a+1, b+1, z) as per the [@https://dlmf.nist.gov/13.3#E15 derivative of the function]). As a result the behaviour of the recurrence relations is almost impossible to reason about in this area, and we are left to using heuristic based approaches to "guess" which region we are in. @@ -390,25 +395,27 @@ For /a,b,z > 0/ and large we can either: * We can recurse on /b/ alone so that /b-1 < a < b/ and use A&S 13.3.6 as long as /z/ is not too large. For ['b < 0] and ['a] tiny we would normally use A&S 13.3.6, but when that is non-convergent for some inputs we can use the forward recurrence relation on ['b] to -calculate the ratio ['[sub 1]F[sub 1](a, -b, z)/[sub 1]F[sub 1](a, 1-b, z)] and then iterate forwards until ['b > 0] and compute a reference value +calculate the ratio [role serif_italic [sub 1]F[sub 1](a, -b, z)/[sub 1]F[sub 1](a, 1-b, z)] and then iterate forwards until ['b > 0] and compute a reference value and normalize (Millers Method). -In the same domain when ['b] is near -1 we can use a single backwards recursion on /b/ to compute ['[sub 1]F[sub 1](a, -b, z)] -from ['[sub 1]F[sub 1](a, 2-b, z)] and ['[sub 1]F[sub 1](/a/, 1-/b/, /z/)] even though technically we are recursing in the unstable direction. +In the same domain when ['b] is near -1 we can use a single backwards recursion on /b/ +to compute [role serif_italic [sub 1]F[sub 1](a, -b, z)] +from [role serif_italic [sub 1]F[sub 1](a, 2-b, z)] and [role serif_italic [sub 1]F[sub 1](/a/, 1-/b/, /z/)] even though technically we are recursing in the unstable direction. -For ['[sub 1]F[sub 1](-N, b, z)] and integer /N/ then backwards recursion from ['[sub 1]F[sub 1](0, b, z)] and ['[sub 1]F[sub 1](-1, b, z)] works well. 
+For [role serif_italic [sub 1]F[sub 1](-N, b, z)] and integer /N/ then backwards recursion +from [role serif_italic [sub 1]F[sub 1](0, b, z)] and [role serif_italic [sub 1]F[sub 1](-1, b, z)] works well. For /a/ or /b/ < 0 and if all the direct methods have failed, then we use various fallbacks: -For ['[sub 1]F[sub 1](-a, b, z)] we can use backwards recursion on /a/ as long as ['b > z], otherwise a more complex scheme is required -which starts from ['[sub 1]F[sub 1](-a + N, b + M, z)], and recurses backwards in up to 3 stages: first on /a/, then on /a/ and /b/ together, +For [role serif_italic [sub 1]F[sub 1](-a, b, z)] we can use backwards recursion on /a/ as long as ['b > z], otherwise a more complex scheme is required +which starts from [role serif_italic [sub 1]F[sub 1](-a + N, b + M, z)], and recurses backwards in up to 3 stages: first on /a/, then on /a/ and /b/ together, and finally on /b/ alone. For /b < 0/ we have no good methods in some domains (see the unsolved issues above). However in some circumstances we can either use: * 3-stage backwards recursion on both /a/, /a/ and /b/ and then /b/ as above. -* Calculate the ratio ['[sub 1]F[sub 1](a, b, z) / ['[sub 1]F[sub 1](a-1, b-1, z)]] via backwards recurrence when z is small, and then normalize via the Wronskian above (Miller's method). -* Calculate the ratio ['[sub 1]F[sub 1](a, b, z) / ['[sub 1]F[sub 1](a+1, b+1, z)]] via forwards recurrence when z is large, and then normalize by iterating until b > 1 and comparing to a reference value. +* Calculate the ratio [role serif_italic [sub 1]F[sub 1](a, b, z) / [sub 1]F[sub 1](a-1, b-1, z)] via backwards recurrence when z is small, and then normalize via the Wronskian above (Miller's method). +* Calculate the ratio [role serif_italic [sub 1]F[sub 1](a, b, z) / [sub 1]F[sub 1](a+1, b+1, z)] via forwards recurrence when z is large, and then normalize by iterating until b > 1 and comparing to a reference value. 
The latter two methods use a lookup table to determine whether inputs are in either of the domains or neither. Unfortunately the methods are basically limited to double precision: calculation of the ratios require iteration ['towards] the no-mans-land between the two methods where iteration is unstable in @@ -456,7 +463,7 @@ are destroyed by cancellation. The function `hypergeometric_pFq` returns the result of: [/\Large $$ _pF_q(\{a_1...a_n\}, \{b_1...b_n\}; z) = \sum_{k=0}^{\infty} \frac{\Pi_{j=1}^n(a_j)_n z^k}{\Pi_{j=1}^n(b_j)_n k!} $$] -[$../equations/hypergeometric_pfq_1.svg] +[equation hypergeometric_pfq_1] It is most naturally used via initializer lists as in: @@ -512,7 +519,7 @@ error inherent in calculating the N'th term via logarithms. # Beals, Richard, and Roderick Wong. ['Special functions: a graduate text.] Vol. 126. Cambridge University Press, 2010. # Pearson, John W., Sheehan Olver, and Mason A. Porter. ['Numerical methods for the computation of the confluent and Gauss hypergeometric functions.] Numerical Algorithms 74.3 (2017): 821-866. -# Luke, Yudell L. ['Algorithms for Rational Approximations for a Confluent Hypergeometric Function II.] MISSOURI UNIV KANSAS CITY DEPT OF MATHEMATICS, 1976. +# Luke, Yudell L. ['Algorithms for Rational Approximations for a Confluent Hypergeometric Function II.] Missouri Univ Kansas City Dept of Mathematics, 1976. # Dereziński, Jan. ['Hypergeometric type functions and their symmetries.] Annales Henri Poincaré. Vol. 15. No. 8. Springer Basel, 2014. # Keith E. Muller ['Computing the confluent hypergeometric function, M(a, b, x)]. Numer. Math. 90: 179-196 (2001). # Carlo Morosi, Livio Pizzocchero. ['On the expansion of the Kummer function in terms of incomplete Gamma functions.] Arch. Inequal. Appl. 2 (2004), 49-72. 
diff --git a/example/barycentric_interpolation_example.cpp b/example/barycentric_interpolation_example.cpp index 31e86cc88..263e20f19 100644 --- a/example/barycentric_interpolation_example.cpp +++ b/example/barycentric_interpolation_example.cpp @@ -17,7 +17,7 @@ This example shows how to use barycentric rational interpolation, using Walter K In this paper, Kohn needs to repeatedly solve an ODE (the radial Schrodinger equation) given a potential which is only known at non-equally samples data. -If he'd only had the barycentric rational interpolant of boost::math! +If he'd only had the barycentric rational interpolant of Boost.Math! References: Kohn, W., and N. Rostoker. "Solution of the Schrodinger equation in periodic lattices with an application to metallic lithium." Physical Review 94.5 (1954): 1111. */ diff --git a/example/barycentric_interpolation_example_2.cpp b/example/barycentric_interpolation_example_2.cpp index c31cabe6d..c7e834ec0 100644 --- a/example/barycentric_interpolation_example_2.cpp +++ b/example/barycentric_interpolation_example_2.cpp @@ -22,9 +22,8 @@ achieves a specific value. int main() { - // The lithium potential is given in Kohn's paper, Table I, - // we could equally use an unordered_map, a list of tuples or pairs, - // or a 2-dimentional array equally easily: + // The lithium potential is given in Kohn's paper, Table I. + // (We could equally easily use an unordered_map, a list of tuples or pairs, or a 2-dimensional array). std::map r; r[0.02] = 5.727;