From 9a2cf459dc04e2162d7ca42e7ee56df5965d2f2c Mon Sep 17 00:00:00 2001 From: jzmaddock Date: Wed, 11 Sep 2019 18:05:40 +0100 Subject: [PATCH 1/4] Kick off CI build --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index d0fb8688..b1c5b7fd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -780,3 +780,4 @@ script: notifications: email: on_success: always + From d93bd06e7f593c2459f5cb473d572ec772b5054e Mon Sep 17 00:00:00 2001 From: jzmaddock Date: Thu, 12 Sep 2019 13:07:08 +0100 Subject: [PATCH 2/4] float128.hpp: correct value of numeric_limits::is_bounded [CI SKIP] --- include/boost/multiprecision/float128.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/boost/multiprecision/float128.hpp b/include/boost/multiprecision/float128.hpp index 63ef4edf..5f7513eb 100644 --- a/include/boost/multiprecision/float128.hpp +++ b/include/boost/multiprecision/float128.hpp @@ -778,7 +778,7 @@ class numeric_limits Date: Thu, 19 Sep 2019 12:07:23 +0100 Subject: [PATCH 3/4] More comment in jamfiles about using big-obj if assembler file is too big. --- .../boost_multiprecision/indexes/s01.html | 21 +- .../boost_multiprecision/indexes/s02.html | 2 +- .../boost_multiprecision/indexes/s03.html | 4 +- .../boost_multiprecision/indexes/s04.html | 59 +++-- doc/html/boost_multiprecision/intro.html | 39 +-- doc/html/boost_multiprecision/map/faq.html | 3 +- doc/html/boost_multiprecision/tut.html | 6 +- .../tut/floats/float128.html | 9 + .../tut/floats/fp_eg/variable_precision.html | 2 +- .../boost_multiprecision/tut/gen_int.html | 5 +- .../tut/import_export.html | 16 +- .../tut/interval/mpfi.html | 10 +- .../tut/ints/egs/bitops.html | 6 +- .../tut/ints/egs/factorials.html | 4 +- doc/html/boost_multiprecision/tut/limits.html | 3 +- .../tut/limits/functions.html | 57 ++--- doc/html/boost_multiprecision/tut/lits.html | 225 +++++++++++++++--- doc/html/index.html | 8 +- doc/multiprecision.qbk | 94 ++++---- example/Jamfile.v2 | 17 +- example/numeric_limits_snips.cpp | 1 + test/Jamfile.v2 | 9 + 22 files changed, 419 insertions(+), 181 deletions(-) diff --git a/doc/html/boost_multiprecision/indexes/s01.html b/doc/html/boost_multiprecision/indexes/s01.html index edc8e022..4b1fc5ff 100644 --- a/doc/html/boost_multiprecision/indexes/s01.html +++ b/doc/html/boost_multiprecision/indexes/s01.html @@ -24,7 +24,7 @@

-Function Index

+Function Index

A B C D E F H I L M N O P R S T V X Z

@@ -127,6 +127,10 @@
  • number

  • +
  • +

    BOOST_ASSERT

    + +
  • C @@ -155,6 +159,10 @@
  • +

    constexpr

    + +
  • +
  • convert_to

  • @@ -192,6 +200,7 @@
  • gmp_float

  • gmp_int

  • gmp_rational

  • +
  • Literal Types and constexpr Support

  • mpc_complex

  • mpfi_float

  • mpfr_float

  • @@ -227,6 +236,10 @@
  • @@ -983,7 +996,7 @@
  • diff --git a/doc/html/boost_multiprecision/indexes/s02.html b/doc/html/boost_multiprecision/indexes/s02.html index b06a7aa6..7440ac25 100644 --- a/doc/html/boost_multiprecision/indexes/s02.html +++ b/doc/html/boost_multiprecision/indexes/s02.html @@ -24,7 +24,7 @@

    -Class Index

    +Class Index

    C D E F G I L M N T

    diff --git a/doc/html/boost_multiprecision/indexes/s03.html b/doc/html/boost_multiprecision/indexes/s03.html index 1b39455e..e1dbf1e0 100644 --- a/doc/html/boost_multiprecision/indexes/s03.html +++ b/doc/html/boost_multiprecision/indexes/s03.html @@ -24,7 +24,7 @@

    -Typedef Index

    +Typedef Index

    B C F I L M S T U V

    @@ -147,8 +147,8 @@

    cpp_dec_float_50

  • cpp_int

  • diff --git a/doc/html/boost_multiprecision/indexes/s04.html b/doc/html/boost_multiprecision/indexes/s04.html index 51d19bb8..7bea5499 100644 --- a/doc/html/boost_multiprecision/indexes/s04.html +++ b/doc/html/boost_multiprecision/indexes/s04.html @@ -23,7 +23,7 @@

    -Index

    +Index

    A B C D E F G H I L M N O P R S T U V X Z

    @@ -118,7 +118,10 @@
  • Bit Operations

    - +
  • bits

    @@ -158,6 +161,10 @@
  • +

    BOOST_ASSERT

    + +
  • +
  • BOOST_MP_DEFINE_SIZED_CPP_INT_LITERAL

  • @@ -295,6 +302,10 @@
  • +

    constexpr

    + +
  • +
  • Constructing and Interconverting Between Number Types

  • @@ -416,8 +427,8 @@

    cpp_dec_float_50

  • @@ -482,6 +493,7 @@
  • gmp_float

  • gmp_int

  • gmp_rational

  • +
  • Literal Types and constexpr Support

  • mpc_complex

  • mpfi_float

  • mpfr_float

  • @@ -525,6 +537,10 @@
    • +

      e

      + +
    • +
    • empty

    • @@ -1013,7 +1029,7 @@
    • infinity

      - +
    • Input Output

      @@ -1162,7 +1178,10 @@
    • Literal Types and constexpr Support

      - +
    • llrint

      @@ -1243,6 +1262,7 @@
    • @@ -1734,18 +1754,7 @@
    • -

      std::numeric_limits<> constants

      - -
    • -
    • -

      std::numeric_limits<> functions

      +

      std :: numeric_limits <> functions

    • +

      std::numeric_limits<> constants

      + +
    • +
    • str

    • @@ -1879,7 +1900,7 @@
    • diff --git a/doc/html/boost_multiprecision/intro.html b/doc/html/boost_multiprecision/intro.html index 311d05dd..28526263 100644 --- a/doc/html/boost_multiprecision/intro.html +++ b/doc/html/boost_multiprecision/intro.html @@ -34,27 +34,29 @@ The big number types in Multiprecision can be used with a wide selection of basic mathematical operations, elementary transcendental functions as well as the functions in Boost.Math. The Multiprecision types can also interoperate - with the __fundamental types in C++ using clearly defined conversion rules. - This allows Boost.Multiprecision to be used for all kinds of mathematical calculations + with any fundamental + (built-in) type in C++ using clearly defined conversion rules. This + allows Boost.Multiprecision to be used for all kinds of mathematical calculations involving integer, rational and floating-point types requiring extended range and precision.

      Multiprecision consists of a generic interface to the mathematics of large - numbers as well as a selection of big number back ends, with support for integer, + numbers as well as a selection of big number back-ends, with support for integer, rational, floating-point, and complex types. Boost.Multiprecision provides - a selection of back ends provided off-the-rack in including interfaces to GMP, + a selection of back-ends provided off-the-rack in including interfaces to GMP, MPFR, MPIR, MPC, TomMath as well as its own collection of Boost-licensed, header-only - back ends for integers, rationals and floats. In addition, user-defined back - ends can be created and used with the interface of Multiprecision, provided - the class implementation adheres to the necessary concepts. + back-ends for integers, rationals and floats. In addition, user-defined back-ends + can be created and used with the interface of Multiprecision, provided the + class implementation adheres to the necessary concepts.

      Depending upon the number type, precision may be arbitrarily large (limited - only by available memory), fixed at compile time (for example 50 or 100 decimal + only by available memory), fixed at compile time (for example, 50 or 100 decimal digits), or a variable controlled at run-time by member functions. The types are expression - template -enabled for better performance than naive user-defined types. + templates - enabled for better performance than naive user-defined + types.

      The Multiprecision library comes in two distinct parts: @@ -232,7 +234,7 @@

      The great advantage of this method is the elimination of temporaries: - for example the "naive" implementation of operator* above, requires one temporary for computing + for example, the "naive" implementation of operator* above, requires one temporary for computing the result, and at least another one to return it. It's true that sometimes this overhead can be reduced by using move-semantics, but it can't be eliminated completely. For example, lets suppose we're evaluating a polynomial via Horner's @@ -374,28 +376,29 @@

      In fact it is particularly easy to create dangling references by mixing expression - templates with the auto keyword, for example: + templates with the auto keyword, + for example:

      auto val = cpp_dec_float_50("23.1") * 100;

      - In this situation the integer literal is stored directly in the expression - template - so its use is OK here - but the cpp_dec_float_50 temporary is - stored by reference and then destructed when the statement completes leaving - a dangling reference. + In this situation, the integer literal is stored directly in the expression + template - so its use is OK here - but the cpp_dec_float_50 + temporary is stored by reference and then destructed when the statement completes, + leaving a dangling reference.

      If in doubt, do not ever mix expression templates - with the auto keyword. + with the auto keyword.
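      A minimal, self-contained sketch of the safe alternative to the snippet above - naming
      the result type instead of using auto forces the expression to be evaluated immediately
      (the numeric value is the same illustrative one used above):

    #include <boost/multiprecision/cpp_dec_float.hpp>

    using boost::multiprecision::cpp_dec_float_50;

    int main()
    {
       // Unsafe: auto deduces an expression-template type holding a reference to the
       // temporary cpp_dec_float_50, which is destroyed at the end of the statement:
       //auto val = cpp_dec_float_50("23.1") * 100;

       // Safe: naming the number type evaluates the expression into a real object:
       cpp_dec_float_50 val = cpp_dec_float_50("23.1") * 100;
       return 0;
    }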

    And finally... the performance improvements from an expression template library like this are often not as dramatic as the reduction in number of temporaries - would suggest. For example if we compare this library with mpfr_class + would suggest. For example, if we compare this library with mpfr_class and mpreal, with all three using the underlying MPFR library at 50 decimal digits precision then we see the following typical results @@ -720,7 +723,7 @@

    [1] The actual number generated will depend on the compiler, how well it optimizes the code, and whether it supports rvalue references. The number of 11 temporaries - was generated with Visual C++ 10 + was generated with Visual C++ 2010.

    diff --git a/doc/html/boost_multiprecision/map/faq.html b/doc/html/boost_multiprecision/map/faq.html index 0505ac19..1eae3076 100644 --- a/doc/html/boost_multiprecision/map/faq.html +++ b/doc/html/boost_multiprecision/map/faq.html @@ -101,7 +101,8 @@ This was deemed not to be practical: these algorithms are intimately tied to the actual data representation used.

    -
    How do I choose between Boost.Multiprecision cpp_bin_50 and cpp_dec_50?
    +
    How do I choose between Boost.Multiprecision cpp_bin_50 + and cpp_dec_50?

    Unless you have a specific reason to choose cpp_dec_, diff --git a/doc/html/boost_multiprecision/tut.html b/doc/html/boost_multiprecision/tut.html index f92dc00a..34fcb293 100644 --- a/doc/html/boost_multiprecision/tut.html +++ b/doc/html/boost_multiprecision/tut.html @@ -91,7 +91,8 @@

    Primality Testing
    Literal Types and constexpr Support
    Importing and - Exporting Data to and from cpp_int and cpp_bin_float
    + Exporting Data to and from cpp_int + and cpp_bin_float
    Rounding Rules for Conversions
    Mixed Precision Arithmetic
    @@ -102,8 +103,7 @@
    std::numeric_limits<> constants
    -
    std::numeric_limits<> - functions
    +
    std::numeric_limits<> functions
    Numeric limits for 32-bit platform
    How to diff --git a/doc/html/boost_multiprecision/tut/floats/float128.html b/doc/html/boost_multiprecision/tut/floats/float128.html index 87dff5a6..2c6021c6 100644 --- a/doc/html/boost_multiprecision/tut/floats/float128.html +++ b/doc/html/boost_multiprecision/tut/floats/float128.html @@ -77,6 +77,15 @@ of number on this backend move aware. +
  • + This type is fully constexpr + aware - basic constexpr arithmetic is supported from C++14 and onwards, + comparisons, plus the functions fabs, + abs, fpclassify, isnormal, + isfinite, isinf and isnan + are also supported if either the compiler implements C++20's std::is_constant_evaluated(), + or if the compiler is GCC. +
  • It is not possible to round-trip objects of this type to and from a string and get back exactly the same value when compiled with Intel's diff --git a/doc/html/boost_multiprecision/tut/floats/fp_eg/variable_precision.html b/doc/html/boost_multiprecision/tut/floats/fp_eg/variable_precision.html index fc8dba89..946d625c 100644 --- a/doc/html/boost_multiprecision/tut/floats/fp_eg/variable_precision.html +++ b/doc/html/boost_multiprecision/tut/floats/fp_eg/variable_precision.html @@ -191,7 +191,7 @@ Relative error: 7.55843e-1501 while (current_digits < digits10) { current_digits *= 2; - scoped_precision sp(std::min(current_digits, digits10)); + scoped_precision sp((std::min)(current_digits, digits10)); mpfr_float a(a_, mpfr_float::default_precision()), b(b_, mpfr_float::default_precision()); guess.precision(mpfr_float::default_precision()); f = boost::math::detail::ibeta_imp(a, b, guess, boost::math::policies::policy<>(), false, true, &f1) - 0.5f; diff --git a/doc/html/boost_multiprecision/tut/gen_int.html b/doc/html/boost_multiprecision/tut/gen_int.html index a272d08c..b4bb1179 100644 --- a/doc/html/boost_multiprecision/tut/gen_int.html +++ b/doc/html/boost_multiprecision/tut/gen_int.html @@ -31,7 +31,8 @@ integer operations are overloaded for the built in integer types in <boost/multiprecision/integer.hpp>. Where these operations require a temporary increase in precision (such as - for powm), then if no built in type is available, a cpp_int + for powm), then if no built + in type is available, a cpp_int of appropriate precision will be used.

    @@ -161,7 +162,7 @@

    The regular Miller-Rabin functions in <boost/multiprecision/miller_rabin.hpp> are defined in terms of the above generic operations, and so function equally - well for built in and multiprecision types. + well for built-in or __fundamental_types and multiprecision types.
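      As a rough illustration of that generic interface, a self-contained sketch using the
      Boost-licensed cpp_int backend and the two-argument miller_rabin_test overload shown
      above (the candidate value is arbitrary and not claimed to be prime):

    #include <boost/multiprecision/cpp_int.hpp>
    #include <boost/multiprecision/miller_rabin.hpp>
    #include <iostream>

    int main()
    {
       using boost::multiprecision::cpp_int;
       cpp_int n("0x100000000000000000000000000000061");   // arbitrary candidate value
       // 25 trials keeps the probability of a false positive very small:
       if (miller_rabin_test(n, 25))
          std::cout << "n is probably prime" << std::endl;
       else
          std::cout << "n is composite" << std::endl;
       return 0;
    }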

    diff --git a/doc/html/boost_multiprecision/tut/import_export.html b/doc/html/boost_multiprecision/tut/import_export.html index 362d5d49..6146b386 100644 --- a/doc/html/boost_multiprecision/tut/import_export.html +++ b/doc/html/boost_multiprecision/tut/import_export.html @@ -25,11 +25,12 @@

    Importing and - Exporting Data to and from cpp_int and cpp_bin_float + Exporting Data to and from cpp_int + and cpp_bin_float

    Any integer number type that uses cpp_int_backend - as it's implementation layer can import or export it's bits via two non-member + as it's implementation layer can import or export its bits via two non-member functions:

    template <unsigned MinBits, unsigned MaxBits, cpp_integer_type SignType, cpp_int_check_type Checked, class Allocator,
    @@ -54,7 +55,7 @@
             These functions are designed for data-interchange with other storage formats,
             and since cpp_bin_float
             uses cpp_int
    -        internally, by extension they can be used for floating point numbers based
    +        internally, by extension they can be used for floating-point numbers based
             on that backend as well (see example below). Parameters and use are as follows:
           

    template <unsigned MinBits, unsigned MaxBits, cpp_integer_type SignType, cpp_int_check_type Checked, class Allocator,
    @@ -134,8 +135,9 @@
     

    Note that this function is optimized for the case where the data can be - memcpy'ed from the source to the integer - in this case both iterators - much be pointers, and everything must be little-endian. + memcpyed from the source + to the integer - in this case both iterators must be pointers, and everything + must be little-endian.

    @@ -167,7 +169,7 @@ // import back again, and check for equality: cpp_int j; import_bits(j, v.begin(), v.end()); - assert(i == j); + BOOST_ASSERT(i == j); }
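      For reference, the round trip shown in the hunk above written out as a self-contained
      sketch (the hexadecimal test value is arbitrary):

    #include <boost/multiprecision/cpp_int.hpp>
    #include <boost/assert.hpp>
    #include <vector>
    #include <iterator>

    int main()
    {
       using boost::multiprecision::cpp_int;

       cpp_int i("0xDEADBEEFDEADBEEFDEADBEEFDEADBEEF");   // arbitrary test value

       // Export into a vector of 8-bit chunks, most significant chunk first (the default):
       std::vector<unsigned char> v;
       export_bits(i, std::back_inserter(v), 8);

       // Import back again and check that the value round-trips:
       cpp_int j;
       import_bits(j, v.begin(), v.end());
       BOOST_ASSERT(i == j);
       return 0;
    }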

    @@ -199,7 +201,7 @@ import_bits(i, v.begin(), v.end()); cpp_bin_float_100 g(i); g.backend().exponent() = e; - assert(f == g); + BOOST_ASSERT(f == g); } diff --git a/doc/html/boost_multiprecision/tut/interval/mpfi.html b/doc/html/boost_multiprecision/tut/interval/mpfi.html index 13c791fa..b4b9d7b1 100644 --- a/doc/html/boost_multiprecision/tut/interval/mpfi.html +++ b/doc/html/boost_multiprecision/tut/interval/mpfi.html @@ -54,11 +54,11 @@ Type mpfi_float_backend can be used at fixed precision by specifying a non-zero Digits10 template parameter, or at variable precision by setting the template argument - to zero. The typedefs mpfi_float_50, mpfi_float_100, mpfi_float_500, mpfi_float_1000 - provide arithmetic types at 50, 100, 500 and 1000 decimal digits precision - respectively. The typedef mpfi_float provides a variable precision type - whose precision can be controlled via the numbers - member functions. + to zero. The typedefs mpfi_float_50, mpfi_float_100, + mpfi_float_500, mpfi_float_1000 provide arithmetic types + at 50, 100, 500 and 1000 decimal digits precision respectively. The typedef mpfi_float + provides a variable precision type whose precision can be controlled via + the numbers member functions.
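      A small usage sketch of the typedefs just described (requires GMP, MPFR and MPFI to be
      installed; the precisions chosen are arbitrary, and the default_precision call is
      assumed to work as for the other variable-precision back-ends):

    #include <boost/multiprecision/mpfi.hpp>

    using namespace boost::multiprecision;

    int main()
    {
       mpfi_float_50 a = 1;                  // fixed at 50 decimal digits
       a /= 3;                               // the interval now brackets 1/3

       mpfi_float::default_precision(100);   // variable precision type, here 100 digits
       mpfi_float b = 1;
       b /= 3;
       return 0;
    }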

    diff --git a/doc/html/boost_multiprecision/tut/ints/egs/bitops.html b/doc/html/boost_multiprecision/tut/ints/egs/bitops.html index 86be9a90..b9463ebb 100644 --- a/doc/html/boost_multiprecision/tut/ints/egs/bitops.html +++ b/doc/html/boost_multiprecision/tut/ints/egs/bitops.html @@ -69,14 +69,14 @@ from b2 shouldn't be set unless we increment it first:

    -
    assert(!bit_test(b1(200), 200));     // OK
    -assert(bit_test(++b1(200), 200));    // OK
    +
    BOOST_ASSERT(!bit_test(b1(200), 200));     // OK
    +BOOST_ASSERT(bit_test(++b1(200), 200));    // OK
     

    And of course if we flip the n'th bit after increment, then we should get back to zero:

    -
    assert(!bit_flip(++b1(200), 200));   // OK
    +
    BOOST_ASSERT(!bit_flip(++b1(200), 200));   // OK
     
    diff --git a/doc/html/boost_multiprecision/tut/ints/egs/factorials.html b/doc/html/boost_multiprecision/tut/ints/egs/factorials.html index 8c5195ff..6d048da8 100644 --- a/doc/html/boost_multiprecision/tut/ints/egs/factorials.html +++ b/doc/html/boost_multiprecision/tut/ints/egs/factorials.html @@ -43,13 +43,13 @@ //// Print all the factorials that will fit inside a 128-bit integer.// - // Begin by building a big table of factorials, once we know just how + // Begin by building a big table of factorials, once we know just how// large the largest is, we'll be able to "pretty format" the results.//// Calculate the largest number that will fit inside 128 bits, we could// also have used numeric_limits<int128_t>::max() for this value:cpp_intlimit=(cpp_int(1)<<128)-1; - // + //// Our table of values:std::vector<cpp_int>results;// diff --git a/doc/html/boost_multiprecision/tut/limits.html b/doc/html/boost_multiprecision/tut/limits.html index 6b22e1eb..93510f23 100644 --- a/doc/html/boost_multiprecision/tut/limits.html +++ b/doc/html/boost_multiprecision/tut/limits.html @@ -29,8 +29,7 @@
    - max function + max function

    Function (std::numeric_limits<T>::max)() returns the largest finite value that @@ -77,21 +76,22 @@

    -(std::numeric_limits<double>::max)() == std::numeric_limits<double>::lowest();
     
    -
    - - min function -
    +

    + [h4min_function min function] +

    Function (std::numeric_limits<T>::min)() returns the minimum finite value that can be represented by the type T.

    - For built-in types there is usually a corresponding MACRO value TYPE_MIN, + For built-in types, there is usually a corresponding MACRO value TYPE_MIN, where TYPE is CHAR, INT, FLOAT etc.

    - Other types, including those provided by a typedef, for example INT64_T_MIN for int64_t, - may provide a macro definition. + Other types, including those provided by a typedef, + for example, INT64_T_MIN + for int64_t, may provide + a macro definition.

    For floating-point types, it is more fully defined as the minimum @@ -117,7 +117,7 @@ Of course, these simply use std::numeric_limits<T>::min() if available.

    - + denorm_min function
    @@ -152,7 +152,7 @@ precision until the significand reaches zero).

    - + round_error

    @@ -195,7 +195,7 @@ of significance or cancellation error or very many iterations.

    - + epsilon

    @@ -263,14 +263,19 @@

    RealType tolerance = boost::math::tools::epsilon<RealType>() * 2;
     
    - + Tolerance for Floating-point Comparisons

    - epsilon is very useful - to compute a tolerance when comparing floating-point values, a much more - difficult task than is commonly imagined. + Machine epsilon + ε is very useful to compute a tolerance when comparing floating-point + values, a much more difficult task than is commonly imagined. +

    +

    + The C++ standard specifies std::numeric_limits<>::epsilon() + and Boost.Multiprecision implements this (where possible) for its program-defined + types analogous to the __fundamental floating-point types like double float.

    For more information than you probably want (but still need) see What @@ -321,10 +326,8 @@ BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);

    - used thus: + used thus: cd ./test BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);

    -
    BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);
    -

    (There is also a version BOOST_CHECK_CLOSE using tolerance as a percentage rather than a fraction; usually the fraction version is simpler to use). @@ -353,7 +356,7 @@ BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);
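      Outside of Boost.Test, the same idea can be sketched directly with epsilon; the digit
      string below is just an illustrative approximation to 1/3:

    #include <boost/multiprecision/cpp_bin_float.hpp>
    #include <boost/math/tools/precision.hpp>
    #include <iostream>

    int main()
    {
       using RealType = boost::multiprecision::cpp_bin_float_50;

       // A tolerance of a couple of machine epsilon, as suggested above:
       RealType tolerance = boost::math::tools::epsilon<RealType>() * 2;

       RealType expected = RealType(1) / 3;
       RealType calculated("0.33333333333333333333333333333333333333333333333333");

       RealType relative_error = fabs(calculated - expected) / expected;
       std::cout << "tolerance      = " << tolerance << "\n"
                 << "relative error = " << relative_error << std::endl;
       std::cout << (relative_error < tolerance ? "close enough" : "too far apart") << std::endl;
       return 0;
    }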

    - + Infinity - positive and negative
    @@ -390,12 +393,12 @@ useful features portably and including Boost.Serialization.

    - + Not-A-Number NaN
    - + Quiet_NaN

    @@ -469,10 +472,10 @@ ss.imbue(new_locale); T inf = std::numeric_limits<T>::infinity(); ss << inf; // Write out. - assert(ss.str() == "inf"); + BOOST_ASSERT(ss.str() == "inf"); T r; ss >> r; // Read back in. - assert(inf == r); // Confirms that the floating-point values really are identical. + BOOST_ASSERT(inf == r); // Confirms that the floating-point values really are identical. std::cout << "infinity output was " << ss.str() << std::endl; std::cout << "infinity input was " << r << std::endl; } @@ -492,7 +495,7 @@ T n; T NaN = std::numeric_limits<T>::quiet_NaN(); ss << NaN; // Write out. - assert(ss.str() == "nan"); + BOOST_ASSERT(ss.str() == "nan"); std::cout << "NaN output was " << ss.str() << std::endl; ss >> n; // Read back in. std::cout << "NaN input was " << n << std::endl; @@ -502,7 +505,7 @@ NaN output was nan NaN input was nan

    - + Signaling NaN
    diff --git a/doc/html/boost_multiprecision/tut/lits.html b/doc/html/boost_multiprecision/tut/lits.html index 3bd26573..55f53eef 100644 --- a/doc/html/boost_multiprecision/tut/lits.html +++ b/doc/html/boost_multiprecision/tut/lits.html @@ -26,40 +26,46 @@ -
    - - - - - -
    [Note]Note

    - The features described in this section make heavy use of C++11 language - features, currently (as of May 2013) only GCC-4.7 and later, and Clang - 3.3 and later have the support required to make these features work. -

    - There is limited support for constexpr - and user-defined literals in the library, currently the number - front end supports constexpr - on default construction and all forwarding constructors, but not on any of - the non-member operators. So if some type B - is a literal type, then number<B> - is also a literal type, and you will be able to compile-time-construct such - a type from any literal that B - is compile-time-constructible from. However, you will not be able to perform - compile-time arithmetic on such types. + There are two kinds of constexpr + support in this library:

    +
      +
    • + The more basic version requires only C++11 and allow the construction + of some number types as literals. +
    • +
    • + The more advanced support permits constexpr arithmetic and requires at + least C++14 constexpr support, and for many operations C++2a support +
    • +
    +
    + Declaring numeric literals

    - Currently the only backend type provided by the library that is also a literal - type are instantiations of cpp_int_backend - where the Allocator parameter is type void, - and the Checked parameter is boost::multiprecision::unchecked. + There are two backend types which are literals:

    +
      +
    • + float128 + (which requires GCC), and +
    • +
    • + Instantiations of cpp_int_backend + where the Allocator parameter is type void, + and the Checked parameter is boost::multiprecision::unchecked. +
    • +

    For example:

    using namespace boost::multiprecision;
     
+constexpr float128            f = 0.1Q;  // OK, float128's are always literals in C++11
    +
     constexpr int128_t            i = 0;     // OK, fixed precision int128_t has no allocator.
     constexpr uint1024_t          j = 0xFFFFFFFF00000000uLL;  // OK, fixed precision uint1024_t has no allocator.
     
    @@ -67,8 +73,8 @@
     constexpr cpp_int             l = 2;  // Error, type is not a literal as it performs memory management.
     

    - There is also limited support for user defined-literals - these are limited - to unchecked, fixed precision cpp_int's + There is also limited support for user defined-literals with cpp_int + - these are limited to unchecked, fixed precision cpp_int's which are specified in hexadecimal notation. The suffixes supported are:

    @@ -193,6 +199,171 @@ // Which means this also works:constexprint1024_tj=-g;// OK: unary minus operator is constexpr. +
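      A short sketch of the user-defined literals just described; this assumes the sized
      _cppi1024 suffix from the elided table above together with the int1024_t typedef, and
      the hexadecimal value is arbitrary:

    #include <boost/multiprecision/cpp_int.hpp>

    using namespace boost::multiprecision::literals;

    // A 1024-bit signed constant built at compile time from a hexadecimal literal:
    constexpr boost::multiprecision::int1024_t big = 0xDEADBEEF00000000FF_cppi1024;

    // Unary minus on such a literal is also a constant expression:
    constexpr boost::multiprecision::int1024_t neg = -big;

    int main() { return 0; }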
    + constexpr arithmetic
    +

    + The front end of the library is all constexpr + from C++14 and later. Currently there is only one back-end type that is + constexpr aware, and that is + float128. + More backends will follow at a later date. +

    +

    + Provided the compiler is GCC, type float128 + support constexpr operations + on all arithmetic operations from C++14, comparisons, abs, + fabs, fpclassify, + isnan, isinf, + isfinite and isnormal are also fully supported, but + the transcendental functions are not. +

    +

    + For example given: +

    +
    template <class T>
    +inline constexpr T circumference(T radius)
    +{
    +   return 2 * boost::math::constants::pi<T>() * radius;
    +}
    +
    +template <class T>
    +inline constexpr T area(T radius)
    +{
    +   return boost::math::constants::pi<T>() * radius * radius;
    +}
    +
    +

    + We can now calculate areas and circumferences using all constexpr arithmetic: +

    +
    using boost::multiprecision::float128;
    +
    +constexpr float128 radius = 2.25;
    +constexpr float128 c      = circumference(radius);
    +constexpr float128 a      = area(radius);
    +
    +std::cout << "Circumference = " << c << std::endl;
    +std::cout << "Area = " << a << std::endl;
    +
    +

    + Note that these make use of the numeric constants from the Math library, + which also happen to be constexpr. +

    +

    + For a more interesting example, in constexpr_float_arithmetic_examples.cpp + we define a simple class for constexpr + polynomial arithmetic: +

    +
    template <class T, unsigned Order>
    +struct const_polynomial;
    +
    +

    + Given this, we can use recurrence relations to calculate the coefficients + for various orthogonal polynomials - in the example we use the Hermite polynomials, + only the constructor does any work - it uses the recurrence relations to + calculate the coefficient array: +

    +
    template <class T, unsigned Order>
    +class hermite_polynomial
    +{
    +   const_polynomial<T, Order> m_data;
    +
    + public:
    +   constexpr hermite_polynomial() : m_data(hermite_polynomial<T, Order - 1>().data() * const_polynomial<T, 1>{0, 2} - hermite_polynomial<T, Order - 1>().data().derivative())
    +   {
    +   }
    +   constexpr const const_polynomial<T, Order>& data() const
    +   {
    +      return m_data;
    +   }
    +   constexpr const T& operator[](std::size_t N)const
    +   {
    +      return m_data[N];
    +   }
    +   template <class U>
    +   constexpr T operator()(U val)const
    +   {
    +      return m_data(val);
    +   }
    +};
    +
    +

    + Now we just need to define H0 and H1 as termination conditions for the recurrence: +

    +
    template <class T>
    +class hermite_polynomial<T, 0>
    +{
    +   const_polynomial<T, 0> m_data;
    +
    + public:
    +   constexpr hermite_polynomial() : m_data{1} {}
    +   constexpr const const_polynomial<T, 0>& data() const
    +   {
    +      return m_data;
    +   }
    +   constexpr const T& operator[](std::size_t N) const
    +   {
    +      return m_data[N];
    +   }
    +   template <class U>
    +   constexpr T operator()(U val)
    +   {
    +      return m_data(val);
    +   }
    +};
    +
    +template <class T>
    +class hermite_polynomial<T, 1>
    +{
    +   const_polynomial<T, 1> m_data;
    +
    + public:
    +   constexpr hermite_polynomial() : m_data{0, 2} {}
    +   constexpr const const_polynomial<T, 1>& data() const
    +   {
    +      return m_data;
    +   }
    +   constexpr const T& operator[](std::size_t N) const
    +   {
    +      return m_data[N];
    +   }
    +   template <class U>
    +   constexpr T operator()(U val)
    +   {
    +      return m_data(val);
    +   }
    +};
    +
    +

    + We can now declare H9 as a constexpr object, access the coefficients, and + evaluate at an abscissa value, all using constexpr + arithmetic: +

    +
    constexpr hermite_polynomial<float128, 9> h9;
    +//
    +// Verify that the polynomial's coefficients match the known values:
    +//
    +static_assert(h9[0] == 0);
    +static_assert(h9[1] == 30240);
    +static_assert(h9[2] == 0);
    +static_assert(h9[3] == -80640);
    +static_assert(h9[4] == 0);
    +static_assert(h9[5] == 48384);
    +static_assert(h9[6] == 0);
    +static_assert(h9[7] == -9216);
    +static_assert(h9[8] == 0);
    +static_assert(h9[9] == 512);
    +//
    +// Define an abscissa value to evaluate at:
    +//
    +constexpr float128 abscissa(0.5);
    +//
    +// Evaluate H_9(0.5) using all constexpr arithmetic:
    +//
    +static_assert(h9(abscissa) == 6481);
    +
    diff --git a/doc/html/index.html b/doc/html/index.html index f178b550..c0b9ef2a 100644 --- a/doc/html/index.html +++ b/doc/html/index.html @@ -107,7 +107,8 @@
    Primality Testing
    Literal Types and constexpr Support
    Importing and - Exporting Data to and from cpp_int and cpp_bin_float
    + Exporting Data to and from cpp_int + and cpp_bin_float
    Rounding Rules for Conversions
    Mixed Precision Arithmetic
    @@ -118,8 +119,7 @@
    std::numeric_limits<> constants
    -
    std::numeric_limits<> - functions
    +
    std::numeric_limits<> functions
    Numeric limits for 32-bit platform
    How to @@ -177,7 +177,7 @@
    - +

    Last revised: August 13, 2019 at 14:01:03 GMT

    Last revised: August 30, 2019 at 15:57:26 GMT


    diff --git a/doc/multiprecision.qbk b/doc/multiprecision.qbk index 476e58f0..4bfcb818 100644 --- a/doc/multiprecision.qbk +++ b/doc/multiprecision.qbk @@ -83,9 +83,9 @@ [/ Hint you may need to enclose equation in brackets if it contains comma(s) to avoid "error invalid number of arguments"] ] -[def __tick [role aligncenter [role green \u2714]]] [/ u2714 is a HEAVY CHECK MARK tick (2713 check mark)] -[def __cross [role aligncenter [role red \u2718]]] [/ u2718 is a heavy cross] -[def __star [role aligncenter [role red \u2736]]] [/ 6-point star] +[def __tick [role aligncenter [role green \u2714]]] [/ u2714 is a HEAVY CHECK MARK tick (2713 check mark), green] +[def __cross [role aligncenter [role red \u2718]]] [/ u2718 is a heavy cross, red] +[def __star [role aligncenter [role red \u2736]]] [/ 6-point star red ] [/Boost.Multiprecision internals links] [def __cpp_int [link boost_multiprecision.tut.ints.cpp_int cpp_int]] @@ -113,6 +113,7 @@ [/External links as macro definitions.] [def __expression_template [@https://en.wikipedia.org/wiki/Expression_templates expression template]] +[def __expression_templates [@https://en.wikipedia.org/wiki/Expression_templates expression templates]] [/plural version] [def __UDT [@http://eel.is/c++draft/definitions#defns.prog.def.type program-defined type]] [def __fundamental_type [@https://en.cppreference.com/w/cpp/language/types fundamental (built-in) type]] @@ -126,8 +127,8 @@ range and precision than C++'s ordinary built-in types. The big number types in Multiprecision can be used with a wide selection of basic mathematical operations, elementary transcendental functions as well as the functions in Boost.Math. -The Multiprecision types can also interoperate with the -__fundamental types in C++ using clearly defined conversion rules. +The Multiprecision types can also interoperate with any +__fundamental_type in C++ using clearly defined conversion rules. This allows Boost.Multiprecision to be used for all kinds of mathematical calculations involving integer, rational and floating-point types requiring extended @@ -135,20 +136,20 @@ range and precision. Multiprecision consists of a generic interface to the mathematics of large numbers as well as a selection of -big number back ends, with support for integer, rational, +big number back-ends, with support for integer, rational, floating-point, and complex types. Boost.Multiprecision provides a selection -of back ends provided off-the-rack in including +of back-ends provided off-the-rack in including interfaces to GMP, MPFR, MPIR, MPC, TomMath as well as -its own collection of Boost-licensed, header-only back ends for -integers, rationals and floats. In addition, user-defined back ends +its own collection of Boost-licensed, header-only back-ends for +integers, rationals and floats. In addition, user-defined back-ends can be created and used with the interface of Multiprecision, provided the class implementation adheres to the necessary [link boost_multiprecision.ref.backendconc concepts]. Depending upon the number type, precision may be arbitrarily large (limited only by available memory), fixed at compile time -(for example 50 or 100 decimal digits), or a variable controlled at run-time -by member functions. The types are __expression_template -enabled for +(for example, 50 or 100 decimal digits), or a variable controlled at run-time +by member functions. The types are __expression_templates - enabled for better performance than naive user-defined types. 
The Multiprecision library comes in two distinct parts: @@ -281,7 +282,7 @@ of the multiplication, contains instructions on how to compute the result. In e of references to the arguments of the function, plus some compile-time information that stores what the operation is. -The great advantage of this method is the ['elimination of temporaries]: for example the "naive" implementation +The great advantage of this method is the ['elimination of temporaries]: for example, the "naive" implementation of `operator*` above, requires one temporary for computing the result, and at least another one to return it. It's true that sometimes this overhead can be reduced by using move-semantics, but it can't be eliminated completely. For example, lets suppose we're evaluating a polynomial via Horner's method, something like this: @@ -300,7 +301,7 @@ temporaries to pretty much zero). Note that if we compile with expression templ on, then actually still have no wasted memory allocations as even though temporaries are created, their contents are moved rather than copied. [footnote The actual number generated will depend on the compiler, how well it optimizes the code, and whether it supports -rvalue references. The number of 11 temporaries was generated with Visual C++ 10] +rvalue references. The number of 11 temporaries was generated with Visual C++ 2010.] [important Expression templates can radically reorder the operations in an expression, for example: @@ -368,20 +369,20 @@ internally by the Boost.Math library. unless you're absolutely sure that the lifetimes of `a`, `b` and `c` will outlive that of `my_expression`. -In fact it is particularly easy to create dangling references by mixing expression templates with the auto +In fact it is particularly easy to create dangling references by mixing expression templates with the `auto` keyword, for example: `auto val = cpp_dec_float_50("23.1") * 100;` -In this situation the integer literal is stored directly in the expression template - so its use is OK here - -but the cpp_dec_float_50 temporary is stored by reference and then destructed when the statement completes +In this situation, the integer literal is stored directly in the expression template - so its use is OK here - +but the `cpp_dec_float_50` temporary is stored by reference and then destructed when the statement completes, leaving a dangling reference. -[*['If in doubt, do not ever mix expression templates with the auto keyword.]] +[*['If in doubt, do not ever mix expression templates with the `auto` keyword.]] ] And finally... the performance improvements from an expression template library like this are often not as -dramatic as the reduction in number of temporaries would suggest. For example if we compare this library with +dramatic as the reduction in number of temporaries would suggest. For example, if we compare this library with [mpfr_class] and [mpreal], with all three using the underlying [mpfr] library at 50 decimal digits precision then we see the following typical results for polynomial execution: @@ -1141,9 +1142,9 @@ to provide an real-number type that is a drop-in replacement for the native C++ much greater precision and implementing interval arithmetic. Type `mpfi_float_backend` can be used at fixed precision by specifying a non-zero `Digits10` template parameter, or -at variable precision by setting the template argument to zero. 
The typedefs mpfi_float_50, mpfi_float_100, -mpfi_float_500, mpfi_float_1000 provide arithmetic types at 50, 100, 500 and 1000 decimal digits precision -respectively. The typedef mpfi_float provides a variable precision type whose precision can be controlled via the +at variable precision by setting the template argument to zero. The `typedef`s `mpfi_float_50`, `mpfi_float_100`, +`mpfi_float_500`, `mpfi_float_1000` provide arithmetic types at 50, 100, 500 and 1000 decimal digits precision +respectively. The `typedef mpfi_float` provides a variable precision type whose precision can be controlled via the `number`s member functions. [note This type only provides `numeric_limits` support when the precision is fixed at compile time.] @@ -2102,7 +2103,7 @@ Examples: [h4 constexpr arithmetic] The front end of the library is all `constexpr` from C++14 and later. Currently there is only one -back end type that is `constexpr` aware, and that is __float128. More backends will follow at a later date. +back-end type that is `constexpr` aware, and that is __float128. More backends will follow at a later date. Provided the compiler is GCC, type __float128 support `constexpr` operations on all arithmetic operations from C++14, comparisons, `abs`, `fabs`, `fpclassify`, `isnan`, `isinf`, `isfinite` and `isnormal` are also fully supported, but the transcendental functions are not. @@ -2141,9 +2142,9 @@ at an abscissa value, all using `constexpr` arithmetic: [endsect] -[section:import_export Importing and Exporting Data to and from cpp_int and cpp_bin_float] +[section:import_export Importing and Exporting Data to and from `cpp_int` and `cpp_bin_float`] -Any integer number type that uses `cpp_int_backend` as it's implementation layer can import or export it's bits via two non-member functions: +Any integer number type that uses `cpp_int_backend` as it's implementation layer can import or export its bits via two non-member functions: template @@ -2164,7 +2165,8 @@ Any integer number type that uses `cpp_int_backend` as it's implementation layer bool msv_first = true); These functions are designed for data-interchange with other storage formats, and since __cpp_bin_float uses __cpp_int internally, -by extension they can be used for floating point numbers based on that backend as well (see example below). Parameters and use are as follows: +by extension they can be used for floating-point numbers based on that backend as well (see example below). +Parameters and use are as follows: template @@ -2184,9 +2186,8 @@ within each `chunk_size` block is always in the machines native format. Further has to be specified manually. It may also result in compiler warnings about the value being narrowed.] [tip If you're exporting to non-native byte layout, then use -[@http://www.boost.org/doc/libs/release/libs/endian/doc/index.html -Boost.Endian] to create a custom OutputIterator that -reverses the byte order of each chunk prior to actually storing the result.] +[@http://www.boost.org/doc/libs/release/libs/endian/doc/index.html Boost.Endian] +to create a custom OutputIterator that reverses the byte order of each chunk prior to actually storing the result.] template @@ -2209,7 +2210,7 @@ As with exporting, if the external data is to be in a non-native byte order (wit that presents it in native order (see [@http://www.boost.org/doc/libs/release/libs/endian/doc/index.html Boost.Endian]). 
[note -Note that this function is optimized for the case where the data can be memcpy'ed from the source to the integer - in this case both +Note that this function is optimized for the case where the data can be `memcpy`ed from the source to the integer - in this case both iterators much be pointers, and everything must be little-endian.] [h4 Examples] @@ -2218,7 +2219,7 @@ iterators much be pointers, and everything must be little-endian.] [IE2] -[endsect] +[endsect] [/section:import_export Importing and Exporting Data to and from `cpp_int` and `cpp_bin_float`] [section:rounding Rounding Rules for Conversions] @@ -2243,7 +2244,7 @@ The following table summarises the situation for conversions from native types: [[__tommath_rational][See __tom_int]] ] -[endsect] +[endsect] [/section:rounding Rounding Rules for Conversions] [section:mixed Mixed Precision Arithmetic] @@ -2317,18 +2318,17 @@ than it is to explicitly cast the operands to the result type: __mpfr_float_backend, __mpf_float, __cpp_int. -[endsect] +[endsect] [/section:mixed Mixed Precision Arithmetic] [section:gen_int Generic Integer Operations] All of the [link boost_multiprecision.ref.number.integer_functions non-member integer operations] are overloaded for the built in integer types in ``. -Where these operations require a temporary increase in precision (such as for powm), then +Where these operations require a temporary increase in precision (such as for `powm`), then if no built in type is available, a __cpp_int of appropriate precision will be used. -Some of these functions are trivial, others use compiler intrinsics (where available) to ensure optimal -evaluation. +Some of these functions are trivial, others use compiler intrinsics (where available) to ensure optimal evaluation. The overloaded functions are: @@ -2414,9 +2414,9 @@ Returns the integer square root `s` of x and sets `r` to the remainder ['x - s[s bool miller_rabin_test(const number-or-expression-template-type& n, unsigned trials); The regular Miller-Rabin functions in `` are defined in terms of the above -generic operations, and so function equally well for built in and multiprecision types. +generic operations, and so function equally well for built-in or __fundamental_types and multiprecision types. -[endsect] +[endsect] [/section:gen_int Generic Integer Operations] [section:serial Boost.Serialization Support] @@ -2431,8 +2431,7 @@ support which requires the underlying backend to be serializable. [section:limits Numeric Limits] Boost.Multiprecision tries hard to implement `std::numeric_limits` for all types -as far as possible and meaningful because experience with Boost.Math -has shown that this aids portability. +as far as possible and meaningful because experience with Boost.Math has shown that this aids portability. The [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3690.pdf C++ standard library] defines `std::numeric_limits` in section 18.3.2. @@ -2877,9 +2876,9 @@ resulted in the loss of precision, such implementation detects ['denorm loss]. [endsect] [/section:constants std::numeric_limits<> Constants] -[section:functions std::numeric_limits<> functions] +[section:functions `std::numeric_limits<>` functions] -[h4 max function] +[h4:max_function `max` function] Function `(std::numeric_limits::max)()` returns the largest finite value that can be represented by the type T. 
If there is no such value (and @@ -2912,16 +2911,16 @@ Since C++11: `std::numeric_limits::lowest()` is [digits10_5] -[h4 min function] +[h4min_function `min` function] Function `(std::numeric_limits::min)()` returns the minimum finite value that can be represented by the type T. -For built-in types there is usually a corresponding MACRO value TYPE_MIN, +For built-in types, there is usually a corresponding MACRO value TYPE_MIN, where TYPE is CHAR, INT, FLOAT etc. -Other types, including those provided by a typedef, -for example `INT64_T_MIN` for `int64_t`, may provide a macro definition. +Other types, including those provided by a `typedef`, +for example, `INT64_T_MIN` for `int64_t`, may provide a macro definition. For floating-point types, it is more fully defined as the ['minimum positive normalized value]. @@ -2930,7 +2929,6 @@ See `std::numeric_limits::denorm_min()` for the smallest denormalized value, std::numeric_limits::has_denorm == std::denorm_present - To cater for situations where no `numeric_limits` specialization is available (for example because the precision of the type varies at runtime), packaged versions of this (and other functions) are provided using @@ -3215,13 +3213,11 @@ inspect `std::numeric_limits`. [endsect] [/section:how_to_tell How to Determine the Kind of a Number From `std::numeric_limits`] - [endsect] [/section:limits Numeric Limits] [section:input_output Input Output] - [h4 Loopback testing] ['Loopback] or ['round-tripping] refers to writing out a value as a decimal digit string using `std::iostream`, @@ -6087,7 +6083,7 @@ Open question - what should be the default - int32_t or int64_t? (done 2012/09/ [[Why not abstract out addition/multiplication algorithms?] [This was deemed not to be practical: these algorithms are intimately tied to the actual data representation used.]] - [[How do I choose between Boost.Multiprecision cpp_bin_50 and cpp_dec_50?] + [[How do I choose between Boost.Multiprecision `cpp_bin_50` and `cpp_dec_50`?] [Unless you have a specific reason to choose `cpp_dec_`, then the default choice should be `cpp_bin_`, for example using the convenience `typedefs` like `boost::multiprecision::cpp_bin_50` or `boost::multiprecision::cpp_bin_100`. diff --git a/example/Jamfile.v2 b/example/Jamfile.v2 index 04b32f3c..0e09c848 100644 --- a/example/Jamfile.v2 +++ b/example/Jamfile.v2 @@ -30,9 +30,17 @@ project $(tommath_path) ../include ../../.. - gcc:-Wno-missing-braces - gcc-mingw:-Wa,-mbig-obj - # https://digitalkarabela.com/mingw-w64-how-to-fix-file-too-big-too-many-sections/ + + gcc:-Wno-missing-braces + + # Assembler error "File too big" caused by lots of C++ templates, for example, math/floating_point_examples.cpp. + # Some projects on some toolsets may require + # gcc-mingw:\"-Wa,-mbig-obj\" + # See https://digitalkarabela.com/mingw-w64-how-to-fix-file-too-big-too-many-sections/ + # gcc-mingw:-Wa,-mbig-obj # Some projects may overflow assembler and require equivalent of MSVC /bigobj. + # Requires version 2.30 of GNU binutils. + # Best applied only to projects that require this, see run math/floating_point_examples.cpp below. 
+ darwin:-Wno-missing-braces acc:+W2068,2461,2236,4070 intel:-Qwd264,239 @@ -84,7 +92,8 @@ test-suite examples : [ run debug_adaptor_snips.cpp no_eh_eg_support ] [ run float128_snips.cpp quadmath no_eh_eg_support : : : [ check-target-builds ../config//has_float128 : : no ] ] - [ run floating_point_examples.cpp no_eh_eg_support ] + + [ run floating_point_examples.cpp no_eh_eg_support : : : gcc-mingw:-Wa,-mbig-obj ] # See note above. [ run gauss_laguerre_quadrature.cpp no_eh_eg_support : : : release [ requires cxx11_lambdas ] ] [ run hypergeometric_luke_algorithms.cpp no_eh_eg_support ../../chrono/build//boost_chrono ../../system/build//boost_system : : : [ requires cxx11_nullptr ] ] [ run integer_examples.cpp no_eh_eg_support ] diff --git a/example/numeric_limits_snips.cpp b/example/numeric_limits_snips.cpp index 47f4f0a6..739fc419 100644 --- a/example/numeric_limits_snips.cpp +++ b/example/numeric_limits_snips.cpp @@ -266,6 +266,7 @@ BOOST_AUTO_TEST_CASE(test_numeric_limits_snips) //[digits10_5 -(std::numeric_limits::max)() == std::numeric_limits::lowest(); //] [/digits10_5] +// warning C4553: '==': result of expression not used; did you intend '='? is spurious. } { diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 index 6d2b4045..2df14e5f 100644 --- a/test/Jamfile.v2 +++ b/test/Jamfile.v2 @@ -59,6 +59,15 @@ project : requirements intel-win:static intel-win:static clang-win:static + + # Assembler error "File too big" caused by lots of C++ templates, for example, math/floating_point_examples.cpp. + # Some projects on some toolsets may require + # gcc-mingw:\"-Wa,-mbig-obj\" + # See https://digitalkarabela.com/mingw-w64-how-to-fix-file-too-big-too-many-sections/ + # gcc-mingw:-Wa,-mbig-obj # Some projects may overflow assembler and require equivalent of MSVC /bigobj. + # Requires version 2.30 of GNU binutils. + # Best applied only to projects that require this, see multiprecision/example run math/floating_point_examples.cpp. + # Speed up compiles: msvc:off intel:off From 88b2b602b32551495faae3e0e919b63da069d5ce Mon Sep 17 00:00:00 2001 From: pabristow Date: Thu, 19 Sep 2019 15:44:34 +0100 Subject: [PATCH 4/4] Editorial changes, part 1. --- .../boost_multiprecision/indexes/s01.html | 6 +- .../boost_multiprecision/indexes/s02.html | 2 +- .../boost_multiprecision/indexes/s03.html | 2 +- .../boost_multiprecision/indexes/s04.html | 11 +-- doc/html/boost_multiprecision/intro.html | 85 ++++++++++++++----- .../tut/floats/float128.html | 8 ++ .../tut/ints/cpp_int.html | 19 +++++ .../tut/limits/functions.html | 44 ++++++---- doc/html/boost_multiprecision/tut/lits.html | 57 +++++++++++-- doc/html/index.html | 2 +- doc/multiprecision.qbk | 53 +++++++++--- example/numeric_limits_snips.cpp | 12 +-- 12 files changed, 218 insertions(+), 83 deletions(-) diff --git a/doc/html/boost_multiprecision/indexes/s01.html b/doc/html/boost_multiprecision/indexes/s01.html index 4b1fc5ff..d247bd13 100644 --- a/doc/html/boost_multiprecision/indexes/s01.html +++ b/doc/html/boost_multiprecision/indexes/s01.html @@ -24,7 +24,7 @@

    -Function Index

    +Function Index

    A B C D E F H I L M N O P R S T V X Z

    @@ -127,10 +127,6 @@
  • number

  • -
  • -

    BOOST_ASSERT

    - -
  • C diff --git a/doc/html/boost_multiprecision/indexes/s02.html b/doc/html/boost_multiprecision/indexes/s02.html index 7440ac25..651d58f3 100644 --- a/doc/html/boost_multiprecision/indexes/s02.html +++ b/doc/html/boost_multiprecision/indexes/s02.html @@ -24,7 +24,7 @@

    -Class Index

    +Class Index

    C D E F G I L M N T

    diff --git a/doc/html/boost_multiprecision/indexes/s03.html b/doc/html/boost_multiprecision/indexes/s03.html index e1dbf1e0..4c99bfab 100644 --- a/doc/html/boost_multiprecision/indexes/s03.html +++ b/doc/html/boost_multiprecision/indexes/s03.html @@ -24,7 +24,7 @@

    -Typedef Index

    +Typedef Index

    B C F I L M S T U V

    diff --git a/doc/html/boost_multiprecision/indexes/s04.html b/doc/html/boost_multiprecision/indexes/s04.html index 7bea5499..c85b4359 100644 --- a/doc/html/boost_multiprecision/indexes/s04.html +++ b/doc/html/boost_multiprecision/indexes/s04.html @@ -23,7 +23,7 @@

    -Index

    +Index

    A B C D E F G H I L M N O P R S T U V X Z

    @@ -118,10 +118,7 @@
  • Bit Operations

    - +
  • bits

    @@ -161,10 +158,6 @@
  • -

    BOOST_ASSERT

    - -
  • -
  • BOOST_MP_DEFINE_SIZED_CPP_INT_LITERAL

  • diff --git a/doc/html/boost_multiprecision/intro.html b/doc/html/boost_multiprecision/intro.html index 28526263..9fb0aaa2 100644 --- a/doc/html/boost_multiprecision/intro.html +++ b/doc/html/boost_multiprecision/intro.html @@ -79,17 +79,50 @@ back-ends rely on 3rd party libraries, but a header-only Boost license version is always available (if somewhat slower).

    +
    + Getting started with Boost.Multiprecision

    - Should you just wish to cut to the chase and use a fully Boost-licensed number - type, then skip to cpp_int - for multiprecision integers, cpp_rational - for rational types, cpp_dec_float - for multiprecision floating-point types and cpp_complex - for complex types. + Should you just wish to 'cut to the chase' just to get bigger integers or bigger + and more precise reals as simply and portably as possible, close to 'drop-in' + replacements for the fundamental + (built-in) type anlogs, then use a fully Boost-licensed number type, + and skip to: +

    +
    +

    + The library is very often used via one of the predefined convenience typedefs like boost::multiprecision::int128_t + or boost::multiprecision::cpp_bin_float_quad.

    - The library is often used via one of the predefined typedefs: for example if - you wanted an arbitrary + For example, if you want a signed, 128-bit fixed size integer: +

    +
    #include <boost/multiprecision/cpp_int.hpp>  //  Integer types.
    +
    +boost::multiprecision::int128_t my_128_bit_int;
    +
    +

    + Alternatively, and more adventurously, if you wanted an arbitrary precision integer type using GMP as the underlying implementation then you could use:

    @@ -98,27 +131,37 @@ boost::multiprecision::mpz_int myint; // Arbitrary precision integer type.

    - Alternatively, you can compose your own multiprecision type, by combining + Or for a simple, portable 128-bit floating-point close to a drop-in for a + fundamental (built-in) + type like double, usually + 64-bit +

    +
    #include <boost/multiprecision/cpp_bin_float.hpp>
    +
    +boost::multiprecision::cpp_bin_float_quad my_quad_real;
    +
    +

    + Alternatively, you can compose your own 'custom' multiprecision type, by combining number with one of the predefined back-end types. For example, suppose you wanted a 300 decimal digit floating-point type based on the MPFR library. In - this case, there's no predefined typedef with that level of precision, so instead - we compose our own: + this case, there's no predefined typedef + with that level of precision, so instead we compose our own:

    -
    #include <boost/multiprecision/mpfr.hpp>  // Defines the Backend type that wraps MPFR
    +
    #include <boost/multiprecision/mpfr.hpp>  // Defines the Backend type that wraps MPFR.
     
     namespace mp = boost::multiprecision;     // Reduce the typing a bit later...
     
     typedef mp::number<mp::mpfr_float_backend<300> >  my_float;
     
    -my_float a, b, c; // These variables have 300 decimal digits precision
    +my_float a, b, c; // These variables have 300 decimal digits precision.
     

    We can repeat the above example, but with the expression templates disabled (for faster compile times, but slower runtimes) by passing a second template argument to number:

    -
    #include <boost/multiprecision/mpfr.hpp>  // Defines the Backend type that wraps MPFR
    +
    #include <boost/multiprecision/mpfr.hpp>  // Defines the Backend type that wraps MPFR.
     
     namespace mp = boost::multiprecision;     // Reduce the typing a bit later...
     
    @@ -162,7 +205,7 @@
     b = a * 3.14; // Error, no operator overload if the conversion would be explicit.
     
    - + Move Semantics
    @@ -202,7 +245,7 @@ library containers.

    - + Expression Templates
    @@ -226,11 +269,11 @@ unmentionable-type operator * (const number<Backend>& a, const number<Backend>& b);

    - Where the "unmentionable" return type is an implementation detail - that, rather than containing the result of the multiplication, contains instructions - on how to compute the result. In effect it's just a pair of references to the - arguments of the function, plus some compile-time information that stores what - the operation is. + Where the 'unmentionable' return type is an implementation + detail that, rather than containing the result of the multiplication, contains + instructions on how to compute the result. In effect it's just a pair of references + to the arguments of the function, plus some compile-time information that stores + what the operation is.

    The great advantage of this method is the elimination of temporaries: diff --git a/doc/html/boost_multiprecision/tut/floats/float128.html b/doc/html/boost_multiprecision/tut/floats/float128.html index 2c6021c6..7bd1c961 100644 --- a/doc/html/boost_multiprecision/tut/floats/float128.html +++ b/doc/html/boost_multiprecision/tut/floats/float128.html @@ -106,6 +106,14 @@ Type float128 can be used as a literal type (constexpr support). +

  • + Type float128 can be + used for full constexpr + arithmetic from C++14 and later with GCC. The functions abs, fabs, + fpclassify, isnan, isinf, + isfinite and isnormal are also constexpr, + but the transcendental functions are not. +
  • When using the Intel compiler, the underlying type defaults to float128 if it's available and _Quad diff --git a/doc/html/boost_multiprecision/tut/ints/cpp_int.html b/doc/html/boost_multiprecision/tut/ints/cpp_int.html index bb069c52..81670ef5 100644 --- a/doc/html/boost_multiprecision/tut/ints/cpp_int.html +++ b/doc/html/boost_multiprecision/tut/ints/cpp_int.html @@ -343,6 +343,25 @@ with the value 0xffff. This can be used to generate compile time constants that are too large to fit into any built in number type.
  • +
  • + The cpp_int types support constexpr arithmetic, provided the type is of fixed precision and has no allocator. It may also be a checked integer, in which case a compiler error will be generated on overflow or undefined behaviour. In addition, the free functions abs, swap, multiply, add, subtract, divide_qr, integer_modulus, powm, lsb, msb, bit_test, bit_set, bit_unset, bit_flip, sqrt, gcd and lcm are all supported. Use of cpp_int in this way requires either a C++2a compiler (one which supports std::is_constant_evaluated()), or GCC-6 or later in C++14 mode. Compilers other than GCC and without std::is_constant_evaluated() will support only a very limited set of operations: expect to hit roadblocks rather easily.
  • You can import/export the raw bits of a cpp_int to and from external storage via the import_bits diff --git a/doc/html/boost_multiprecision/tut/limits/functions.html b/doc/html/boost_multiprecision/tut/limits/functions.html index 75ae53c7..8cb3baa0 100644 --- a/doc/html/boost_multiprecision/tut/limits/functions.html +++ b/doc/html/boost_multiprecision/tut/limits/functions.html @@ -76,9 +76,10 @@
    -(std::numeric_limits<double>::max)() == std::numeric_limits<double>::lowest();
     
    -

    - [h4min_function min function] -

    +
    + + min function +

    Function (std::numeric_limits<T>::min)() returns the minimum finite value that can be represented by the type T. @@ -117,7 +118,7 @@ Of course, these simply use std::numeric_limits<T>::min() if available.
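     A minimal usage sketch (assuming one of the library's predefined types, here cpp_bin_float_50; the extra parentheses guard against min/max being defined as macros, for example by <windows.h>):

    #include <boost/multiprecision/cpp_bin_float.hpp>
    #include <limits>
    #include <iostream>

    int main()
    {
       using boost::multiprecision::cpp_bin_float_50;
       // Prints the smallest positive normalized value of the type.
       std::cout << (std::numeric_limits<cpp_bin_float_50>::min)() << std::endl;
    }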

    - + denorm_min function
    @@ -152,7 +153,7 @@ precision until the significand reaches zero).

    - + round_error

    @@ -195,7 +196,7 @@ of significance or cancellation error or very many iterations.

    - + epsilon

    @@ -263,7 +264,7 @@

    RealType tolerance = boost::math::tools::epsilon<RealType>() * 2;
     
    - + Tolerance for Floating-point Comparisons
    @@ -293,7 +294,12 @@

     See Donald E. Knuth. The Art of Computer Programming (vol II). Copyright 1998 Addison-Wesley Longman, Inc., 0-201-89684-2. Addison-Wesley Professional; - 3rd edition. + 3rd edition. (The relevant equations are in paragraph 4.2.2, Eq. 36 and 37.)

    +

    + See Boost.Math + floating_point comparison for more details.
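     In outline, the two closeness definitions referenced above can be sketched as follows (a paraphrase, not library code; u and v are the values being compared, eps the chosen tolerance):

    #include <cmath>

    bool very_close(double u, double v, double eps)    // "strong" form
    {
       return (std::fabs(u - v) <= eps * std::fabs(u))
           && (std::fabs(u - v) <= eps * std::fabs(v));
    }

    bool close_enough(double u, double v, double eps)  // "weak" form
    {
       return (std::fabs(u - v) <= eps * std::fabs(u))
           || (std::fabs(u - v) <= eps * std::fabs(v));
    }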

    See also: @@ -326,8 +332,11 @@ BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);

  • - used thus: cd ./test BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance); + used thus:

    +
    cd ./test
    +BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);
    +

    (There is also a version BOOST_CHECK_CLOSE using tolerance as a percentage rather than a fraction; usually the fraction version is simpler to use). @@ -356,7 +365,7 @@ BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance);
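     For instance (a sketch only, reusing the expected, calculated and tolerance names from the fraction example above):

    // Note the factor of 100: BOOST_CHECK_CLOSE expects a percentage.
    BOOST_CHECK_CLOSE(expected, calculated, tolerance * 100);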

    - + Infinity - positive and negative
    @@ -393,12 +402,12 @@ useful features portably and including Boost.Serialization.

    - + Not-A-Number NaN
    - + Quiet_NaN

    @@ -484,7 +493,8 @@ infinity input was inf

     - Similarly we can do the same with NaN (except that we cannot use assert) + Similarly we can do the same with NaN (except that we cannot use assert, because any comparisons with NaN always return false).

    {
       std::locale old_locale;
    @@ -501,11 +511,11 @@
       std::cout << "NaN input was " << n << std::endl;
     }
     
    -

    - NaN output was nan NaN input was nan -

    +
    NaN output was nan
    +NaN input was nan
    +
    - + Signaling NaN
    diff --git a/doc/html/boost_multiprecision/tut/lits.html b/doc/html/boost_multiprecision/tut/lits.html index 55f53eef..66e16527 100644 --- a/doc/html/boost_multiprecision/tut/lits.html +++ b/doc/html/boost_multiprecision/tut/lits.html @@ -55,8 +55,8 @@
  • Instantiations of cpp_int_backend - where the Allocator parameter is type void, - and the Checked parameter is boost::multiprecision::unchecked. + where the Allocator parameter is type void. + In addition, prior to C++14 the Checked parameter must be boost::multiprecision::unchecked.
  • @@ -69,11 +69,12 @@ constexpr int128_t i = 0; // OK, fixed precision int128_t has no allocator. constexpr uint1024_t j = 0xFFFFFFFF00000000uLL; // OK, fixed precision uint1024_t has no allocator. -constexpr checked_uint128_t k = -1; // Error, checked type is not a literal type as we need runtime error checking. +constexpr checked_uint128_t k = 1; // OK from C++14 and later, not supported for C++11. +constexpr checked_uint128_t k = -1; // Error, as this would normally lead to a runtime failure (exception). constexpr cpp_int l = 2; // Error, type is not a literal as it performs memory management.

     - There is also limited support for user defined-literals with cpp_int + There is also support for user-defined literals with cpp_int - these are limited to unchecked, fixed precision cpp_int's which are specified in hexadecimal notation. The suffixes supported are:

    @@ -172,8 +173,8 @@ // Smaller values can be assigned to larger values: int256_t c = 0x1234_cppi; // OK // -// However, this does not currently work in constexpr contexts: -constexpr int256_t d = 0x1_cppi; // Compiler error +// However, this only works in constexpr contexts from C++14 onwards: +constexpr int256_t d = 0x1_cppi; // Compiler error in C++11, requires C++14 // // Constants can be padded out with leading zeros to generate wider types: constexpr uint256_t e = 0x0000000000000000000000000000000000000000000FFFFFFFFFFFFFFFFFFFFF_cppui; // OK @@ -206,9 +207,9 @@

     The front end of the library is all constexpr - from C++14 and later. Currently there is only one back-end type that is - constexpr aware, and that is - float128. + from C++14 and later. Currently there are only two back-end types that are + constexpr aware: float128 + and cpp_int. More back-ends will follow at a later date.

    @@ -220,6 +221,23 @@ isfinite and isnormal are also fully supported, but the transcendental functions are not.
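     A minimal sketch of compile-time float128 arithmetic (assuming GCC with libquadmath, compiling as C++14 or later):

    #include <boost/multiprecision/float128.hpp>

    using boost::multiprecision::float128;

    constexpr float128 half = float128(1) / 2;
    static_assert(half + half == 1, "float128 arithmetic evaluated at compile time");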

    +

     + The cpp_int types support constexpr arithmetic, provided the type is of fixed precision and has no allocator. It may also be a checked integer, in which case a compiler error will be generated on overflow or undefined behaviour. In addition, the free functions abs, swap, multiply, add, subtract, divide_qr, integer_modulus, powm, lsb, msb, bit_test, bit_set, bit_unset, bit_flip, sqrt, gcd and lcm are all supported. Use of cpp_int in this way requires either a C++2a compiler (one which supports std::is_constant_evaluated()), or GCC-6 or later in C++14 mode. Compilers other than GCC and without std::is_constant_evaluated() will support only a very limited set of operations: expect to hit roadblocks rather easily.

    For example given:

    @@ -364,6 +382,27 @@ // static_assert(h9(abscissa) == 6481); +

     + Also, since the coefficients of the Hermite polynomials are integers, we can generate the coefficients using (fixed precision) cpp_int's: see constexpr_test_cpp_int_6.cpp.

    +

    + We can also generate factorials (and validate the result) like so: +

    +
    template <class T>
    +constexpr T factorial(const T& a)
    +{
    +   return a ? a * factorial(a - 1) : 1;
    +}
    +
    +
    constexpr uint1024_t f1 = factorial(uint1024_t(31));
    +static_assert(f1 == 0x1956ad0aae33a4560c5cd2c000000_cppi);
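// A further sketch (same compiler requirements as above): the bit-oriented
// free functions listed earlier can also appear in constant expressions.
constexpr uint1024_t x = 0xF0;
static_assert(msb(x) == 7, "most significant bit located at compile time");
static_assert(bit_test(x, 4), "bit 4 of 0xF0 is set");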
    +
    +

    + Another example in constexpr_test_cpp_int_7.cpp + generates a fresh multiprecision random number each time the file is compiled. +

    diff --git a/doc/html/index.html b/doc/html/index.html index c0b9ef2a..ee1d40f6 100644 --- a/doc/html/index.html +++ b/doc/html/index.html @@ -177,7 +177,7 @@
    - +

    Last revised: August 30, 2019 at 15:57:26 GMT

    Last revised: September 19, 2019 at 14:40:03 GMT


    diff --git a/doc/multiprecision.qbk b/doc/multiprecision.qbk index a1fee3bc..fcb62edc 100644 --- a/doc/multiprecision.qbk +++ b/doc/multiprecision.qbk @@ -166,36 +166,57 @@ unconstrained license. Which is to say some back-ends rely on 3rd party libraries, but a header-only Boost license version is always available (if somewhat slower). -Should you just wish to cut to the chase and use a fully Boost-licensed number type, then skip to -__cpp_int for multiprecision integers, __cpp_rational for rational types, -__cpp_dec_float for multiprecision floating-point types -and __cpp_complex for complex types. +[h5:getting_started Getting started with Boost.Multiprecision] -The library is often used via one of the predefined typedefs: for example if you wanted an +Should you just wish to 'cut to the chase' just to get bigger integers and/or bigger and more precise reals as simply and portably as possible, +close to 'drop-in' replacements for the __fundamental_type analogs, +then use a fully Boost-licensed number type, and skip to one of more of : + +* __cpp_int for multiprecision integers, +* __cpp_rational for rational types, +* __cpp_bin_float and __cpp_dec_float for multiprecision floating-point types, +* __cpp_complex for complex types. + +The library is very often used via one of the predefined convenience `typedef`s +like `boost::multiprecision::int128_t` or `boost::multiprecision::cpp_bin_float_quad`. + +For example, if you want a signed, 128-bit fixed size integer: + + #include // Integer types. + + boost::multiprecision::int128_t my_128_bit_int; + +Alternatively, and more adventurously, if you wanted an [@http://en.wikipedia.org/wiki/Arbitrary-precision_arithmetic arbitrary precision] integer type using [gmp] as the underlying implementation then you could use: #include // Defines the wrappers around the GMP library's types boost::multiprecision::mpz_int myint; // Arbitrary precision integer type. + +Or for a simple, portable 128-bit floating-point close to a drop-in for a __fundamental_type like `double`, usually 64-bit -Alternatively, you can compose your own multiprecision type, by combining `number` with one of the + #include + + boost::multiprecision::cpp_bin_float_quad my_quad_real; + +Alternatively, you can compose your own 'custom' multiprecision type, by combining `number` with one of the predefined back-end types. For example, suppose you wanted a 300 decimal digit floating-point type -based on the [mpfr] library. In this case, there's no predefined typedef with that level of precision, +based on the [mpfr] library. In this case, there's no predefined `typedef` with that level of precision, so instead we compose our own: - #include // Defines the Backend type that wraps MPFR + #include // Defines the Backend type that wraps MPFR. namespace mp = boost::multiprecision; // Reduce the typing a bit later... typedef mp::number > my_float; - my_float a, b, c; // These variables have 300 decimal digits precision + my_float a, b, c; // These variables have 300 decimal digits precision. We can repeat the above example, but with the expression templates disabled (for faster compile times, but slower runtimes) by passing a second template argument to `number`: - #include // Defines the Backend type that wraps MPFR + #include // Defines the Backend type that wraps MPFR. namespace mp = boost::multiprecision; // Reduce the typing a bit later... 
@@ -278,7 +299,7 @@ Instead the operator looks more like this: template ``['unmentionable-type]`` operator * (const number& a, const number& b); -Where the "unmentionable" return type is an implementation detail that, rather than containing the result +Where the '['unmentionable]' return type is an implementation detail that, rather than containing the result of the multiplication, contains instructions on how to compute the result. In effect it's just a pair of references to the arguments of the function, plus some compile-time information that stores what the operation is. @@ -370,7 +391,7 @@ internally by the Boost.Math library. unless you're absolutely sure that the lifetimes of `a`, `b` and `c` will outlive that of `my_expression`. -In fact it is particularly easy to create dangling references by mixing expression templates with the `auto` +In fact, it is particularly easy to create dangling references by mixing expression templates with the `auto` keyword, for example: `auto val = cpp_dec_float_50("23.1") * 100;` @@ -3067,7 +3088,10 @@ for reliably checking if floating-point values are close enough. See Donald. E. Knuth. The art of computer programming (vol II). Copyright 1998 Addison-Wesley Longman, Inc., 0-201-89684-2. -Addison-Wesley Professional; 3rd edition. +Addison-Wesley Professional; 3rd edition. (The relevant equations are in paragraph 4.2.2, Eq. 36 and 37.) + +See [@https://www.boost.org/doc/libs/release/libs/test/doc/html/boost_test/testing_tools/extended_comparison/floating_point/floating_points_comparison_theory.html Boost.Math floating_point comparison] +for more details. See also: @@ -3080,7 +3104,8 @@ See also: [tolerance_1] used thus: -cd ./test + + cd ./test BOOST_CHECK_CLOSE_FRACTION(expected, calculated, tolerance); (There is also a version BOOST_CHECK_CLOSE using tolerance as a [*percentage] rather than a fraction; diff --git a/example/numeric_limits_snips.cpp b/example/numeric_limits_snips.cpp index 739fc419..e114f0a2 100644 --- a/example/numeric_limits_snips.cpp +++ b/example/numeric_limits_snips.cpp @@ -397,7 +397,7 @@ so the default expression template parameter has been replaced by `et_off`.] std::cout << "Type " << typeid(cpp_bin_float_quad).name() << " does not have NaNs!" << std::endl; } -//] [/nan_1] +//] [/nan_1] } { @@ -435,10 +435,11 @@ Then we can equally well use a multiprecision type cpp_bin_float_quad: } /*` +`` infinity output was inf infinity input was inf - -Similarly we can do the same with NaN (except that we cannot use `assert`) +`` +Similarly we can do the same with NaN (except that we cannot use `assert` (because any comparisons with NaN always return false). */ { std::locale old_locale; @@ -455,11 +456,12 @@ Similarly we can do the same with NaN (except that we cannot use `assert`) std::cout << "NaN input was " << n << std::endl; } /*` +`` NaN output was nan NaN input was nan - +`` */ -//] [/facet_1] +//] [/facet_1] } #endif