From bea370732635d76c824c10b7ef664a233e82cd28 Mon Sep 17 00:00:00 2001 From: John Maddock Date: Wed, 29 Feb 2012 13:05:51 +0000 Subject: [PATCH] Add initial version of an arbitrary precision integer type. Hook up some more fixed_int tests. [SVN r77141] --- include/boost/multiprecision/cpp_int.hpp | 1915 ++++++++++++++++++++ include/boost/multiprecision/fixed_int.hpp | 2 +- performance/performance_test.cpp | 13 +- test/Jamfile.v2 | 37 +- test/test_arithmetic.cpp | 18 +- test/test_cpp_int.cpp | 179 ++ test/test_numeric_limits.cpp | 21 +- 7 files changed, 2179 insertions(+), 6 deletions(-) create mode 100644 include/boost/multiprecision/cpp_int.hpp create mode 100644 test/test_cpp_int.cpp diff --git a/include/boost/multiprecision/cpp_int.hpp b/include/boost/multiprecision/cpp_int.hpp new file mode 100644 index 00000000..6ffe9f8a --- /dev/null +++ b/include/boost/multiprecision/cpp_int.hpp @@ -0,0 +1,1915 @@ +/////////////////////////////////////////////////////////////// +// Copyright 2012 John Maddock. Distributed under the Boost +// Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_ + +#ifndef BOOST_MP_CPP_INT_HPP +#define BOOST_MP_CPP_INT_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace boost{ +namespace multiprecision{ + + /* +typedef boost::uint32_t limb_type; +typedef boost::int32_t signed_limb_type; +typedef boost::uint64_t double_limb_type; +typedef boost::int64_t signed_double_limb_type; +static const limb_type max_block_10 = 1000000000; +static const limb_type digits_per_block_10 = 9; + +inline limb_type block_multiplier(int count) +{ + static const limb_type values[digits_per_block_10] + = { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; + BOOST_ASSERT(count < digits_per_block_10); + return values[count]; +} +*/ +template +inline void minmax(const T& a, const T& b, T& aa, T& bb) +{ + if(a < b) + { + aa = a; + bb = b; + } + else + { + aa = b; + bb = a; + } +} + +template > +struct cpp_int_backend : private Allocator::template rebind::other +{ + typedef typename Allocator::template rebind::other allocator_type; + typedef typename allocator_type::pointer limb_pointer; + typedef typename allocator_type::const_pointer const_limb_pointer; + +private: + struct limb_data + { + unsigned capacity; + limb_pointer data; + }; + +public: + typedef mpl::list signed_types; + typedef mpl::list unsigned_types; + typedef mpl::list float_types; + + BOOST_STATIC_CONSTANT(unsigned, limb_bits = sizeof(limb_type) * CHAR_BIT); + BOOST_STATIC_CONSTANT(limb_type, max_limb_value = ~static_cast(0u)); + BOOST_STATIC_CONSTANT(limb_type, sign_bit_mask = 1u << (limb_bits - 1)); + BOOST_STATIC_CONSTANT(unsigned, internal_limb_count = InternalLimbs ? InternalLimbs : sizeof(limb_data) / sizeof(limb_type)); + +private: + + union data_type + { + limb_data ld; + limb_type la[internal_limb_count]; + }; + + data_type m_data; + unsigned m_limbs; + bool m_sign, m_internal; + +public: + + // + // Helper functions for getting at our internal data, and manipulating storage: + // + allocator_type& allocator(){ return *this; } + const allocator_type& allocator()const{ return *this; } + unsigned size()const { return m_limbs; } + limb_pointer limbs() { return m_internal ? m_data.la : m_data.ld.data; } + const_limb_pointer limbs()const { return m_internal ? m_data.la : m_data.ld.data; } + unsigned capacity()const { return m_internal ? 
internal_limb_count : m_data.ld.capacity; } + bool sign()const { return m_sign; } + void sign(bool b) + { + m_sign = b; + // Check for zero value: + if(m_sign && (m_limbs == 1)) + { + if(limbs()[0] == 0) + m_sign = false; + } + } + void resize(unsigned new_size) + { + unsigned cap = capacity(); + if(new_size > cap) + { + cap = (std::max)(cap * 4, new_size); + limb_pointer pl = allocator().allocate(cap); + std::memcpy(pl, limbs(), size() * sizeof(limb_type)); + if(!m_internal) + allocator().deallocate(limbs(), capacity()); + else + m_internal = false; + m_limbs = new_size; + m_data.ld.capacity = cap; + m_data.ld.data = pl; + } + else + { + m_limbs = new_size; + } + } + + cpp_int_backend() : m_limbs(1), m_internal(true), m_sign(false) + { + *limbs() = 0; + } + cpp_int_backend(const cpp_int_backend& o) : m_limbs(0), m_internal(true) + { + resize(o.size()); + std::memcpy(limbs(), o.limbs(), size() * sizeof(limb_type)); + m_sign = o.m_sign; + } +#ifndef BOOST_NO_RVALUE_REFERENCES + cpp_int_backend(cpp_int_backend&& o) : m_limbs(o.m_limbs), m_sign(o.m_sign), m_internal(o.m_internal) + { + if(m_internal) + { + std::memcpy(limbs(), o.limbs(), size() * sizeof(limb_type)); + } + else + { + data.ld = o.data.ld; + o.m_limbs = 0; + o.m_internal = true; + } + } +#endif + ~cpp_int_backend() + { + if(!m_internal) + allocator().deallocate(limbs(), capacity()); + } + cpp_int_backend& operator = (const cpp_int_backend& o) + { + if(this != &o) + { + m_limbs = 0; + resize(o.size()); + std::memcpy(limbs(), o.limbs(), size() * sizeof(limb_type)); + m_sign = o.m_sign; + } + return *this; + } + cpp_int_backend& operator = (limb_type i) + { + m_limbs = 1; + *limbs() = i; + m_sign = false; + return *this; + } + cpp_int_backend& operator = (signed_limb_type i) + { + m_limbs = 1; + *limbs() = static_cast(std::abs(i)); + m_sign = i < 0; + return *this; + } + cpp_int_backend& operator = (double_limb_type i) + { + BOOST_STATIC_ASSERT(sizeof(i) == 2 * sizeof(limb_type)); + BOOST_STATIC_ASSERT(internal_limb_count >= 2); + limb_pointer p = limbs(); + *p = static_cast(i); + p[1] = static_cast(i >> limb_bits); + m_limbs = p[1] ? 2 : 1; + m_sign = false; + return *this; + } + cpp_int_backend& operator = (signed_double_limb_type i) + { + BOOST_STATIC_ASSERT(sizeof(i) == 2 * sizeof(limb_type)); + BOOST_STATIC_ASSERT(internal_limb_count >= 2); + if(i < 0) + { + m_sign = true; + i = -i; + } + else + m_sign = false; + limb_pointer p = limbs(); + *p = static_cast(i); + p[1] = static_cast(i >> limb_bits); + m_limbs = p[1] ? 2 : 1; + return *this; + } + + cpp_int_backend& operator = (long double a) + { + using default_ops::add; + using default_ops::subtract; + using std::frexp; + using std::ldexp; + using std::floor; + + if (a == 0) { + return *this = static_cast(0u); + } + + if (a == 1) { + return *this = static_cast(1u); + } + + BOOST_ASSERT(!(boost::math::isinf)(a)); + BOOST_ASSERT(!(boost::math::isnan)(a)); + + int e; + long double f, term; + *this = static_cast(0u); + + f = frexp(a, &e); + + static const limb_type shift = std::numeric_limits::digits; + + while(f) + { + // extract int sized bits from f: + f = ldexp(f, shift); + term = floor(f); + e -= shift; + left_shift(*this, shift); + if(term > 0) + add(*this, static_cast(term)); + else + subtract(*this, static_cast(-term)); + f -= term; + } + if(e > 0) + left_shift(*this, e); + else if(e < 0) + right_shift(*this, -e); + return *this; + } + cpp_int_backend& operator = (const char* s) + { + using default_ops::multiply; + using default_ops::add; + std::size_t n = s ? 
std::strlen(s) : 0; + *this = static_cast(0u); + unsigned radix = 10; + bool isneg = false; + if(n && (*s == '-')) + { + --n; + ++s; + isneg = true; + } + if(n && (*s == '0')) + { + if((n > 1) && ((s[1] == 'x') || (s[1] == 'X'))) + { + radix = 16; + s +=2; + n -= 2; + } + else + { + radix = 8; + n -= 1; + } + } + if(n) + { + if(radix == 8 || radix == 16) + { + unsigned shift = radix == 8 ? 3 : 4; + unsigned block_count = limb_bits / shift; + unsigned block_shift = shift * block_count; + limb_type val, block; + while(*s) + { + block = 0; + for(unsigned i = 0; (i < block_count); ++i) + { + if(*s >= '0' && *s <= '9') + val = *s - '0'; + else if(*s >= 'a' && *s <= 'f') + val = 10 + *s - 'a'; + else if(*s >= 'A' && *s <= 'F') + val = 10 + *s - 'A'; + else + val = max_limb_value; + if(val > radix) + { + BOOST_THROW_EXCEPTION(std::runtime_error("Unexpected content found while parsing character string.")); + } + block <<= shift; + block |= val; + if(!*++s) + { + // final shift is different: + block_shift = (i + 1) * shift; + break; + } + } + left_shift(*this, block_shift); + limbs()[0] |= block; + } + } + else + { + // Base 10, we extract blocks of size 10^9 at a time, that way + // the number of multiplications is kept to a minimum: + limb_type block_mult = max_block_10; + while(*s) + { + limb_type block = 0; + for(unsigned i = 0; i < digits_per_block_10; ++i) + { + limb_type val; + if(*s >= '0' && *s <= '9') + val = *s - '0'; + else + BOOST_THROW_EXCEPTION(std::runtime_error("Unexpected character encountered in input.")); + block *= 10; + block += val; + if(!*++s) + { + block_mult = block_multiplier(i); + break; + } + } + multiply(*this, block_mult); + add(*this, block); + } + } + } + if(isneg) + negate(); + return *this; + } + void swap(cpp_int_backend& o) + { + std::swap(m_data, o.m_data); + std::swap(m_sign, o.m_sign); + std::swap(m_internal, o.m_internal); + std::swap(m_limbs, o.m_limbs); + } + std::string str(std::streamsize digits, std::ios_base::fmtflags f)const + { + int base = 10; + if((f & std::ios_base::oct) == std::ios_base::oct) + base = 8; + else if((f & std::ios_base::hex) == std::ios_base::hex) + base = 16; + std::string result; + + unsigned Bits = size() * limb_bits; + + if(base == 8 || base == 16) + { + limb_type shift = base == 8 ? 3 : 4; + limb_type mask = static_cast((1u << shift) - 1); + cpp_int_backend t(*this); + result.assign(Bits / shift + (Bits % shift ? 1 : 0), '0'); + int pos = result.size() - 1; + for(unsigned i = 0; i < Bits / shift; ++i) + { + char c = '0' + (t.limbs()[0] & mask); + if(c > '9') + c += 'A' - '9' - 1; + result[pos--] = c; + right_shift(t, shift); + } + if(Bits % shift) + { + mask = static_cast((1u << (Bits % shift)) - 1); + char c = '0' + (t.limbs()[0] & mask); + if(c > '9') + c += 'A' - '9'; + result[pos] = c; + } + // + // Get rid of leading zeros: + // + std::string::size_type n = result.find_first_not_of('0'); + if(!result.empty() && (n == std::string::npos)) + n = result.size() - 1; + result.erase(0, n); + if(f & std::ios_base::showbase) + { + const char* pp = base == 8 ? 
"0" : "0x"; + result.insert(0, pp); + } + } + else + { + result.assign(Bits / 3 + 1, '0'); + int pos = result.size() - 1; + cpp_int_backend t(*this); + cpp_int_backend r; + bool neg = false; + if(t.sign()) + { + t.negate(); + neg = true; + } + if(size() == 1) + { + result = boost::lexical_cast(t.limbs()[0]); + } + else + { + cpp_int_backend block10; + block10 = max_block_10; + while(get_sign(t) != 0) + { + cpp_int_backend t2; + divide_unsigned_helper(t2, t, block10, r); + t = t2; + limb_type v = r.limbs()[0]; + for(unsigned i = 0; i < digits_per_block_10; ++i) + { + char c = '0' + v % 10; + v /= 10; + result[pos] = c; + if(pos-- == 0) + break; + } + } + } + unsigned n = result.find_first_not_of('0'); + result.erase(0, n); + if(result.empty()) + result = "0"; + if(neg) + result.insert(0, 1, '-'); + else if(f & std::ios_base::showpos) + result.insert(0, 1, '+'); + } + return result; + } + void negate() + { + m_sign = !m_sign; + // Check for zero value: + if(m_sign && (m_limbs == 1)) + { + if(limbs()[0] == 0) + m_sign = false; + } + } + bool isneg()const + { + return m_sign; + } + int compare(const cpp_int_backend& o)const + { + if(m_sign != o.m_sign) + return m_sign ? -1 : 1; + int result = 0; + // Only do the compare if the same sign: + result = compare_unsigned(o); + + if(m_sign) + result = -result; + return result; + } + int compare_unsigned(const cpp_int_backend& o)const + { + if(m_limbs != o.m_limbs) + { + return m_limbs > o.m_limbs ? 1 : -1; + } + const_limb_pointer pa = limbs(); + const_limb_pointer pb = o.limbs(); + for(int i = m_limbs - 1; i >= 0; --i) + { + if(pa[i] != pb[i]) + return pa[i] > pb[i] ? 1 : -1; + } + return 0; + } + template + typename enable_if, int>::type compare(Arithmatic i)const + { + // braindead version: + cpp_int_backend t; + t = i; + return compare(t); + } +}; + +template +const unsigned cpp_int_backend::limb_bits; +template +const limb_type cpp_int_backend::max_limb_value; +template +const limb_type cpp_int_backend::sign_bit_mask; +template +const unsigned cpp_int_backend::internal_limb_count; + + +template +inline void add(cpp_int_backend& result, const cpp_int_backend& o) +{ + add(result, result, o); +} +template +inline void add_unsigned(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + using std::swap; + + // Nothing fancy, just let uintmax_t take the strain: + double_limb_type carry = 0; + unsigned m, x; + minmax(a.size(), b.size(), m, x); + if(x == 1) + { + bool s = a.sign(); + result = static_cast(*a.limbs()) + static_cast(*b.limbs()); + result.sign(s); + return; + } + result.resize(x); + typename cpp_int_backend::const_limb_pointer pa = a.limbs(); + typename cpp_int_backend::const_limb_pointer pb = b.limbs(); + typename cpp_int_backend::limb_pointer pr = result.limbs(); + typename cpp_int_backend::limb_pointer pr_end = pr + m; + + if(a.size() < b.size()) + swap(pa, pb); + + // First where a and b overlap: + while(pr != pr_end) + { + carry += static_cast(*pa) + static_cast(*pb); + *pr = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + ++pr, ++pa, ++pb; + } + pr_end += x - m; + // Now where only a has digits: + while(pr != pr_end) + { + if(!carry) + { + if(pa != pr) + std::memcpy(pr, pa, (pr_end - pr) * sizeof(limb_type)); + break; + } + carry += static_cast(*pa); + *pr = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + ++pr, ++pa; + } + if(carry) + { + result.resize(x + 1); + // We overflowed, need to add one more limb: + result.limbs()[x] = static_cast(carry); + } + result.sign(a.sign()); +} 
+template +inline void add(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + if(a.sign() != b.sign()) + { + subtract_unsigned(result, a, b); + return; + } + add_unsigned(result, a, b); +} + +template +inline void add_unsigned(cpp_int_backend& result, const limb_type& o) +{ + // Addition using modular arithmatic. + // Nothing fancy, just let uintmax_t take the strain: + double_limb_type carry = o; + typename cpp_int_backend::limb_pointer pr = result.limbs(); + for(unsigned i = 0; carry && (i < result.size()); ++i) + { + carry += static_cast(pr[i]); + pr[i] = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + } +} +template +inline void add(cpp_int_backend& result, const limb_type& o) +{ + if(result.sign()) + { + subtract_unsigned(result, o); + } + else + add_unsigned(result, o); +} +template +inline void add(cpp_int_backend& result, const signed_limb_type& o) +{ + if(o < 0) + subtract(result, static_cast(-o)); + else if(o > 0) + add(result, static_cast(o)); +} + +template +inline void subtract_unsigned(cpp_int_backend& result, const limb_type& o) +{ + // Subtract one limb. + // Nothing fancy, just let uintmax_t take the strain: + BOOST_STATIC_CONSTANT(double_limb_type, borrow = static_cast(cpp_int_backend::max_limb_value) + 1); + typename cpp_int_backend::limb_pointer p = result.limbs(); + if(*p > o) + { + *p -= o; + } + else if(result.size() == 1) + { + *p = o - *p; + result.negate(); + } + else + { + *p = static_cast((borrow + *p) - o); + ++p; + while(!*p) + { + *p = cpp_int_backend::max_limb_value; + ++p; + } + --*p; + if(!result.limbs()[result.size() - 1]) + result.resize(result.size() - 1); + } +} +template +inline void subtract(cpp_int_backend& result, const limb_type& o) +{ + if(result.sign()) + add_unsigned(result, o); + else + subtract_unsigned(result, o); +} +template +inline void subtract(cpp_int_backend& result, const signed_limb_type& o) +{ + if(o) + { + if(o < 0) + add(result, static_cast(-o)); + else + subtract(result, static_cast(o)); + } +} +template +inline void increment(cpp_int_backend& result) +{ + static const limb_type one = 1; + if(!result.sign() && (result.limbs()[0] < cpp_int_backend::max_limb_value)) + ++result.limbs()[0]; + else if(result.sign() && result.limbs()[0]) + --result.limbs()[0]; + else + add(result, one); +} +template +inline void decrement(cpp_int_backend& result) +{ + static const limb_type one = 1; + if(!result.sign() && result.limbs()[0]) + --result.limbs()[0]; + else if(result.sign() && (result.limbs()[0] < cpp_int_backend::max_limb_value)) + ++result.limbs()[0]; + else + subtract(result, one); +} +template +inline void subtract(cpp_int_backend& result, const cpp_int_backend& o) +{ + subtract(result, result, o); +} +template +inline void subtract_unsigned(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + using std::swap; + + // Nothing fancy, just let uintmax_t take the strain: + double_limb_type borrow = 0; + unsigned m, x; + minmax(a.size(), b.size(), m, x); + // + // special cases for small limb counts: + // + if(x == 1) + { + bool s = a.sign(); + limb_type al = *a.limbs(); + limb_type bl = *b.limbs(); + if(bl > al) + { + std::swap(al, bl); + s = !s; + } + result = al - bl; + result.sign(s); + return; + }/* + else if(m == 1) + { + if(b.size() == 1) + { + result = a; + subtract_unsigned(result, *b.limbs()); + } + else + { + result = b; + subtract_unsigned(result, *a.limbs()); + result.negate(); + } + return; + }*/ + result.resize(x); + typename 
cpp_int_backend::const_limb_pointer pa = a.limbs(); + typename cpp_int_backend::const_limb_pointer pb = b.limbs(); + typename cpp_int_backend::limb_pointer pr = result.limbs(); + bool swapped = false; + int c = a.compare_unsigned(b); + if(c < 0) + { + swap(pa, pb); + swapped = true; + } + else if(c == 0) + { + result = 0; + return; + } + + unsigned i = 0; + BOOST_STATIC_CONSTANT(double_limb_type, borrow_value = static_cast(cpp_int_backend::max_limb_value) + 1); + // First where a and b overlap: + while(i < m) + { + borrow += pb[i]; + borrow = (borrow_value + static_cast(pa[i])) - borrow; + pr[i] = static_cast(borrow); + borrow = borrow & borrow_value ? 0 : 1; + ++i; + } + // Now where only a has digits, only as long as we've borrowed: + while(borrow && (i < x)) + { + borrow = (borrow_value + static_cast(pa[i])) - borrow; + pr[i] = static_cast(borrow); + borrow = borrow & borrow_value ? 0 : 1; + ++i; + } + // Any remaining digits are the same as those in pa: + if((x != i) && (pa != pr)) + std::memcpy(pr + i, pa + i, (x - i) * sizeof(limb_type)); + BOOST_ASSERT(0 == borrow); + + // + // We may have lost digits, if so update limb usage count: + // + i = result.size() - 1; + while(!pr[i] && i) + { + --i; + } + result.resize(i + 1); + result.sign(a.sign()); + if(swapped) + result.negate(); +} +template +inline void subtract(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + if(a.sign() != b.sign()) + { + add_unsigned(result, a, b); + return; + } + subtract_unsigned(result, a, b); +} +template +inline void multiply(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + // Very simple long multiplication, only usable for small numbers of limb_type's + // but that's the typical use case for this type anyway: + // + // Special cases first: + // + unsigned as = a.size(); + unsigned bs = b.size(); + typename cpp_int_backend::const_limb_pointer pa = a.limbs(); + typename cpp_int_backend::const_limb_pointer pb = b.limbs(); + if(as == 1) + { + bool s = b.sign() != a.sign(); + if(bs == 1) + { + result = static_cast(*pa) * static_cast(*pb); + } + else + { + limb_type l = *pa; + result = b; + multiply(result, l); + } + result.sign(s); + return; + } + if(bs == 1) + { + limb_type l = *pb; + bool s = b.sign() != a.sign(); + result = a; + multiply(result, l); + result.sign(s); + return; + } + + if(&result == &a) + { + cpp_int_backend t(a); + multiply(result, t, b); + return; + } + if(&result == &b) + { + cpp_int_backend t(b); + multiply(result, a, t); + return; + } + + result.resize(as + bs); + typename cpp_int_backend::limb_pointer pr = result.limbs(); + + double_limb_type carry = 0; + std::memset(pr, 0, (as + bs) * sizeof(limb_type)); + for(unsigned i = 0; i < as; ++i) + { + for(unsigned j = 0; j < bs; ++j) + { + carry += static_cast(pa[i]) * static_cast(pb[j]); + carry += pr[i + j]; + pr[i + j] = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + } + pr[i + bs] = static_cast(carry); + carry = 0; + } + // + // We may not have filled all the digits, if so update limb usage count: + // + unsigned i = result.size() - 1; + while(!pr[i] && i) + { + --i; + } + result.resize(i + 1); + // + // Set the sign of the result: + // + result.sign(a.sign() != b.sign()); +} +template +inline void multiply(cpp_int_backend& result, const cpp_int_backend& a) +{ + multiply(result, result, a); +} +template +inline void multiply(cpp_int_backend& result, cpp_int_backend& a, const limb_type& val) +{ + if(!val) + { + result = 0; + return; + } + double_limb_type 
carry = 0; + typename cpp_int_backend::limb_pointer p = result.limbs(); + typename cpp_int_backend::limb_pointer pe = result.limbs() + result.size(); + typename cpp_int_backend::limb_pointer pa = a.limbs(); + while(p != pe) + { + carry += static_cast(*pa * static_cast(val)); + *p = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + ++p, ++pa; + } + if(carry) + { + result.resize(result.size() + 1); + result.limbs()[result.size() - 1] = static_cast(carry); + } +} +template +inline void multiply(cpp_int_backend& result, const limb_type& val) +{ + multiply(result, result, val); +} +template +inline void multiply(cpp_int_backend& result, cpp_int_backend& a, const signed_limb_type& val) +{ + if(val > 0) + multiply(result, a, static_cast(val)); + else + { + multiply(result, a, static_cast(-val)); + result.negate(); + } +} +template +inline void multiply(cpp_int_backend& result, const signed_limb_type& val) +{ + multiply(result, result, val); +} +template +void divide_unsigned_helper(cpp_int_backend& result, const cpp_int_backend& x, const cpp_int_backend& y, cpp_int_backend& r) +{ + if((&result == &x) || (&r == &x)) + { + cpp_int_backend t(x); + divide_unsigned_helper(result, t, y, r); + return; + } + if((&result == &y) || (&r == &y)) + { + cpp_int_backend t(y); + divide_unsigned_helper(result, x, t, r); + return; + } + + /* + Very simple, fairly braindead long division. + Start by setting the remainder equal to x, and the + result equal to 0. Then in each loop we calculate our + "best guess" for how many times y divides into r, + add our guess to the result, and subtract guess*y + from the remainder r. One wrinkle is that the remainder + may go negative, in which case we subtract the current guess + from the result rather than adding. The value of the guess + is determined by dividing the most-significant-limb of the + current remainder by the most-significant-limb of y. + + Note that there are more efficient algorithms than this + available, in particular see Knuth Vol 2. However for small + numbers of limbs this generally outperforms the alternatives + and avoids the normalisation step which would require extra storage. + */ + + + using default_ops::subtract; + + if(&result == &r) + { + cpp_int_backend rem; + divide_unsigned_helper(result, x, y, rem); + r = rem; + return; + } + + // + // Find the most significant words of numerator and denominator. + // + limb_type y_order = y.size() - 1; + + if(y_order == 0) + { + // + // Only a single non-zero limb in the denominator, in this case + // we can use a specialized divide-by-single-limb routine which is + // much faster. This also handles division by zero: + // + divide_unsigned_helper(result, x, y.limbs()[y_order], r); + return; + } + + typename cpp_int_backend::const_limb_pointer px = x.limbs(); + typename cpp_int_backend::const_limb_pointer py = y.limbs(); + + limb_type r_order = x.size() - 1; + if((r_order == 0) && (*px == 0)) + { + // x is zero, so is the result: + r = y; + result = x; + return; + } + + r = x; + r.sign(false); + result = static_cast(0u); + // + // Check if the remainder is already less than the divisor, if so + // we already have the result. 
Note we try and avoid a full compare + // if we can: + // + if(r_order <= y_order) + { + if((r_order < y_order) || (r.compare(y) < 0)) + { + return; + } + } + + cpp_int_backend t; + bool r_neg = false; + + // + // See if we can short-circuit long division, and use basic arithmetic instead: + // + if(r_order == 0) + { + result = px[0] / py[0]; + r = px[0] % py[0]; + return; + } + else if(r_order == 1) + { + double_limb_type a, b; + a = (static_cast(px[1]) << cpp_int_backend::limb_bits) | px[0]; + b = y_order > 1 ? + (static_cast(py[1]) << cpp_int_backend::limb_bits) | py[0] + : py[0]; + result = a / b; + r = a % b; + return; + } + // + // prepare result: + // + result.resize(1 + r_order - y_order); + typename cpp_int_backend::const_limb_pointer prem = r.limbs(); + typename cpp_int_backend::limb_pointer pr = result.limbs(); + for(unsigned i = 1; i < 1 + r_order - y_order; ++i) + pr[i] = 0; + bool first_pass = true; + + do + { + // + // Update r_order, this can't run past the end as r must be non-zero at this point: + // + r_order = r.size() - 1; + // + // Calculate our best guess for how many times y divides into r: + // + limb_type guess; + if((prem[r_order] <= py[y_order]) && (r_order > 0)) + { + double_limb_type a, b, v; + a = (static_cast(prem[r_order]) << cpp_int_backend::limb_bits) | prem[r_order - 1]; + b = py[y_order]; + v = a / b; + if(v > cpp_int_backend::max_limb_value) + guess = 1; + else + { + guess = static_cast(v); + --r_order; + } + } + else if(r_order == 0) + { + guess = prem[0] / py[y_order]; + } + else + { + double_limb_type a, b, v; + a = (static_cast(prem[r_order]) << cpp_int_backend::limb_bits) | prem[r_order - 1]; + b = (y_order > 0) ? (static_cast(py[y_order]) << cpp_int_backend::limb_bits) | py[y_order - 1] : (static_cast(py[y_order]) << cpp_int_backend::limb_bits); + v = a / b; + guess = static_cast(v); + } + BOOST_ASSERT(guess); // If the guess ever gets to zero we go on forever.... 
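+      // At this point 'guess' is only an estimate, formed from the leading
+      // limb(s) of the remainder and divisor; it can overshoot the true
+      // quotient digit, which is why the remainder below is allowed to go
+      // negative and the next guess is then subtracted instead of added.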
+ // + // Update result: + // + limb_type shift = r_order - y_order; + if(r_neg) + { + if(pr[shift] > guess) + pr[shift] -= guess; + else + { + t.resize(shift + 1); + t.limbs()[shift] = guess; + for(unsigned i = 0; i < shift; ++i) + t.limbs()[i] = 0; + subtract(result, t); + } + } + else if(cpp_int_backend::max_limb_value - pr[shift] > guess) + pr[shift] += guess; + else + { + t.resize(shift + 1); + t.limbs()[shift] = guess; + for(unsigned i = 0; i < shift; ++i) + t.limbs()[i] = 0; + add(result, t); + } + // + // Calculate guess * y, we use a fused mutiply-shift O(N) for this + // rather than a full O(N^2) multiply: + // + double_limb_type carry = 0; + t.resize(y.size() + shift + 1); + typename cpp_int_backend::limb_pointer pt = t.limbs(); + for(unsigned i = 0; i < shift; ++i) + pt[i] = 0; + for(unsigned i = 0; i < y.size(); ++i) + { + carry += static_cast(py[i]) * static_cast(guess); + pt[i + shift] = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + } + if(carry) + { + pt[t.size() - 1] = static_cast(carry); + } + else + { + t.resize(t.size() - 1); + } + // + // Update r: + // + subtract(r, t); + if(r.isneg()) + { + r.negate(); + r_neg = !r_neg; + } + // + // First time through we need to strip any leading zero, otherwise + // the termination condition goes belly-up: + // + if(first_pass) + { + first_pass = false; + while(pr[result.size() - 1] == 0) + result.resize(result.size() - 1); + } + } + // Termination condition is really just a check that r > y, but with two common + // short-circuit cases handled first: + while((r_order > y_order) || (prem[r_order] > py[y_order]) || (r.compare_unsigned(y) > 0)); + + // + // We now just have to normalise the result: + // + if(r_neg) + { + // We have one too many in the result: + decrement(result); + r.negate(); + if(y.sign()) + subtract(r, y); + else + add(r, y); + } + + BOOST_ASSERT(r.compare_unsigned(y) < 0); // remainder must be less than the divisor or our code has failed +} + +template +void divide_unsigned_helper(cpp_int_backend& result, const cpp_int_backend& x, limb_type y, cpp_int_backend& r) +{ + if((&result == &x) || (&r == &x)) + { + cpp_int_backend t(x); + divide_unsigned_helper(result, t, y, r); + return; + } + + if(&result == &r) + { + cpp_int_backend rem; + divide_unsigned_helper(result, x, y, rem); + r = rem; + return; + } + + // As above, but simplified for integer divisor: + + using default_ops::subtract; + + if(y == 0) + { + BOOST_THROW_EXCEPTION(std::runtime_error("Integer Division by zero.")); + } + // + // Find the most significant word of numerator. 
+ // + limb_type r_order = x.size() - 1; + + // + // Set remainder and result to their initial values: + // + r = x; + r.sign(false); + typename cpp_int_backend::limb_pointer pr = r.limbs(); + + if((r_order == 0) && (*pr == 0)) + { + // All the limbs in x are zero, so is the result: + return; + } + // + // check for x < y, try to do this without actually having to + // do a full comparison: + // + if((r_order == 0) && (*pr < y)) + { + result = static_cast(0u); + return; + } + + // + // See if we can short-circuit long division, and use basic arithmetic instead: + // + if(r_order == 0) + { + result = *pr / y; + result.sign(x.sign()); + r.sign(x.sign()); + *pr %= y; + return; + } + else if(r_order == 1) + { + double_limb_type a; + a = (static_cast(pr[r_order]) << cpp_int_backend::limb_bits) | pr[0]; + result = a / y; + result.sign(x.sign()); + r = a % y; + r.sign(x.sign()); + return; + } + + result.resize(r_order + 1); + typename cpp_int_backend::limb_pointer pres = result.limbs(); + pres[r_order] = 0; // just in case we don't set the most significant limb below. + + do + { + // + // Calculate our best guess for how many times y divides into r: + // + if((pr[r_order] < y) && r_order) + { + double_limb_type a, b; + a = (static_cast(pr[r_order]) << cpp_int_backend::limb_bits) | pr[r_order - 1]; + b = a % y; + r.resize(r.size() - 1); + --r_order; + pr[r_order] = static_cast(b); + pres[r_order] = static_cast(a / y); + } + else + { + pres[r_order] = pr[r_order] / y; + pr[r_order] %= y; + } + } + // Termination condition is really just a check that r > y, but with two common + // short-circuit cases handled first: + while(r_order || (pr[r_order] > y)); + + if(pres[result.size() - 1] == 0) + result.resize(result.size() - 1); + + result.sign(x.sign()); + r.sign(x.sign()); + + BOOST_ASSERT(r.compare(y) < 0); // remainder must be less than the divisor or our code has failed +} + +template +inline void divide(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + cpp_int_backend r; + divide_unsigned_helper(result, a, b, r); + result.sign(a.sign() != b.sign()); +} +template +inline void divide(cpp_int_backend& result, const cpp_int_backend& a, limb_type& b) +{ + cpp_int_backend r; + divide_unsigned_helper(result, a, b, r); +} +template +inline void divide(cpp_int_backend& result, const cpp_int_backend& a, signed_limb_type& b) +{ + cpp_int_backend r; + divide_unsigned_helper(result, a, std::abs(b), r); + if(b < 0) + result.negate(); +} +template +inline void divide(cpp_int_backend& result, const cpp_int_backend& b) +{ + // There is no in place divide: + cpp_int_backend a(result); + divide(result, a, b); +} +template +inline void divide(cpp_int_backend& result, limb_type b) +{ + // There is no in place divide: + cpp_int_backend a(result); + divide(result, a, b); +} +template +inline void divide(cpp_int_backend& result, signed_limb_type b) +{ + // There is no in place divide: + cpp_int_backend a(result); + divide(result, a, b); +} +template +inline void modulus(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + cpp_int_backend r; + divide_unsigned_helper(r, a, b, result); + result.sign(a.sign()); +} +template +inline void modulus(cpp_int_backend& result, const cpp_int_backend& a, limb_type b) +{ + cpp_int_backend r; + divide_unsigned_helper(r, a, b, result); +} +template +inline void modulus(cpp_int_backend& result, const cpp_int_backend& a, signed_limb_type b) +{ + cpp_int_backend r; + divide_unsigned_helper(r, a, static_cast(std::abs(b)), result); 
+} +template +inline void modulus(cpp_int_backend& result, const cpp_int_backend& b) +{ + // There is no in place divide: + cpp_int_backend a(result); + modulus(result, a, b); +} +template +inline void modulus(cpp_int_backend& result, limb_type b) +{ + // There is no in place divide: + cpp_int_backend a(result); + modulus(result, a, b); +} +template +inline void modulus(cpp_int_backend& result, signed_limb_type b) +{ + // There is no in place divide: + cpp_int_backend a(result); + modulus(result, a, b); +} +template +void bitwise_op(cpp_int_backend& result, const cpp_int_backend& o, Op op) +{ + // + // There are 4 cases: + // * Both positive. + // * result negative, o positive. + // * o negative, result positive. + // * Both negative. + // + // When one arg is negative we convert to 2's complement form "on the fly", + // and then convert back to signed-magnitude form at the end. + // + // First figure out how big the result needs to be and set up some data: + // + unsigned rs = result.size(); + unsigned os = o.size(); + unsigned m, x; + minmax(rs, os, m, x); + result.resize(x); + typename cpp_int_backend::limb_pointer pr = result.limbs(); + typename cpp_int_backend::const_limb_pointer po = o.limbs(); + for(unsigned i = rs; i < x; ++i) + pr[i] = 0; + + limb_type next_limb = 0; + + if(!result.sign()) + { + if(!o.sign()) + { + for(unsigned i = 0; i < os; ++i) + pr[i] = op(pr[i], po[i]); + for(unsigned i = os; i < x; ++i) + pr[i] = op(pr[i], limb_type(0)); + } + else + { + // "o" is negative: + double_limb_type carry = 1; + for(unsigned i = 0; i < os; ++i) + { + carry += static_cast(~po[i]); + pr[i] = op(pr[i], static_cast(carry)); + carry >>= cpp_int_backend::limb_bits; + } + for(unsigned i = os; i < x; ++i) + { + carry += static_cast(~limb_type(0)); + pr[i] = op(pr[i], static_cast(carry)); + carry >>= cpp_int_backend::limb_bits; + } + // Set the overflow into the "extra" limb: + carry += static_cast(~limb_type(0)); + next_limb = op(limb_type(0), static_cast(carry)); + } + } + else + { + if(!o.sign()) + { + // "result" is negative: + double_limb_type carry = 1; + for(unsigned i = 0; i < os; ++i) + { + carry += static_cast(~pr[i]); + pr[i] = op(static_cast(carry), po[i]); + carry >>= cpp_int_backend::limb_bits; + } + for(unsigned i = os; i < x; ++i) + { + carry += static_cast(~pr[i]); + pr[i] = op(static_cast(carry), limb_type(0)); + carry >>= cpp_int_backend::limb_bits; + } + // Set the overflow into the "extra" limb: + carry += static_cast(~limb_type(0)); + next_limb = op(static_cast(carry), limb_type(0)); + } + else + { + // both are negative: + double_limb_type r_carry = 1; + double_limb_type o_carry = 1; + for(unsigned i = 0; i < os; ++i) + { + r_carry += static_cast(~pr[i]); + o_carry += static_cast(~po[i]); + pr[i] = op(static_cast(r_carry), static_cast(o_carry)); + r_carry >>= cpp_int_backend::limb_bits; + o_carry >>= cpp_int_backend::limb_bits; + } + for(unsigned i = os; i < x; ++i) + { + r_carry += static_cast(~pr[i]); + o_carry += static_cast(~limb_type(0)); + pr[i] = op(static_cast(r_carry), static_cast(o_carry)); + r_carry >>= cpp_int_backend::limb_bits; + o_carry >>= cpp_int_backend::limb_bits; + } + // Set the overflow into the "extra" limb: + r_carry += static_cast(~limb_type(0)); + o_carry += static_cast(~limb_type(0)); + next_limb = op(static_cast(r_carry), static_cast(o_carry)); + } + } + // + // See if the result is negative or not: + // + if(static_cast(next_limb) < 0) + { + result.sign(true); + double_limb_type carry = 1; + for(unsigned i = 0; i < x; ++i) + { + carry += 
static_cast(~pr[i]); + pr[i] = static_cast(carry); + carry >>= cpp_int_backend::limb_bits; + } + } + else + result.sign(false); + // + // Strip leading zeros: + // + while(result.limbs()[result.size()-1] == 0) + result.resize(result.size()-1); +} + +struct bit_and{ limb_type operator()(limb_type a, limb_type b)const{ return a & b; } }; +struct bit_or{ limb_type operator()(limb_type a, limb_type b)const{ return a | b; } }; +struct bit_xor{ limb_type operator()(limb_type a, limb_type b)const{ return a ^ b; } }; + +template +inline void bitwise_and(cpp_int_backend& result, const cpp_int_backend& o) +{ + bitwise_op(result, o, bit_and()); +} +#if 0 +template +inline void bitwise_and(cpp_int_backend& result, limb_type o) +{ + result.data()[cpp_int_backend::limb_count - 1] &= o; + for(typename cpp_int_backend::data_type::size_type i = 0; i < cpp_int_backend::limb_count - 1; ++i) + result.data()[i] = 0; +} +template +inline void bitwise_and(cpp_int_backend& result, signed_limb_type o) +{ + result.data()[cpp_int_backend::limb_count - 1] &= o; + limb_type mask = o < 0 ? cpp_int_backend::max_limb_value : 0; + for(typename cpp_int_backend::data_type::size_type i = 0; i < cpp_int_backend::limb_count - 1; ++i) + result.data()[i] &= mask; +} +template +inline void bitwise_or(cpp_int_backend& result, const cpp_int_backend& o) +{ + for(typename cpp_int_backend::data_type::size_type i = 0; i < cpp_int_backend::limb_count; ++i) + result.data()[i] |= o.data()[i]; +} +template +inline void bitwise_or(cpp_int_backend& result, limb_type o) +{ + result.data()[cpp_int_backend::limb_count - 1] |= o; +} +#endif +template +inline void bitwise_or(cpp_int_backend& result, const cpp_int_backend& o) +{ + bitwise_op(result, o, bit_or()); +} +template +inline void bitwise_xor(cpp_int_backend& result, const cpp_int_backend& o) +{ + bitwise_op(result, o, bit_xor()); +} +#if 0 +template +inline void bitwise_xor(cpp_int_backend& result, limb_type o) +{ + result.data()[cpp_int_backend::limb_count - 1] ^= o; +} +template +inline void bitwise_xor(cpp_int_backend& result, signed_limb_type o) +{ + result.data()[cpp_int_backend::limb_count - 1] ^= o; + limb_type mask = o < 0 ? cpp_int_backend::max_limb_value : 0; + for(typename cpp_int_backend::data_type::size_type i = 0; i < cpp_int_backend::limb_count - 1; ++i) + result.data()[i] ^= mask; +} +#endif +template +inline void complement(cpp_int_backend& result, const cpp_int_backend& o) +{ + // Increment and negate: + result = o; + increment(result); + result.negate(); +} +template +inline void left_shift(cpp_int_backend& result, double_limb_type s) +{ + if(!s) + return; + + limb_type offset = static_cast(s / cpp_int_backend::limb_bits); + limb_type shift = static_cast(s % cpp_int_backend::limb_bits); + + unsigned ors = result.size(); + if((ors == 1) && (!*result.limbs())) + return; // shifting zero yields zero. + unsigned rs = ors; + if(shift && (result.limbs()[rs - 1] >> (cpp_int_backend::limb_bits - shift))) + ++rs; // Most significant limb will overflow when shifted + rs += offset; + result.resize(rs); + typename cpp_int_backend::limb_pointer pr = result.limbs(); + + unsigned i = 0; + if(shift) + { + // This code only works when shift is non-zero, otherwise we invoke undefined behaviour! 
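+      // Each destination limb below is built from two adjacent source limbs:
+      // the lower bits of one shifted up, and the upper bits of the limb
+      // beneath it shifted down; the (limb_bits - shift) shift is only well
+      // defined because shift is known to be non-zero in this branch.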
+ i = 0; + if(rs > ors) + { + pr[rs - 1 - i] = pr[ors - 1 - i] >> (cpp_int_backend::limb_bits - shift); + --rs; + } + else + { + pr[rs - 1 - i] = pr[ors - 1 - i] << shift; + if(ors > 1) + pr[rs - 1 - i] |= pr[ors - 2 - i] >> (cpp_int_backend::limb_bits - shift); + ++i; + } + for(; ors > 1 + i; ++i) + { + pr[rs - 1 - i] = pr[ors - 1 - i] << shift; + pr[rs - 1 - i] |= pr[ors - 2 - i] >> (cpp_int_backend::limb_bits - shift); + } + if(ors >= 1 + i) + { + pr[rs - 1 - i] = pr[ors - 1 - i] << shift; + ++i; + } + for(; i < rs; ++i) + pr[rs - 1 - i] = 0; + } + else + { + for(; i < ors; ++i) + pr[rs - 1 - i] = pr[ors - 1 - i]; + for(; i < rs; ++i) + pr[rs - 1 - i] = 0; + } +} +template +inline void right_shift(cpp_int_backend& result, double_limb_type s) +{ + if(!s) + return; + + limb_type offset = static_cast(s / cpp_int_backend::limb_bits); + limb_type shift = static_cast(s % cpp_int_backend::limb_bits); + unsigned ors = result.size(); + unsigned rs = ors; + if(offset >= rs) + { + result = limb_type(0); + return; + } + rs -= offset; + typename cpp_int_backend::limb_pointer pr = result.limbs(); + if((pr[ors - 1] >> shift) == 0) + --rs; + if(rs == 0) + { + result = limb_type(0); + return; + } + unsigned i = 0; + if(shift) + { + // This code only works for non-zero shift, otherwise we invoke undefined behaviour! + for(; i + offset + 1 < ors; ++i) + { + pr[i] = pr[i + offset] >> shift; + pr[i] |= pr[i + offset + 1] << (cpp_int_backend::limb_bits - shift); + } + pr[i] = pr[i + offset] >> shift; + } + else + { + for(; i < rs; ++i) + pr[i] = pr[i + offset]; + } + result.resize(rs); +} + +template +inline Integer negate_integer(Integer i, const mpl::true_&) +{ + return -i; +} +template +inline Integer negate_integer(Integer i, const mpl::false_&) +{ + return ~--i; +} + +template +inline typename enable_if, void>::type convert_to(R* result, const cpp_int_backend& backend) +{ + *result = static_cast(backend.limbs()[0]); + unsigned shift = cpp_int_backend::limb_bits; + for(unsigned i = 1; i < backend.size(); ++i) + { + *result += static_cast(backend.limbs()[i]) << shift; + shift += cpp_int_backend::limb_bits; + if(shift > std::numeric_limits::digits) + break; + } + if(backend.sign()) + { + *result = negate_integer(*result, mpl::bool_::is_signed>()); + } +} + +template +inline typename enable_if, void>::type convert_to(R* result, const cpp_int_backend& backend) +{ + typename cpp_int_backend::const_limb_pointer p = backend.limbs(); + unsigned shift = cpp_int_backend::limb_bits; + *result = static_cast(*p); + for(unsigned i = 1; i < backend.size(); ++i) + { + *result += static_cast(std::ldexp(static_cast(p[i]), shift)); + shift += cpp_int_backend::limb_bits; + } + if(backend.sign()) + *result = -*result; +} + +template +inline bool is_zero(const cpp_int_backend& val) +{ + return (val.size() == 1) && (val.limbs()[0] == 0); +} +template +inline int get_sign(const cpp_int_backend& val) +{ + return is_zero(val) ? 0 : val.sign() ? 
-1 : 1; +} + +namespace detail{ +// +// Get the location of the least-significant-bit: +// +template +inline unsigned get_lsb(const cpp_int_backend& a) +{ + BOOST_ASSERT(get_sign(a) != 0); + + unsigned result = 0; + // + // Find the index of the least significant limb that is non-zero: + // + unsigned index = 0; + while(!a.limbs()[index] && (index < a.size())) + ++index; + // + // Find the index of the least significant bit within that limb: + // + limb_type l = a.limbs()[index]; + while(!(l & 1u)) + { + l >>= 1; + ++result; + } + + return result + index * cpp_int_backend::limb_bits; +} + +} + +template +inline void eval_gcd(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + int shift; + + cpp_int_backend u(a), v(b); + + int s = get_sign(u); + + /* GCD(0,x) := x */ + if(s < 0) + { + u.negate(); + } + else if(s == 0) + { + result = v; + return; + } + s = get_sign(v); + if(s < 0) + { + v.negate(); + } + else if(s == 0) + { + result = u; + return; + } + + /* Let shift := lg K, where K is the greatest power of 2 + dividing both u and v. */ + + unsigned us = detail::get_lsb(u); + unsigned vs = detail::get_lsb(v); + shift = (std::min)(us, vs); + right_shift(u, us); + right_shift(v, vs); + + do + { + /* Now u and v are both odd, so diff(u, v) is even. + Let u = min(u, v), v = diff(u, v)/2. */ + if(u.compare(v) > 0) + u.swap(v); + subtract(v, u); + // Termination condition tries not to do a full compare if possible: + if(!v.limbs()[0] && is_zero(v)) + break; + vs = detail::get_lsb(v); + right_shift(v, vs); + BOOST_ASSERT((v.limbs()[0] & 1)); + BOOST_ASSERT((u.limbs()[0] & 1)); + } + while(true); + + result = u; + left_shift(result, shift); +} + +template +inline void eval_lcm(cpp_int_backend& result, const cpp_int_backend& a, const cpp_int_backend& b) +{ + cpp_int_backend t; + eval_gcd(t, a, b); + + if(is_zero(t)) + { + result = 0; + } + else + { + divide(result, a, t); + multiply(result, b); + } + if(get_sign(result) < 0) + result.negate(); +} + +template +struct number_category > : public mpl::int_{}; + +typedef mp_number > cpp_int; +typedef rational_adapter > cpp_rational_backend; +typedef mp_number cpp_rational; + +}} // namespaces + + +namespace std{ + +template +class numeric_limits > > +{ + typedef boost::multiprecision::mp_number > number_type; + +public: + BOOST_STATIC_CONSTEXPR bool is_specialized = true; + // + // Largest and smallest numbers are bounded only by available memory, set + // to zero: + // + static number_type (min)() BOOST_MP_NOEXCEPT + { + return number_type(0); + } + static number_type (max)() BOOST_MP_NOEXCEPT + { + return number_type(0); + } + static number_type lowest() BOOST_MP_NOEXCEPT { return (min)(); } + BOOST_STATIC_CONSTEXPR int digits = 0; + BOOST_STATIC_CONSTEXPR int digits10 = 0; + BOOST_STATIC_CONSTEXPR int max_digits10 = 0; + BOOST_STATIC_CONSTEXPR bool is_signed = true; + BOOST_STATIC_CONSTEXPR bool is_integer = true; + BOOST_STATIC_CONSTEXPR bool is_exact = true; + BOOST_STATIC_CONSTEXPR int radix = 2; + static number_type epsilon() BOOST_MP_NOEXCEPT { return 0; } + static number_type round_error() BOOST_MP_NOEXCEPT { return 0; } + BOOST_STATIC_CONSTEXPR int min_exponent = 0; + BOOST_STATIC_CONSTEXPR int min_exponent10 = 0; + BOOST_STATIC_CONSTEXPR int max_exponent = 0; + BOOST_STATIC_CONSTEXPR int max_exponent10 = 0; + BOOST_STATIC_CONSTEXPR bool has_infinity = false; + BOOST_STATIC_CONSTEXPR bool has_quiet_NaN = false; + BOOST_STATIC_CONSTEXPR bool has_signaling_NaN = false; + BOOST_STATIC_CONSTEXPR float_denorm_style 
has_denorm = denorm_absent; + BOOST_STATIC_CONSTEXPR bool has_denorm_loss = false; + static number_type infinity() BOOST_MP_NOEXCEPT { return 0; } + static number_type quiet_NaN() BOOST_MP_NOEXCEPT { return 0; } + static number_type signaling_NaN() BOOST_MP_NOEXCEPT { return 0; } + static number_type denorm_min() BOOST_MP_NOEXCEPT { return 0; } + BOOST_STATIC_CONSTEXPR bool is_iec559 = false; + BOOST_STATIC_CONSTEXPR bool is_bounded = false; + BOOST_STATIC_CONSTEXPR bool is_modulo = false; + BOOST_STATIC_CONSTEXPR bool traps = false; + BOOST_STATIC_CONSTEXPR bool tinyness_before = false; + BOOST_STATIC_CONSTEXPR float_round_style round_style = round_toward_zero; +}; + +} + +#endif diff --git a/include/boost/multiprecision/fixed_int.hpp b/include/boost/multiprecision/fixed_int.hpp index a8b99318..5ea5be09 100644 --- a/include/boost/multiprecision/fixed_int.hpp +++ b/include/boost/multiprecision/fixed_int.hpp @@ -1559,7 +1559,7 @@ public: if(!init) { init = true; - val = Signed ? number_type(1) << Bits - 1 : 0; + val = Signed ? number_type(number_type(1) << (Bits - 1)) : 0; } return val; } diff --git a/performance/performance_test.cpp b/performance/performance_test.cpp index c98059f9..72ef2938 100644 --- a/performance/performance_test.cpp +++ b/performance/performance_test.cpp @@ -12,7 +12,7 @@ #if !defined(TEST_MPF) && !defined(TEST_MPZ) && \ !defined(TEST_CPP_DEC_FLOAT) && !defined(TEST_MPFR) && !defined(TEST_MPQ) \ && !defined(TEST_TOMMATH) && !defined(TEST_TOMMATH_BOOST_RATIONAL) && !defined(TEST_MPZ_BOOST_RATIONAL)\ - && !defined(TEST_FIXED_INT) + && !defined(TEST_FIXED_INT) && !defined(TEST_CPP_INT) # define TEST_MPF # define TEST_MPZ # define TEST_MPQ @@ -21,6 +21,7 @@ # define TEST_MPQ # define TEST_TOMMATH # define TEST_FIXED_INT +# define TEST_CPP_INT #ifdef _MSC_VER #pragma message("CAUTION!!: No backend type specified so testing everything.... 
this will take some time!!") @@ -48,6 +49,9 @@ #if defined(TEST_FIXED_INT) #include #endif +#if defined(TEST_CPP_INT) +#include +#endif #include #include @@ -577,6 +581,13 @@ int main() test("gmp_int", 512); test("gmp_int", 1024); #endif +#ifdef TEST_CPP_INT + test("cpp_int", 64); + test("cpp_int", 128); + test("cpp_int", 256); + test("cpp_int", 512); + test("cpp_int", 1024); +#endif #ifdef TEST_MPQ test("mpq_rational", 64); test("mpq_rational", 128); diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 index 9b7d1c23..47dc5082 100644 --- a/test/Jamfile.v2 +++ b/test/Jamfile.v2 @@ -142,6 +142,20 @@ run test_arithmetic.cpp TEST_FIXED_INT2 : test_arithmetic_fixed_int2 ; +run test_arithmetic.cpp + : # command line + : # input files + : # requirements + TEST_CPP_INT + : test_arithmetic_fixed_cpp_int ; + +run test_arithmetic.cpp + : # command line + : # input files + : # requirements + TEST_CPP_INT_BR + : test_arithmetic_cpp_int_br ; + run test_numeric_limits.cpp : # command line : # input files @@ -202,7 +216,6 @@ run test_numeric_limits.cpp : # input files : # requirements TEST_CPP_DEC_FLOAT - [ check-target-builds ../config//has_mpfr : : no ] : test_numeric_limits_cpp_dec_float ; run test_numeric_limits.cpp $(TOMMATH) @@ -213,6 +226,20 @@ run test_numeric_limits.cpp $(TOMMATH) [ check-target-builds ../config//has_tommath : : no ] : test_numeric_limits_tommath ; +run test_numeric_limits.cpp + : # command line + : # input files + : # requirements + TEST_CPP_INT + : test_numeric_limits_cpp_int ; + +run test_numeric_limits.cpp + : # command line + : # input files + : # requirements + TEST_FIXED_INT + : test_numeric_limits_fixed_int ; + run test_exp.cpp gmp : # command line : # input files @@ -603,6 +630,14 @@ run test_fixed_int.cpp gmp release # otherwise runtime is too slow!! ; +run test_cpp_int.cpp gmp + : # command line + : # input files + : # requirements + [ check-target-builds ../config//has_gmp : : no ] + release # otherwise runtime is too slow!! + ; + run test_rational_io.cpp $(TOMMATH) : # command line : # input files diff --git a/test/test_arithmetic.cpp b/test/test_arithmetic.cpp index 14d3da67..eb2a7de0 100644 --- a/test/test_arithmetic.cpp +++ b/test/test_arithmetic.cpp @@ -7,6 +7,10 @@ # define _SCL_SECURE_NO_WARNINGS #endif +#ifdef TEST_VLD +#include +#endif + #include #include #include @@ -14,7 +18,7 @@ #if !defined(TEST_MPF_50) && !defined(TEST_MPF) && !defined(TEST_BACKEND) && !defined(TEST_MPZ) && \ !defined(TEST_CPP_DEC_FLOAT) && !defined(TEST_MPFR) && !defined(TEST_MPFR_50) && !defined(TEST_MPQ) \ && !defined(TEST_TOMMATH) && !defined(TEST_TOMMATH_BOOST_RATIONAL) && !defined(TEST_MPZ_BOOST_RATIONAL)\ - && !defined(TEST_FIXED_INT1) && !defined(TEST_FIXED_INT2) + && !defined(TEST_FIXED_INT1) && !defined(TEST_FIXED_INT2) && !defined(TEST_CPP_INT) && !defined(TEST_CPP_INT_BR) # define TEST_MPF_50 # define TEST_MPF # define TEST_BACKEND @@ -26,6 +30,8 @@ # define TEST_TOMMATH # define TEST_FIXED_INT1 # define TEST_FIXED_INT2 +# define TEST_CPP_INT +# define TEST_CPP_INT_BR #ifdef _MSC_VER #pragma message("CAUTION!!: No backend type specified so testing everything.... 
this will take some time!!") @@ -56,6 +62,9 @@ #if defined(TEST_FIXED_INT1) || defined(TEST_FIXED_INT2) #include #endif +#if defined(TEST_CPP_INT) || defined(TEST_CPP_INT_BR) +#include +#endif #if defined(TEST_TOMMATH_BOOST_RATIONAL) || defined(TEST_MPZ_BOOST_RATIONAL) #include @@ -364,7 +373,6 @@ void test_integer_ops(const boost::mpl::int_(); test(); test > >(); +#endif +#ifdef TEST_CPP_INT + test(); +#endif +#ifdef TEST_CPP_INT_BR + test(); #endif return boost::report_errors(); } diff --git a/test/test_cpp_int.cpp b/test/test_cpp_int.cpp new file mode 100644 index 00000000..ee6bcde2 --- /dev/null +++ b/test/test_cpp_int.cpp @@ -0,0 +1,179 @@ +/////////////////////////////////////////////////////////////// +// Copyright 2012 John Maddock. Distributed under the Boost +// Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_ + +// +// Compare arithmetic results using fixed_int to GMP results. +// + +#ifdef _MSC_VER +# define _SCL_SECURE_NO_WARNINGS +#endif + +#include +#include +#include +#include +#include "test.hpp" + +template +T generate_random(unsigned bits_wanted) +{ + static boost::random::mt19937 gen; + typedef boost::random::mt19937::result_type random_type; + + T max_val; + unsigned digits; + if(std::numeric_limits::is_bounded && (bits_wanted == std::numeric_limits::digits)) + { + max_val = (std::numeric_limits::max)(); + digits = std::numeric_limits::digits; + } + else + { + max_val = T(1) << bits_wanted; + digits = bits_wanted; + } + + unsigned bits_per_r_val = std::numeric_limits::digits - 1; + while((random_type(1) << bits_per_r_val) > (gen.max)()) --bits_per_r_val; + + unsigned terms_needed = digits / bits_per_r_val + 1; + + T val = 0; + for(unsigned i = 0; i < terms_needed; ++i) + { + val *= (gen.max)(); + val += gen(); + } + val %= max_val; + return val; +} + +int main() +{ + using namespace boost::multiprecision; + typedef cpp_int packed_type; + unsigned last_error_count = 0; + for(int i = 0; i < 1000; ++i) + { + mpz_int a = generate_random(1000); + mpz_int b = generate_random(512); + mpz_int c = generate_random(256); + mpz_int d = generate_random(32); + + int si = d.convert_to(); + + packed_type a1 = a.str(); + packed_type b1 = b.str(); + packed_type c1 = c.str(); + packed_type d1 = d.str(); + + BOOST_CHECK_EQUAL(a.str(), a1.str()); + BOOST_CHECK_EQUAL(b.str(), b1.str()); + BOOST_CHECK_EQUAL(c.str(), c1.str()); + BOOST_CHECK_EQUAL(d.str(), d1.str()); + BOOST_CHECK_EQUAL(mpz_int(a+b).str(), packed_type(a1 + b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a-b).str(), packed_type(a1 - b1).str()); + BOOST_CHECK_EQUAL(mpz_int(mpz_int(-a)+b).str(), packed_type(packed_type(-a1) + b1).str()); + BOOST_CHECK_EQUAL(mpz_int(mpz_int(-a)-b).str(), packed_type(packed_type(-a1) - b1).str()); + BOOST_CHECK_EQUAL(mpz_int(c * d).str(), packed_type(c1 * d1).str()); + BOOST_CHECK_EQUAL(mpz_int(c * -d).str(), packed_type(c1 * -d1).str()); + BOOST_CHECK_EQUAL(mpz_int(-c * d).str(), packed_type(-c1 * d1).str()); + BOOST_CHECK_EQUAL(mpz_int(b * c).str(), packed_type(b1 * c1).str()); + BOOST_CHECK_EQUAL(mpz_int(a / b).str(), packed_type(a1 / b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a / -b).str(), packed_type(a1 / -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a / b).str(), packed_type(-a1 / b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a / d).str(), packed_type(a1 / d1).str()); + BOOST_CHECK_EQUAL(mpz_int(a % b).str(), packed_type(a1 % b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a % -b).str(), packed_type(a1 % -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a % 
b).str(), packed_type(-a1 % b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a % d).str(), packed_type(a1 % d1).str()); + // bitwise ops: + BOOST_CHECK_EQUAL(mpz_int(a|b).str(), packed_type(a1 | b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a|b).str(), packed_type(-a1 | b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a|-b).str(), packed_type(a1 | -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a|-b).str(), packed_type(-a1 | -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a&b).str(), packed_type(a1 & b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a&b).str(), packed_type(-a1 & b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a&-b).str(), packed_type(a1 & -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a&-b).str(), packed_type(-a1 & -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a^b).str(), packed_type(a1 ^ b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a^b).str(), packed_type(-a1 ^ b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a^-b).str(), packed_type(a1 ^ -b1).str()); + BOOST_CHECK_EQUAL(mpz_int(-a^-b).str(), packed_type(-a1 ^ -b1).str()); + // Now check operations involving integers: + BOOST_CHECK_EQUAL(mpz_int(a + si).str(), packed_type(a1 + si).str()); + BOOST_CHECK_EQUAL(mpz_int(a + -si).str(), packed_type(a1 + -si).str()); + BOOST_CHECK_EQUAL(mpz_int(-a + si).str(), packed_type(-a1 + si).str()); + BOOST_CHECK_EQUAL(mpz_int(si + a).str(), packed_type(si + a1).str()); + BOOST_CHECK_EQUAL(mpz_int(a - si).str(), packed_type(a1 - si).str()); + BOOST_CHECK_EQUAL(mpz_int(a - -si).str(), packed_type(a1 - -si).str()); + BOOST_CHECK_EQUAL(mpz_int(-a - si).str(), packed_type(-a1 - si).str()); + BOOST_CHECK_EQUAL(mpz_int(si - a).str(), packed_type(si - a1).str()); + BOOST_CHECK_EQUAL(mpz_int(b * si).str(), packed_type(b1 * si).str()); + BOOST_CHECK_EQUAL(mpz_int(b * -si).str(), packed_type(b1 * -si).str()); + BOOST_CHECK_EQUAL(mpz_int(-b * si).str(), packed_type(-b1 * si).str()); + BOOST_CHECK_EQUAL(mpz_int(si * b).str(), packed_type(si * b1).str()); + BOOST_CHECK_EQUAL(mpz_int(a / si).str(), packed_type(a1 / si).str()); + BOOST_CHECK_EQUAL(mpz_int(a / -si).str(), packed_type(a1 / -si).str()); + BOOST_CHECK_EQUAL(mpz_int(-a / si).str(), packed_type(-a1 / si).str()); + BOOST_CHECK_EQUAL(mpz_int(a % si).str(), packed_type(a1 % si).str()); + BOOST_CHECK_EQUAL(mpz_int(a % -si).str(), packed_type(a1 % -si).str()); + BOOST_CHECK_EQUAL(mpz_int(-a % si).str(), packed_type(-a1 % si).str()); + BOOST_CHECK_EQUAL(mpz_int(a|si).str(), packed_type(a1 | si).str()); + BOOST_CHECK_EQUAL(mpz_int(a&si).str(), packed_type(a1 & si).str()); + BOOST_CHECK_EQUAL(mpz_int(a^si).str(), packed_type(a1 ^ si).str()); + BOOST_CHECK_EQUAL(mpz_int(si|a).str(), packed_type(si|a1).str()); + BOOST_CHECK_EQUAL(mpz_int(si&a).str(), packed_type(si&a1).str()); + BOOST_CHECK_EQUAL(mpz_int(si^a).str(), packed_type(si^a1).str()); + BOOST_CHECK_EQUAL(mpz_int(gcd(a, b)).str(), packed_type(gcd(a1, b1)).str()); + BOOST_CHECK_EQUAL(mpz_int(lcm(c, d)).str(), packed_type(lcm(c1, d1)).str()); + BOOST_CHECK_EQUAL(mpz_int(gcd(-a, b)).str(), packed_type(gcd(-a1, b1)).str()); + BOOST_CHECK_EQUAL(mpz_int(lcm(-c, d)).str(), packed_type(lcm(-c1, d1)).str()); + BOOST_CHECK_EQUAL(mpz_int(gcd(-a, -b)).str(), packed_type(gcd(-a1, -b1)).str()); + BOOST_CHECK_EQUAL(mpz_int(lcm(-c, -d)).str(), packed_type(lcm(-c1, -d1)).str()); + BOOST_CHECK_EQUAL(mpz_int(gcd(a, -b)).str(), packed_type(gcd(a1, -b1)).str()); + BOOST_CHECK_EQUAL(mpz_int(lcm(c, -d)).str(), packed_type(lcm(c1, -d1)).str()); + if(last_error_count != boost::detail::test_errors()) + { + last_error_count = boost::detail::test_errors(); + std::cout << std::hex << 
std::showbase; + + std::cout << "a = " << a << std::endl; + std::cout << "a1 = " << a1 << std::endl; + std::cout << "b = " << b << std::endl; + std::cout << "b1 = " << b1 << std::endl; + std::cout << "c = " << c << std::endl; + std::cout << "c1 = " << c1 << std::endl; + std::cout << "d = " << d << std::endl; + std::cout << "d1 = " << d1 << std::endl; + std::cout << "a + b = " << a+b << std::endl; + std::cout << "a1 + b1 = " << a1+b1 << std::endl; + std::cout << std::dec; + std::cout << "a - b = " << a-b << std::endl; + std::cout << "a1 - b1 = " << a1-b1 << std::endl; + std::cout << "-a + b = " << mpz_int(-a)+b << std::endl; + std::cout << "-a1 + b1 = " << packed_type(-a1)+b1 << std::endl; + std::cout << "-a - b = " << mpz_int(-a)-b << std::endl; + std::cout << "-a1 - b1 = " << packed_type(-a1)-b1 << std::endl; + std::cout << "c*d = " << c*d << std::endl; + std::cout << "c1*d1 = " << c1*d1 << std::endl; + std::cout << "b*c = " << b*c << std::endl; + std::cout << "b1*c1 = " << b1*c1 << std::endl; + std::cout << "a/b = " << a/b << std::endl; + std::cout << "a1/b1 = " << a1/b1 << std::endl; + std::cout << "a/d = " << a/d << std::endl; + std::cout << "a1/d1 = " << a1/d1 << std::endl; + std::cout << "a%b = " << a%b << std::endl; + std::cout << "a1%b1 = " << a1%b1 << std::endl; + std::cout << "a%d = " << a%d << std::endl; + std::cout << "a1%d1 = " << a1%d1 << std::endl; + } + } + return boost::report_errors(); +} + + + diff --git a/test/test_numeric_limits.cpp b/test/test_numeric_limits.cpp index 6cc53374..ddec4db1 100644 --- a/test/test_numeric_limits.cpp +++ b/test/test_numeric_limits.cpp @@ -9,7 +9,9 @@ #include "test.hpp" -#if !defined(TEST_MPF_50) && !defined(TEST_MPF) && !defined(TEST_BACKEND) && !defined(TEST_MPZ) && !defined(TEST_CPP_DEC_FLOAT) && !defined(TEST_MPFR) && !defined(TEST_MPFR_50) && !defined(TEST_MPQ) && !defined(TEST_TOMMATH) +#if !defined(TEST_MPF_50) && !defined(TEST_MPF) && !defined(TEST_BACKEND) && !defined(TEST_MPZ) && \ + !defined(TEST_CPP_DEC_FLOAT) && !defined(TEST_MPFR) && !defined(TEST_MPFR_50) && !defined(TEST_MPQ) && \ + !defined(TEST_TOMMATH) && !defined(TEST_CPP_INT) && !defined(TEST_FIXED_INT) # define TEST_MPF_50 # define TEST_MPF # define TEST_BACKEND @@ -19,6 +21,8 @@ # define TEST_CPP_DEC_FLOAT # define TEST_MPQ # define TEST_TOMMATH +# define TEST_FIXED_INT +# define TEST_CPP_INT #ifdef _MSC_VER #pragma message("CAUTION!!: No backend type specified so testing everything.... this will take some time!!") @@ -44,6 +48,12 @@ #ifdef TEST_TOMMATH #include #endif +#ifdef TEST_FIXED_INT +#include +#endif +#ifdef TEST_CPP_INT +#include +#endif #define PRINT(x)\ std::cout << BOOST_STRINGIZE(x) << " = " << std::numeric_limits::x << std::endl; @@ -206,6 +216,15 @@ int main() #endif #ifdef TEST_TOMMATH test(); +#endif +#ifdef TEST_FIXED_INT + test(); + test(); + test(); + test(); +#endif +#ifdef TEST_CPP_INT + test(); #endif return boost::report_errors(); }
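
As a quick illustration of the type this patch introduces (a hypothetical standalone snippet, assuming only the boost::multiprecision::cpp_int typedef and the header added above; it is not part of the patch or its test suite):

#include <boost/multiprecision/cpp_int.hpp>
#include <iostream>

int main()
{
   using boost::multiprecision::cpp_int;

   cpp_int a("0x123456789ABCDEF0123456789ABCDEF0"); // hex string construction
   cpp_int b = 1000000007;

   std::cout << a * b << std::endl;                     // arbitrary precision product
   std::cout << a % b << std::endl;                     // remainder
   std::cout << std::hex << std::showbase << a << "\n"; // hex output with prefix
   return 0;
}

Storage grows on demand through the backend's allocator, so unlike the fixed_int types exercised elsewhere in the test Jamfile there is no compile-time bit count to choose.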