Bulk macro replacement

This commit is contained in:
Matt Borland
2023-05-30 15:45:22 +02:00
parent e436cb42e2
commit 13ad334aa4
11 changed files with 172 additions and 213 deletions

View File

@@ -160,46 +160,4 @@ static_assert((BOOST_CHARCONV_ENDIAN_BIG_BYTE || BOOST_CHARCONV_ENDIAN_LITTLE_BY
# define BOOST_CHARCONV_NO_CONSTEXPR_DETECTION
#endif
#if !defined(BOOST_CHARCONV_NO_CONSTEXPR_DETECTION) && __cpp_lib_constexpr_algorithms >= 201806L
# define BOOST_CHARCONV_CXX20_CONSTEXPR constexpr
#else
# define BOOST_CHARCONV_CXX20_CONSTEXPR
#endif
#if (defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) \
|| defined(__amd64) || defined(__aarch64__) || defined(_M_ARM64) \
|| defined(__MINGW64__) \
|| defined(__s390x__) \
|| (defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__)) )
#define BOOST_CHARCONV_FASTFLOAT_64BIT 1
#elif (defined(__i386) || defined(__i386__) || defined(_M_IX86) \
|| defined(__arm__) || defined(_M_ARM) || defined(__ppc__) \
|| defined(__MINGW32__) || defined(__EMSCRIPTEN__))
#define BOOST_CHARCONV_FASTFLOAT_32BIT 1
#else
// Need to check incrementally, since SIZE_MAX is a size_t, avoid overflow.
// We can never tell the register width, but the SIZE_MAX is a good approximation.
// UINTPTR_MAX and INTPTR_MAX are optional, so avoid them for max portability.
#if SIZE_MAX == 0xffff
#error Unknown platform (16-bit, unsupported)
#elif SIZE_MAX == 0xffffffff
#define BOOST_CHARCONV_FASTFLOAT_32BIT 1
#elif SIZE_MAX == 0xffffffffffffffff
#define BOOST_CHARCONV_FASTFLOAT_64BIT 1
#else
#error Unknown platform (not 32-bit, not 64-bit?)
#endif
#endif
#ifdef __has_include
# if __has_include(<bit>)
# if __cplusplus >= 202002L || BOOST_MSVC >= 202002L
# include <bit>
# if __cpp_lib_bit_cast >= 201806L
# define BOOST_CHARCONV_HAS_STD_BITCAST
# endif
# endif
# endif
#endif
#endif // BOOST_CHARCONV_DETAIL_CONFIG_HPP

View File

@@ -34,7 +34,7 @@ fastfloat_really_inline constexpr std::uint64_t byteswap(std::uint64_t val) {
| (val & 0x00000000000000FF) << 56;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
std::uint64_t read_u64(const char *chars) {
if (cpp20_and_in_constexpr()) {
std::uint64_t val = 0;
@@ -46,14 +46,14 @@ std::uint64_t read_u64(const char *chars) {
}
std::uint64_t val;
::memcpy(&val, chars, sizeof(std::uint64_t));
#if FASTFLOAT_IS_BIG_ENDIAN == 1
#if BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN == 1
// Need to read as-if the number was in little-endian order.
val = byteswap(val);
#endif
return val;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void write_u64(std::uint8_t *chars, std::uint64_t val) {
if (cpp20_and_in_constexpr()) {
for(int i = 0; i < 8; ++i) {
@@ -63,7 +63,7 @@ void write_u64(std::uint8_t *chars, std::uint64_t val) {
}
return;
}
#if FASTFLOAT_IS_BIG_ENDIAN == 1
#if BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN == 1
// Need to read as-if the number was in little-endian order.
val = byteswap(val);
#endif
@@ -71,7 +71,7 @@ void write_u64(std::uint8_t *chars, std::uint64_t val) {
}
// credit @aqrit
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
std::uint32_t parse_eight_digits_unrolled(std::uint64_t val) {
const std::uint64_t mask = 0x000000FF000000FF;
const std::uint64_t mul1 = 0x000F424000000064; // 100 + (1000000ULL << 32)
@@ -92,7 +92,7 @@ std::uint32_t parse_eight_digits_unrolled(const char32_t *) noexcept {
return 0;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
std::uint32_t parse_eight_digits_unrolled(const char *chars) noexcept {
return parse_eight_digits_unrolled(read_u64(chars));
}
@@ -113,7 +113,7 @@ bool is_made_of_eight_digits_fast(const char32_t *) noexcept {
return false;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool is_made_of_eight_digits_fast(const char *chars) noexcept {
return is_made_of_eight_digits_fast(read_u64(chars));
}
@@ -135,7 +135,7 @@ using parsed_number_string = parsed_number_string_t<char>;
// Assuming that you use no more than 19 digits, this will
// parse an ASCII string.
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
parsed_number_string_t<UC> parse_number_string(UC const *p, UC const * pend, parse_options_t<UC> options) noexcept {
chars_format const fmt = options.format;
UC const decimal_point = options.decimal_point;
@@ -144,7 +144,7 @@ parsed_number_string_t<UC> parse_number_string(UC const *p, UC const * pend, par
answer.valid = false;
answer.too_many_digits = false;
answer.negative = (*p == UC('-'));
#ifdef FASTFLOAT_ALLOWS_LEADING_PLUS // disabled by default
#ifdef BOOST_CHARCONV_FASTFLOAT_ALLOWS_LEADING_PLUS // disabled by default
if ((*p == UC('-')) || (*p == UC('+')))
#else
if (*p == UC('-')) // C++17 20.19.3.(7.1) explicitly forbids '+' sign here

View File

@@ -22,12 +22,12 @@ namespace boost { namespace charconv { namespace detail { namespace fast_float {
// architecture except for sparc, which emulates 128-bit multiplication.
// we might have platforms where `CHAR_BIT` is not 8, so let's avoid
// doing `8 * sizeof(limb)`.
#if defined(FASTFLOAT_64BIT) && !defined(__sparc)
#define FASTFLOAT_64BIT_LIMB 1
#if defined(BOOST_CHARCONV_FASTFLOAT_64BIT) && !defined(__sparc)
#define BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB 1
typedef uint64_t limb;
constexpr size_t limb_bits = 64;
#else
#define FASTFLOAT_32BIT_LIMB
#define BOOST_CHARCONV_FASTFLOAT_32BIT_LIMB
typedef uint32_t limb;
constexpr size_t limb_bits = 32;
#endif
@@ -56,27 +56,27 @@ struct stackvec {
stackvec &operator=(stackvec &&other) = delete;
// create stack vector from existing limb span.
FASTFLOAT_CONSTEXPR20 stackvec(limb_span s) {
FASTFLOAT_ASSERT(try_extend(s));
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 stackvec(limb_span s) {
BOOST_CHARCONV_FASTFLOAT_ASSERT(try_extend(s));
}
FASTFLOAT_CONSTEXPR14 limb& operator[](size_t index) noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 limb& operator[](size_t index) noexcept {
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(index < length);
return data[index];
}
FASTFLOAT_CONSTEXPR14 const limb& operator[](size_t index) const noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 const limb& operator[](size_t index) const noexcept {
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(index < length);
return data[index];
}
// index from the end of the container
FASTFLOAT_CONSTEXPR14 const limb& rindex(size_t index) const noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 const limb& rindex(size_t index) const noexcept {
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(index < length);
size_t rindex = length - index - 1;
return data[rindex];
}
// set the length, without bounds checking.
FASTFLOAT_CONSTEXPR14 void set_len(size_t len) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 void set_len(size_t len) noexcept {
length = uint16_t(len);
}
constexpr size_t len() const noexcept {
@@ -89,12 +89,12 @@ struct stackvec {
return size;
}
// append item to vector, without bounds checking
FASTFLOAT_CONSTEXPR14 void push_unchecked(limb value) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 void push_unchecked(limb value) noexcept {
data[length] = value;
length++;
}
// append item to vector, returning if item was added
FASTFLOAT_CONSTEXPR14 bool try_push(limb value) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 bool try_push(limb value) noexcept {
if (len() < capacity()) {
push_unchecked(value);
return true;
@@ -103,13 +103,13 @@ struct stackvec {
}
}
// add items to the vector, from a span, without bounds checking
FASTFLOAT_CONSTEXPR20 void extend_unchecked(limb_span s) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 void extend_unchecked(limb_span s) noexcept {
limb* ptr = data + length;
std::copy_n(s.ptr, s.len(), ptr);
set_len(len() + s.len());
}
// try to add items to the vector, returning if items were added
FASTFLOAT_CONSTEXPR20 bool try_extend(limb_span s) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool try_extend(limb_span s) noexcept {
if (len() + s.len() <= capacity()) {
extend_unchecked(s);
return true;
@@ -120,7 +120,7 @@ struct stackvec {
// resize the vector, without bounds checking
// if the new size is longer than the vector, assign value to each
// appended item.
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void resize_unchecked(size_t new_len, limb value) noexcept {
if (new_len > len()) {
size_t count = new_len - len();
@@ -133,7 +133,7 @@ struct stackvec {
}
}
// try to resize the vector, returning if the vector was resized.
FASTFLOAT_CONSTEXPR20 bool try_resize(size_t new_len, limb value) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool try_resize(size_t new_len, limb value) noexcept {
if (new_len > capacity()) {
return false;
} else {
@@ -144,7 +144,7 @@ struct stackvec {
// check if any limbs are non-zero after the given index.
// this needs to be done in reverse order, since the index
// is relative to the most significant limbs.
FASTFLOAT_CONSTEXPR14 bool nonzero(size_t index) const noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 bool nonzero(size_t index) const noexcept {
while (index < len()) {
if (rindex(index) != 0) {
return true;
@@ -154,27 +154,27 @@ struct stackvec {
return false;
}
// normalize the big integer, so most-significant zero limbs are removed.
FASTFLOAT_CONSTEXPR14 void normalize() noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 void normalize() noexcept {
while (len() > 0 && rindex(0) == 0) {
length--;
}
}
};
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
uint64_t empty_hi64(bool& truncated) noexcept {
truncated = false;
return 0;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
uint64_t uint64_hi64(uint64_t r0, bool& truncated) noexcept {
truncated = false;
int shl = leading_zeroes(r0);
return r0 << shl;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
uint64_t uint64_hi64(uint64_t r0, uint64_t r1, bool& truncated) noexcept {
int shl = leading_zeroes(r0);
if (shl == 0) {
@@ -187,19 +187,19 @@ uint64_t uint64_hi64(uint64_t r0, uint64_t r1, bool& truncated) noexcept {
}
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
uint64_t uint32_hi64(uint32_t r0, bool& truncated) noexcept {
return uint64_hi64(r0, truncated);
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
uint64_t uint32_hi64(uint32_t r0, uint32_t r1, bool& truncated) noexcept {
uint64_t x0 = r0;
uint64_t x1 = r1;
return uint64_hi64((x0 << 32) | x1, truncated);
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
uint64_t uint32_hi64(uint32_t r0, uint32_t r1, uint32_t r2, bool& truncated) noexcept {
uint64_t x0 = r0;
uint64_t x1 = r1;
@@ -211,7 +211,7 @@ uint64_t uint32_hi64(uint32_t r0, uint32_t r1, uint32_t r2, bool& truncated) noe
// we want an efficient operation. for msvc, where
// we don't have built-in intrinsics, this is still
// pretty fast.
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
limb scalar_add(limb x, limb y, bool& overflow) noexcept {
limb z;
// gcc and clang
@@ -231,9 +231,9 @@ limb scalar_add(limb x, limb y, bool& overflow) noexcept {
}
// multiply two small integers, getting both the high and low bits.
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
limb scalar_mul(limb x, limb y, limb& carry) noexcept {
#ifdef FASTFLOAT_64BIT_LIMB
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
#if defined(__SIZEOF_INT128__)
// GCC and clang both define it as an extension.
__uint128_t z = __uint128_t(x) * __uint128_t(y) + __uint128_t(carry);
@@ -259,7 +259,7 @@ limb scalar_mul(limb x, limb y, limb& carry) noexcept {
// add scalar value to bigint starting from offset.
// used in grade school multiplication
template <uint16_t size>
inline FASTFLOAT_CONSTEXPR20
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool small_add_from(stackvec<size>& vec, limb y, size_t start) noexcept {
size_t index = start;
limb carry = y;
@@ -270,28 +270,28 @@ bool small_add_from(stackvec<size>& vec, limb y, size_t start) noexcept {
index += 1;
}
if (carry != 0) {
FASTFLOAT_TRY(vec.try_push(carry));
BOOST_CHARCONV_FASTFLOAT_TRY(vec.try_push(carry));
}
return true;
}
// add scalar value to bigint.
template <uint16_t size>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool small_add(stackvec<size>& vec, limb y) noexcept {
return small_add_from(vec, y, 0);
}
// multiply bigint by scalar value.
template <uint16_t size>
inline FASTFLOAT_CONSTEXPR20
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool small_mul(stackvec<size>& vec, limb y) noexcept {
limb carry = 0;
for (size_t index = 0; index < vec.len(); index++) {
vec[index] = scalar_mul(vec[index], y, carry);
}
if (carry != 0) {
FASTFLOAT_TRY(vec.try_push(carry));
BOOST_CHARCONV_FASTFLOAT_TRY(vec.try_push(carry));
}
return true;
}
@@ -299,12 +299,12 @@ bool small_mul(stackvec<size>& vec, limb y) noexcept {
// add bigint to bigint starting from index.
// used in grade school multiplication
template <uint16_t size>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool large_add_from(stackvec<size>& x, limb_span y, size_t start) noexcept {
// the effective x buffer is from `xstart..x.len()`, so exit early
// if we can't get that current range.
if (x.len() < start || y.len() > x.len() - start) {
FASTFLOAT_TRY(x.try_resize(y.len() + start, 0));
BOOST_CHARCONV_FASTFLOAT_TRY(x.try_resize(y.len() + start, 0));
}
bool carry = false;
@@ -323,21 +323,21 @@ bool large_add_from(stackvec<size>& x, limb_span y, size_t start) noexcept {
// handle overflow
if (carry) {
FASTFLOAT_TRY(small_add_from(x, 1, y.len() + start));
BOOST_CHARCONV_FASTFLOAT_TRY(small_add_from(x, 1, y.len() + start));
}
return true;
}
// add bigint to bigint.
template <uint16_t size>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool large_add_from(stackvec<size>& x, limb_span y) noexcept {
return large_add_from(x, y, 0);
}
// grade-school multiplication algorithm
template <uint16_t size>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool long_mul(stackvec<size>& x, limb_span y) noexcept {
limb_span xs = limb_span(x.data, x.len());
stackvec<size> z(xs);
@@ -345,17 +345,17 @@ bool long_mul(stackvec<size>& x, limb_span y) noexcept {
if (y.len() != 0) {
limb y0 = y[0];
FASTFLOAT_TRY(small_mul(x, y0));
BOOST_CHARCONV_FASTFLOAT_TRY(small_mul(x, y0));
for (size_t index = 1; index < y.len(); index++) {
limb yi = y[index];
stackvec<size> zi;
if (yi != 0) {
// re-use the same buffer throughout
zi.set_len(0);
FASTFLOAT_TRY(zi.try_extend(zs));
FASTFLOAT_TRY(small_mul(zi, yi));
BOOST_CHARCONV_FASTFLOAT_TRY(zi.try_extend(zs));
BOOST_CHARCONV_FASTFLOAT_TRY(small_mul(zi, yi));
limb_span zis = limb_span(zi.data, zi.len());
FASTFLOAT_TRY(large_add_from(x, zis, index));
BOOST_CHARCONV_FASTFLOAT_TRY(large_add_from(x, zis, index));
}
}
}
@@ -366,12 +366,12 @@ bool long_mul(stackvec<size>& x, limb_span y) noexcept {
// grade-school multiplication algorithm
template <uint16_t size>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool large_mul(stackvec<size>& x, limb_span y) noexcept {
if (y.len() == 1) {
FASTFLOAT_TRY(small_mul(x, y[0]));
BOOST_CHARCONV_FASTFLOAT_TRY(small_mul(x, y[0]));
} else {
FASTFLOAT_TRY(long_mul(x, y));
BOOST_CHARCONV_FASTFLOAT_TRY(long_mul(x, y));
}
return true;
}
@@ -387,7 +387,7 @@ struct pow5_tables {
2384185791015625UL, 11920928955078125UL, 59604644775390625UL,
298023223876953125UL, 1490116119384765625UL, 7450580596923828125UL,
};
#ifdef FASTFLOAT_64BIT_LIMB
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
constexpr static limb large_power_of_5[] = {
1414648277510068013UL, 9180637584431281687UL, 4539964771860779200UL,
10482974169319127550UL, 198276706040285095UL};
@@ -415,14 +415,14 @@ struct bigint : pow5_tables<> {
// storage of the limbs, in little-endian order.
stackvec<bigint_limbs> vec;
FASTFLOAT_CONSTEXPR20 bigint(): vec() {}
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bigint(): vec() {}
bigint(const bigint &) = delete;
bigint &operator=(const bigint &) = delete;
bigint(bigint &&) = delete;
bigint &operator=(bigint &&other) = delete;
FASTFLOAT_CONSTEXPR20 bigint(uint64_t value): vec() {
#ifdef FASTFLOAT_64BIT_LIMB
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bigint(uint64_t value): vec() {
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
vec.push_unchecked(value);
#else
vec.push_unchecked(uint32_t(value));
@@ -433,8 +433,8 @@ struct bigint : pow5_tables<> {
// get the high 64 bits from the vector, and if bits were truncated.
// this is to get the significant digits for the float.
FASTFLOAT_CONSTEXPR20 uint64_t hi64(bool& truncated) const noexcept {
#ifdef FASTFLOAT_64BIT_LIMB
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 uint64_t hi64(bool& truncated) const noexcept {
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
if (vec.len() == 0) {
return empty_hi64(truncated);
} else if (vec.len() == 1) {
@@ -465,7 +465,7 @@ struct bigint : pow5_tables<> {
// positive, this is larger, otherwise they are equal.
// the limbs are stored in little-endian order, so we
// must compare the limbs in reverse order.
FASTFLOAT_CONSTEXPR20 int compare(const bigint& other) const noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 int compare(const bigint& other) const noexcept {
if (vec.len() > other.vec.len()) {
return 1;
} else if (vec.len() < other.vec.len()) {
@@ -486,14 +486,14 @@ struct bigint : pow5_tables<> {
// shift left each limb n bits, carrying over to the new limb
// returns true if we were able to shift all the digits.
FASTFLOAT_CONSTEXPR20 bool shl_bits(size_t n) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool shl_bits(size_t n) noexcept {
// Internally, for each item, we shift left by n, and add the previous
// right shifted limb-bits.
// For example, we transform (for u8) shifted left 2, to:
// b10100100 b01000010
// b10 b10010001 b00001000
FASTFLOAT_DEBUG_ASSERT(n != 0);
FASTFLOAT_DEBUG_ASSERT(n < sizeof(limb) * 8);
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(n != 0);
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(n < sizeof(limb) * 8);
size_t shl = n;
size_t shr = limb_bits - shl;
@@ -512,8 +512,8 @@ struct bigint : pow5_tables<> {
}
// move the limbs left by `n` limbs.
FASTFLOAT_CONSTEXPR20 bool shl_limbs(size_t n) noexcept {
FASTFLOAT_DEBUG_ASSERT(n != 0);
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool shl_limbs(size_t n) noexcept {
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(n != 0);
if (n + vec.len() > vec.capacity()) {
return false;
} else if (!vec.is_empty()) {
@@ -533,24 +533,24 @@ struct bigint : pow5_tables<> {
}
// move the limbs left by `n` bits.
FASTFLOAT_CONSTEXPR20 bool shl(size_t n) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool shl(size_t n) noexcept {
size_t rem = n % limb_bits;
size_t div = n / limb_bits;
if (rem != 0) {
FASTFLOAT_TRY(shl_bits(rem));
BOOST_CHARCONV_FASTFLOAT_TRY(shl_bits(rem));
}
if (div != 0) {
FASTFLOAT_TRY(shl_limbs(div));
BOOST_CHARCONV_FASTFLOAT_TRY(shl_limbs(div));
}
return true;
}
// get the number of leading zeros in the bigint.
FASTFLOAT_CONSTEXPR20 int ctlz() const noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 int ctlz() const noexcept {
if (vec.is_empty()) {
return 0;
} else {
#ifdef FASTFLOAT_64BIT_LIMB
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
return leading_zeroes(vec.rindex(0));
#else
// no use defining a specialized leading_zeroes for a 32-bit type.
@@ -561,34 +561,34 @@ struct bigint : pow5_tables<> {
}
// get the number of bits in the bigint.
FASTFLOAT_CONSTEXPR20 int bit_length() const noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 int bit_length() const noexcept {
int lz = ctlz();
return int(limb_bits * vec.len()) - lz;
}
FASTFLOAT_CONSTEXPR20 bool mul(limb y) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool mul(limb y) noexcept {
return small_mul(vec, y);
}
FASTFLOAT_CONSTEXPR20 bool add(limb y) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool add(limb y) noexcept {
return small_add(vec, y);
}
// multiply as if by 2 raised to a power.
FASTFLOAT_CONSTEXPR20 bool pow2(uint32_t exp) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool pow2(uint32_t exp) noexcept {
return shl(exp);
}
// multiply as if by 5 raised to a power.
FASTFLOAT_CONSTEXPR20 bool pow5(uint32_t exp) noexcept {
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool pow5(uint32_t exp) noexcept {
// multiply by a power of 5
size_t large_length = sizeof(large_power_of_5) / sizeof(limb);
limb_span large = limb_span(large_power_of_5, large_length);
while (exp >= large_step) {
FASTFLOAT_TRY(large_mul(vec, large));
BOOST_CHARCONV_FASTFLOAT_TRY(large_mul(vec, large));
exp -= large_step;
}
#ifdef FASTFLOAT_64BIT_LIMB
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
uint32_t small_step = 27;
limb max_native = 7450580596923828125UL;
#else
@@ -596,14 +596,14 @@ struct bigint : pow5_tables<> {
limb max_native = 1220703125U;
#endif
while (exp >= small_step) {
FASTFLOAT_TRY(small_mul(vec, max_native));
BOOST_CHARCONV_FASTFLOAT_TRY(small_mul(vec, max_native));
exp -= small_step;
}
if (exp != 0) {
// Work around clang bug https://godbolt.org/z/zedh7rrhc
// This is similar to https://github.com/llvm/llvm-project/issues/47746,
// except the workaround described there don't work here
FASTFLOAT_TRY(
BOOST_CHARCONV_FASTFLOAT_TRY(
small_mul(vec, limb(((void)small_power_of_5[0], small_power_of_5[exp])))
);
}
@@ -612,8 +612,8 @@ struct bigint : pow5_tables<> {
}
// multiply as if by 10 raised to a power.
FASTFLOAT_CONSTEXPR20 bool pow10(uint32_t exp) noexcept {
FASTFLOAT_TRY(pow5(exp));
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 bool pow10(uint32_t exp) noexcept {
BOOST_CHARCONV_FASTFLOAT_TRY(pow5(exp));
return pow2(exp);
}
};

View File

@@ -16,32 +16,32 @@
// Testing for https://wg21.link/N3652, adopted in C++14
#if __cpp_constexpr >= 201304
#define FASTFLOAT_CONSTEXPR14 constexpr
#define BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 constexpr
#else
#define FASTFLOAT_CONSTEXPR14
#define BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
#endif
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#define FASTFLOAT_HAS_BIT_CAST 1
#define BOOST_CHARCONV_FASTFLOAT_HAS_BIT_CAST 1
#else
#define FASTFLOAT_HAS_BIT_CAST 0
#define BOOST_CHARCONV_FASTFLOAT_HAS_BIT_CAST 0
#endif
#if defined(__cpp_lib_is_constant_evaluated) && __cpp_lib_is_constant_evaluated >= 201811L
#define FASTFLOAT_HAS_IS_CONSTANT_EVALUATED 1
#define BOOST_CHARCONV_FASTFLOAT_HAS_IS_CONSTANT_EVALUATED 1
#else
#define FASTFLOAT_HAS_IS_CONSTANT_EVALUATED 0
#define BOOST_CHARCONV_FASTFLOAT_HAS_IS_CONSTANT_EVALUATED 0
#endif
// Testing for relevant C++20 constexpr library features
#if FASTFLOAT_HAS_IS_CONSTANT_EVALUATED \
&& FASTFLOAT_HAS_BIT_CAST \
#if BOOST_CHARCONV_FASTFLOAT_HAS_IS_CONSTANT_EVALUATED \
&& BOOST_CHARCONV_FASTFLOAT_HAS_BIT_CAST \
&& __cpp_lib_constexpr_algorithms >= 201806L /*For std::copy and std::fill*/
#define FASTFLOAT_CONSTEXPR20 constexpr
#define FASTFLOAT_IS_CONSTEXPR 1
#define BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20 constexpr
#define BOOST_CHARCONV_FASTFLOAT_IS_CONSTEXPR 1
#else
#define FASTFLOAT_CONSTEXPR20
#define FASTFLOAT_IS_CONSTEXPR 0
#define BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
#define BOOST_CHARCONV_FASTFLOAT_IS_CONSTEXPR 0
#endif
#endif // FASTFLOAT_CONSTEXPR_FEATURE_DETECT_H
#endif // BOOST_CHARCONV_FASTFLOAT_CONSTEXPR_FEATURE_DETECT_H

View File

@@ -24,7 +24,7 @@ namespace boost { namespace charconv { namespace detail { namespace fast_float {
// low part corresponding to the least significant bits.
//
template <int bit_precision>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
value128 compute_product_approximation(int64_t q, uint64_t w) {
const int index = 2 * int(q - powers::smallest_power_of_five);
// For small values of q, e.g., q in [0,27], the answer is always exact because
@@ -70,7 +70,7 @@ namespace detail {
// create an adjusted mantissa, biased by the invalid power2
// for significant digits already multiplied by 10 ** q.
template <typename binary>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
adjusted_mantissa compute_error_scaled(int64_t q, uint64_t w, int lz) noexcept {
int hilz = int(w >> 63) ^ 1;
adjusted_mantissa answer;
@@ -83,7 +83,7 @@ adjusted_mantissa compute_error_scaled(int64_t q, uint64_t w, int lz) noexcept
// w * 10 ** q, without rounding the representation up.
// the power2 in the exponent will be adjusted by invalid_am_bias.
template <typename binary>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa compute_error(int64_t q, uint64_t w) noexcept {
int lz = leading_zeroes(w);
w <<= lz;
@@ -97,7 +97,7 @@ adjusted_mantissa compute_error(int64_t q, uint64_t w) noexcept {
// return an adjusted_mantissa with a negative power of 2: the caller should recompute
// in such cases.
template <typename binary>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa compute_float(int64_t q, uint64_t w) noexcept {
adjusted_mantissa answer;
if ((w == 0) || (q < binary::smallest_power_of_ten())) {

View File

@@ -30,7 +30,7 @@ constexpr static uint64_t powers_of_ten_uint64[] = {
// effect on performance: in order to have a faster algorithm, we'd need
// to slow down performance for faster algorithms, and this is still fast.
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
int32_t scientific_exponent(parsed_number_string_t<UC> & num) noexcept {
uint64_t mantissa = num.mantissa;
int32_t exponent = int32_t(num.exponent);
@@ -51,7 +51,7 @@ int32_t scientific_exponent(parsed_number_string_t<UC> & num) noexcept {
// this converts a native floating-point number to an extended-precision float.
template <typename T>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa to_extended(T value) noexcept {
using equiv_uint = typename binary_format<T>::equiv_uint;
constexpr equiv_uint exponent_mask = binary_format<T>::exponent_mask();
@@ -61,7 +61,7 @@ adjusted_mantissa to_extended(T value) noexcept {
adjusted_mantissa am;
int32_t bias = binary_format<T>::mantissa_explicit_bits() - binary_format<T>::minimum_exponent();
equiv_uint bits;
#if FASTFLOAT_HAS_BIT_CAST
#if BOOST_CHARCONV_FASTFLOAT_HAS_BIT_CAST
bits = std::bit_cast<equiv_uint>(value);
#else
::memcpy(&bits, &value, sizeof(T));
@@ -84,7 +84,7 @@ adjusted_mantissa to_extended(T value) noexcept {
// we are given a native float that represents b, so we need to adjust it
// halfway between b and b+u.
template <typename T>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa to_extended_halfway(T value) noexcept {
adjusted_mantissa am = to_extended(value);
am.mantissa <<= 1;
@@ -95,7 +95,7 @@ adjusted_mantissa to_extended_halfway(T value) noexcept {
// round an extended-precision float to the nearest machine float.
template <typename T, typename callback>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
void round(adjusted_mantissa& am, callback cb) noexcept {
int32_t mantissa_shift = 64 - binary_format<T>::mantissa_explicit_bits() - 1;
if (-am.power2 >= mantissa_shift) {
@@ -125,7 +125,7 @@ void round(adjusted_mantissa& am, callback cb) noexcept {
}
template <typename callback>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
void round_nearest_tie_even(adjusted_mantissa& am, int32_t shift, callback cb) noexcept {
const uint64_t mask
= (shift == 64)
@@ -151,7 +151,7 @@ void round_nearest_tie_even(adjusted_mantissa& am, int32_t shift, callback cb) n
am.mantissa += uint64_t(cb(is_odd, is_halfway, is_above));
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
void round_down(adjusted_mantissa& am, int32_t shift) noexcept {
if (shift == 64) {
am.mantissa = 0;
@@ -161,7 +161,7 @@ void round_down(adjusted_mantissa& am, int32_t shift) noexcept {
am.power2 += shift;
}
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void skip_zeros(UC const * & first, UC const * last) noexcept {
uint64_t val;
while (!cpp20_and_in_constexpr() && std::distance(first, last) >= int_cmp_len<UC>()) {
@@ -182,7 +182,7 @@ void skip_zeros(UC const * & first, UC const * last) noexcept {
// determine if any non-zero digits were truncated.
// all characters must be valid digits.
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool is_truncated(UC const * first, UC const * last) noexcept {
// do 8-bit optimizations, can just compare to 8 literal 0s.
uint64_t val;
@@ -202,22 +202,22 @@ bool is_truncated(UC const * first, UC const * last) noexcept {
return false;
}
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
bool is_truncated(span<const UC> s) noexcept {
return is_truncated(s.ptr, s.ptr + s.len());
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void parse_eight_digits(const char16_t*& , limb& , size_t& , size_t& ) noexcept {
// currently unused
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void parse_eight_digits(const char32_t*& , limb& , size_t& , size_t& ) noexcept {
// currently unused
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void parse_eight_digits(const char*& p, limb& value, size_t& counter, size_t& count) noexcept {
value = value * 100000000 + parse_eight_digits_unrolled(p);
p += 8;
@@ -226,7 +226,7 @@ void parse_eight_digits(const char*& p, limb& value, size_t& counter, size_t& co
}
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
void parse_one_digit(UC const *& p, limb& value, size_t& counter, size_t& count) noexcept {
value = value * 10 + limb(*p - UC('0'));
p++;
@@ -234,13 +234,13 @@ void parse_one_digit(UC const *& p, limb& value, size_t& counter, size_t& count)
count++;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void add_native(bigint& big, limb power, limb value) noexcept {
big.mul(power);
big.add(value);
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void round_up_bigint(bigint& big, size_t& count) noexcept {
// need to round-up the digits, but need to avoid rounding
// ....9999 to ...10000, which could cause a false halfway point.
@@ -250,7 +250,7 @@ void round_up_bigint(bigint& big, size_t& count) noexcept {
// parse the significant digits into a big integer
template <typename UC>
inline FASTFLOAT_CONSTEXPR20
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void parse_mantissa(bigint& result, parsed_number_string_t<UC>& num, size_t max_digits, size_t& digits) noexcept {
// try to minimize the number of big integer and scalar multiplication.
// therefore, try to parse 8 digits at a time, and multiply by the largest
@@ -258,7 +258,7 @@ void parse_mantissa(bigint& result, parsed_number_string_t<UC>& num, size_t max_
size_t counter = 0;
digits = 0;
limb value = 0;
#ifdef FASTFLOAT_64BIT_LIMB
#ifdef BOOST_CHARCONV_FASTFLOAT_64BIT_LIMB
size_t step = 19;
#else
size_t step = 9;
@@ -335,9 +335,9 @@ void parse_mantissa(bigint& result, parsed_number_string_t<UC>& num, size_t max_
}
template <typename T>
inline FASTFLOAT_CONSTEXPR20
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa positive_digit_comp(bigint& bigmant, int32_t exponent) noexcept {
FASTFLOAT_ASSERT(bigmant.pow10(uint32_t(exponent)));
BOOST_CHARCONV_FASTFLOAT_ASSERT(bigmant.pow10(uint32_t(exponent)));
adjusted_mantissa answer;
bool truncated;
answer.mantissa = bigmant.hi64(truncated);
@@ -359,7 +359,7 @@ adjusted_mantissa positive_digit_comp(bigint& bigmant, int32_t exponent) noexcep
// we then need to scale by `2^(f- e)`, and then the two significant digits
// are of the same magnitude.
template <typename T>
inline FASTFLOAT_CONSTEXPR20
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa negative_digit_comp(bigint& bigmant, adjusted_mantissa am, int32_t exponent) noexcept {
bigint& real_digits = bigmant;
int32_t real_exp = exponent;
@@ -378,12 +378,12 @@ adjusted_mantissa negative_digit_comp(bigint& bigmant, adjusted_mantissa am, int
int32_t pow2_exp = theor_exp - real_exp;
uint32_t pow5_exp = uint32_t(-real_exp);
if (pow5_exp != 0) {
FASTFLOAT_ASSERT(theor_digits.pow5(pow5_exp));
BOOST_CHARCONV_FASTFLOAT_ASSERT(theor_digits.pow5(pow5_exp));
}
if (pow2_exp > 0) {
FASTFLOAT_ASSERT(theor_digits.pow2(uint32_t(pow2_exp)));
BOOST_CHARCONV_FASTFLOAT_ASSERT(theor_digits.pow2(uint32_t(pow2_exp)));
} else if (pow2_exp < 0) {
FASTFLOAT_ASSERT(real_digits.pow2(uint32_t(-pow2_exp)));
BOOST_CHARCONV_FASTFLOAT_ASSERT(real_digits.pow2(uint32_t(-pow2_exp)));
}
  // compare digits, and use it to direct rounding
@@ -420,7 +420,7 @@ adjusted_mantissa negative_digit_comp(bigint& bigmant, adjusted_mantissa am, int
// the actual digits. we then compare the big integer representations
// of both, and use that to direct rounding.
template <typename T, typename UC>
inline FASTFLOAT_CONSTEXPR20
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
adjusted_mantissa digit_comp(parsed_number_string_t<UC>& num, adjusted_mantissa am) noexcept {
// remove the invalid exponent bias
am.power2 -= invalid_am_bias;

View File

@@ -31,7 +31,7 @@ namespace boost { namespace charconv { namespace detail { namespace fast_float {
* The default is `fast_float::chars_format::general` which allows both `fixed` and `scientific`.
*/
template<typename T, typename UC = char>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
from_chars_result_t<UC> from_chars(UC const * first, UC const * last,
T &value, chars_format fmt = chars_format::general) noexcept;
@@ -39,10 +39,10 @@ from_chars_result_t<UC> from_chars(UC const * first, UC const * last,
* Like from_chars, but accepts an `options` argument to govern number parsing.
*/
template<typename T, typename UC = char>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
from_chars_result_t<UC> from_chars_advanced(UC const * first, UC const * last,
T &value, parse_options_t<UC> options) noexcept;
}}}} // namespace fast_float
#include <boost/charconv/detail/fast_float/parse_number.hpp>
#endif // FASTFLOAT_FAST_FLOAT_H
#endif // BOOST_CHARCONV_FASTFLOAT_FAST_FLOAT_H

View File

@@ -8,6 +8,7 @@
#ifndef BOOST_CHARCONV_DETAIL_FASTFLOAT_FAST_TABLE_HPP
#define BOOST_CHARCONV_DETAIL_FASTFLOAT_FAST_TABLE_HPP
#include <boost/charconv/detail/fast_float/float_common.hpp>
#include <cstdint>
namespace boost { namespace charconv { namespace detail { namespace fast_float {

View File

@@ -36,7 +36,7 @@ using parse_options = parse_options_t<char>;
}
#if FASTFLOAT_HAS_BIT_CAST
#if BOOST_CHARCONV_FASTFLOAT_HAS_BIT_CAST
#include <bit>
#endif
@@ -45,11 +45,11 @@ using parse_options = parse_options_t<char>;
|| defined(__MINGW64__) \
|| defined(__s390x__) \
|| (defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__)) )
#define FASTFLOAT_64BIT 1
#define BOOST_CHARCONV_FASTFLOAT_64BIT 1
#elif (defined(__i386) || defined(__i386__) || defined(_M_IX86) \
|| defined(__arm__) || defined(_M_ARM) || defined(__ppc__) \
|| defined(__MINGW32__) || defined(__EMSCRIPTEN__))
#define FASTFLOAT_32BIT 1
#define BOOST_CHARCONV_FASTFLOAT_32BIT 1
#else
// Need to check incrementally, since SIZE_MAX is a size_t, avoid overflow.
// We can never tell the register width, but the SIZE_MAX is a good approximation.
@@ -57,9 +57,9 @@ using parse_options = parse_options_t<char>;
#if SIZE_MAX == 0xffff
#error Unknown platform (16-bit, unsupported)
#elif SIZE_MAX == 0xffffffff
#define FASTFLOAT_32BIT 1
#define BOOST_CHARCONV_FASTFLOAT_32BIT 1
#elif SIZE_MAX == 0xffffffffffffffff
#define FASTFLOAT_64BIT 1
#define BOOST_CHARCONV_FASTFLOAT_64BIT 1
#else
#error Unknown platform (not 32-bit, not 64-bit?)
#endif
@@ -70,13 +70,13 @@ using parse_options = parse_options_t<char>;
#endif
#if defined(_MSC_VER) && !defined(__clang__)
#define FASTFLOAT_VISUAL_STUDIO 1
#define BOOST_CHARCONV_FASTFLOAT_VISUAL_STUDIO 1
#endif
#if defined __BYTE_ORDER__ && defined __ORDER_BIG_ENDIAN__
#define FASTFLOAT_IS_BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#elif defined _WIN32
#define FASTFLOAT_IS_BIG_ENDIAN 0
#define BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN 0
#else
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <machine/endian.h>
@@ -92,42 +92,42 @@ using parse_options = parse_options_t<char>;
#
#ifndef __BYTE_ORDER__
// safe choice
#define FASTFLOAT_IS_BIG_ENDIAN 0
#define BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN 0
#endif
#
#ifndef __ORDER_LITTLE_ENDIAN__
// safe choice
#define FASTFLOAT_IS_BIG_ENDIAN 0
#define BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN 0
#endif
#
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define FASTFLOAT_IS_BIG_ENDIAN 0
#define BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN 0
#else
#define FASTFLOAT_IS_BIG_ENDIAN 1
#define BOOST_CHARCONV_FASTFLOAT_IS_BIG_ENDIAN 1
#endif
#endif
#ifdef FASTFLOAT_VISUAL_STUDIO
#ifdef BOOST_CHARCONV_FASTFLOAT_VISUAL_STUDIO
#define fastfloat_really_inline __forceinline
#else
#define fastfloat_really_inline inline __attribute__((always_inline))
#endif
#ifndef FASTFLOAT_ASSERT
#define FASTFLOAT_ASSERT(x) { ((void)(x)); }
#ifndef BOOST_CHARCONV_FASTFLOAT_ASSERT
#define BOOST_CHARCONV_FASTFLOAT_ASSERT(x) { ((void)(x)); }
#endif
#ifndef FASTFLOAT_DEBUG_ASSERT
#define FASTFLOAT_DEBUG_ASSERT(x) { ((void)(x)); }
#ifndef BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT
#define BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(x) { ((void)(x)); }
#endif
// rust style `try!()` macro, or `?` operator
#define FASTFLOAT_TRY(x) { if (!(x)) return false; }
#define BOOST_CHARCONV_FASTFLOAT_TRY(x) { if (!(x)) return false; }
namespace fast_float {
fastfloat_really_inline constexpr bool cpp20_and_in_constexpr() {
#if FASTFLOAT_HAS_IS_CONSTANT_EVALUATED
#if BOOST_CHARCONV_FASTFLOAT_HAS_IS_CONSTANT_EVALUATED
return std::is_constant_evaluated();
#else
return false;
@@ -136,7 +136,7 @@ fastfloat_really_inline constexpr bool cpp20_and_in_constexpr() {
// Compares two ASCII strings in a case insensitive manner.
template <typename UC>
inline FASTFLOAT_CONSTEXPR14 bool
inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 bool
fastfloat_strncasecmp(UC const * input1, UC const * input2, size_t length) {
char running_diff{0};
for (size_t i = 0; i < length; ++i) {
@@ -161,8 +161,8 @@ struct span {
return length;
}
FASTFLOAT_CONSTEXPR14 const T& operator[](size_t index) const noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14 const T& operator[](size_t index) const noexcept {
BOOST_CHARCONV_FASTFLOAT_DEBUG_ASSERT(index < length);
return ptr[index];
}
};
@@ -189,13 +189,13 @@ int leading_zeroes_generic(uint64_t input_num, int last_bit = 0) {
}
/* result might be undefined when input_num is zero */
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
int leading_zeroes(uint64_t input_num) {
assert(input_num > 0);
if (cpp20_and_in_constexpr()) {
return leading_zeroes_generic(input_num);
}
#ifdef FASTFLOAT_VISUAL_STUDIO
#ifdef BOOST_CHARCONV_FASTFLOAT_VISUAL_STUDIO
#if defined(_M_X64) || defined(_M_ARM64)
unsigned long leading_zero = 0;
// Search the mask data from most significant bit (MSB)
@@ -215,7 +215,7 @@ fastfloat_really_inline constexpr uint64_t emulu(uint32_t x, uint32_t y) {
return x * (uint64_t)y;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
uint64_t umul128_generic(uint64_t ab, uint64_t cd, uint64_t *hi) {
uint64_t ad = emulu((uint32_t)(ab >> 32), (uint32_t)cd);
uint64_t bd = emulu((uint32_t)ab, (uint32_t)cd);
@@ -227,21 +227,21 @@ uint64_t umul128_generic(uint64_t ab, uint64_t cd, uint64_t *hi) {
return lo;
}
#ifdef FASTFLOAT_32BIT
#ifdef BOOST_CHARCONV_FASTFLOAT_32BIT
// slow emulation routine for 32-bit
#if !defined(__MINGW64__)
fastfloat_really_inline FASTFLOAT_CONSTEXPR14
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
uint64_t _umul128(uint64_t ab, uint64_t cd, uint64_t *hi) {
return umul128_generic(ab, cd, hi);
}
#endif // !__MINGW64__
#endif // FASTFLOAT_32BIT
#endif // BOOST_CHARCONV_FASTFLOAT_32BIT
// compute 64-bit a*b
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
value128 full_multiplication(uint64_t a, uint64_t b) {
if (cpp20_and_in_constexpr()) {
value128 answer;
@@ -254,9 +254,9 @@ value128 full_multiplication(uint64_t a, uint64_t b) {
// But MinGW on ARM64 doesn't have native support for 64-bit multiplications
answer.high = __umulh(a, b);
answer.low = a * b;
#elif defined(FASTFLOAT_32BIT) || (defined(_WIN64) && !defined(__clang__))
#elif defined(BOOST_CHARCONV_FASTFLOAT_32BIT) || (defined(_WIN64) && !defined(__clang__))
answer.low = _umul128(a, b, &answer.high); // _umul128 not available on ARM64
#elif defined(FASTFLOAT_64BIT)
#elif defined(BOOST_CHARCONV_FASTFLOAT_64BIT)
__uint128_t r = ((__uint128_t)a) * b;
answer.low = uint64_t(r);
answer.high = uint64_t(r >> 64);
@@ -527,20 +527,20 @@ template <> inline constexpr binary_format<double>::equiv_uint
}
template<typename T>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20
fastfloat_really_inline BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
void to_float(bool negative, adjusted_mantissa am, T &value) {
using uint = typename binary_format<T>::equiv_uint;
uint word = (uint)am.mantissa;
word |= uint(am.power2) << binary_format<T>::mantissa_explicit_bits();
word |= uint(negative) << binary_format<T>::sign_index();
#if FASTFLOAT_HAS_BIT_CAST
#if BOOST_CHARCONV_FASTFLOAT_HAS_BIT_CAST
value = std::bit_cast<T>(word);
#else
::memcpy(&value, &word, sizeof(T));
#endif
}
#ifdef FASTFLOAT_SKIP_WHITE_SPACE // disabled by default
#ifdef BOOST_CHARCONV_FASTFLOAT_SKIP_WHITE_SPACE // disabled by default
template <typename = void>
struct space_lut {
static constexpr bool value[] = {

View File

@@ -34,7 +34,7 @@ namespace detail {
#endif
template <typename T, typename UC>
from_chars_result_t<UC> FASTFLOAT_CONSTEXPR14
from_chars_result_t<UC> BOOST_CHARCONV_FASTFLOAT_CONSTEXPR14
parse_infnan(UC const * first, UC const * last, T &value) noexcept {
from_chars_result_t<UC> answer{};
answer.ptr = first;
@@ -44,7 +44,7 @@ parse_infnan(UC const * first, UC const * last, T &value) noexcept {
minusSign = true;
++first;
}
#ifdef FASTFLOAT_ALLOWS_LEADING_PLUS // disabled by default
#ifdef BOOST_CHARCONV_FASTFLOAT_ALLOWS_LEADING_PLUS // disabled by default
if (*first == UC('+')) {
++first;
}
@@ -127,7 +127,7 @@ fastfloat_really_inline bool rounds_to_nearest() noexcept {
//
// Note: This may fail to be accurate if fast-math has been
// enabled, as rounding conventions may not apply.
#ifdef FASTFLOAT_VISUAL_STUDIO
#ifdef BOOST_CHARCONV_FASTFLOAT_VISUAL_STUDIO
# pragma warning(push)
// todo: is there a VS warning?
// see https://stackoverflow.com/questions/46079446/is-there-a-warning-for-floating-point-equality-checking-in-visual-studio-2013
@@ -139,7 +139,7 @@ fastfloat_really_inline bool rounds_to_nearest() noexcept {
# pragma GCC diagnostic ignored "-Wfloat-equal"
#endif
return (fmini + 1.0f == 1.0f - fmini);
#ifdef FASTFLOAT_VISUAL_STUDIO
#ifdef BOOST_CHARCONV_FASTFLOAT_VISUAL_STUDIO
# pragma warning(pop)
#elif defined(__clang__)
# pragma clang diagnostic pop
@@ -151,14 +151,14 @@ fastfloat_really_inline bool rounds_to_nearest() noexcept {
} // namespace detail
template<typename T, typename UC>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
from_chars_result_t<UC> from_chars(UC const * first, UC const * last,
T &value, chars_format fmt /*= chars_format::general*/) noexcept {
return from_chars_advanced(first, last, value, parse_options_t<UC>{fmt});
}
template<typename T, typename UC>
FASTFLOAT_CONSTEXPR20
BOOST_CHARCONV_FASTFLOAT_CONSTEXPR20
from_chars_result_t<UC> from_chars_advanced(UC const * first, UC const * last,
T &value, parse_options_t<UC> options) noexcept {
@@ -169,7 +169,7 @@ from_chars_result_t<UC> from_chars_advanced(UC const * first, UC const * last,
std::is_same<UC, char32_t>::value , "only char, wchar_t, char16_t and char32_t are supported");
from_chars_result_t<UC> answer;
#ifdef FASTFLOAT_SKIP_WHITE_SPACE // disabled by default
#ifdef BOOST_CHARCONV_FASTFLOAT_SKIP_WHITE_SPACE // disabled by default
while ((first != last) && fast_float::is_space(uint8_t(*first))) {
first++;
}

View File

@@ -15,8 +15,8 @@ namespace boost { namespace charconv { namespace detail {
// The significands of powers of ten from -308 to 308, extended out to sixty four
// bits. The array contains the powers of ten approximated
// as a 64-bit significand. It goes from 10^FASTFLOAT_SMALLEST_POWER to
// 10^FASTFLOAT_LARGEST_POWER (inclusively).
// as a 64-bit significand. It goes from 10^BOOST_CHARCONV_FASTFLOAT_SMALLEST_POWER to
// 10^BOOST_CHARCONV_FASTFLOAT_LARGEST_POWER (inclusively).
// The significand is truncated, and never rounded up.
// Uses about 5KB.
static constexpr std::uint64_t significand_64[] = {