| //! Converting decimal strings into IEEE 754 binary floating point numbers. |
| //! |
| //! # Problem statement |
| //! |
| //! We are given a decimal string such as `12.34e56`. This string consists of integral (`12`), |
| //! fractional (`34`), and exponent (`56`) parts. All parts are optional and interpreted as a |
| //! default value (1 or 0) when missing. |
| //! |
| //! We seek the IEEE 754 floating point number that is closest to the exact value of the decimal |
| //! string. It is well-known that many decimal strings do not have terminating representations in |
| //! base two, so we round to 0.5 units in the last place (in other words, as well as possible). |
| //! Ties, decimal values exactly half-way between two consecutive floats, are resolved with the |
| //! half-to-even strategy, also known as banker's rounding. |
| //! |
| //! Needless to say, this is quite hard, both in terms of implementation complexity and in terms |
| //! of CPU cycles taken. |
| //! |
| //! # Implementation |
| //! |
//! First, we ignore the sign. Or rather, we remove it at the very beginning of the conversion
//! process and re-apply it at the very end. This is correct in all edge cases since IEEE
//! floats are symmetric around zero; negating one simply flips the first bit.
| //! |
| //! Then we remove the decimal point by adjusting the exponent: Conceptually, `12.34e56` turns |
| //! into `1234e54`, which we describe with a positive integer `f = 1234` and an integer `e = 54`. |
| //! The `(f, e)` representation is used by almost all code past the parsing stage. |
| //! |
| //! We then try a long chain of progressively more general and expensive special cases using |
| //! machine-sized integers and small, fixed-sized floating point numbers (first `f32`/`f64`, then |
| //! a type with 64 bit significand). The extended-precision algorithm |
| //! uses the Eisel-Lemire algorithm, which uses a 128-bit (or 192-bit) |
| //! representation that can accurately and quickly compute the vast majority |
| //! of floats. When all these fail, we bite the bullet and resort to using |
| //! a large-decimal representation, shifting the digits into range, calculating |
| //! the upper significant bits and exactly round to the nearest representation. |
| //! |
//! Another aspect that needs attention is the `Lemire` trait by which almost all functions
| //! are parametrized. One might think that it's enough to parse to `f64` and cast the result to |
| //! `f32`. Unfortunately this is not the world we live in, and this has nothing to do with using |
| //! base two or half-to-even rounding. |
| //! |
//! Consider for example two types `d2` and `d4` representing decimal types with two and four
//! decimal digits respectively, and take "0.01499" as input. Let's use half-up rounding.
| //! Going directly to two decimal digits gives `0.01`, but if we round to four digits first, |
| //! we get `0.0150`, which is then rounded up to `0.02`. The same principle applies to other |
| //! operations as well, if you want 0.5 ULP accuracy you need to do *everything* in full precision |
| //! and round *exactly once, at the end*, by considering all truncated bits at once. |
| //! |
| //! Primarily, this module and its children implement the algorithms described in: |
| //! "Number Parsing at a Gigabyte per Second", available online: |
| //! <https://arxiv.org/abs/2101.11408>. |
| //! |
| //! # Other |
| //! |
| //! The conversion should *never* panic. There are assertions and explicit panics in the code, |
| //! but they should never be triggered and only serve as internal sanity checks. Any panics should |
| //! be considered a bug. |
| //! |
| //! There are unit tests but they are woefully inadequate at ensuring correctness, they only cover |
| //! a small percentage of possible errors. Far more extensive tests are located in the directory |
| //! `src/tools/test-float-parse` as a Rust program. |
| //! |
| //! A note on integer overflow: Many parts of this file perform arithmetic with the decimal |
| //! exponent `e`. Primarily, we shift the decimal point around: Before the first decimal digit, |
| //! after the last decimal digit, and so on. This could overflow if done carelessly. We rely on |
| //! the parsing submodule to only hand out sufficiently small exponents, where "sufficient" means |
| //! "such that the exponent +/- the number of decimal digits fits into a 64 bit integer". |
| //! Larger exponents are accepted, but we don't do arithmetic with them, they are immediately |
| //! turned into {positive,negative} {zero,infinity}. |
| //! |
| //! # Notation |
| //! |
| //! This module uses the same notation as the Lemire paper: |
| //! |
| //! - `m`: binary mantissa; always nonnegative |
| //! - `p`: binary exponent; a signed integer |
| //! - `w`: decimal significand; always nonnegative |
| //! - `q`: decimal exponent; a signed integer |
| //! |
| //! This gives `m * 2^p` for the binary floating-point number, with `w * 10^q` as the decimal |
| //! equivalent. |
| |
| #![doc(hidden)] |
| #![unstable( |
| feature = "dec2flt", |
| reason = "internal routines only exposed for testing", |
| issue = "none" |
| )] |
| |
| use common::BiasedFp; |
| use lemire::compute_float; |
| use parse::{parse_inf_nan, parse_number}; |
| use slow::parse_long_mantissa; |
| |
| use crate::f64; |
| use crate::num::ParseFloatError; |
| use crate::num::float_parse::FloatErrorKind; |
| use crate::num::imp::FloatExt; |
| |
| mod common; |
| pub mod decimal; |
| pub mod decimal_seq; |
| mod fpu; |
| pub mod lemire; |
| pub mod parse; |
| mod slow; |
| mod table; |
| |
/// Extensions to `FloatExt` that are necessary for parsing using the Lemire method.
///
/// See the parent module's doc comment for why this is necessary.
///
/// Not intended for use outside of the `dec2flt` module.
#[doc(hidden)]
pub trait Lemire: FloatExt {
    /// Maximum exponent for a fast path case, or `⌊(SIG_BITS+1)/log2(5)⌋`
    // assuming FLT_EVAL_METHOD = 0 (i.e. no excess precision in intermediate
    // float computations), so the `f64` division below is evaluated exactly
    // in f64 and the truncating cast yields the intended floor.
    const MAX_EXPONENT_FAST_PATH: i64 = {
        // log2(5) = log2(10/2) = log2(10) - 1
        let log2_5 = f64::consts::LOG2_10 - 1.0;
        (Self::SIG_TOTAL_BITS as f64 / log2_5) as i64
    };

    /// Minimum exponent for a fast path case, or `-⌊(SIG_BITS+1)/log2(5)⌋`
    const MIN_EXPONENT_FAST_PATH: i64 = -Self::MAX_EXPONENT_FAST_PATH;

    /// Maximum exponent that can be represented for a disguised-fast path case.
    /// This is `MAX_EXPONENT_FAST_PATH + ⌊(SIG_BITS+1)/log2(10)⌋`
    const MAX_EXPONENT_DISGUISED_FAST_PATH: i64 =
        Self::MAX_EXPONENT_FAST_PATH + (Self::SIG_TOTAL_BITS as f64 / f64::consts::LOG2_10) as i64;

    /// Maximum mantissa for the fast-path (`1 << 53` for f64).
    const MAX_MANTISSA_FAST_PATH: u64 = 1 << Self::SIG_TOTAL_BITS;

    /// Gets a small power-of-ten for fast-path multiplication.
    fn pow10_fast_path(exponent: usize) -> Self;

    /// Converts an integer into a float through an `as` cast.
    ///
    /// This is only called in the fast-path algorithm, and therefore will not
    /// lose precision: the caller guarantees `v <= Self::MAX_MANTISSA_FAST_PATH`,
    /// so the value always fits into the significand.
    fn from_u64(v: u64) -> Self;
}
| |
#[cfg(target_has_reliable_f16)]
impl Lemire for f16 {
    fn pow10_fast_path(exponent: usize) -> Self {
        // Powers of ten up to 1e4 are exactly representable in f16. The table
        // is zero-padded to a power-of-two length so the index can be masked
        // instead of bounds-checked; the padding entries are never read on the
        // fast path.
        #[allow(clippy::use_self)]
        const TABLE: [f16; 8] = [
            1e0, 1e1, 1e2, 1e3, 1e4, 0.0, 0.0, 0.0,
        ];
        TABLE[exponent & 0x7]
    }

    #[inline]
    fn from_u64(v: u64) -> Self {
        // Lossless by the trait contract: fast-path callers only pass values
        // that fit into the significand.
        debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
        v as f16
    }
}
| |
| impl Lemire for f32 { |
| fn pow10_fast_path(exponent: usize) -> Self { |
| #[allow(clippy::use_self)] |
| const TABLE: [f32; 16] = |
| [1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 0., 0., 0., 0., 0.]; |
| TABLE[exponent & 15] |
| } |
| |
| #[inline] |
| fn from_u64(v: u64) -> Self { |
| debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH); |
| v as _ |
| } |
| } |
| |
| impl Lemire for f64 { |
| fn pow10_fast_path(exponent: usize) -> Self { |
| const TABLE: [f64; 32] = [ |
| 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, |
| 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 0., 0., 0., 0., 0., 0., 0., 0., 0., |
| ]; |
| TABLE[exponent & 31] |
| } |
| |
| #[inline] |
| fn from_u64(v: u64) -> Self { |
| debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH); |
| v as _ |
| } |
| } |
| |
| #[inline] |
| pub(super) fn pfe_empty() -> ParseFloatError { |
| ParseFloatError { kind: FloatErrorKind::Empty } |
| } |
| |
| // Used in unit tests, keep public. |
| // This is much better than making FloatErrorKind and ParseFloatError::kind public. |
| #[inline] |
| pub fn pfe_invalid() -> ParseFloatError { |
| ParseFloatError { kind: FloatErrorKind::Invalid } |
| } |
| |
| /// Converts a `BiasedFp` to the closest machine float type. |
| fn biased_fp_to_float<F: FloatExt>(x: BiasedFp) -> F { |
| let mut word = x.m; |
| word |= (x.p_biased as u64) << F::SIG_BITS; |
| F::from_u64_bits(word) |
| } |
| |
| /// Converts a decimal string into a floating point number. |
| #[inline(always)] // Will be inlined into a function with `#[inline(never)]`, see above |
| pub fn dec2flt<F: Lemire>(s: &str) -> Result<F, ParseFloatError> { |
| let mut s = s.as_bytes(); |
| let Some(&c) = s.first() else { return Err(pfe_empty()) }; |
| let negative = c == b'-'; |
| if c == b'-' || c == b'+' { |
| s = &s[1..]; |
| } |
| if s.is_empty() { |
| return Err(pfe_invalid()); |
| } |
| |
| let mut num = match parse_number(s) { |
| Some(r) => r, |
| None if let Some(value) = parse_inf_nan(s, negative) => return Ok(value), |
| None => return Err(pfe_invalid()), |
| }; |
| num.negative = negative; |
| if !cfg!(feature = "optimize_for_size") { |
| if let Some(value) = num.try_fast_path::<F>() { |
| return Ok(value); |
| } |
| } |
| |
| // If significant digits were truncated, then we can have rounding error |
| // only if `mantissa + 1` produces a different result. We also avoid |
| // redundantly using the Eisel-Lemire algorithm if it was unable to |
| // correctly round on the first pass. |
| let mut fp = compute_float::<F>(num.exponent, num.mantissa); |
| if num.many_digits |
| && fp.p_biased >= 0 |
| && fp != compute_float::<F>(num.exponent, num.mantissa + 1) |
| { |
| fp.p_biased = -1; |
| } |
| // Unable to correctly round the float using the Eisel-Lemire algorithm. |
| // Fallback to a slower, but always correct algorithm. |
| if fp.p_biased < 0 { |
| fp = parse_long_mantissa::<F>(s); |
| } |
| |
| let mut float = biased_fp_to_float::<F>(fp); |
| if num.negative { |
| float = -float; |
| } |
| Ok(float) |
| } |