diff --git a/.gitignore b/.gitignore index 06639cad..6ea63c83 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,6 @@ cobertura.xml # Fuzzing data corpus/ artifacts/ + +# Spellchecking files +spellcheck.txt diff --git a/Cargo.toml b/Cargo.toml index 1e689c4b..9fdfe8b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,9 @@ members = [ ] resolver = "2" +[workspace.metadata.spellcheck] +config = "spellcheck.toml" + [profile.dev] opt-level = 0 debug = true diff --git a/docs/Development.md b/docs/Development.md index 6be022a3..8d43469d 100644 --- a/docs/Development.md +++ b/docs/Development.md @@ -53,6 +53,10 @@ cargo +nightly install cargo-fuzz # which uses a deprecated binary that requires an old nightly to # install. cargo +nightly install cargo-count --git https://github.com/kbknapp/cargo-count --rev eebe6f8 --locked + +# Only if editing doc comments. This requires a Linux or macOS install. +# On Ubuntu, the packages `libclang-dev` and `llvm` are required. +cargo install cargo-spellcheck ``` In addition, the following non-Rust dependencies must be installed: @@ -117,3 +121,7 @@ Each workspace has a "docs" directory containing detailed descriptions of algori ## Pitfalls **ALWAYS** benchmark, even for trivial changes. I've been burned many times by `#[cfg(...)]` being way faster than `if cfg!()`, which youl would think both would be eliminated during optimization, just one during the first stage of compilation. It's better to confirm than assume. This is a nightmare development-wise because of how many features we support but there's not many alternatives: it seems it doesn't entirely remove code as if by tree-shaking which can majorly impact performance. + +## Documentation + +If making significant changes to the documentation, running the spellchecker can be useful. Remember these are **guidelines** and anything inside `libm.rs` should be ignored. To check the spelling, run `cargo spellcheck check`. diff --git a/lexical-core/src/lib.rs b/lexical-core/src/lib.rs index 9dee2357..7cf67103 100644 --- a/lexical-core/src/lib.rs +++ b/lexical-core/src/lib.rs @@ -185,11 +185,11 @@ //! #### safe //! //! This replaces most unchecked indexing, required in cases where the -//! compiler cannot ellide the check, with checked indexing. However, +//! compiler cannot elide the check, with checked indexing. However, //! it does not fully replace all unsafe behavior with safe behavior. -//! To minimize the risk of UB and out-of-bounds reads/writers, extensive -//! edge-cases, property-based tests, and fuzzing is done with both the -//! safe feature enabled and disabled, with the tests verified by Miri +//! To minimize the risk of undefined behavior and out-of-bounds reads/writers, +//! extensive edge-cases, property-based tests, and fuzzing is done with both +//! the safe feature enabled and disabled, with the tests verified by Miri //! and Valgrind. //! //! # Configuration API diff --git a/lexical-parse-float/etc/correctness/test-parse-random/validate.rs b/lexical-parse-float/etc/correctness/test-parse-random/validate.rs index 1eb3699c..cd515842 100644 --- a/lexical-parse-float/etc/correctness/test-parse-random/validate.rs +++ b/lexical-parse-float/etc/correctness/test-parse-random/validate.rs @@ -42,7 +42,7 @@ pub struct Constants { /// /// This is a mapping from integers to half the precision available at that exponent. In other /// words, `0.5 * 2^n` = `2^(n-1)`, which is half the distance between `m * 2^n` and - /// `(m + 1) * 2^n`, m ∈ ℤ. + /// `(m + 1) * 2^n`, `m ∈ ℤ`. 
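As a concrete illustration of that half-spacing claim, here is a minimal, self-contained Rust sketch (the `half_spacing` helper is hypothetical and only for illustration): for a float written as `m * 2^n` with integer `m`, adjacent representable values are `2^n` apart, so the worst-case rounding error is `2^(n-1)`.

```rust
// Hypothetical helper: half the distance between `m * 2^n` and `(m + 1) * 2^n`.
fn half_spacing(n: i32) -> f64 {
    2f64.powi(n - 1)
}

fn main() {
    // 1.5f64 is `m * 2^n` with `m = 3 << 51` and `n = -52`.
    let x = 1.5f64;
    let next = f64::from_bits(x.to_bits() + 1);
    assert_eq!(next - x, 2.0 * half_spacing(-52)); // spacing is 2^-52
}
```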
/// /// So, this is the maximum error from a real number to its floating point representation, /// assuming the float type can represent the exponent. diff --git a/lexical-parse-float/src/api.rs b/lexical-parse-float/src/api.rs index dbffa7d4..aa9639f4 100644 --- a/lexical-parse-float/src/api.rs +++ b/lexical-parse-float/src/api.rs @@ -19,7 +19,7 @@ const DEFAULT_OPTIONS: Options = Options::new(); /// Implement `FromLexical` for numeric type. /// -/// Need to inline these, otherwise codegen is suboptimal. +/// Need to inline these, otherwise code generation is sub-optimal. /// For some reason, it can't determine some of the const evaluations /// can actually be evaluated at compile-time, which causes major branching /// issues. diff --git a/lexical-parse-float/src/bellerophon.rs b/lexical-parse-float/src/bellerophon.rs index 812c4a3e..726d41e4 100644 --- a/lexical-parse-float/src/bellerophon.rs +++ b/lexical-parse-float/src/bellerophon.rs @@ -160,7 +160,7 @@ pub fn bellerophon(num: &Number, lossy: bool) - // Specifically, we want to know if we are close to a halfway representation, // or halfway between `b` and `b+1`, or `b+h`. The halfway representation // has the form: -// SEEEEEEEHMMMMMMMMMMMMMMMMMMMMMMM100... +// `SEEEEEEEHMMMMMMMMMMMMMMMMMMMMMMM100...` // where: // S = Sign Bit // E = Exponent Bits @@ -196,7 +196,7 @@ fn error_is_accurate(errors: u32, fp: &ExtendedFloat80) -> bool { // See the documentation for dtoa for more information. // This is always a valid u32, since `fp.exp >= -64` - // will always be positive and the significand size is {23, 52}. + // will always be positive and the significand size is `{23, 52}`. let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; // The unbiased exponent checks is `unbiased_exp <= F::MANTISSA_SIZE @@ -293,7 +293,7 @@ fn error_is_accurate(errors: u32, fp: &ExtendedFloat80) -> bool { #[cfg_attr(not(feature = "compact"), inline(always))] pub fn normalize(fp: &mut ExtendedFloat80) -> i32 { // Note: - // Using the ctlz intrinsic via leading_zeros is way faster (~10x) + // Using the ctlz intrinsic via `leading_zeros` is way faster (~10x) // than shifting 1-bit at a time, via while loop, and also way // faster (~2x) than an unrolled loop that checks at 32, 16, 4, // 2, and 1 bit. @@ -304,7 +304,7 @@ pub fn normalize(fp: &mut ExtendedFloat80) -> i32 { // code as it removes conditional logic. // Calculate the number of leading zeros, and then zero-out - // any overflowing bits, to avoid shl overflow when self.mant == 0. + // any overflowing bits, to avoid shl overflow when `self.mant == 0`. if fp.mant != 0 { let shift = fp.mant.leading_zeros() as i32; fp.mant <<= shift; @@ -358,7 +358,7 @@ pub fn mul(x: &ExtendedFloat80, y: &ExtendedFloat80) -> ExtendedFloat80 { // POWERS // ------ -/// Precalculated powers of base N for the Bellerophon algorithm. +/// Pre-calculated powers of base N for the Bellerophon algorithm. pub struct BellerophonPowers { // Pre-calculated small powers. pub small: &'static [u64], @@ -370,9 +370,9 @@ pub struct BellerophonPowers { pub step: i32, // Exponent bias for the large powers. pub bias: i32, - /// ceil(log2(radix)) scaled as a multiplier. + /// `ceil(log2(radix))` scaled as a multiplier. pub log2: i64, - /// Bitshift for the log2 multiplier. + /// Bit shift for the log2 multiplier. pub log2_shift: i32, } diff --git a/lexical-parse-float/src/bigint.rs b/lexical-parse-float/src/bigint.rs index 475a45e5..143da7e0 100644 --- a/lexical-parse-float/src/bigint.rs +++ b/lexical-parse-float/src/bigint.rs @@ -1,6 +1,6 @@ //! 
A simple big-integer type for slow path algorithms. //! -//! This includes minimal stackvector for use in big-integer arithmetic. +//! This includes minimal stack vector for use in big-integer arithmetic. #![doc(hidden)] @@ -996,10 +996,10 @@ pub const fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { /// /// Even using worst-case scenarios, exponentiation by squaring is /// significantly slower for our workloads. Just multiply by small powers, -/// in simple cases, and use precalculated large powers in other cases. +/// in simple cases, and use pre-calculated large powers in other cases. /// /// Furthermore, using sufficiently big large powers is also crucial for -/// performance. This is a tradeoff of binary size and performance, and +/// performance. This is a trade-off of binary size and performance, and /// using a single value at ~`5^(5 * max_exp)` seems optimal. #[allow(clippy::doc_markdown)] // reason="not attempted to be referencing items" #[allow(clippy::missing_inline_in_public_items)] // reason="only public for testing" @@ -1116,8 +1116,8 @@ pub fn large_add_from( y: &[Limb], start: usize, ) -> Option<()> { - // The effective x buffer is from `xstart..x.len()`, so we need to treat - // that as the current range. If the effective y buffer is longer, need + // The effective `x` buffer is from `xstart..x.len()`, so we need to treat + // that as the current range. If the effective `y` buffer is longer, need // to resize to that, + the start index. if y.len() > x.len().saturating_sub(start) { // Ensure we panic if we can't extend the buffer. @@ -1125,14 +1125,14 @@ pub fn large_add_from( x.try_resize(y.len() + start, 0)?; } - // Iteratively add elements from y to x. + // Iteratively add elements from `y` to `x`. let mut carry = false; for index in 0..y.len() { let xi = &mut x[start + index]; let yi = y[index]; // Only one op of the two ops can overflow, since we added at max - // Limb::max_value() + Limb::max_value(). Add the previous carry, + // `Limb::max_value() + Limb::max_value()`. Add the previous carry, // and store the current carry for the next. let result = scalar_add(*xi, yi); *xi = result.0; @@ -1432,7 +1432,7 @@ pub fn shl(x: &mut StackVec, n: usize) -> Option<()> { #[inline(always)] pub fn leading_zeros(x: &[Limb]) -> u32 { let length = x.len(); - // wrapping_sub is fine, since it'll just return None. + // `wrapping_sub` is fine, since it'll just return None. if let Some(&value) = x.get(length.wrapping_sub(1)) { value.leading_zeros() } else { diff --git a/lexical-parse-float/src/binary.rs b/lexical-parse-float/src/binary.rs index bf695ebb..a1baee63 100644 --- a/lexical-parse-float/src/binary.rs +++ b/lexical-parse-float/src/binary.rs @@ -46,7 +46,7 @@ pub fn binary(num: &Number, lossy: bool) -> Ext // is not a power-of-two. If it's odd and we're at halfway, we'll // always round-up **anyway**. // - // We need to check the truncated bits are equal to 0b100000...., + // We need to check the truncated bits are equal to `0b100000....`, // if it's above that, always round-up. If it's odd, we can always // disambiguate the float. If it's even, and exactly halfway, this // step fails. @@ -96,7 +96,7 @@ pub fn binary(num: &Number, lossy: bool) -> Ext /// /// We're guaranteed to have a large number of digits here /// (in general, 20+ or much higher), due to how close we -/// are to a halfway representation, so an uncheced loop +/// are to a halfway representation, so an unchecked loop /// optimization isn't worth it. 
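The halfway check described in the `binary` rounding comments above can be sketched independently (a rough, standalone illustration; `truncated_state` is a made-up helper, not the crate's API): the bits about to be truncated are compared against the pattern `0b100...0`, which is the only ambiguous case.

```rust
// Classify the `shift` low bits of `mant` that are about to be truncated:
// exactly `0b100...0` is the halfway case; anything larger always rounds up.
fn truncated_state(mant: u64, shift: u32) -> (bool, bool) {
    debug_assert!(shift >= 1 && shift < 64);
    let mask = (1u64 << shift) - 1;
    let halfway = 1u64 << (shift - 1);
    let truncated = mant & mask;
    (truncated > halfway, truncated == halfway) // (round_up, is_halfway)
}
```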
#[cfg_attr(not(feature = "compact"), inline(always))] #[allow(unused_mut)] diff --git a/lexical-parse-float/src/float.rs b/lexical-parse-float/src/float.rs index 83da8600..1af794b4 100644 --- a/lexical-parse-float/src/float.rs +++ b/lexical-parse-float/src/float.rs @@ -117,22 +117,22 @@ impl RawFloat for bf16 { /// algorithm. pub trait LemireFloat: RawFloat { // Round-to-even only happens for negative values of q - // when q ≥ −4 in the 64-bit case and when q ≥ −17 in + // when `q ≥ −4` in the 64-bit case and when `q ≥ −17` in // the 32-bitcase. // - // When q ≥ 0,we have that 5^q ≤ 2m+1. In the 64-bit case,we - // have 5^q ≤ 2m+1 ≤ 2^54 or q ≤ 23. In the 32-bit case,we have - // 5^q ≤ 2m+1 ≤ 2^25 or q ≤ 10. + // When `q ≥ 0`,we have that `5^q ≤ 2m+1`. In the 64-bit case,we + // have `5^q ≤ 2m+1 ≤ 2^54` or `q ≤ 23`. In the 32-bit case,we have + // `5^q ≤ 2m+1 ≤ 2^25` or `q ≤ 10`. // - // When q < 0, we have w ≥ (2m+1)×5^−q. We must have that w < 2^64 - // so (2m+1)×5^−q < 2^64. We have that 2m+1 > 2^53 (64-bit case) - // or 2m+1 > 2^24 (32-bit case). Hence,we must have 2^53×5^−q < 2^64 - // (64-bit) and 2^24×5^−q < 2^64 (32-bit). Hence we have 5^−q < 2^11 - // or q ≥ −4 (64-bit case) and 5^−q < 2^40 or q ≥ −17 (32-bitcase). + // When q < 0, we have `w ≥ (2m+1)×5^−q`. We must have that `w < 2^64` + // so `(2m+1)×5^−q < 2^64`. We have that `2m+1 > 2^53` (64-bit case) + // or `2m+1 > 2^24` (32-bit case). Hence,we must have `2^53×5^−q < 2^64` + // (64-bit) and `2^24×5^−q < 2^64` (32-bit). Hence we have `5^−q < 2^11` + // or `q ≥ −4` (64-bit case) and `5^−q < 2^40` or `q ≥ −17` (32-bitcase). // // Thus we have that we only need to round ties to even when - // we have that q ∈ [−4,23](in the 64-bit case) or q∈[−17,10] - // (in the 32-bit case). In both cases,the power of five(5^|q|) + // we have that `q ∈ [−4,23]` (in the 64-bit case) or `q∈[−17,10]` + // (in the 32-bit case). In both cases,the power of five (`5^|q|`) // fits in a 64-bit word. const MIN_EXPONENT_ROUND_TO_EVEN: i32; const MAX_EXPONENT_ROUND_TO_EVEN: i32; diff --git a/lexical-parse-float/src/fpu.rs b/lexical-parse-float/src/fpu.rs index 1795eeeb..27af7083 100644 --- a/lexical-parse-float/src/fpu.rs +++ b/lexical-parse-float/src/fpu.rs @@ -19,7 +19,7 @@ pub use fpu_precision::set_precision; #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))] mod fpu_precision { // We only support the latest nightly, which is 1.59+. - // THe `asm!` macro was stabilized in 1.59.0. + // The `asm!` macro was stabilized in 1.59.0. use core::arch::asm; use core::mem::size_of; diff --git a/lexical-parse-float/src/lemire.rs b/lexical-parse-float/src/lemire.rs index 03d8e8ff..8ee48224 100644 --- a/lexical-parse-float/src/lemire.rs +++ b/lexical-parse-float/src/lemire.rs @@ -74,16 +74,16 @@ pub fn compute_float(q: i64, mut w: u64, lossy: bool) -> Extende w <<= lz; let (lo, hi) = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3); if !lossy && lo == 0xFFFF_FFFF_FFFF_FFFF { - // If we have failed to approximate w x 5^-q with our 128-bit value. + // If we have failed to approximate `w x 5^-q` with our 128-bit value. // Since the addition of 1 could lead to an overflow which could then // round up over the half-way point, this can lead to improper rounding // of a float. // - // However, this can only occur if q ∈ [-27, 55]. The upper bound of q - // is 55 because 5^55 < 2^128, however, this can only happen if 5^q > 2^64, + // However, this can only occur if `q ∈ [-27, 55]`. 
The upper bound of q + is 55 because `5^55 < 2^128`, however, this can only happen if `5^q > 2^64`, // since otherwise the product can be represented in 64-bits, producing // an exact result. For negative exponents, rounding-to-even can - // only occur if 5^-q < 2^64. + // only occur if `5^-q < 2^64`. // // For detailed explanations of rounding for negative exponents, see // . For detailed @@ -117,7 +117,7 @@ pub fn compute_float(q: i64, mut w: u64, lossy: bool) -> Extende // need to round down. // // This will only occur if: - // 1. The lower 64 bits of the 128-bit representation is 0. IE, 5^q fits in + // 1. The lower 64 bits of the 128-bit representation is 0. IE, `5^q` fits in // single 64-bit word. // 2. The least-significant bit prior to truncated mantissa is odd. // 3. All the bits truncated when shifting to mantissa bits + 1 are 0. @@ -197,10 +197,10 @@ const fn full_multiplication(a: u64, b: u64) -> (u64, u64) { (r as u64, (r >> 64) as u64) } -// This will compute or rather approximate w * 5**q and return a pair of 64-bit -// words approximating the result, with the "high" part corresponding to the -// most significant bits and the low part corresponding to the least significant -// bits. +// This will compute or rather approximate `w * 5**q` and return a pair of +// 64-bit words approximating the result, with the "high" part corresponding to +// the most significant bits and the low part corresponding to the least +// significant bits. fn compute_product_approx(q: i64, w: u64, precision: usize) -> (u64, u64) { debug_assert!(q >= SMALLEST_POWER_OF_FIVE as i64, "must be within our required pow5 range"); debug_assert!(q <= LARGEST_POWER_OF_FIVE as i64, "must be within our required pow5 range"); @@ -212,7 +212,7 @@ fn compute_product_approx(q: i64, w: u64, precision: usize) -> (u64, u64) { 0xFFFF_FFFF_FFFF_FFFF_u64 }; - // 5^q < 2^64, then the multiplication always provides an exact value. + // `5^q < 2^64`, then the multiplication always provides an exact value. // That means whenever we need to round ties to even, we always have // an exact value. let index = (q - SMALLEST_POWER_OF_FIVE as i64) as usize; diff --git a/lexical-parse-float/src/libm.rs b/lexical-parse-float/src/libm.rs index 904ca6d9..dae980a1 100644 --- a/lexical-parse-float/src/libm.rs +++ b/lexical-parse-float/src/libm.rs @@ -391,7 +391,7 @@ pub fn sqrtf(x: f32) -> f32 { { // Note: This path is unlikely since LLVM will usually have already // optimized sqrt calls into hardware instructions if sse is available, - // but if someone does end up here they'll apprected the speed increase. + // but if someone does end up here they'll appreciate the speed increase. #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] @@ -1068,7 +1068,7 @@ pub fn sqrtd(x: f64) -> f64 { { // Note: This path is unlikely since LLVM will usually have already // optimized sqrt calls into hardware instructions if sse2 is available, - // but if someone does end up here they'll apprected the speed increase. + // but if someone does end up here they'll appreciate the speed increase. #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] diff --git a/lexical-parse-float/src/limits.rs b/lexical-parse-float/src/limits.rs index 20d452d9..db1e3d2e 100644 --- a/lexical-parse-float/src/limits.rs +++ b/lexical-parse-float/src/limits.rs @@ -766,8 +766,8 @@ pub const fn u64_power_limit(radix: u32) -> u32 { /// > finite number of digits is that β should divide an integer power of γ. 
/// /// According to the "Handbook of Floating Point Arithmetic", -/// for IEEE754, with emin being the min exponent, p2 being the -/// precision, and b being the radix, the number of digits follows as: +/// for IEEE754, with `emin` being the min exponent, `p2` being the +/// precision, and `b` being the radix, the number of digits follows as: /// /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋` /// @@ -900,8 +900,8 @@ impl MaxDigits for bf16 { } } -///// emin = -16382 -///// p2 = 113 +///// `emin = -16382` +///// `p2 = 113` //#[cfg(feature = "f128")] //impl MaxDigits for f128 { // #[inline(always)] diff --git a/lexical-parse-float/src/options.rs b/lexical-parse-float/src/options.rs index a3910cd3..ea99291a 100644 --- a/lexical-parse-float/src/options.rs +++ b/lexical-parse-float/src/options.rs @@ -575,7 +575,7 @@ pub const RUBY_LITERAL: Options = Options::builder() const_assert!(RUBY_LITERAL.is_valid()); /// Number format to parse a `Ruby` float from string. -/// `Ruby` can write NaN and Infinity as strings, but won't roundtrip them back to floats. +/// `Ruby` can write NaN and Infinity as strings, but won't round-trip them back to floats. #[rustfmt::skip] pub const RUBY_STRING: Options = Options::builder() .nan_string(options::RUBY_STRING_NONE) diff --git a/lexical-parse-float/src/parse.rs b/lexical-parse-float/src/parse.rs index 550ab8a3..ccf82bd0 100644 --- a/lexical-parse-float/src/parse.rs +++ b/lexical-parse-float/src/parse.rs @@ -161,7 +161,7 @@ parse_float_as_f32! { bf16 f16 } // NOTE: // The partial and complete parsers are done separately because it provides // minor optimizations when parsing invalid input, and the logic is slightly -// different internally. Most of the code is reshared, so the duplicated +// different internally. Most of the code is shared, so the duplicated // code is only like 30 lines. /// Parse the sign from the leading digits. @@ -194,7 +194,7 @@ pub fn parse_exponent_sign(byte: &mut Bytes<'_, FORMAT>) -> /// Utility to extract the result and handle any errors from parsing a `Number`. /// -/// - `format` - The numberical format as a packed integer +/// - `format` - The numerical format as a packed integer /// - `byte` - The `DigitsIter` iterator /// - `is_negative` - If the final value is negative /// - `parse_normal` - The function to parse non-special numbers with @@ -480,7 +480,7 @@ pub fn slow_path( /// significant digits and the decimal exponent. #[cfg_attr(not(feature = "compact"), inline(always))] #[allow(unused_mut)] // reason = "used when format is enabled" -#[allow(clippy::unwrap_used)] // reason = "developper error if we incorrectly assume an overflow" +#[allow(clippy::unwrap_used)] // reason = "developer error if we incorrectly assume an overflow" #[allow(clippy::collapsible_if)] // reason = "more readable uncollapsed" #[allow(clippy::cast_possible_wrap)] // reason = "no hardware supports buffers >= i64::MAX" #[allow(clippy::too_many_lines)] // reason = "function is one logical entity" @@ -538,7 +538,7 @@ pub fn parse_number<'a, const FORMAT: u128, const IS_PARTIAL: bool>( // Check to see if the next character is the base prefix. // We must have a format like `0x`, `0d`, `0o`. // NOTE: The check for empty integer digits happens below so - // we don't need a redunant check here. + // we don't need a redundant check here. 
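For the base-prefix handling in `parse_number`, a simplified standalone sketch of the idea (the `has_base_prefix` helper below is hypothetical; the real code goes through the `DigitsIter` machinery and format flags): a prefix like `0x` is only recognized after a leading zero, optionally case-insensitively.

```rust
// Does `digits` start with `0` followed by the base prefix (e.g. b'x' for `0x`)?
// Case sensitivity mirrors the `case_sensitive_base_prefix` format flag.
fn has_base_prefix(digits: &[u8], prefix: u8, case_sensitive: bool) -> bool {
    match digits {
        [b'0', c, ..] if case_sensitive => *c == prefix,
        [b'0', c, ..] => c.eq_ignore_ascii_case(&prefix),
        _ => false,
    }
}

fn main() {
    assert!(has_base_prefix(b"0x1F", b'x', false));
    assert!(!has_base_prefix(b"x1F", b'x', false));
}
```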
is_prefix = true; if iter.read_if_value(base_prefix, format.case_sensitive_base_prefix()).is_some() && iter.is_buffer_empty() @@ -606,7 +606,7 @@ pub fn parse_number<'a, const FORMAT: u128, const IS_PARTIAL: bool>( let mut fraction_digits = None; let has_decimal = byte.first_is_cased(decimal_point); if has_decimal { - // SAFETY: byte cannot be empty due to first_is + // SAFETY: byte cannot be empty due to `first_is` unsafe { byte.step_unchecked() }; let before = byte.clone(); #[cfg(not(feature = "compact"))] @@ -652,7 +652,7 @@ pub fn parse_number<'a, const FORMAT: u128, const IS_PARTIAL: bool>( let has_exponent = byte .first_is(exponent_character, format.case_sensitive_exponent() && cfg!(feature = "format")); - // check to see if we have any inval;id leading zeros + // check to see if we have any invalid leading zeros n_digits += n_after_dot; if format.required_mantissa_digits() && (n_digits == 0 || (cfg!(feature = "format") && byte.current_count() == 0)) @@ -756,7 +756,7 @@ pub fn parse_number<'a, const FORMAT: u128, const IS_PARTIAL: bool>( let mut zeros_integer = zeros.integer_iter(); n_digits = n_digits.saturating_sub(zeros_integer.skip_zeros()); if zeros.first_is_cased(decimal_point) { - // SAFETY: safe since zeros cannot be empty due to first_is + // SAFETY: safe since zeros cannot be empty due to `first_is` unsafe { zeros.step_unchecked() }; } let mut zeros_fraction = zeros.fraction_iter(); @@ -780,8 +780,8 @@ pub fn parse_number<'a, const FORMAT: u128, const IS_PARTIAL: bool>( // short-circuit and should be determined at compile time. So, the // conditions are either: // 1. Step == 0 - // 2. cfg!(feature = "format") && !byte.is_contiguous() && - // fraction_digits.is_none() + // 2. `cfg!(feature = "format") && !byte.is_contiguous() && + // fraction_digits.is_none()` implicit_exponent = if step == 0 || (cfg!(feature = "format") && !byte.is_contiguous() && fraction_digits.is_none()) { @@ -869,7 +869,7 @@ where } // SAFETY: iter cannot be empty due to `iter.peek()`. // NOTE: Because of the match statement, this would optimize poorly with - // read_if. + // `read_if`. unsafe { iter.step_unchecked() }; iter.increment_count(); } diff --git a/lexical-parse-float/src/shared.rs b/lexical-parse-float/src/shared.rs index 29f64fd6..a417357b 100644 --- a/lexical-parse-float/src/shared.rs +++ b/lexical-parse-float/src/shared.rs @@ -231,7 +231,7 @@ where #[cfg_attr(not(feature = "compact"), inline(always))] pub fn round_nearest_tie_even(fp: &mut ExtendedFloat80, shift: i32, cb: Cb) where - // is_odd, is_halfway, is_above + // `is_odd`, `is_halfway`, `is_above` Cb: Fn(bool, bool, bool) -> bool, { // Ensure we've already handled denormal values that underflow. @@ -250,7 +250,7 @@ where let is_halfway = truncated_bits == halfway; // Bit shift so the leading bit is in the hidden bit. - // This optimixes pretty well: + // This optimizes pretty well: // ```text // mov ecx, esi // shr rdi, cl diff --git a/lexical-parse-float/src/slow.rs b/lexical-parse-float/src/slow.rs index 30e4d522..6aa1e4e2 100644 --- a/lexical-parse-float/src/slow.rs +++ b/lexical-parse-float/src/slow.rs @@ -46,7 +46,7 @@ use crate::shared; /// a large number of digits to unambiguously determine how to round. 
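For context on the `is_odd`/`is_halfway`/`is_above` callback used by `round_nearest_tie_even` above, the usual round-nearest, ties-to-even decision looks roughly like this (a hedged sketch of the rule, not necessarily the crate's exact closure):

```rust
// Round up when strictly above halfway, or when exactly halfway and the
// retained bit is odd (so that the rounded result ends in an even bit).
fn should_round_up(is_odd: bool, is_halfway: bool, is_above: bool) -> bool {
    is_above || (is_halfway && is_odd)
}
```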
#[must_use] #[inline(always)] -#[allow(clippy::unwrap_used)] // reason = "none is a developper error" +#[allow(clippy::unwrap_used)] // reason = "none is a developer error" pub fn slow_radix( num: Number, fp: ExtendedFloat80, @@ -119,7 +119,7 @@ pub fn digit_comp( /// Generate the significant digits with a positive exponent relative to /// mantissa. #[must_use] -#[allow(clippy::unwrap_used)] // reason = "none is a developper error" +#[allow(clippy::unwrap_used)] // reason = "none is a developer error" #[allow(clippy::cast_possible_wrap)] // reason = "can't wrap in practice: max is ~1000 limbs" #[allow(clippy::missing_inline_in_public_items)] // reason = "only public for testing" pub fn positive_digit_comp( @@ -319,7 +319,7 @@ macro_rules! add_digit { /// - `format` - The numerical format specification as a packed 128-bit integer /// - `result` - The big integer, /// - `power` - The power to scale the big integer by. -/// - `value` - The value to add to the big intger, +/// - `value` - The value to add to the big integer, /// - `counter` - The number of parsed digits since creating the current u32 macro_rules! add_temporary { // Multiply by the small power and add the native value. @@ -587,7 +587,7 @@ macro_rules! fraction_compare { /// Adapted from "Bigcomp: Deciding Truncated, Near Halfway Conversions", /// available [here](https://www.exploringbinary.com/bigcomp-deciding-truncated-near-halfway-conversions/). #[cfg(feature = "radix")] -#[allow(clippy::unwrap_used)] // reason = "none is a developper error due to shl overflow" +#[allow(clippy::unwrap_used)] // reason = "none is a developer error due to shl overflow" #[allow(clippy::comparison_chain)] // reason = "logically different conditions for algorithm" pub fn byte_comp( number: Number, @@ -615,8 +615,8 @@ pub fn byte_comp( let mut den: Bigfloat; if sci_exp < 0 { - // Need to have the basen factor be the numerator, and the fp - // be the denominator. Since we assumed that theor was the numerator, + // Need to have the basen factor be the numerator, and the `fp` + // be the denominator. Since we assumed that `theor` was the numerator, // if it's the denominator, we need to multiply it into the numerator. num = factor; num.data *= &theor.data; @@ -686,7 +686,7 @@ pub fn byte_comp( /// - `den` - The theoretical digits created by `b+h` to determine if `b` or /// `b+1` #[cfg(feature = "radix")] -#[allow(clippy::unwrap_used)] // reason = "none is a developper error due to a missing fraction" +#[allow(clippy::unwrap_used)] // reason = "none is a developer error due to a missing fraction" pub fn compare_bytes( number: Number, mut num: Bigfloat, @@ -736,7 +736,7 @@ pub fn scientific_exponent(num: &Number) -> i32 { let format = NumberFormat:: {}; // Use power reduction to make this faster: we need at least - // F::MANTISSA_SIZE bits, so we must have at least radix^4 digits. + // `F::MANTISSA_SIZE` bits, so we must have at least radix^4 digits. // IF we're using base 3, we can have at most 11 divisions, and // base 36, at most ~4. So, this is reasonably efficient. 
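The power-reduction trick in the comment above can be shown in isolation (an illustrative sketch; `digit_count` is a made-up helper, not `scientific_exponent` itself): divide by `radix^4` while possible, then finish one digit at a time, so even base 3 needs only ~11 of the larger divisions for a 64-bit mantissa.

```rust
// Count base-`radix` digits of `mantissa` via power reduction: divide by
// radix^4 first, then by radix for whatever remains.
fn digit_count(mut mantissa: u64, radix: u64) -> u32 {
    debug_assert!(radix >= 2);
    let radix4 = radix * radix * radix * radix;
    let mut count = 0;
    while mantissa >= radix4 {
        mantissa /= radix4;
        count += 4;
    }
    while mantissa != 0 {
        mantissa /= radix;
        count += 1;
    }
    count
}

fn main() {
    assert_eq!(digit_count(1234, 10), 4);
    assert_eq!(digit_count(80, 3), 4); // 80 is `2222` in base 3
}
```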
let radix = format.radix() as u64; diff --git a/lexical-parse-float/src/table_binary.rs b/lexical-parse-float/src/table_binary.rs index 933c2aad..745f627f 100644 --- a/lexical-parse-float/src/table_binary.rs +++ b/lexical-parse-float/src/table_binary.rs @@ -71,7 +71,7 @@ pub fn get_small_f64_power(exponent: usize, radix: u32) -> f64 { // NOTE: // These functions use the fact that **all** powers-of-two -// can be exactly represented and cheaply using bitshifts for +// can be exactly represented and cheaply using bit shifts for // integers, or by setting the exponent directly. /// Get pre-computed int power of 2. diff --git a/lexical-parse-integer/src/algorithm.rs b/lexical-parse-integer/src/algorithm.rs index 5f0f841f..ec67a59a 100644 --- a/lexical-parse-integer/src/algorithm.rs +++ b/lexical-parse-integer/src/algorithm.rs @@ -147,7 +147,7 @@ macro_rules! fmt_invalid_digit { break; } else if !$iter.is_buffer_empty() { // Haven't finished parsing, so we're going to call - // invalid_digit!. Need to ensure we include the + // `invalid_digit!`. Need to ensure we include the // base suffix in that. // SAFETY: safe since the iterator is not empty, as checked @@ -194,7 +194,7 @@ macro_rules! parse_sign { $invalid_positive:ident, $missing:ident ) => { - // NOTE: read_if optimizes poorly since we then match after + // NOTE: `read_if` optimizes poorly since we then match after match $byte.integer_iter().first() { Some(&b'+') if !$no_positive => { // SAFETY: We have at least 1 item left since we peaked a value @@ -261,9 +261,9 @@ pub fn parse_4digits(mut v: u32) -> u32 { // Normalize our digits to the range `[0, 9]`. v -= 0x3030_3030; - // Scale digits in 0 <= Nn <= 99. + // Scale digits in `0 <= Nn <= 99`. v = (v * radix) + (v >> 8); - // Scale digits in 0 <= Nnnn <= 9999. + // Scale digits in `0 <= Nnnn <= 9999`. v = ((v & 0x0000007f) * radix * radix) + ((v >> 16) & 0x0000007f); v @@ -338,7 +338,7 @@ pub fn parse_8digits(mut v: u64) -> u64 { // Normalize our digits to the base. v -= 0x3030_3030_3030_3030; - // Scale digits in 0 <= Nn <= 99. + // Scale digits in `0 <= Nn <= 99`. v = (v * radix) + (v >> 8); let v1 = (v & mask).wrapping_mul(mul1); let v2 = ((v >> 16) & mask).wrapping_mul(mul2); @@ -375,14 +375,14 @@ where /// Run a loop where the integer cannot possibly overflow. /// -/// If the len of the str is short compared to the range of the type +/// If the length of the str is short compared to the range of the type /// we are parsing into, then we can be certain that an overflow will not occur. /// This bound is when `radix.pow(digits.len()) - 1 <= T::MAX` but the condition /// above is a faster (conservative) approximation of this. /// /// Consider radix 16 as it has the highest information density per digit and -/// will thus overflow the earliest: `u8::MAX` is `ff` - any str of len 2 is -/// guaranteed to not overflow. `i8::MAX` is `7f` - only a str of len 1 is +/// will thus overflow the earliest: `u8::MAX` is `ff` - any str of length 2 is +/// guaranteed to not overflow. `i8::MAX` is `7f` - only a str of length 1 is /// guaranteed to not overflow. /// /// This is based off of [core/num](core). @@ -541,9 +541,9 @@ macro_rules! parse_digits_checked { $no_multi_digit:expr, $overflow_digits:expr ) => {{ - // Can use the unchecked for the max_digits here. If we + // Can use the unchecked for the `max_digits` here. If we // have a non-contiguous iterator, we could have a case like - // 123__456, with no consecutive digit sepatators allowed. 
If + // 123__456, with no consecutive digit separators allowed. If // it's broken between the `_` characters, the integer will be // seen as valid when it isn't. if cfg!(not(feature = "format")) || $iter.is_contiguous() { @@ -582,7 +582,7 @@ macro_rules! algorithm { // -------- // None of this code can be changed for optimization reasons. // Do not change it without benchmarking every change. - // 1. You cannot use the NoSkipIterator in the loop, + // 1. You cannot use the `NoSkipIterator` in the loop, // you must either return a subslice (indexing) // or increment outside of the loop. // Failing to do so leads to numerous more, unnecessary @@ -599,7 +599,7 @@ macro_rules! algorithm { // With `step_by_unchecked`, this is sufficiently optimized. // Removes conditional paths, to, which simplifies maintenance. // The skip version of the iterator automatically coalesces to - // the noskip iterator. + // the no-skip iterator. let mut byte = $bytes.bytes::(); let radix = NumberFormat::::MANTISSA_RADIX; diff --git a/lexical-parse-integer/src/api.rs b/lexical-parse-integer/src/api.rs index 98dda514..40adf814 100644 --- a/lexical-parse-integer/src/api.rs +++ b/lexical-parse-integer/src/api.rs @@ -10,7 +10,7 @@ use crate::parse::ParseInteger; /// Implement `FromLexical` for numeric type. /// -/// Need to inline these, otherwise codegen is suboptimal. +/// Need to inline these, otherwise code generation is sub-optimal. /// For some reason, it can't determine some of the const evaluations /// can actually be evaluated at compile-time, which causes major branching /// issues. diff --git a/lexical-parse-integer/src/options.rs b/lexical-parse-integer/src/options.rs index 6015a427..fadc01df 100644 --- a/lexical-parse-integer/src/options.rs +++ b/lexical-parse-integer/src/options.rs @@ -167,14 +167,14 @@ impl ParseOptions for Options { pub const STANDARD: Options = Options::new(); const_assert!(STANDARD.is_valid()); -/// Optiobns optimized for small numbers. +/// Options optimized for small numbers. #[rustfmt::skip] pub const SMALL_NUMBERS: Options = Options::builder() .no_multi_digit(true) .build_unchecked(); const_assert!(SMALL_NUMBERS.is_valid()); -/// Optiobns optimized for large numbers and long strings. +/// Options optimized for large numbers and long strings. #[rustfmt::skip] pub const LARGE_NUMBERS: Options = Options::builder() .no_multi_digit(false) diff --git a/lexical-util/src/digit.rs b/lexical-util/src/digit.rs index 20e83fa0..843becf7 100644 --- a/lexical-util/src/digit.rs +++ b/lexical-util/src/digit.rs @@ -7,8 +7,8 @@ // --------- // These are optimized functions for when the radix is known at compile-time, -// which is **most** of our cases. There are cases where for codegen, using a -// runtime algorithm is preferable. +// which is **most** of our cases. There are cases where for code generation, +// using a runtime algorithm is preferable. /// Unchecked, highly optimized algorithm to convert a char to a digit. /// This only works if the input character is known to be a valid digit. diff --git a/lexical-util/src/div128.rs b/lexical-util/src/div128.rs index 7922ba36..008d856f 100644 --- a/lexical-util/src/div128.rs +++ b/lexical-util/src/div128.rs @@ -92,8 +92,9 @@ pub fn moderate_u128_divrem(n: u128, d: u64, factor: u128, factor_shr: u32) -> ( /// Optimized fallback division/remainder algorithm for u128. /// -/// This is because the codegen for u128 divrem is very inefficient in Rust, -/// calling both `__udivmodti4` twice internally, rather than a single time. 
+/// This is because the code generation for u128 divrem is very inefficient +/// in Rust, calling both `__udivmodti4` twice internally, rather than a single +/// time. /// /// This is still a fair bit slower than the optimized algorithms described /// in the above paper, but this is a suitable fallback when we cannot use diff --git a/lexical-util/src/format.rs b/lexical-util/src/format.rs index 87efe079..e922fa92 100644 --- a/lexical-util/src/format.rs +++ b/lexical-util/src/format.rs @@ -205,7 +205,7 @@ //! //! # Character Shifts and Masks //! -//! Bitmasks and bitshifts to get and set control characters for the format +//! Bitmasks and bit shifts to get and set control characters for the format //! packed struct. //! //! - [`DIGIT_SEPARATOR_SHIFT`] diff --git a/lexical-util/src/noskip.rs b/lexical-util/src/noskip.rs index b58937e9..70a0c4d5 100644 --- a/lexical-util/src/noskip.rs +++ b/lexical-util/src/noskip.rs @@ -160,7 +160,7 @@ unsafe impl<'a, const __: u128> Iter<'a> for Bytes<'a, __> { /// Slice iterator that stores the original length of the slice. pub struct DigitsIterator<'a: 'b, 'b, const __: u128> { - /// The internal byte object for the noskip iterator. + /// The internal byte object for the no-skip iterator. byte: &'b mut Bytes<'a, __>, } diff --git a/lexical-util/tests/num_tests.rs b/lexical-util/tests/num_tests.rs index d35df557..9d732f75 100644 --- a/lexical-util/tests/num_tests.rs +++ b/lexical-util/tests/num_tests.rs @@ -137,7 +137,7 @@ fn check_integer(mut x: T) { x |= T::ONE; x ^= T::ONE; - // Bitshifts + // Bit shifts let _ = x << 1i32; let _ = x >> 1i32; x <<= 1i32; diff --git a/lexical-write-float/etc/correctness/dragonbox/roundtrip.rs b/lexical-write-float/etc/correctness/dragonbox/roundtrip.rs index 6a5634ac..755769ed 100644 --- a/lexical-write-float/etc/correctness/dragonbox/roundtrip.rs +++ b/lexical-write-float/etc/correctness/dragonbox/roundtrip.rs @@ -1,4 +1,4 @@ -// Validate a roundtrip for a given float. +// Validate a round-trip for a given float. use lexical_write_float::float::RawFloat; use lexical_write_float::ToLexical; diff --git a/lexical-write-float/src/algorithm.rs b/lexical-write-float/src/algorithm.rs index 3e833115..4c6b4ec6 100644 --- a/lexical-write-float/src/algorithm.rs +++ b/lexical-write-float/src/algorithm.rs @@ -7,17 +7,17 @@ //! This is therefore under an Apache 2.0/Boost Software dual-license. //! //! We use a u64 for the significant digits, even for a 32-bit integer, -//! however, we use the proper bitshifts, etc. for the float in question, +//! however, we use the proper bit shifts, etc. for the float in question, //! rather than clobbering the result to f64, as Rust's port does. //! //! Each one of the algorithms described here has the main implementation, //! according to the reference Dragonbox paper, as well as an alias for //! our own purposes. The existing algorithms include: //! -//! 1. compute_nearest_normal -//! 2. compute_nearest_shorter -//! 3. compute_left_closed_directed -//! 4. compute_right_closed_directed +//! 1. `compute_nearest_normal` +//! 2. `compute_nearest_shorter` +//! 3. `compute_left_closed_directed` +//! 4. `compute_right_closed_directed` //! //! `compute_nearest_normal` and `compute_nearest_shorter` are used for //! round-nearest, tie-even and `compute_right_closed_directed` is used @@ -295,8 +295,8 @@ pub fn to_decimal(float: F) -> ExtendedFloat80 { } // Shorter interval case; proceed like Schubfach. 
- // One might think this condition is wrong, since when exponent_bits == 1 - // and two_fc == 0, the interval is actullay regular. However, it turns out + // One might think this condition is wrong, since when `exponent_bits == 1` + // and `two_fc == 0`, the interval is actually regular. However, it turns out // that this seemingly wrong condition is actually fine, because the end // result is anyway the same. // @@ -306,7 +306,7 @@ pub fn to_decimal(float: F) -> ExtendedFloat80 { // fc * 2^e = 1.175'494'35... * 10^-38 // (fc+1/2) * 2^e = 1.175'494'42... * 10^-38 // - // Hence, shorter_interval_case will return 1.175'494'4 * 10^-38. + // Hence, `shorter_interval_case` will return 1.175'494'4 * 10^-38. // 1.175'494'3 * 10^-38 is also a correct shortest representation that will // be rejected if we assume shorter interval, but 1.175'494'4 * 10^-38 is // closer to the true value so it doesn't matter. @@ -317,7 +317,7 @@ pub fn to_decimal(float: F) -> ExtendedFloat80 { // fc * 2^e = 2.225'073'858'507'201'38... * 10^-308 // (fc+1/2) * 2^e = 2.225'073'858'507'201'63... * 10^-308 // - // Hence, shorter_interval_case will return 2.225'073'858'507'201'4 * + // Hence, `shorter_interval_case` will return 2.225'073'858'507'201'4 * // 10^-308. This is indeed of the shortest length, and it is the unique one // closest to the true value among valid representations of the same length. @@ -325,8 +325,8 @@ pub fn to_decimal(float: F) -> ExtendedFloat80 { // // What we need is a compute-nearest, but with truncated digits in the // truncated case. Note that we don't need the left-closed direct - // rounding case of I = [w,w+), or right-closed directed rounding - // case of I = (w−,w], since these produce the shortest intervals for + // rounding case of `I = [w,w+)`, or right-closed directed rounding + // case of `I = (w−,w]`, since these produce the shortest intervals for // a **float parser** assuming the rounding of the float-parser. // The left-directed case assumes the float parser will round-down, // while the right-directed case assumed the float parser will round-up. @@ -369,28 +369,29 @@ pub fn compute_round(float: F) -> ExtendedFloat80 { compute_nearest_normal(float) } -/// Compute the interval I = [m−w,m+w] if even, otherwise, (m−w,m+w). +/// Compute the interval `I = [m−w,m+w]` if even, otherwise, `(m−w,m+w)`. /// This is the simple case for a finite number where only the hidden bit is /// set. #[inline] pub fn compute_nearest_shorter(float: F) -> ExtendedFloat80 { - // Compute k and beta. + // Compute `k` and `beta`. let exponent = float.exponent(); let minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent); let beta = exponent + floor_log2_pow10(-minus_k); - // Compute xi and zi. + // Compute `xi` and `zi`. // SAFETY: safe, since value must be finite and therefore in the correct range. // `-324 <= exponent <= 308`, so `x * log10(2) - log10(4 / 3)` must be in - // `-98 <= x <= 93`, so the final value must be in [-93, 98] (for f64). We have - // precomputed powers for [-292, 326] for f64 (same logic applies for f32) so - // this is **ALWAYS** safe. + // `-98 <= x <= 93`, so the final value must be in `[-93, 98]` (for f64). We + // have pre-computed powers for `[-292, 326]` for f64 (same logic applies + // for f32) so this is **ALWAYS** safe. let pow5 = unsafe { F::dragonbox_power(-minus_k) }; let mut xi = F::compute_left_endpoint(&pow5, beta); let mut zi = F::compute_right_endpoint(&pow5, beta); // Get the interval type. 
- // Must be Round since we only use compute_round with a round-nearest direction. + // Must be Round since we only use `compute_round` with a round-nearest + // direction. let interval_type = IntervalType::Closed; // If we don't accept the right endpoint and if the right endpoint is an @@ -413,7 +414,7 @@ pub fn compute_nearest_shorter(float: F) -> ExtendedFloat80 { return extended_float(mant, exp); } - // Otherwise, compute the round-up of y. + // Otherwise, compute the round-up of `y`. let mut significand = F::compute_round_up(&pow5, beta); // When tie occurs, choose one of them according to the rule. @@ -428,7 +429,7 @@ pub fn compute_nearest_shorter(float: F) -> ExtendedFloat80 { significand += 1; } - // Ensure we haven't re-assigned exponent or minus_k, since this + // Ensure we haven't re-assigned `exponent` or `minus_k`, since this // is a massive potential security vulnerability. debug_assert!(float.exponent() == exponent); debug_assert!(minus_k == floor_log10_pow2_minus_log10_4_over_3(exponent)); @@ -436,7 +437,7 @@ pub fn compute_nearest_shorter(float: F) -> ExtendedFloat80 { extended_float(significand, minus_k) } -/// Compute the interval I = [m−w,m+w] if even, otherwise, (m−w,m+w). +/// Compute the interval `I = [m−w,m+w]` if even, otherwise, `(m−w,m+w)`. /// This is the normal case for a finite number with non-zero significant /// digits. #[allow(clippy::comparison_chain)] // reason="logical approach for algorithm" @@ -446,26 +447,26 @@ pub fn compute_nearest_normal(float: F) -> ExtendedFloat80 { let is_even = mantissa % 2 == 0; // Step 1: Schubfach multiplier calculation - // Compute k and beta. + // Compute `k` and `beta`. let minus_k = floor_log10_pow2(exponent) - F::KAPPA as i32; // SAFETY: safe, since value must be finite and therefore in the correct range. // `-324 <= exponent <= 308`, so `x * log10(2)` must be in - // `-98 <= x <= 93`, so the final value must be in [-93, 98] (for f64). We have - // precomputed powers for [-292, 326] for f64 (same logic applies for f32) so - // this is **ALWAYS** safe. + // `-98 <= x <= 93`, so the final value must be in `[-93, 98]` (for f64). We + // have pre-computed powers for `[-292, 326]` for f64 (same logic applies + // for f32) so this is **ALWAYS** safe. let pow5 = unsafe { F::dragonbox_power(-minus_k) }; let beta = exponent + floor_log2_pow10(-minus_k); - // Compute zi and deltai. - // 10^kappa <= deltai < 10^(kappa + 1) + // Compute `zi` and `deltai`. + // `10^kappa <= deltai < 10^(kappa + 1)` let two_fc = mantissa << 1; let deltai = F::compute_delta(&pow5, beta); // For the case of binary32, the result of integer check is not correct for - // 29711844 * 2^-82 - // = 6.1442653300000000008655037797566933477355632930994033813476... * 10^-18 - // and 29711844 * 2^-81 - // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17, - // and they are the unique counterexamples. However, since 29711844 is even, + // `29711844 * 2^-82 + // = 6.1442653300000000008655037797566933477355632930994033813476... * 10^-18` + // and `29711844 * 2^-81 + // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17`, + // and they are the unique counterexamples. However, since `29711844` is even, // this does not cause any problem for the endpoints calculations; it can only // cause a problem when we need to perform integer check for the center. 
// Fortunately, with these inputs, that branch is never executed, so we are @@ -476,15 +477,16 @@ pub fn compute_nearest_normal(float: F) -> ExtendedFloat80 { let big_divisor = pow32(10, F::KAPPA + 1); let small_divisor = pow32(10, F::KAPPA); - // Using an upper bound on zi, we might be able to optimize the division - // better than the compiler; we are computing zi / big_divisor here. + // Using an upper bound on `zi`, we might be able to optimize the division + // better than the compiler; we are computing `zi / big_divisor` here. let exp = F::KAPPA + 1; let n_max = (1 << (F::MANTISSA_SIZE + 1)) * big_divisor as u64 - 1; let mut significand = F::divide_by_pow10(zi, exp, n_max); let mut r = (zi - (big_divisor as u64).wrapping_mul(significand)) as u32; // Get the interval type. - // Must be Round since we only use compute_round with a round-nearest direction. + // Must be Round since we only use `compute_round` with a round-nearest + // direction. let interval_type = IntervalType::Symmetric(is_even); // Check for short-circuit. @@ -504,7 +506,7 @@ pub fn compute_nearest_normal(float: F) -> ExtendedFloat80 { } else if r > deltai { should_short_circuit = false; } else { - // r == deltai; compare fractional parts. + // `r == deltai`; compare fractional parts. // Due to the more complex logic in the new dragonbox algorithm, // it's much easier logically to store if we should short circuit, // the default, and only mark @@ -513,9 +515,9 @@ pub fn compute_nearest_normal(float: F) -> ExtendedFloat80 { if !include_left || exponent < F::FC_PM_HALF_LOWER || exponent > F::DIV_BY_5_THRESHOLD { // If the left endpoint is not included, the condition for - // success is z^(f) < delta^(f) (odd parity). + // success is `z^(f) < delta^(f)` (odd parity). // Otherwise, the inequalities on exponent ensure that - // x is not an integer, so if z^(f) >= delta^(f) (even parity), we in fact + // `x` is not an integer, so if `z^(f) >= delta^(f)` (even parity), we in fact // have strict inequality. let parity = F::compute_mul_parity(two_fl, &pow5, beta).0; if !parity { @@ -540,32 +542,31 @@ pub fn compute_nearest_normal(float: F) -> ExtendedFloat80 { let dist = r - (deltai / 2) + (small_divisor / 2); let approx_y_parity = ((dist ^ (small_divisor / 2)) & 1) != 0; - // Is dist divisible by 10^kappa? + // Is dist divisible by `10^kappa`? let (dist, is_dist_div_by_kappa) = F::check_div_pow10(dist); - // Add dist / 10^kappa to the significand. + // Add `dist / 10^kappa` to the significand. significand += dist as u64; if is_dist_div_by_kappa { - // Check z^(f) >= epsilon^(f). - // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, - // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f). + // Check `z^(f) >= epsilon^(f)`. + // We have either `yi == zi - epsiloni` or `yi == (zi - epsiloni) - 1`, + // where `yi == zi - epsiloni` if and only if `z^(f) >= epsilon^(f)`. // Since there are only 2 possibilities, we only need to care about the - // parity. Also, zi and r should have the same parity since the divisor is + // parity. Also, `zi` and `r` should have the same parity since the divisor is // an even number. let (yi_parity, is_y_integer) = F::compute_mul_parity(two_fc, &pow5, beta); let round_down = RoundMode::Round.prefer_round_down(significand); if yi_parity != approx_y_parity || (is_y_integer && round_down) { - // If z^(f) >= epsilon^(f), we might have a tie - // when z^(f) == epsilon^(f), or equivalently, when y is an integer. 
+ // If `z^(f) >= epsilon^(f)`, we might have a tie + // when `z^(f) == epsilon^(f)`, or equivalently, when `y` is an integer. // For tie-to-up case, we can just choose the upper one. - //significand -= 1; significand -= 1; } } - // Ensure we haven't re-assigned exponent or minus_k, since this + // Ensure we haven't re-assigned `exponent` or `minus_k`, since this // is a massive potential security vulnerability. debug_assert!(float.exponent() == exponent); debug_assert!(minus_k == floor_log10_pow2(exponent) - F::KAPPA as i32); @@ -574,33 +575,33 @@ pub fn compute_nearest_normal(float: F) -> ExtendedFloat80 { } } -/// Compute the interval I = [w,w+). +/// Compute the interval `I = [w,w+)`. #[allow(clippy::comparison_chain)] // reason="logical approach for algorithm" pub fn compute_left_closed_directed(float: F) -> ExtendedFloat80 { let mantissa = float.mantissa().as_u64(); let exponent = float.exponent(); // Step 1: Schubfach multiplier calculation - // Compute k and beta. + // Compute `k` and `beta`. let minus_k = floor_log10_pow2(exponent) - F::KAPPA as i32; // SAFETY: safe, since value must be finite and therefore in the correct range. - // `-324 <= exponent <= 308`, so `x * log10(2)` must be in [-98, 93] (for f64). - // We have precomputed powers for [-292, 326] for f64 (same logic applies for - // f32) so this is **ALWAYS** safe. + // `-324 <= exponent <= 308`, so `x * log10(2)` must be in `[-98, 93]` (for + // f64). We have pre-computed powers for `[-292, 326]` for f64 (same logic + // applies for f32) so this is **ALWAYS** safe. let pow5 = unsafe { F::dragonbox_power(-minus_k) }; let beta = exponent + floor_log2_pow10(-minus_k); - // Compute zi and deltai. - // 10^kappa <= deltai < 10^(kappa + 1) + // Compute `zi` and `deltai`. + // `10^kappa <= deltai < 10^(kappa + 1)` let two_fc = mantissa << 1; let deltai = F::compute_delta(&pow5, beta); let (mut xi, mut is_x_integer) = F::compute_mul(two_fc << beta, &pow5); // Deal with the unique exceptional cases - // 29711844 * 2^-82 - // = 6.1442653300000000008655037797566933477355632930994033813476... * 10^-18 - // and 29711844 * 2^-81 - // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17 + // `29711844 * 2^-82 + // = 6.1442653300000000008655037797566933477355632930994033813476... * 10^-18` + // and `29711844 * 2^-81 + // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17` // for binary32. if F::BITS == 32 && exponent <= -80 { is_x_integer = false; @@ -613,8 +614,8 @@ pub fn compute_left_closed_directed(float: F) -> ExtendedFloat80 { // Step 2: Try larger divisor; remove trailing zeros if necessary let big_divisor = pow32(10, F::KAPPA + 1); - // Using an upper bound on xi, we might be able to optimize the division - // better than the compiler; we are computing xi / big_divisor here. + // Using an upper bound on `xi`, we might be able to optimize the division + // better than the compiler; we are computing `xi / big_divisor` here. let exp = F::KAPPA + 1; let n_max = (1 << (F::MANTISSA_SIZE + 1)) * big_divisor as u64 - 1; let mut significand = F::divide_by_pow10(xi, exp, n_max); @@ -636,10 +637,10 @@ pub fn compute_left_closed_directed(float: F) -> ExtendedFloat80 { } else if r == deltai { // Compare the fractional parts. // This branch is never taken for the exceptional cases - // 2f_c = 29711482, e = -81 - // (6.1442649164096937243516663440523473127541365101933479309082... * 10^-18) - // and 2f_c = 29711482, e = -80 - // (1.2288529832819387448703332688104694625508273020386695861816... 
* 10^-17). + // `2f_c = 29711482, e = -81` + // `(6.1442649164096937243516663440523473127541365101933479309082... * 10^-18)` + // and `2f_c = 29711482, e = -80` + // `(1.2288529832819387448703332688104694625508273020386695861816... * 10^-17)`. let (zi_parity, is_z_integer) = F::compute_mul_parity(two_fc + 2, &pow5, beta); if zi_parity || is_z_integer { should_short_circuit = false; @@ -654,7 +655,7 @@ pub fn compute_left_closed_directed(float: F) -> ExtendedFloat80 { significand *= 10; significand -= F::div_pow10(r) as u64; - // Ensure we haven't re-assigned exponent or minus_k, since this + // Ensure we haven't re-assigned `exponent` or `minus_k`, since this // is a massive potential security vulnerability. debug_assert!(float.exponent() == exponent); debug_assert!(minus_k == floor_log10_pow2(exponent) - F::KAPPA as i32); @@ -663,7 +664,7 @@ pub fn compute_left_closed_directed(float: F) -> ExtendedFloat80 { } } -/// Compute the interval I = (w−,w].. +/// Compute the interval `I = (w−,w]`. #[allow(clippy::comparison_chain, clippy::if_same_then_else)] // reason="logical approach for algorithm" pub fn compute_right_closed_directed(float: F, shorter: bool) -> ExtendedFloat80 { // ensure our floats have a maximum exp in the range [-324, 308]. @@ -673,19 +674,19 @@ pub fn compute_right_closed_directed(float: F, shorter: bool) -> Ex let exponent = float.exponent(); // Step 1: Schubfach multiplier calculation - // Exponent must be in the range [-324, 308] - // Compute k and beta. + // Exponent must be in the range `[-324, 308]` + // Compute `k` and `beta`. let minus_k = floor_log10_pow2(exponent - shorter as i32) - F::KAPPA as i32; assert!(F::KAPPA <= 2); // SAFETY: safe, since value must be finite and therefore in the correct range. // `-324 <= exponent <= 308`, so `x * log10(2)` must be in [-100, 92] (for f64). - // We have precomputed powers for [-292, 326] for f64 (same logic applies for + // We have pre-computed powers for [-292, 326] for f64 (same logic applies for // f32) so this is **ALWAYS** safe. let pow5: ::Power = unsafe { F::dragonbox_power(-minus_k) }; let beta = exponent + floor_log2_pow10(-minus_k); - // Compute zi and deltai. - // 10^kappa <= deltai < 10^(kappa + 1) + // Compute `zi` and `deltai`. + // `10^kappa <= deltai < 10^(kappa + 1)` let two_fc = mantissa << 1; let deltai = F::compute_delta(&pow5, beta - shorter as i32); let zi = F::compute_mul(two_fc << beta, &pow5).0; @@ -693,8 +694,8 @@ pub fn compute_right_closed_directed(float: F, shorter: bool) -> Ex // Step 2: Try larger divisor; remove trailing zeros if necessary let big_divisor = pow32(10, F::KAPPA + 1); - // Using an upper bound on zi, we might be able to optimize the division better - // than the compiler; we are computing zi / big_divisor here. + // Using an upper bound on `zi`, we might be able to optimize the division + // better than the compiler; we are computing `zi / big_divisor` here. let exp = F::KAPPA + 1; let n_max = (1 << (F::MANTISSA_SIZE + 1)) * big_divisor as u64 - 1; let mut significand = F::divide_by_pow10(zi, exp, n_max); @@ -729,7 +730,7 @@ pub fn compute_right_closed_directed(float: F, shorter: bool) -> Ex significand *= 10; significand -= F::div_pow10(r) as u64; - // Ensure we haven't re-assigned exponent or minus_k. + // Ensure we haven't re-assigned `exponent` or `minus_k`. 
assert!(float.exponent() == exponent); debug_assert!( minus_k == floor_log10_pow2(float.exponent() - shorter as i32) - F::KAPPA as i32 @@ -941,13 +942,13 @@ pub const fn count_factors(radix: u32, mut n: u64) -> u32 { // DIV // --- -// Compute floor(n / 10^exp) for small exp. -// Precondition: exp >= 0. +// Compute `floor(n / 10^exp)` for small exp. +// Precondition: `exp >= 0.` #[inline(always)] pub const fn divide_by_pow10_32(n: u32, exp: u32) -> u32 { // Specialize for 32-bit division by 100. // Compiler is supposed to generate the identical code for just writing - // "n / 100", but for some reason MSVC generates an inefficient code + // `n / 100`, but for some reason MSVC generates an inefficient code // (mul + mov for no apparent reason, instead of single imul), // so we does this manually. if exp == 2 { @@ -958,8 +959,8 @@ pub const fn divide_by_pow10_32(n: u32, exp: u32) -> u32 { } } -// Compute floor(n / 10^exp) for small exp. -// Precondition: n <= n_max +// Compute `floor(n / 10^exp)` for small exp. +// Precondition: `n <= n_max` #[inline(always)] pub const fn divide_by_pow10_64(n: u64, exp: u32, n_max: u64) -> u64 { // Specialize for 64-bit division by 1000. @@ -1096,9 +1097,9 @@ pub const fn rotr64(n: u64, r: u64) -> u64 { } /// Magic numbers for division by a power of 10. -/// Replace n by floor(n / 10^N). -/// Returns true if and only if n is divisible by 10^N. -/// Precondition: n <= 10^(N+1) +/// Replace `n` by `floor(n / 10^N)`. +/// Returns true if and only if n is divisible by `10^N`. +/// Precondition: `n <= 10^(N+1)` /// !!It takes an in-out parameter!! struct Div10Info { magic_number: u32, @@ -1117,7 +1118,7 @@ const F64_DIV10_INFO: Div10Info = Div10Info { macro_rules! check_div_pow10 { ($n:ident, $exp:literal, $float:ident, $info:ident) => {{ - // Make sure the computation for max_n does not overflow. + // Make sure the computation for `max_n` does not overflow. debug_assert!($exp + 2 < floor_log10_pow2(31)); debug_assert!($n as u64 <= pow64(10, $exp + 1)); @@ -1187,24 +1188,24 @@ pub trait DragonboxFloat: Float { /// Remove trailing zeros from the float. fn remove_trailing_zeros(mantissa: u64) -> (u64, i32); - /// Determine if `two_f` is divisible by 2^exp. + /// Determine if `two_f` is divisible by `2^exp`. #[inline(always)] fn divisible_by_pow2(x: u64, exp: u32) -> bool { - // Preconditions: exp >= 1 && x != 0 + // Preconditions: `exp >= 1 && x != 0` x.trailing_zeros() >= exp } - // Replace n by floor(n / 10^N). - // Returns true if and only if n is divisible by 10^N. - // Precondition: n <= 10^(N+1) + // Replace `n` by `floor(n / 10^N)`. + // Returns true if and only if `n` is divisible by `10^N`. + // Precondition: `n <= 10^(N+1)` fn check_div_pow10(n: u32) -> (u32, bool); - // Compute floor(n / 10^N) for small n and exp. - // Precondition: n <= 10^(N+1) + // Compute `floor(n / 10^N)` for small `n` and exp. + // Precondition: `n <= 10^(N+1)` fn div_pow10(n: u32) -> u32; - // Compute floor(n / 10^N) for small N. - // Precondition: n <= n_max + // Compute `floor(n / 10^N)` for small `N`. + // Precondition: `n <= n_max` fn divide_by_pow10(n: u64, exp: u32, n_max: u64) -> u64; } @@ -1391,7 +1392,7 @@ impl DragonboxFloat for f64 { fn remove_trailing_zeros(mantissa: u64) -> (u64, i32) { debug_assert!(mantissa != 0); - // This magic number is ceil(2^90 / 10^8). + // This magic number is `ceil(2^90 / 10^8)`. 
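That `ceil(2^90 / 10^8)` claim is easy to double-check with plain `u128` arithmetic (a standalone verification; the constant names here are arbitrary), and it matches the literal used below:

```rust
// ceil(2^90 / 10^8), computed as floor((2^90 + 10^8 - 1) / 10^8).
const POW10_8: u128 = 100_000_000;
const MAGIC: u128 = ((1u128 << 90) + POW10_8 - 1) / POW10_8;
const _: () = assert!(MAGIC == 12_379_400_392_853_802_749);
```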
let magic_number = 12379400392853802749u64; let nm = mantissa as u128 * magic_number as u128; diff --git a/lexical-write-float/src/api.rs b/lexical-write-float/src/api.rs index f5836188..0513990e 100644 --- a/lexical-write-float/src/api.rs +++ b/lexical-write-float/src/api.rs @@ -16,7 +16,7 @@ use crate::write::WriteFloat; const DEFAULT_OPTIONS: Options = Options::new(); -// Implement ToLexical for numeric type. +// Implement `ToLexical` for numeric type. macro_rules! float_to_lexical { ($($t:tt ; )*) => ($( impl ToLexical for $t { diff --git a/lexical-write-float/src/binary.rs b/lexical-write-float/src/binary.rs index 63ad4ba6..73747e43 100644 --- a/lexical-write-float/src/binary.rs +++ b/lexical-write-float/src/binary.rs @@ -274,7 +274,7 @@ where // Write the significant digits. // Calculate the number of digits we can write left of the decimal point. // If we have a scientific exp of 0, we still need - // to write 1 digit before, so it's ⌊ leading_bits / bits_per_digit ⌋ + 1. + // to write 1 digit before, so it's `⌊ leading_bits / bits_per_digit ⌋ + 1`. let leading_bits = sci_exp; let leading_digits = (leading_bits / bits_per_digit) as usize + 1; @@ -697,7 +697,7 @@ pub fn calculate_shl(exp: i32, bits_per_digit: i32) -> i32 { /// If we have a negative exp, then when scaling that, /// we need to consider that an exp of -1 with 5 bits /// per base is still <0, IE, the sci exp we write has -/// to be: `⌊sci_exp / bits_per_base⌋`, where ceil is +/// to be: `⌊sci_exp / bits_per_base⌋`, where `ceil` is /// wrapping towards greatest magnitude. /// /// If we have a positive exp, we just need the floor of the diff --git a/lexical-write-float/src/compact.rs b/lexical-write-float/src/compact.rs index 574499bd..30977230 100644 --- a/lexical-write-float/src/compact.rs +++ b/lexical-write-float/src/compact.rs @@ -393,7 +393,7 @@ pub fn from_float(float: F) -> ExtendedFloat80 { /// Get the number of bytes shifted. pub fn normalize(fp: &mut ExtendedFloat80) { // Note: - // Using the ctlz intrinsic via leading_zeros is way faster (~10x) + // Using the ctlz intrinsic via `leading_zeros` is way faster (~10x) // than shifting 1-bit at a time, via while loop, and also way // faster (~2x) than an unrolled loop that checks at 32, 16, 4, // 2, and 1 bit. @@ -404,7 +404,7 @@ pub fn normalize(fp: &mut ExtendedFloat80) { // code as it removes conditional logic. // Calculate the number of leading zeros, and then zero-out - // any overflowing bits, to avoid shl overflow when self.mant == 0. + // any overflowing bits, to avoid shl overflow when `self.mant == 0`. if fp.mant != 0 { let shift = fp.mant.leading_zeros() as i32; fp.mant <<= shift; diff --git a/lexical-write-float/src/hex.rs b/lexical-write-float/src/hex.rs index e85387fd..f454f8e7 100644 --- a/lexical-write-float/src/hex.rs +++ b/lexical-write-float/src/hex.rs @@ -4,7 +4,7 @@ //! any power of the mantissa radix using the exponent base. For example, //! given a mantissa radix of `16`, and an exponent base of `8`, //! `16^2` cannot be exactly represented in octal. In short: -//! ⌊log2(r) / log2(b)⌋ == ⌈log2(r) / log2(b)⌉. +//! `⌊log2(r) / log2(b)⌋ == ⌈log2(r) / log2(b)⌉`. //! //! This gives us the following mantissa radix/exponent base combinations: //! diff --git a/lexical-write-float/src/radix.rs b/lexical-write-float/src/radix.rs index e41ab23c..cc695a99 100644 --- a/lexical-write-float/src/radix.rs +++ b/lexical-write-float/src/radix.rs @@ -154,7 +154,7 @@ where // Get our exponent. 
// We can't use a naive float log algorithm, since rounding issues can // cause major issues. For example, `12157665459056928801f64` is `3^40`, - // but glibc gives us (f.ln() / 3.0.ln()) of `39.999`, while Android, and + // but glibc gives us `(f.ln() / 3.0.ln())` of `39.999`, while Android, and // MUSL libm, and openlibm give us `40.0`, the correct answer. This of // course means we have off-by-1 errors, so the correct way is to trim // leading zeros, and then calculate the exponent as the offset. @@ -397,7 +397,7 @@ pub fn truncate_and_round( return (max_digits, false); } - // Need to add the number of leading zeros to the digits digit_count. + // Need to add the number of leading zeros to the digits `digit_count`. let max_digits = { let digits = &mut buffer[start..start + max_digits]; max_digits + ltrim_char_count(digits, b'0') diff --git a/lexical-write-float/src/shared.rs b/lexical-write-float/src/shared.rs index 42cab65a..4e8431af 100644 --- a/lexical-write-float/src/shared.rs +++ b/lexical-write-float/src/shared.rs @@ -35,7 +35,7 @@ pub fn round_up(digits: &mut [u8], count: usize, radix: u32) -> (usize, bool) { digits[index - 1] = rounded; return (index, false); } - // Don't have to assign b'0' otherwise, since we're just carrying + // Don't have to assign `b'0'` otherwise, since we're just carrying // to the next digit. index -= 1; } diff --git a/lexical-write-float/src/table_grisu.rs b/lexical-write-float/src/table_grisu.rs index dfb6b4c3..9949340f 100644 --- a/lexical-write-float/src/table_grisu.rs +++ b/lexical-write-float/src/table_grisu.rs @@ -5,7 +5,7 @@ /// Cached powers of ten as specified by the Grisu algorithm. /// -/// Cached powers of 10^k, calculated as if by: +/// Cached powers of `10^k`, calculated as if by: /// `ceil((alpha-e+63) * ONE_LOG_TEN);` /// /// The estimation of the exponents can be trivially shown to be true, diff --git a/lexical-write-integer/src/algorithm.rs b/lexical-write-integer/src/algorithm.rs index 22122f79..85dc59b6 100644 --- a/lexical-write-integer/src/algorithm.rs +++ b/lexical-write-integer/src/algorithm.rs @@ -108,10 +108,10 @@ unsafe fn write_digits( let r1 = usize::as_cast(T::TWO * (r / radix2)); let r2 = usize::as_cast(T::TWO * (r % radix2)); - // SAFETY: This is always safe, since the table is 2*radix^2, and - // r1 and r2 must be in the range [0, 2*radix^2-1), since the maximum - // value of r is `radix4-1`, which must have a div and r - // in the range [0, radix^2-1). + // SAFETY: This is always safe, since the table is `2*radix^2`, and + // `r1` and `r2` must be in the range `[0, 2*radix^2-1)`, since the maximum + // value of r is `radix4-1`, which must have a `div` and `r` + // in the range `[0, radix^2-1)`. write_digits!(buffer, index, table, r2); write_digits!(buffer, index, table, r1); } @@ -121,21 +121,21 @@ unsafe fn write_digits( let r = usize::as_cast(T::TWO * (value % radix2)); value /= radix2; - // SAFETY: this is always safe, since the table is 2*radix^2, and - // r must be in the range [0, 2*radix^2-1). + // SAFETY: this is always safe, since the table is `2*radix^2`, and + // `r` must be in the range `[0, 2*radix^2-1)`. write_digits!(buffer, index, table, r); } // Decode last 2 digits. if value < radix { let r = u32::as_cast(value); - // SAFETY: this is always safe, since value < radix, so it must be < 36. + // SAFETY: this is always safe, since `value < radix`, so it must be < 36. 
write_digit!(buffer, index, r); } else { let r = usize::as_cast(T::TWO * value); - // SAFETY: this is always safe, since the table is 2*radix^2, and - // the value must <= radix^2, so rem must be in the range - // [0, 2*radix^2-1). + // SAFETY: this is always safe, since the table is `2*radix^2`, and + // the value must be `<= radix^2`, so `rem` must be in the range + // `[0, 2*radix^2-1)`. write_digits!(buffer, index, table, r); } @@ -161,7 +161,7 @@ unsafe fn write_step_digits( debug_assert_radix(radix); let start = index; - // SAFETY: safe as long as the call to write_step_digits is safe. + // SAFETY: safe as long as the call to `write_step_digits` is safe. let index = unsafe { write_digits(value, radix, table, buffer, index, count) }; // Write the remaining 0 bytes. let end = start.saturating_sub(step); @@ -189,7 +189,7 @@ pub fn algorithm(value: T, radix: u32, table: &[u8], buffer: &mut [u8], count where T: UnsignedInteger, { - // This is so that radix^4 does not overflow, since 36^4 overflows a u16. + // This is so that `radix^4` does not overflow, since `36^4` overflows a u16. assert!(T::BITS >= 32, "Must have at least 32 bits in the input."); assert!(radix <= 36, "radix must be <= 36"); assert!(table.len() >= (radix * radix * 2) as usize, "table must be 2 * radix^2 long"); @@ -198,7 +198,7 @@ where let buffer = &mut buffer[..count]; // SAFETY: Both forms of unchecked indexing cannot overflow. - // The table always has 2*radix^2 elements, so it must be a legal index. + // The table always has `2*radix^2` elements, so it must be a legal index. // The buffer is ensured to have at least `FORMATTED_SIZE` or // `FORMATTED_SIZE_DECIMAL` characters, which is the maximum number of // digits an integer of that size may write. @@ -224,8 +224,8 @@ pub fn algorithm_u128( buffer: &mut [u8], count: usize, ) -> usize { - // NOTE: Use the const version of radix for u64_step and - // u128_divrem to ensure they're evaluated at compile time. + // NOTE: Use the const version of radix for `u64_step` and + // `u128_divrem` to ensure they're evaluated at compile time. assert!(NumberFormat::<{ FORMAT }> {}.is_valid()); assert!(count <= buffer.len()); @@ -243,7 +243,7 @@ pub fn algorithm_u128( } // LOGIC: Both forms of unchecked indexing cannot overflow. - // The table always has 2*radix^2 elements, so it must be a legal index. + // The table always has `2*radix^2` elements, so it must be a legal index. // The buffer is ensured to have at least `FORMATTED_SIZE` or // `FORMATTED_SIZE_DECIMAL` characters, which is the maximum number of // digits an integer of that size may write. diff --git a/lexical-write-integer/src/decimal.rs b/lexical-write-integer/src/decimal.rs index 52d304bc..e21c131d 100644 --- a/lexical-write-integer/src/decimal.rs +++ b/lexical-write-integer/src/decimal.rs @@ -99,7 +99,7 @@ pub fn fast_digit_count(x: u32) -> usize { 42949672960, 42949672960, ]; - // This always safe, since fast_log2 will always return a value + // This is always safe, since `fast_log2` will always return a value // <= 32. This is because the range of values from `ctlz(x | 1)` is // `[0, 31]`, so `32 - 1 - ctlz(x | 1)` must be in the range `[0, 31]`. let shift = TABLE[fast_log2(x)]; diff --git a/lexical-write-integer/src/lib.rs b/lexical-write-integer/src/lib.rs index ea9e0e95..fb168010 100644 --- a/lexical-write-integer/src/lib.rs +++ b/lexical-write-integer/src/lib.rs @@ -48,7 +48,7 @@ //! This module uses some more unsafe code for moderately acceptable //! performance.
The compact decimal serializer has no non-local safety //! invariants, which since it's focused on code size rather than performance, -//! this tradeoff is acceptable and it uses a temporary, over-allocated buffer +//! this trade-off is acceptable and it uses a temporary, over-allocated buffer //! as an intermediate. //! //! The decimal writer relies on pre-computed tables and an exact calculation //! @@ -59,7 +59,7 @@ //! pre-computed tables, so we cannot just iterate over the slice and assign //! iteratively. Using checked indexing can lead to 30%+ decreases in //! performance. However, with careful analysis and factoring of the code, it's -//! fairly easy to demonstrate the safety as long as the caller enusres at least +//! fairly easy to demonstrate the safety as long as the caller ensures at least //! the required number of digits are provided. //! //! Our algorithms work like this, carving off the lower digits and writing them //! @@ -101,12 +101,12 @@ //! tables are large enough so there are no non-local safety considerations //! there. The current logic call stack is: //! 1. [`to_lexical`] -//! 2. [decimal][dec], compact, or radix (gts the correct tables and calls +//! 2. [decimal][dec], compact, or radix (gets the correct tables and calls //! algorithm) //! 3. [algorithm] //! //! [decimal][dec], compact, and radix therefore **MUST** be safe and do type -//! check of the bounds to avoid too much expoosure to unsafety. Only +//! check of the bounds to avoid too much exposure to unsafety. Only //! [`algorithm`] should have any unsafety associated with it. That is, as long //! as the direct caller has ensured the proper buffer is allocated, there are //! no non-local safety invariants. diff --git a/lexical-write-integer/src/table_decimal.rs b/lexical-write-integer/src/table_decimal.rs index d5b5fa7f..6c34eac8 100644 --- a/lexical-write-integer/src/table_decimal.rs +++ b/lexical-write-integer/src/table_decimal.rs @@ -6,7 +6,7 @@ // RADIX^2 TABLES // -------------- -// Conditionally compile the precompiled radix**2 tables. +// Conditionally compile the pre-computed radix**2 tables. // These tables take `2 * (value % (radix^2))`, and return // two consecutive values corresponding to both digits. // diff --git a/lexical/src/lib.rs b/lexical/src/lib.rs index 66974b22..722046d7 100644 --- a/lexical/src/lib.rs +++ b/lexical/src/lib.rs @@ -144,11 +144,11 @@ //! #### safe //! //! This replaces most unchecked indexing, required in cases where the -//! compiler cannot ellide the check, with checked indexing. However, +//! compiler cannot elide the check, with checked indexing. However, //! it does not fully replace all unsafe behavior with safe behavior. -//! To minimize the risk of UB and out-of-bounds reads/writers, extensive -//! edge-cases, property-based tests, and fuzzing is done with both the -//! safe feature enabled and disabled, with the tests verified by Miri +//! To minimize the risk of undefined behavior and out-of-bounds reads/writes, +//! extensive edge-cases, property-based tests, and fuzzing are done with both +//! the safe feature enabled and disabled, with the tests verified by Miri //! and Valgrind. //! //! # Configuration API @@ -357,12 +357,12 @@ pub use lexical_core::{ToLexical, ToLexicalWithOptions}; // NOTE: We cannot just use an uninitialized vector with excess capacity and // then use read-assign rather than `ptr::write` or `MaybeUninit.write` to -// modify the values. When LLVM was the primary codegen, this was +// modify the values.
When LLVM was the primary code generator, this was // **UNSPECIFIED** but not undefined behavior: reading undef primitives is safe: // https://llvm.org/docs/LangRef.html#undefined-values // // However, a different backend such as cranelift might make this undefined -// behavior. That is, from the perspective of Rust, this is UB: +// behavior. That is, from the perspective of Rust, this is undefined behavior: // // ```rust // let x = Vec::::with_capacity(500); @@ -377,7 +377,7 @@ pub use lexical_core::{ToLexical, ToLexicalWithOptions}; // // Currently, since LLVM treats it as unspecified behavior and will not drop // values, there is no risk of a memory leak and this is **currently** safe. -// However, this can explode at any time, just like any UB. +// However, this can explode at any time, just like any undefined behavior. /// High-level conversion of a number to a decimal-encoded string. /// @@ -431,15 +431,15 @@ pub fn to_string_with_options( n: N, options: &N::Options, ) -> String { - // Need to use the buffer_size hint to properly deal with float formatting + // Need to use the `buffer_size` hint to properly deal with float formatting // options. let size = N::Options::buffer_size::(options); let mut buf = vec![0u8; size]; let slc = buf.as_mut_slice(); let len = lexical_core::write_with_options::<_, FORMAT>(n, slc, options).len(); - // SAFETY: safe since the buffer is of sufficient size, len() must be <= the vec - // size. + // SAFETY: safe since the buffer is of sufficient size, `len()` must be <= the + // vec size. unsafe { buf.set_len(len); String::from_utf8_unchecked(buf) diff --git a/spellcheck.dic b/spellcheck.dic new file mode 100644 index 00000000..97e27fb8 --- /dev/null +++ b/spellcheck.dic @@ -0,0 +1,166 @@ +1000 +f32 +f64 +radix +dragonbox +n_max +SPARC +xcross +sparc64 +sparcv9 +riscv +riscv64 +powerpc +powerpc64 +aarch64 +s390x +mips64 +x86 +x87 +x86_64 +natively +base2 +base10 +base36 +basen +u8 +u16 +u32 +u64 +u128 +i8 +i16 +i32 +i64 +i128 +NaN +sNaN +fn +struct +const +GETTERS +SETTERS +runtime +unbias +sse2 +llvm +Microsystems +SunSoft +e_sqrt +src +msun +usr +dtoa +ftoa +karatsuba +bigint +len +reallocations +ROR +mul +imul +mov +MSVC +significand +unmaintainable +freebsd +FreeBSD +FIXME +TODO +boolean +ctlz +0s +iteratively +resize +unsafety +log2 +log10 +bellerophon +ns +ms +iter +μs +MulAssign +ptr +bigfloat +vec +radixes +radices +Schubfach +binary32 +binary64 +Lemire +Apache2 +Grisu +shl +shr +bitwise +pow2 +formatter +formatters +config +Loitsch +Florian +Samoljuk +Samoljuk's +denormal +subnormal +serializer +deserializer +cryptographically +clippy +structs +parsers +ASM +backend +codebase +invariants +fast_float +bitmask +bitmasks +miri +valgrind +itoa +atoi +Golang +glibc +MUSL +libm +libc +openlibm +customizable +unoptimized +FPU +FPUs +subslice +subslicing +JSON +inlining +allocator +undef +cfg +impl +f128 +f16 +bf16 +representable +β +γ +IEEE754 +bfloat16 +underflows +overflows +str +benchmarking +refactor +ok +configurability +num +APIs +uncollapsed +backends +lossy +divmod +bigcomp +unittesting +fallthrough diff --git a/spellcheck.toml b/spellcheck.toml new file mode 100644 index 00000000..30815c05 --- /dev/null +++ b/spellcheck.toml @@ -0,0 +1,28 @@ + +dev_comments = true + +[Hunspell] +lang = "en_US" +use_builtin = true +skip_os_lookups = false +tokenization_splitchars = "\",;:.!?#(){}[]|/_-‒^'`&@§¶…=<>~+≅|%" +search_dirs = [ "." 
] +extra_dictionaries = [ "spellcheck.dic" ] + +[Hunspell.quirks] +transform_regex = [ + "^[+-]?\\d+\\^[+-]?\\d*$", + "^0?b\\d+$", + "^0x[A-Fa-f0-9]+$", + # this is for C-style hex floats + "^0x[A-Fa-f0-9]+p\\d+$", + # these are for mathematical notation + "^[A-Za-z]\\d?(?:_[A-Za-z]\\d?)?$", + # this is for multiplication cases + "^\\d+x$", + # this is for our custom radix bases + "^[Bb][Aa][Ss][Ee]\\d+$", + "^[Rr][Aa][Dd][Ii][Xx]\\d+$", + # this is for markdown bolding + "\\*{2}\\w+\\*{2}" ]
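As a quick sanity check on the `[Hunspell.quirks]` section above, the sketch below exercises each `transform_regex` pattern against the kind of token it is meant to exempt from spellchecking. This is illustrative only: it assumes the external `regex` crate (cargo-spellcheck applies these patterns itself), and the sample tokens are hypothetical stand-ins for the notation that appears throughout the doc comments.

```rust
// Illustrative only: verify that the `transform_regex` quirks match the kinds
// of tokens they are meant to skip. Uses the `regex` crate, which is not a
// dependency of this repository; the sample tokens are hypothetical.
use regex::Regex;

fn main() {
    let quirks = [
        (r"^[+-]?\d+\^[+-]?\d*$", "10^-17"),        // powers written with a caret
        (r"^0?b\d+$", "0b101"),                     // binary literals
        (r"^0x[A-Fa-f0-9]+$", "0xFFFF"),            // hex literals
        (r"^0x[A-Fa-f0-9]+p\d+$", "0x1p53"),        // C-style hex floats
        (r"^[A-Za-z]\d?(?:_[A-Za-z]\d?)?$", "f_c"), // mathematical notation
        (r"^\d+x$", "10x"),                         // multiplication factors
        (r"^[Bb][Aa][Ss][Ee]\d+$", "base36"),       // custom radix bases
        (r"^[Rr][Aa][Dd][Ii][Xx]\d+$", "radix10"),
        (r"\*{2}\w+\*{2}", "**ALWAYS**"),           // markdown bolding
    ];
    for (pattern, token) in quirks {
        let re = Regex::new(pattern).expect("pattern should compile");
        assert!(re.is_match(token), "{token} should be skipped by {pattern}");
    }
    println!("all sample tokens are covered by the quirk patterns");
}
```

If some new notation starts getting flagged, the two options this configuration leaves open are extending `transform_regex` or adding the word to `spellcheck.dic`.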