ordered-float-4.2.2/.cargo_vcs_info.json

{
  "git": {
    "sha1": "f3cd47cd89fb6b0fdb5183b2de10b8785b56c6ed"
  },
  "path_in_vcs": ""
}

ordered-float-4.2.2/.github/workflows/tests.yaml

on: [push, pull_request]

name: Tests

jobs:
  test:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - stable
          - 1.60.0
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: ${{ matrix.rust }}
          override: true
      - name: Install rustfmt + clippy
        if: matrix.rust == 'stable'
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: ${{ matrix.rust }}
          override: true
          components: rustfmt, clippy
      - name: Check Fmt
        if: matrix.rust == 'stable'
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: -- --check
      - name: Check Lints (all features)
        if: matrix.rust == 'stable'
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: --tests --features ${{ env.all_features }}
      - name: Test (default features)
        uses: actions-rs/cargo@v1
        with:
          command: test
      - name: Test (no default features)
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --no-default-features
      - name: Test (schema features subset)
        if: matrix.rust == 'stable'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --features "std,schemars"
      - name: Test (rand features subset)
        if: matrix.rust == 'stable'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --features "rand,randtest"
      - name: Test (all features)
        if: matrix.rust == 'stable'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --features ${{ env.all_features }}

env:
  all_features: "arbitrary,bytemuck,rand,randtest,serde,schemars,proptest,rkyv,rkyv_ck,speedy"

ordered-float-4.2.2/.gitignore

.DS_Store
*~
*#
*.o
*.so
*.swp
*.dylib
*.dSYM
*.dll
*.rlib
*.dummy
*.exe
*-test
/doc/
/target/
/examples/*
!/examples/*.rs

ordered-float-4.2.2/.travis.yml

language: rust
rust:
  - 1.34.0
  - nightly
  - beta
  - stable
sudo: false
env:
  matrix:
    - FEATURES=""
    - FEATURES="std"
    - FEATURES="serde"
    - FEATURES="std,serde"
script:
  - cargo test -v --no-default-features --features "$FEATURES"

ordered-float-4.2.2/Cargo.toml

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.60"
name = "ordered-float"
version = "4.2.2"
authors = [
    "Jonathan Reem",
    "Matt Brubeck",
]
description = "Wrappers for total ordering on floats"
readme = "README.md"
keywords = [
    "no_std",
    "ord",
    "f64",
    "f32",
    "sort",
]
categories = [
    "science",
    "rust-patterns",
    "no-std",
]
license = "MIT"
repository = "https://github.com/reem/rust-ordered-float"

[dependencies.arbitrary]
version = "1.0.0"
optional = true

[dependencies.borsh]
version = "1.2.0"
optional = true
default-features = false

[dependencies.bytemuck]
version = "1.12.2"
optional = true
default-features = false

[dependencies.num-traits]
version = "0.2.1"
default-features = false

[dependencies.proptest]
version = "1.0.0"
optional = true

[dependencies.rand]
version = "0.8.3"
optional = true
default-features = false

[dependencies.rkyv]
version = "0.7.41"
features = ["rend"]
optional = true
default-features = false

[dependencies.schemars]
version = "0.8.8"
optional = true

[dependencies.serde]
version = "1.0"
optional = true
default-features = false

[dependencies.speedy]
version = "0.8.3"
optional = true
default-features = false

[dev-dependencies.serde_test]
version = "1.0"

[features]
default = ["std"]
randtest = [
    "rand/std",
    "rand/std_rng",
]
rkyv = ["rkyv_32"]
rkyv_16 = [
    "dep:rkyv",
    "rkyv?/size_16",
]
rkyv_32 = [
    "dep:rkyv",
    "rkyv?/size_32",
]
rkyv_64 = [
    "dep:rkyv",
    "rkyv?/size_64",
]
rkyv_ck = ["rkyv?/validation"]
serde = [
    "dep:serde",
    "rand?/serde1",
]
std = ["num-traits/std"]

ordered-float-4.2.2/Cargo.toml.orig

[package]
name = "ordered-float"
version = "4.2.2"
authors = [
    "Jonathan Reem",
    "Matt Brubeck",
]
license = "MIT"
description = "Wrappers for total ordering on floats"
repository = "https://github.com/reem/rust-ordered-float"
readme = "README.md"
keywords = ["no_std", "ord", "f64", "f32", "sort"]
categories = ["science", "rust-patterns", "no-std"]
edition = "2021"
rust-version = "1.60"

[dependencies]
num-traits = { version = "0.2.1", default-features = false }
serde = { version = "1.0", optional = true, default-features = false }
rkyv = { version = "0.7.41", optional = true, default-features = false, features = ["rend"] }
schemars = { version = "0.8.8", optional = true }
rand = { version = "0.8.3", optional = true, default-features = false }
arbitrary = { version = "1.0.0", optional = true }
proptest = { version = "1.0.0", optional = true }
speedy = { version = "0.8.3", optional = true, default-features = false }
bytemuck = { version = "1.12.2", optional = true, default-features = false }
borsh = { version = "1.2.0", optional = true, default-features = false }

[dev-dependencies]
serde_test = "1.0"

[features]
default = ["std"]
std = ["num-traits/std"]
serde = ["dep:serde", "rand?/serde1"]
randtest = ["rand/std", "rand/std_rng"]
rkyv = ["rkyv_32"]
rkyv_16 = ["dep:rkyv", "rkyv?/size_16"]
rkyv_32 = ["dep:rkyv", "rkyv?/size_32"]
rkyv_64 = ["dep:rkyv", "rkyv?/size_64"]
rkyv_ck = ["rkyv?/validation"]

ordered-float-4.2.2/LICENSE-MIT

Copyright (c) 2015 Jonathan Reem

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to
do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

ordered-float-4.2.2/README.md

# ordered-float

Provides several wrapper types for `Ord` and `Eq` implementations on f64 and friends.

* [API documentation](https://docs.rs/ordered-float)
* [Release notes](https://github.com/reem/rust-ordered-float/releases)

## no_std

To use `ordered_float` without requiring the Rust standard library, disable the default `std` feature:

```toml
[dependencies]
ordered-float = { version = "4.0", default-features = false }
```

## Optional features

The following optional features can be enabled in `Cargo.toml`:

* `arbitrary`: Implements the `arbitrary::Arbitrary` trait.
* `bytemuck`: Adds implementations for traits provided by the `bytemuck` crate.
* `borsh`: Adds implementations for traits provided by the `borsh` crate.
* `rand`: Adds implementations for various distribution types provided by the `rand` crate.
* `serde`: Implements the `serde::Serialize` and `serde::Deserialize` traits.
* `schemars`: Implements the `schemars::JsonSchema` trait.
* `proptest`: Implements the `proptest::Arbitrary` trait.
* `rkyv_16`: Implements `rkyv`'s `Archive`, `Serialize` and `Deserialize` traits with `size_16`.
* `rkyv_32`: Implements `rkyv`'s `Archive`, `Serialize` and `Deserialize` traits with `size_32`.
* `rkyv_64`: Implements `rkyv`'s `Archive`, `Serialize` and `Deserialize` traits with `size_64`.
* `rkyv_ck`: Implements the `bytecheck::CheckBytes` trait.
* `speedy`: Implements `speedy`'s `Readable` and `Writable` traits.

## License

MIT

ordered-float-4.2.2/rustfmt.toml

# These two unstable options might improve the layout of the code:
#fn_single_line = true
#where_single_line = true

ordered-float-4.2.2/src/lib.rs

#![no_std]
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
#![allow(clippy::derive_partial_eq_without_eq)]

//! Wrappers for total order on Floats. See the [`OrderedFloat`] and [`NotNan`] docs for details.
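//!
//! A condensed, illustrative sketch combining both wrappers (the `OrderedFloat` and `NotNan`
//! docs below cover the details and the exact guarantees):
//!
//! ```
//! use ordered_float::{NotNan, OrderedFloat};
//!
//! // `OrderedFloat` accepts any float and sorts NaN as the greatest value.
//! let mut v = [OrderedFloat(f32::NAN), OrderedFloat(2.0), OrderedFloat(1.0)];
//! v.sort();
//! assert_eq!(v[0], OrderedFloat(1.0));
//!
//! // `NotNan` rejects NaN at construction, so later comparisons never involve NaN.
//! let a = NotNan::new(1.0_f64).unwrap();
//! let b = NotNan::new(2.0_f64).unwrap();
//! assert!(a < b);
//! assert!(NotNan::new(f64::NAN).is_err());
//! ```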
#[cfg(feature = "std")] extern crate std; #[cfg(feature = "std")] use std::error::Error; use core::borrow::Borrow; use core::cmp::Ordering; use core::convert::TryFrom; use core::fmt; use core::hash::{Hash, Hasher}; use core::iter::{Product, Sum}; use core::num::FpCategory; use core::ops::{ Add, AddAssign, Deref, DerefMut, Div, DivAssign, Mul, MulAssign, Neg, Rem, RemAssign, Sub, SubAssign, }; use core::str::FromStr; pub use num_traits::float::FloatCore; use num_traits::{ AsPrimitive, Bounded, FloatConst, FromPrimitive, Num, NumCast, One, Signed, ToPrimitive, Zero, }; #[cfg(feature = "std")] pub use num_traits::{Float, Pow}; #[cfg(feature = "rand")] pub use impl_rand::{UniformNotNan, UniformOrdered}; // masks for the parts of the IEEE 754 float const SIGN_MASK: u64 = 0x8000000000000000u64; const EXP_MASK: u64 = 0x7ff0000000000000u64; const MAN_MASK: u64 = 0x000fffffffffffffu64; // canonical raw bit patterns (for hashing) const CANONICAL_NAN_BITS: u64 = 0x7ff8000000000000u64; #[inline(always)] fn canonicalize_signed_zero(x: T) -> T { // -0.0 + 0.0 == +0.0 under IEEE754 roundTiesToEven rounding mode, // which Rust guarantees. Thus by adding a positive zero we // canonicalize signed zero without any branches in one instruction. x + T::zero() } /// A wrapper around floats providing implementations of `Eq`, `Ord`, and `Hash`. /// /// NaN is sorted as *greater* than all other values and *equal* /// to itself, in contradiction with the IEEE standard. /// /// ``` /// use ordered_float::OrderedFloat; /// use std::f32::NAN; /// /// let mut v = [OrderedFloat(NAN), OrderedFloat(2.0), OrderedFloat(1.0)]; /// v.sort(); /// assert_eq!(v, [OrderedFloat(1.0), OrderedFloat(2.0), OrderedFloat(NAN)]); /// ``` /// /// Because `OrderedFloat` implements `Ord` and `Eq`, it can be used as a key in a `HashSet`, /// `HashMap`, `BTreeMap`, or `BTreeSet` (unlike the primitive `f32` or `f64` types): /// /// ``` /// # use ordered_float::OrderedFloat; /// # use std::collections::HashSet; /// # use std::f32::NAN; /// /// let mut s: HashSet> = HashSet::new(); /// s.insert(OrderedFloat(NAN)); /// assert!(s.contains(&OrderedFloat(NAN))); /// ``` #[derive(Default, Clone, Copy)] #[repr(transparent)] pub struct OrderedFloat(pub T); impl OrderedFloat { /// Get the value out. #[inline] pub fn into_inner(self) -> T { self.0 } } impl AsRef for OrderedFloat { #[inline] fn as_ref(&self) -> &T { &self.0 } } impl AsMut for OrderedFloat { #[inline] fn as_mut(&mut self) -> &mut T { &mut self.0 } } impl<'a, T: FloatCore> From<&'a T> for &'a OrderedFloat { #[inline] fn from(t: &'a T) -> &'a OrderedFloat { // Safety: OrderedFloat is #[repr(transparent)] and has no invalid values. unsafe { &*(t as *const T as *const OrderedFloat) } } } impl<'a, T: FloatCore> From<&'a mut T> for &'a mut OrderedFloat { #[inline] fn from(t: &'a mut T) -> &'a mut OrderedFloat { // Safety: OrderedFloat is #[repr(transparent)] and has no invalid values. unsafe { &mut *(t as *mut T as *mut OrderedFloat) } } } impl PartialOrd for OrderedFloat { #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &Self) -> bool { !self.ge(other) } #[inline] fn le(&self, other: &Self) -> bool { other.ge(self) } #[inline] fn gt(&self, other: &Self) -> bool { !other.ge(self) } #[inline] fn ge(&self, other: &Self) -> bool { // We consider all NaNs equal, and NaN is the largest possible // value. Thus if self is NaN we always return true. Otherwise // self >= other is correct. 
If other is also not NaN it is trivially // correct, and if it is we note that nothing can be greater or // equal to NaN except NaN itself, which we already handled earlier. self.0.is_nan() | (self.0 >= other.0) } } impl Ord for OrderedFloat { #[inline] fn cmp(&self, other: &Self) -> Ordering { #[allow(clippy::comparison_chain)] if self < other { Ordering::Less } else if self > other { Ordering::Greater } else { Ordering::Equal } } } impl PartialEq for OrderedFloat { #[inline] fn eq(&self, other: &OrderedFloat) -> bool { if self.0.is_nan() { other.0.is_nan() } else { self.0 == other.0 } } } impl PartialEq for OrderedFloat { #[inline] fn eq(&self, other: &T) -> bool { self.0 == *other } } impl Hash for OrderedFloat { fn hash(&self, state: &mut H) { let bits = if self.is_nan() { CANONICAL_NAN_BITS } else { raw_double_bits(&canonicalize_signed_zero(self.0)) }; bits.hash(state) } } impl fmt::Debug for OrderedFloat { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Display for OrderedFloat { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::LowerExp for OrderedFloat { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl fmt::UpperExp for OrderedFloat { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl From> for f32 { #[inline] fn from(f: OrderedFloat) -> f32 { f.0 } } impl From> for f64 { #[inline] fn from(f: OrderedFloat) -> f64 { f.0 } } impl From for OrderedFloat { #[inline] fn from(val: T) -> Self { OrderedFloat(val) } } impl From for OrderedFloat { fn from(val: bool) -> Self { OrderedFloat(val as u8 as f32) } } impl From for OrderedFloat { fn from(val: bool) -> Self { OrderedFloat(val as u8 as f64) } } macro_rules! impl_ordered_float_from { ($dst:ty, $src:ty) => { impl From<$src> for OrderedFloat<$dst> { fn from(val: $src) -> Self { OrderedFloat(val.into()) } } }; } impl_ordered_float_from! {f64, i8} impl_ordered_float_from! {f64, i16} impl_ordered_float_from! {f64, i32} impl_ordered_float_from! {f64, u8} impl_ordered_float_from! {f64, u16} impl_ordered_float_from! {f64, u32} impl_ordered_float_from! {f32, i8} impl_ordered_float_from! {f32, i16} impl_ordered_float_from! {f32, u8} impl_ordered_float_from! {f32, u16} impl Deref for OrderedFloat { type Target = T; #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for OrderedFloat { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Eq for OrderedFloat {} macro_rules! 
impl_ordered_float_binop { ($imp:ident, $method:ident, $assign_imp:ident, $assign_method:ident) => { impl $imp for OrderedFloat { type Output = OrderedFloat; #[inline] fn $method(self, other: Self) -> Self::Output { OrderedFloat((self.0).$method(other.0)) } } impl $imp for OrderedFloat { type Output = OrderedFloat; #[inline] fn $method(self, other: T) -> Self::Output { OrderedFloat((self.0).$method(other)) } } impl<'a, T> $imp<&'a T> for OrderedFloat where T: $imp<&'a T>, { type Output = OrderedFloat<>::Output>; #[inline] fn $method(self, other: &'a T) -> Self::Output { OrderedFloat((self.0).$method(other)) } } impl<'a, T> $imp<&'a Self> for OrderedFloat where T: $imp<&'a T>, { type Output = OrderedFloat<>::Output>; #[inline] fn $method(self, other: &'a Self) -> Self::Output { OrderedFloat((self.0).$method(&other.0)) } } impl<'a, T> $imp> for &'a OrderedFloat where &'a T: $imp, { type Output = OrderedFloat<<&'a T as $imp>::Output>; #[inline] fn $method(self, other: OrderedFloat) -> Self::Output { OrderedFloat((self.0).$method(other.0)) } } impl<'a, T> $imp for &'a OrderedFloat where &'a T: $imp, { type Output = OrderedFloat<<&'a T as $imp>::Output>; #[inline] fn $method(self, other: T) -> Self::Output { OrderedFloat((self.0).$method(other)) } } impl<'a, T> $imp<&'a T> for &'a OrderedFloat where &'a T: $imp, { type Output = OrderedFloat<<&'a T as $imp>::Output>; #[inline] fn $method(self, other: &'a T) -> Self::Output { OrderedFloat((self.0).$method(other)) } } impl $assign_imp for OrderedFloat { #[inline] fn $assign_method(&mut self, other: T) { (self.0).$assign_method(other); } } impl<'a, T: $assign_imp<&'a T>> $assign_imp<&'a T> for OrderedFloat { #[inline] fn $assign_method(&mut self, other: &'a T) { (self.0).$assign_method(other); } } impl $assign_imp for OrderedFloat { #[inline] fn $assign_method(&mut self, other: Self) { (self.0).$assign_method(other.0); } } impl<'a, T: $assign_imp<&'a T>> $assign_imp<&'a Self> for OrderedFloat { #[inline] fn $assign_method(&mut self, other: &'a Self) { (self.0).$assign_method(&other.0); } } }; } impl_ordered_float_binop! {Add, add, AddAssign, add_assign} impl_ordered_float_binop! {Sub, sub, SubAssign, sub_assign} impl_ordered_float_binop! {Mul, mul, MulAssign, mul_assign} impl_ordered_float_binop! {Div, div, DivAssign, div_assign} impl_ordered_float_binop! {Rem, rem, RemAssign, rem_assign} macro_rules! impl_ordered_float_pow { ($inner:ty, $rhs:ty) => { #[cfg(feature = "std")] impl Pow<$rhs> for OrderedFloat<$inner> { type Output = OrderedFloat<$inner>; #[inline] fn pow(self, rhs: $rhs) -> OrderedFloat<$inner> { OrderedFloat(<$inner>::pow(self.0, rhs)) } } #[cfg(feature = "std")] impl<'a> Pow<&'a $rhs> for OrderedFloat<$inner> { type Output = OrderedFloat<$inner>; #[inline] fn pow(self, rhs: &'a $rhs) -> OrderedFloat<$inner> { OrderedFloat(<$inner>::pow(self.0, *rhs)) } } #[cfg(feature = "std")] impl<'a> Pow<$rhs> for &'a OrderedFloat<$inner> { type Output = OrderedFloat<$inner>; #[inline] fn pow(self, rhs: $rhs) -> OrderedFloat<$inner> { OrderedFloat(<$inner>::pow(self.0, rhs)) } } #[cfg(feature = "std")] impl<'a, 'b> Pow<&'a $rhs> for &'b OrderedFloat<$inner> { type Output = OrderedFloat<$inner>; #[inline] fn pow(self, rhs: &'a $rhs) -> OrderedFloat<$inner> { OrderedFloat(<$inner>::pow(self.0, *rhs)) } } }; } impl_ordered_float_pow! {f32, i8} impl_ordered_float_pow! {f32, i16} impl_ordered_float_pow! {f32, u8} impl_ordered_float_pow! {f32, u16} impl_ordered_float_pow! {f32, i32} impl_ordered_float_pow! {f64, i8} impl_ordered_float_pow! 
{f64, i16} impl_ordered_float_pow! {f64, u8} impl_ordered_float_pow! {f64, u16} impl_ordered_float_pow! {f64, i32} impl_ordered_float_pow! {f32, f32} impl_ordered_float_pow! {f64, f32} impl_ordered_float_pow! {f64, f64} macro_rules! impl_ordered_float_self_pow { ($base:ty, $exp:ty) => { #[cfg(feature = "std")] impl Pow> for OrderedFloat<$base> { type Output = OrderedFloat<$base>; #[inline] fn pow(self, rhs: OrderedFloat<$exp>) -> OrderedFloat<$base> { OrderedFloat(<$base>::pow(self.0, rhs.0)) } } #[cfg(feature = "std")] impl<'a> Pow<&'a OrderedFloat<$exp>> for OrderedFloat<$base> { type Output = OrderedFloat<$base>; #[inline] fn pow(self, rhs: &'a OrderedFloat<$exp>) -> OrderedFloat<$base> { OrderedFloat(<$base>::pow(self.0, rhs.0)) } } #[cfg(feature = "std")] impl<'a> Pow> for &'a OrderedFloat<$base> { type Output = OrderedFloat<$base>; #[inline] fn pow(self, rhs: OrderedFloat<$exp>) -> OrderedFloat<$base> { OrderedFloat(<$base>::pow(self.0, rhs.0)) } } #[cfg(feature = "std")] impl<'a, 'b> Pow<&'a OrderedFloat<$exp>> for &'b OrderedFloat<$base> { type Output = OrderedFloat<$base>; #[inline] fn pow(self, rhs: &'a OrderedFloat<$exp>) -> OrderedFloat<$base> { OrderedFloat(<$base>::pow(self.0, rhs.0)) } } }; } impl_ordered_float_self_pow! {f32, f32} impl_ordered_float_self_pow! {f64, f32} impl_ordered_float_self_pow! {f64, f64} /// Adds a float directly. impl Sum for OrderedFloat { fn sum>>(iter: I) -> Self { OrderedFloat(iter.map(|v| v.0).sum()) } } impl<'a, T: FloatCore + Sum + 'a> Sum<&'a OrderedFloat> for OrderedFloat { #[inline] fn sum>>(iter: I) -> Self { iter.cloned().sum() } } impl Product for OrderedFloat { fn product>>(iter: I) -> Self { OrderedFloat(iter.map(|v| v.0).product()) } } impl<'a, T: FloatCore + Product + 'a> Product<&'a OrderedFloat> for OrderedFloat { #[inline] fn product>>(iter: I) -> Self { iter.cloned().product() } } impl Signed for OrderedFloat { #[inline] fn abs(&self) -> Self { OrderedFloat(self.0.abs()) } fn abs_sub(&self, other: &Self) -> Self { OrderedFloat(Signed::abs_sub(&self.0, &other.0)) } #[inline] fn signum(&self) -> Self { OrderedFloat(self.0.signum()) } #[inline] fn is_positive(&self) -> bool { self.0.is_positive() } #[inline] fn is_negative(&self) -> bool { self.0.is_negative() } } impl Bounded for OrderedFloat { #[inline] fn min_value() -> Self { OrderedFloat(T::min_value()) } #[inline] fn max_value() -> Self { OrderedFloat(T::max_value()) } } impl FromStr for OrderedFloat { type Err = T::Err; /// Convert a &str to `OrderedFloat`. Returns an error if the string fails to parse. /// /// ``` /// use ordered_float::OrderedFloat; /// /// assert!("-10".parse::>().is_ok()); /// assert!("abc".parse::>().is_err()); /// assert!("NaN".parse::>().is_ok()); /// ``` fn from_str(s: &str) -> Result { T::from_str(s).map(OrderedFloat) } } impl Neg for OrderedFloat { type Output = OrderedFloat; #[inline] fn neg(self) -> Self::Output { OrderedFloat(-self.0) } } impl<'a, T> Neg for &'a OrderedFloat where &'a T: Neg, { type Output = OrderedFloat<<&'a T as Neg>::Output>; #[inline] fn neg(self) -> Self::Output { OrderedFloat(-(&self.0)) } } impl Zero for OrderedFloat { #[inline] fn zero() -> Self { OrderedFloat(T::zero()) } #[inline] fn is_zero(&self) -> bool { self.0.is_zero() } } impl One for OrderedFloat { #[inline] fn one() -> Self { OrderedFloat(T::one()) } } impl NumCast for OrderedFloat { #[inline] fn from(n: F) -> Option { T::from(n).map(OrderedFloat) } } macro_rules! 
impl_as_primitive { (@ (NotNan<$T: ty>) => $(#[$cfg:meta])* impl (NotNan<$U: ty>) ) => { $(#[$cfg])* impl AsPrimitive> for NotNan<$T> { #[inline] fn as_(self) -> NotNan<$U> { // Safety: `NotNan` guarantees that the value is not NaN. unsafe {NotNan::new_unchecked(self.0 as $U) } } } }; (@ ($T: ty) => $(#[$cfg:meta])* impl (NotNan<$U: ty>) ) => { $(#[$cfg])* impl AsPrimitive> for $T { #[inline] fn as_(self) -> NotNan<$U> { NotNan(self as $U) } } }; (@ (NotNan<$T: ty>) => $(#[$cfg:meta])* impl ($U: ty) ) => { $(#[$cfg])* impl AsPrimitive<$U> for NotNan<$T> { #[inline] fn as_(self) -> $U { self.0 as $U } } }; (@ (OrderedFloat<$T: ty>) => $(#[$cfg:meta])* impl (OrderedFloat<$U: ty>) ) => { $(#[$cfg])* impl AsPrimitive> for OrderedFloat<$T> { #[inline] fn as_(self) -> OrderedFloat<$U> { OrderedFloat(self.0 as $U) } } }; (@ ($T: ty) => $(#[$cfg:meta])* impl (OrderedFloat<$U: ty>) ) => { $(#[$cfg])* impl AsPrimitive> for $T { #[inline] fn as_(self) -> OrderedFloat<$U> { OrderedFloat(self as $U) } } }; (@ (OrderedFloat<$T: ty>) => $(#[$cfg:meta])* impl ($U: ty) ) => { $(#[$cfg])* impl AsPrimitive<$U> for OrderedFloat<$T> { #[inline] fn as_(self) -> $U { self.0 as $U } } }; ($T: tt => { $( $U: tt ),* } ) => {$( impl_as_primitive!(@ $T => impl $U); )*}; } impl_as_primitive!((OrderedFloat) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((OrderedFloat) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((NotNan) => { (NotNan), (NotNan) }); impl_as_primitive!((NotNan) => { (NotNan), (NotNan) }); impl_as_primitive!((u8) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((i8) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((u16) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((i16) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((u32) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((i32) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((u64) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((i64) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((usize) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((isize) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((f32) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((f64) => { (OrderedFloat), (OrderedFloat) }); impl_as_primitive!((u8) => { (NotNan), (NotNan) }); impl_as_primitive!((i8) => { (NotNan), (NotNan) }); impl_as_primitive!((u16) => { (NotNan), (NotNan) }); impl_as_primitive!((i16) => { (NotNan), (NotNan) }); impl_as_primitive!((u32) => { (NotNan), (NotNan) }); impl_as_primitive!((i32) => { (NotNan), (NotNan) }); impl_as_primitive!((u64) => { (NotNan), (NotNan) }); impl_as_primitive!((i64) => { (NotNan), (NotNan) }); impl_as_primitive!((usize) => { (NotNan), (NotNan) }); impl_as_primitive!((isize) => { (NotNan), (NotNan) }); impl_as_primitive!((OrderedFloat) => { (u8), (u16), (u32), (u64), (usize), (i8), (i16), (i32), (i64), (isize), (f32), (f64) }); impl_as_primitive!((OrderedFloat) => { (u8), (u16), (u32), (u64), (usize), (i8), (i16), (i32), (i64), (isize), (f32), (f64) }); impl_as_primitive!((NotNan) => { (u8), (u16), (u32), (u64), (usize), (i8), (i16), (i32), (i64), (isize), (f32), (f64) }); impl_as_primitive!((NotNan) => { (u8), (u16), (u32), (u64), (usize), (i8), (i16), (i32), (i64), (isize), (f32), (f64) }); impl FromPrimitive for OrderedFloat { fn from_i64(n: i64) -> Option { T::from_i64(n).map(OrderedFloat) } fn from_u64(n: u64) -> Option { T::from_u64(n).map(OrderedFloat) } fn from_isize(n: isize) -> Option { 
T::from_isize(n).map(OrderedFloat) } fn from_i8(n: i8) -> Option { T::from_i8(n).map(OrderedFloat) } fn from_i16(n: i16) -> Option { T::from_i16(n).map(OrderedFloat) } fn from_i32(n: i32) -> Option { T::from_i32(n).map(OrderedFloat) } fn from_usize(n: usize) -> Option { T::from_usize(n).map(OrderedFloat) } fn from_u8(n: u8) -> Option { T::from_u8(n).map(OrderedFloat) } fn from_u16(n: u16) -> Option { T::from_u16(n).map(OrderedFloat) } fn from_u32(n: u32) -> Option { T::from_u32(n).map(OrderedFloat) } fn from_f32(n: f32) -> Option { T::from_f32(n).map(OrderedFloat) } fn from_f64(n: f64) -> Option { T::from_f64(n).map(OrderedFloat) } } impl ToPrimitive for OrderedFloat { fn to_i64(&self) -> Option { self.0.to_i64() } fn to_u64(&self) -> Option { self.0.to_u64() } fn to_isize(&self) -> Option { self.0.to_isize() } fn to_i8(&self) -> Option { self.0.to_i8() } fn to_i16(&self) -> Option { self.0.to_i16() } fn to_i32(&self) -> Option { self.0.to_i32() } fn to_usize(&self) -> Option { self.0.to_usize() } fn to_u8(&self) -> Option { self.0.to_u8() } fn to_u16(&self) -> Option { self.0.to_u16() } fn to_u32(&self) -> Option { self.0.to_u32() } fn to_f32(&self) -> Option { self.0.to_f32() } fn to_f64(&self) -> Option { self.0.to_f64() } } impl FloatCore for OrderedFloat { fn nan() -> Self { OrderedFloat(T::nan()) } fn infinity() -> Self { OrderedFloat(T::infinity()) } fn neg_infinity() -> Self { OrderedFloat(T::neg_infinity()) } fn neg_zero() -> Self { OrderedFloat(T::neg_zero()) } fn min_value() -> Self { OrderedFloat(T::min_value()) } fn min_positive_value() -> Self { OrderedFloat(T::min_positive_value()) } fn max_value() -> Self { OrderedFloat(T::max_value()) } fn is_nan(self) -> bool { self.0.is_nan() } fn is_infinite(self) -> bool { self.0.is_infinite() } fn is_finite(self) -> bool { self.0.is_finite() } fn is_normal(self) -> bool { self.0.is_normal() } fn classify(self) -> FpCategory { self.0.classify() } fn floor(self) -> Self { OrderedFloat(self.0.floor()) } fn ceil(self) -> Self { OrderedFloat(self.0.ceil()) } fn round(self) -> Self { OrderedFloat(self.0.round()) } fn trunc(self) -> Self { OrderedFloat(self.0.trunc()) } fn fract(self) -> Self { OrderedFloat(self.0.fract()) } fn abs(self) -> Self { OrderedFloat(self.0.abs()) } fn signum(self) -> Self { OrderedFloat(self.0.signum()) } fn is_sign_positive(self) -> bool { self.0.is_sign_positive() } fn is_sign_negative(self) -> bool { self.0.is_sign_negative() } fn recip(self) -> Self { OrderedFloat(self.0.recip()) } fn powi(self, n: i32) -> Self { OrderedFloat(self.0.powi(n)) } fn integer_decode(self) -> (u64, i16, i8) { self.0.integer_decode() } fn epsilon() -> Self { OrderedFloat(T::epsilon()) } fn to_degrees(self) -> Self { OrderedFloat(self.0.to_degrees()) } fn to_radians(self) -> Self { OrderedFloat(self.0.to_radians()) } } #[cfg(feature = "std")] impl Float for OrderedFloat { fn nan() -> Self { OrderedFloat(::nan()) } fn infinity() -> Self { OrderedFloat(::infinity()) } fn neg_infinity() -> Self { OrderedFloat(::neg_infinity()) } fn neg_zero() -> Self { OrderedFloat(::neg_zero()) } fn min_value() -> Self { OrderedFloat(::min_value()) } fn min_positive_value() -> Self { OrderedFloat(::min_positive_value()) } fn max_value() -> Self { OrderedFloat(::max_value()) } fn is_nan(self) -> bool { Float::is_nan(self.0) } fn is_infinite(self) -> bool { Float::is_infinite(self.0) } fn is_finite(self) -> bool { Float::is_finite(self.0) } fn is_normal(self) -> bool { Float::is_normal(self.0) } fn classify(self) -> FpCategory { Float::classify(self.0) } 
fn floor(self) -> Self { OrderedFloat(Float::floor(self.0)) } fn ceil(self) -> Self { OrderedFloat(Float::ceil(self.0)) } fn round(self) -> Self { OrderedFloat(Float::round(self.0)) } fn trunc(self) -> Self { OrderedFloat(Float::trunc(self.0)) } fn fract(self) -> Self { OrderedFloat(Float::fract(self.0)) } fn abs(self) -> Self { OrderedFloat(Float::abs(self.0)) } fn signum(self) -> Self { OrderedFloat(Float::signum(self.0)) } fn is_sign_positive(self) -> bool { Float::is_sign_positive(self.0) } fn is_sign_negative(self) -> bool { Float::is_sign_negative(self.0) } fn mul_add(self, a: Self, b: Self) -> Self { OrderedFloat(self.0.mul_add(a.0, b.0)) } fn recip(self) -> Self { OrderedFloat(Float::recip(self.0)) } fn powi(self, n: i32) -> Self { OrderedFloat(Float::powi(self.0, n)) } fn powf(self, n: Self) -> Self { OrderedFloat(self.0.powf(n.0)) } fn sqrt(self) -> Self { OrderedFloat(self.0.sqrt()) } fn exp(self) -> Self { OrderedFloat(self.0.exp()) } fn exp2(self) -> Self { OrderedFloat(self.0.exp2()) } fn ln(self) -> Self { OrderedFloat(self.0.ln()) } fn log(self, base: Self) -> Self { OrderedFloat(self.0.log(base.0)) } fn log2(self) -> Self { OrderedFloat(self.0.log2()) } fn log10(self) -> Self { OrderedFloat(self.0.log10()) } fn max(self, other: Self) -> Self { OrderedFloat(Float::max(self.0, other.0)) } fn min(self, other: Self) -> Self { OrderedFloat(Float::min(self.0, other.0)) } fn abs_sub(self, other: Self) -> Self { OrderedFloat(self.0.abs_sub(other.0)) } fn cbrt(self) -> Self { OrderedFloat(self.0.cbrt()) } fn hypot(self, other: Self) -> Self { OrderedFloat(self.0.hypot(other.0)) } fn sin(self) -> Self { OrderedFloat(self.0.sin()) } fn cos(self) -> Self { OrderedFloat(self.0.cos()) } fn tan(self) -> Self { OrderedFloat(self.0.tan()) } fn asin(self) -> Self { OrderedFloat(self.0.asin()) } fn acos(self) -> Self { OrderedFloat(self.0.acos()) } fn atan(self) -> Self { OrderedFloat(self.0.atan()) } fn atan2(self, other: Self) -> Self { OrderedFloat(self.0.atan2(other.0)) } fn sin_cos(self) -> (Self, Self) { let (a, b) = self.0.sin_cos(); (OrderedFloat(a), OrderedFloat(b)) } fn exp_m1(self) -> Self { OrderedFloat(self.0.exp_m1()) } fn ln_1p(self) -> Self { OrderedFloat(self.0.ln_1p()) } fn sinh(self) -> Self { OrderedFloat(self.0.sinh()) } fn cosh(self) -> Self { OrderedFloat(self.0.cosh()) } fn tanh(self) -> Self { OrderedFloat(self.0.tanh()) } fn asinh(self) -> Self { OrderedFloat(self.0.asinh()) } fn acosh(self) -> Self { OrderedFloat(self.0.acosh()) } fn atanh(self) -> Self { OrderedFloat(self.0.atanh()) } fn integer_decode(self) -> (u64, i16, i8) { Float::integer_decode(self.0) } fn epsilon() -> Self { OrderedFloat(::epsilon()) } fn to_degrees(self) -> Self { OrderedFloat(Float::to_degrees(self.0)) } fn to_radians(self) -> Self { OrderedFloat(Float::to_radians(self.0)) } } impl Num for OrderedFloat { type FromStrRadixErr = T::FromStrRadixErr; fn from_str_radix(str: &str, radix: u32) -> Result { T::from_str_radix(str, radix).map(OrderedFloat) } } /// A wrapper around floats providing an implementation of `Eq`, `Ord` and `Hash`. /// /// A NaN value cannot be stored in this type. 
/// /// ``` /// use ordered_float::NotNan; /// /// let mut v = [ /// NotNan::new(2.0).unwrap(), /// NotNan::new(1.0).unwrap(), /// ]; /// v.sort(); /// assert_eq!(v, [1.0, 2.0]); /// ``` /// /// Because `NotNan` implements `Ord` and `Eq`, it can be used as a key in a `HashSet`, /// `HashMap`, `BTreeMap`, or `BTreeSet` (unlike the primitive `f32` or `f64` types): /// /// ``` /// # use ordered_float::NotNan; /// # use std::collections::HashSet; /// /// let mut s: HashSet> = HashSet::new(); /// let key = NotNan::new(1.0).unwrap(); /// s.insert(key); /// assert!(s.contains(&key)); /// ``` /// /// Arithmetic on NotNan values will panic if it produces a NaN value: /// /// ```should_panic /// # use ordered_float::NotNan; /// let a = NotNan::new(std::f32::INFINITY).unwrap(); /// let b = NotNan::new(std::f32::NEG_INFINITY).unwrap(); /// /// // This will panic: /// let c = a + b; /// ``` #[derive(PartialOrd, PartialEq, Default, Clone, Copy)] #[repr(transparent)] pub struct NotNan(T); impl NotNan { /// Create a `NotNan` value. /// /// Returns `Err` if `val` is NaN pub fn new(val: T) -> Result { match val { ref val if val.is_nan() => Err(FloatIsNan), val => Ok(NotNan(val)), } } } impl NotNan { /// Get the value out. #[inline] pub fn into_inner(self) -> T { self.0 } /// Create a `NotNan` value from a value that is guaranteed to not be NaN /// /// # Safety /// /// Behaviour is undefined if `val` is NaN #[inline] pub const unsafe fn new_unchecked(val: T) -> Self { NotNan(val) } /// Create a `NotNan` value from a value that is guaranteed to not be NaN /// /// # Safety /// /// Behaviour is undefined if `val` is NaN #[deprecated( since = "2.5.0", note = "Please use the new_unchecked function instead." )] #[inline] pub const unsafe fn unchecked_new(val: T) -> Self { Self::new_unchecked(val) } } impl AsRef for NotNan { #[inline] fn as_ref(&self) -> &T { &self.0 } } impl Borrow for NotNan { #[inline] fn borrow(&self) -> &f32 { &self.0 } } impl Borrow for NotNan { #[inline] fn borrow(&self) -> &f64 { &self.0 } } #[allow(clippy::derive_ord_xor_partial_ord)] impl Ord for NotNan { fn cmp(&self, other: &NotNan) -> Ordering { // Can't use unreachable_unchecked because unsafe code can't depend on FloatCore impl. // https://github.com/reem/rust-ordered-float/issues/150 self.partial_cmp(other) .expect("partial_cmp failed for non-NaN value") } } impl Hash for NotNan { #[inline] fn hash(&self, state: &mut H) { let bits = raw_double_bits(&canonicalize_signed_zero(self.0)); bits.hash(state) } } impl fmt::Debug for NotNan { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Display for NotNan { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl NotNan { /// Converts this [`NotNan`]`<`[`f64`]`>` to a [`NotNan`]`<`[`f32`]`>` while giving up on /// precision, [using `roundTiesToEven` as rounding mode, yielding `Infinity` on /// overflow](https://doc.rust-lang.org/reference/expressions/operator-expr.html#semantics). /// /// Note: For the reverse conversion (from `NotNan` to `NotNan`), you can use /// `.into()`. pub fn as_f32(self) -> NotNan { // This is not destroying invariants, as it is a pure rounding operation. The only two special // cases are where f32 would be overflowing, then the operation yields Infinity, or where // the input is already NaN, in which case the invariant is already broken elsewhere. 
NotNan(self.0 as f32) } } impl From> for f32 { #[inline] fn from(value: NotNan) -> Self { value.0 } } impl From> for f64 { #[inline] fn from(value: NotNan) -> Self { value.0 } } impl TryFrom for NotNan { type Error = FloatIsNan; #[inline] fn try_from(v: f32) -> Result { NotNan::new(v) } } impl TryFrom for NotNan { type Error = FloatIsNan; #[inline] fn try_from(v: f64) -> Result { NotNan::new(v) } } macro_rules! impl_from_int_primitive { ($primitive:ty, $inner:ty) => { impl From<$primitive> for NotNan<$inner> { fn from(source: $primitive) -> Self { // the primitives with which this macro will be called cannot hold a value that // f64::from would convert to NaN, so this does not hurt invariants NotNan(<$inner as From<$primitive>>::from(source)) } } }; } impl_from_int_primitive!(i8, f64); impl_from_int_primitive!(i16, f64); impl_from_int_primitive!(i32, f64); impl_from_int_primitive!(u8, f64); impl_from_int_primitive!(u16, f64); impl_from_int_primitive!(u32, f64); impl_from_int_primitive!(i8, f32); impl_from_int_primitive!(i16, f32); impl_from_int_primitive!(u8, f32); impl_from_int_primitive!(u16, f32); impl From> for NotNan { #[inline] fn from(v: NotNan) -> NotNan { unsafe { NotNan::new_unchecked(v.0 as f64) } } } impl Deref for NotNan { type Target = T; #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl Eq for NotNan {} impl PartialEq for NotNan { #[inline] fn eq(&self, other: &T) -> bool { self.0 == *other } } /// Adds a float directly. /// /// Panics if the provided value is NaN or the computation results in NaN impl Add for NotNan { type Output = Self; #[inline] fn add(self, other: T) -> Self { NotNan::new(self.0 + other).expect("Addition resulted in NaN") } } /// Adds a float directly. /// /// Panics if the provided value is NaN. impl Sum for NotNan { fn sum>>(iter: I) -> Self { NotNan::new(iter.map(|v| v.0).sum()).expect("Sum resulted in NaN") } } impl<'a, T: FloatCore + Sum + 'a> Sum<&'a NotNan> for NotNan { #[inline] fn sum>>(iter: I) -> Self { iter.cloned().sum() } } /// Subtracts a float directly. /// /// Panics if the provided value is NaN or the computation results in NaN impl Sub for NotNan { type Output = Self; #[inline] fn sub(self, other: T) -> Self { NotNan::new(self.0 - other).expect("Subtraction resulted in NaN") } } /// Multiplies a float directly. /// /// Panics if the provided value is NaN or the computation results in NaN impl Mul for NotNan { type Output = Self; #[inline] fn mul(self, other: T) -> Self { NotNan::new(self.0 * other).expect("Multiplication resulted in NaN") } } impl Product for NotNan { fn product>>(iter: I) -> Self { NotNan::new(iter.map(|v| v.0).product()).expect("Product resulted in NaN") } } impl<'a, T: FloatCore + Product + 'a> Product<&'a NotNan> for NotNan { #[inline] fn product>>(iter: I) -> Self { iter.cloned().product() } } /// Divides a float directly. /// /// Panics if the provided value is NaN or the computation results in NaN impl Div for NotNan { type Output = Self; #[inline] fn div(self, other: T) -> Self { NotNan::new(self.0 / other).expect("Division resulted in NaN") } } /// Calculates `%` with a float directly. /// /// Panics if the provided value is NaN or the computation results in NaN impl Rem for NotNan { type Output = Self; #[inline] fn rem(self, other: T) -> Self { NotNan::new(self.0 % other).expect("Rem resulted in NaN") } } macro_rules! 
impl_not_nan_binop { ($imp:ident, $method:ident, $assign_imp:ident, $assign_method:ident) => { impl $imp for NotNan { type Output = Self; #[inline] fn $method(self, other: Self) -> Self { self.$method(other.0) } } impl $imp<&T> for NotNan { type Output = NotNan; #[inline] fn $method(self, other: &T) -> Self::Output { self.$method(*other) } } impl $imp<&Self> for NotNan { type Output = NotNan; #[inline] fn $method(self, other: &Self) -> Self::Output { self.$method(other.0) } } impl $imp for &NotNan { type Output = NotNan; #[inline] fn $method(self, other: Self) -> Self::Output { (*self).$method(other.0) } } impl $imp> for &NotNan { type Output = NotNan; #[inline] fn $method(self, other: NotNan) -> Self::Output { (*self).$method(other.0) } } impl $imp for &NotNan { type Output = NotNan; #[inline] fn $method(self, other: T) -> Self::Output { (*self).$method(other) } } impl $imp<&T> for &NotNan { type Output = NotNan; #[inline] fn $method(self, other: &T) -> Self::Output { (*self).$method(*other) } } impl $assign_imp for NotNan { #[inline] fn $assign_method(&mut self, other: T) { *self = (*self).$method(other); } } impl $assign_imp<&T> for NotNan { #[inline] fn $assign_method(&mut self, other: &T) { *self = (*self).$method(*other); } } impl $assign_imp for NotNan { #[inline] fn $assign_method(&mut self, other: Self) { (*self).$assign_method(other.0); } } impl $assign_imp<&Self> for NotNan { #[inline] fn $assign_method(&mut self, other: &Self) { (*self).$assign_method(other.0); } } }; } impl_not_nan_binop! {Add, add, AddAssign, add_assign} impl_not_nan_binop! {Sub, sub, SubAssign, sub_assign} impl_not_nan_binop! {Mul, mul, MulAssign, mul_assign} impl_not_nan_binop! {Div, div, DivAssign, div_assign} impl_not_nan_binop! {Rem, rem, RemAssign, rem_assign} // Will panic if NaN value is return from the operation macro_rules! impl_not_nan_pow { ($inner:ty, $rhs:ty) => { #[cfg(feature = "std")] impl Pow<$rhs> for NotNan<$inner> { type Output = NotNan<$inner>; #[inline] fn pow(self, rhs: $rhs) -> NotNan<$inner> { NotNan::new(<$inner>::pow(self.0, rhs)).expect("Pow resulted in NaN") } } #[cfg(feature = "std")] impl<'a> Pow<&'a $rhs> for NotNan<$inner> { type Output = NotNan<$inner>; #[inline] fn pow(self, rhs: &'a $rhs) -> NotNan<$inner> { NotNan::new(<$inner>::pow(self.0, *rhs)).expect("Pow resulted in NaN") } } #[cfg(feature = "std")] impl<'a> Pow<$rhs> for &'a NotNan<$inner> { type Output = NotNan<$inner>; #[inline] fn pow(self, rhs: $rhs) -> NotNan<$inner> { NotNan::new(<$inner>::pow(self.0, rhs)).expect("Pow resulted in NaN") } } #[cfg(feature = "std")] impl<'a, 'b> Pow<&'a $rhs> for &'b NotNan<$inner> { type Output = NotNan<$inner>; #[inline] fn pow(self, rhs: &'a $rhs) -> NotNan<$inner> { NotNan::new(<$inner>::pow(self.0, *rhs)).expect("Pow resulted in NaN") } } }; } impl_not_nan_pow! {f32, i8} impl_not_nan_pow! {f32, i16} impl_not_nan_pow! {f32, u8} impl_not_nan_pow! {f32, u16} impl_not_nan_pow! {f32, i32} impl_not_nan_pow! {f64, i8} impl_not_nan_pow! {f64, i16} impl_not_nan_pow! {f64, u8} impl_not_nan_pow! {f64, u16} impl_not_nan_pow! {f64, i32} impl_not_nan_pow! {f32, f32} impl_not_nan_pow! {f64, f32} impl_not_nan_pow! {f64, f64} // This also should panic on NaN macro_rules! 
impl_not_nan_self_pow { ($base:ty, $exp:ty) => { #[cfg(feature = "std")] impl Pow> for NotNan<$base> { type Output = NotNan<$base>; #[inline] fn pow(self, rhs: NotNan<$exp>) -> NotNan<$base> { NotNan::new(self.0.pow(rhs.0)).expect("Pow resulted in NaN") } } #[cfg(feature = "std")] impl<'a> Pow<&'a NotNan<$exp>> for NotNan<$base> { type Output = NotNan<$base>; #[inline] fn pow(self, rhs: &'a NotNan<$exp>) -> NotNan<$base> { NotNan::new(self.0.pow(rhs.0)).expect("Pow resulted in NaN") } } #[cfg(feature = "std")] impl<'a> Pow> for &'a NotNan<$base> { type Output = NotNan<$base>; #[inline] fn pow(self, rhs: NotNan<$exp>) -> NotNan<$base> { NotNan::new(self.0.pow(rhs.0)).expect("Pow resulted in NaN") } } #[cfg(feature = "std")] impl<'a, 'b> Pow<&'a NotNan<$exp>> for &'b NotNan<$base> { type Output = NotNan<$base>; #[inline] fn pow(self, rhs: &'a NotNan<$exp>) -> NotNan<$base> { NotNan::new(self.0.pow(rhs.0)).expect("Pow resulted in NaN") } } }; } impl_not_nan_self_pow! {f32, f32} impl_not_nan_self_pow! {f64, f32} impl_not_nan_self_pow! {f64, f64} impl Neg for NotNan { type Output = Self; #[inline] fn neg(self) -> Self { NotNan(-self.0) } } impl Neg for &NotNan { type Output = NotNan; #[inline] fn neg(self) -> Self::Output { NotNan(-self.0) } } /// An error indicating an attempt to construct NotNan from a NaN #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct FloatIsNan; #[cfg(feature = "std")] impl Error for FloatIsNan { fn description(&self) -> &str { "NotNan constructed with NaN" } } impl fmt::Display for FloatIsNan { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "NotNan constructed with NaN") } } #[cfg(feature = "std")] impl From for std::io::Error { #[inline] fn from(e: FloatIsNan) -> std::io::Error { std::io::Error::new(std::io::ErrorKind::InvalidInput, e) } } #[inline] /// Used for hashing. Input must not be zero or NaN. fn raw_double_bits(f: &F) -> u64 { let (man, exp, sign) = f.integer_decode(); let exp_u64 = exp as u16 as u64; let sign_u64 = (sign > 0) as u64; (man & MAN_MASK) | ((exp_u64 << 52) & EXP_MASK) | ((sign_u64 << 63) & SIGN_MASK) } impl Zero for NotNan { #[inline] fn zero() -> Self { NotNan(T::zero()) } #[inline] fn is_zero(&self) -> bool { self.0.is_zero() } } impl One for NotNan { #[inline] fn one() -> Self { NotNan(T::one()) } } impl Bounded for NotNan { #[inline] fn min_value() -> Self { NotNan(T::min_value()) } #[inline] fn max_value() -> Self { NotNan(T::max_value()) } } impl FromStr for NotNan { type Err = ParseNotNanError; /// Convert a &str to `NotNan`. 
Returns an error if the string fails to parse, /// or if the resulting value is NaN /// /// ``` /// use ordered_float::NotNan; /// /// assert!("-10".parse::>().is_ok()); /// assert!("abc".parse::>().is_err()); /// assert!("NaN".parse::>().is_err()); /// ``` fn from_str(src: &str) -> Result { src.parse() .map_err(ParseNotNanError::ParseFloatError) .and_then(|f| NotNan::new(f).map_err(|_| ParseNotNanError::IsNaN)) } } impl FromPrimitive for NotNan { fn from_i64(n: i64) -> Option { T::from_i64(n).and_then(|n| NotNan::new(n).ok()) } fn from_u64(n: u64) -> Option { T::from_u64(n).and_then(|n| NotNan::new(n).ok()) } fn from_isize(n: isize) -> Option { T::from_isize(n).and_then(|n| NotNan::new(n).ok()) } fn from_i8(n: i8) -> Option { T::from_i8(n).and_then(|n| NotNan::new(n).ok()) } fn from_i16(n: i16) -> Option { T::from_i16(n).and_then(|n| NotNan::new(n).ok()) } fn from_i32(n: i32) -> Option { T::from_i32(n).and_then(|n| NotNan::new(n).ok()) } fn from_usize(n: usize) -> Option { T::from_usize(n).and_then(|n| NotNan::new(n).ok()) } fn from_u8(n: u8) -> Option { T::from_u8(n).and_then(|n| NotNan::new(n).ok()) } fn from_u16(n: u16) -> Option { T::from_u16(n).and_then(|n| NotNan::new(n).ok()) } fn from_u32(n: u32) -> Option { T::from_u32(n).and_then(|n| NotNan::new(n).ok()) } fn from_f32(n: f32) -> Option { T::from_f32(n).and_then(|n| NotNan::new(n).ok()) } fn from_f64(n: f64) -> Option { T::from_f64(n).and_then(|n| NotNan::new(n).ok()) } } impl ToPrimitive for NotNan { fn to_i64(&self) -> Option { self.0.to_i64() } fn to_u64(&self) -> Option { self.0.to_u64() } fn to_isize(&self) -> Option { self.0.to_isize() } fn to_i8(&self) -> Option { self.0.to_i8() } fn to_i16(&self) -> Option { self.0.to_i16() } fn to_i32(&self) -> Option { self.0.to_i32() } fn to_usize(&self) -> Option { self.0.to_usize() } fn to_u8(&self) -> Option { self.0.to_u8() } fn to_u16(&self) -> Option { self.0.to_u16() } fn to_u32(&self) -> Option { self.0.to_u32() } fn to_f32(&self) -> Option { self.0.to_f32() } fn to_f64(&self) -> Option { self.0.to_f64() } } /// An error indicating a parse error from a string for `NotNan`. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum ParseNotNanError { /// A plain parse error from the underlying float type. ParseFloatError(E), /// The parsed float value resulted in a NaN. 
IsNaN, } #[cfg(feature = "std")] impl Error for ParseNotNanError { fn description(&self) -> &str { "Error parsing a not-NaN floating point value" } fn source(&self) -> Option<&(dyn Error + 'static)> { match self { ParseNotNanError::ParseFloatError(e) => Some(e), ParseNotNanError::IsNaN => None, } } } impl fmt::Display for ParseNotNanError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { ParseNotNanError::ParseFloatError(e) => write!(f, "Parse error: {}", e), ParseNotNanError::IsNaN => write!(f, "NotNan parser encounter a NaN"), } } } impl Num for NotNan { type FromStrRadixErr = ParseNotNanError; fn from_str_radix(src: &str, radix: u32) -> Result { T::from_str_radix(src, radix) .map_err(ParseNotNanError::ParseFloatError) .and_then(|n| NotNan::new(n).map_err(|_| ParseNotNanError::IsNaN)) } } impl Signed for NotNan { #[inline] fn abs(&self) -> Self { NotNan(self.0.abs()) } fn abs_sub(&self, other: &Self) -> Self { NotNan::new(Signed::abs_sub(&self.0, &other.0)).expect("Subtraction resulted in NaN") } #[inline] fn signum(&self) -> Self { NotNan(self.0.signum()) } #[inline] fn is_positive(&self) -> bool { self.0.is_positive() } #[inline] fn is_negative(&self) -> bool { self.0.is_negative() } } impl NumCast for NotNan { fn from(n: F) -> Option { T::from(n).and_then(|n| NotNan::new(n).ok()) } } macro_rules! impl_float_const_method { ($wrapper:expr, $method:ident) => { #[allow(non_snake_case)] #[allow(clippy::redundant_closure_call)] fn $method() -> Self { $wrapper(T::$method()) } }; } macro_rules! impl_float_const { ($type:ident, $wrapper:expr) => { impl FloatConst for $type { impl_float_const_method!($wrapper, E); impl_float_const_method!($wrapper, FRAC_1_PI); impl_float_const_method!($wrapper, FRAC_1_SQRT_2); impl_float_const_method!($wrapper, FRAC_2_PI); impl_float_const_method!($wrapper, FRAC_2_SQRT_PI); impl_float_const_method!($wrapper, FRAC_PI_2); impl_float_const_method!($wrapper, FRAC_PI_3); impl_float_const_method!($wrapper, FRAC_PI_4); impl_float_const_method!($wrapper, FRAC_PI_6); impl_float_const_method!($wrapper, FRAC_PI_8); impl_float_const_method!($wrapper, LN_10); impl_float_const_method!($wrapper, LN_2); impl_float_const_method!($wrapper, LOG10_E); impl_float_const_method!($wrapper, LOG2_E); impl_float_const_method!($wrapper, PI); impl_float_const_method!($wrapper, SQRT_2); } }; } impl_float_const!(OrderedFloat, OrderedFloat); // Float constants are not NaN. 
impl_float_const!(NotNan, |x| unsafe { NotNan::new_unchecked(x) }); #[cfg(feature = "serde")] mod impl_serde { extern crate serde; use self::serde::de::{Error, Unexpected}; use self::serde::{Deserialize, Deserializer, Serialize, Serializer}; use super::{NotNan, OrderedFloat}; use core::f64; use num_traits::float::FloatCore; #[cfg(test)] extern crate serde_test; #[cfg(test)] use self::serde_test::{assert_de_tokens_error, assert_tokens, Token}; impl Serialize for OrderedFloat { #[inline] fn serialize(&self, s: S) -> Result { self.0.serialize(s) } } impl<'de, T: FloatCore + Deserialize<'de>> Deserialize<'de> for OrderedFloat { #[inline] fn deserialize>(d: D) -> Result { T::deserialize(d).map(OrderedFloat) } } impl Serialize for NotNan { #[inline] fn serialize(&self, s: S) -> Result { self.0.serialize(s) } } impl<'de, T: FloatCore + Deserialize<'de>> Deserialize<'de> for NotNan { fn deserialize>(d: D) -> Result { let float = T::deserialize(d)?; NotNan::new(float).map_err(|_| { Error::invalid_value(Unexpected::Float(f64::NAN), &"float (but not NaN)") }) } } #[test] fn test_ordered_float() { let float = OrderedFloat(1.0f64); assert_tokens(&float, &[Token::F64(1.0)]); } #[test] fn test_not_nan() { let float = NotNan(1.0f64); assert_tokens(&float, &[Token::F64(1.0)]); } #[test] fn test_fail_on_nan() { assert_de_tokens_error::>( &[Token::F64(f64::NAN)], "invalid value: floating point `NaN`, expected float (but not NaN)", ); } } #[cfg(any(feature = "rkyv_16", feature = "rkyv_32", feature = "rkyv_64"))] mod impl_rkyv { use super::{NotNan, OrderedFloat}; use num_traits::float::FloatCore; #[cfg(test)] use rkyv::{archived_root, ser::Serializer}; use rkyv::{Archive, Deserialize, Fallible, Serialize}; #[cfg(test)] type DefaultSerializer = rkyv::ser::serializers::CoreSerializer<16, 16>; #[cfg(test)] type DefaultDeserializer = rkyv::Infallible; impl Archive for OrderedFloat { type Archived = OrderedFloat; type Resolver = T::Resolver; unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) { self.0.resolve(pos, resolver, out.cast()) } } impl, S: Fallible + ?Sized> Serialize for OrderedFloat { fn serialize(&self, s: &mut S) -> Result { self.0.serialize(s) } } impl, D: Fallible + ?Sized> Deserialize, D> for OrderedFloat { fn deserialize(&self, d: &mut D) -> Result, D::Error> { self.0.deserialize(d).map(OrderedFloat) } } impl Archive for NotNan { type Archived = NotNan; type Resolver = T::Resolver; unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) { self.0.resolve(pos, resolver, out.cast()) } } impl, S: Fallible + ?Sized> Serialize for NotNan { fn serialize(&self, s: &mut S) -> Result { self.0.serialize(s) } } impl, D: Fallible + ?Sized> Deserialize, D> for NotNan { fn deserialize(&self, d: &mut D) -> Result, D::Error> { self.0.deserialize(d).map(NotNan) } } macro_rules! rkyv_eq_ord { ($main:ident, $float:ty, $rend:ty) => { impl PartialEq<$main<$float>> for $main<$rend> { fn eq(&self, other: &$main<$float>) -> bool { other.eq(&self.0.value()) } } impl PartialEq<$main<$rend>> for $main<$float> { fn eq(&self, other: &$main<$rend>) -> bool { self.eq(&other.0.value()) } } impl PartialOrd<$main<$float>> for $main<$rend> { fn partial_cmp(&self, other: &$main<$float>) -> Option { self.0.value().partial_cmp(other) } } impl PartialOrd<$main<$rend>> for $main<$float> { fn partial_cmp(&self, other: &$main<$rend>) -> Option { other .0 .value() .partial_cmp(self) .map(core::cmp::Ordering::reverse) } } }; } rkyv_eq_ord! 
{ OrderedFloat, f32, rkyv::rend::f32_le } rkyv_eq_ord! { OrderedFloat, f32, rkyv::rend::f32_be } rkyv_eq_ord! { OrderedFloat, f64, rkyv::rend::f64_le } rkyv_eq_ord! { OrderedFloat, f64, rkyv::rend::f64_be } rkyv_eq_ord! { NotNan, f32, rkyv::rend::f32_le } rkyv_eq_ord! { NotNan, f32, rkyv::rend::f32_be } rkyv_eq_ord! { NotNan, f64, rkyv::rend::f64_le } rkyv_eq_ord! { NotNan, f64, rkyv::rend::f64_be } #[cfg(feature = "rkyv_ck")] use super::FloatIsNan; #[cfg(feature = "rkyv_ck")] use core::convert::Infallible; #[cfg(feature = "rkyv_ck")] use rkyv::bytecheck::CheckBytes; #[cfg(feature = "rkyv_ck")] impl> CheckBytes for OrderedFloat { type Error = Infallible; #[inline] unsafe fn check_bytes<'a>(value: *const Self, _: &mut C) -> Result<&'a Self, Self::Error> { Ok(&*value) } } #[cfg(feature = "rkyv_ck")] impl> CheckBytes for NotNan { type Error = FloatIsNan; #[inline] unsafe fn check_bytes<'a>(value: *const Self, _: &mut C) -> Result<&'a Self, Self::Error> { Self::new(*(value as *const T)).map(|_| &*value) } } #[test] fn test_ordered_float() { let float = OrderedFloat(1.0f64); let mut serializer = DefaultSerializer::default(); serializer .serialize_value(&float) .expect("failed to archive value"); let len = serializer.pos(); let buffer = serializer.into_serializer().into_inner(); let archived_value = unsafe { archived_root::>(&buffer[0..len]) }; assert_eq!(archived_value, &float); let mut deserializer = DefaultDeserializer::default(); let deser_float: OrderedFloat = archived_value.deserialize(&mut deserializer).unwrap(); assert_eq!(deser_float, float); } #[test] fn test_not_nan() { let float = NotNan(1.0f64); let mut serializer = DefaultSerializer::default(); serializer .serialize_value(&float) .expect("failed to archive value"); let len = serializer.pos(); let buffer = serializer.into_serializer().into_inner(); let archived_value = unsafe { archived_root::>(&buffer[0..len]) }; assert_eq!(archived_value, &float); let mut deserializer = DefaultDeserializer::default(); let deser_float: NotNan = archived_value.deserialize(&mut deserializer).unwrap(); assert_eq!(deser_float, float); } } #[cfg(feature = "speedy")] mod impl_speedy { use super::{NotNan, OrderedFloat}; use num_traits::float::FloatCore; use speedy::{Context, Readable, Reader, Writable, Writer}; impl Writable for OrderedFloat where C: Context, T: Writable, { fn write_to>(&self, writer: &mut W) -> Result<(), C::Error> { self.0.write_to(writer) } fn bytes_needed(&self) -> Result { self.0.bytes_needed() } } impl Writable for NotNan where C: Context, T: Writable, { fn write_to>(&self, writer: &mut W) -> Result<(), C::Error> { self.0.write_to(writer) } fn bytes_needed(&self) -> Result { self.0.bytes_needed() } } impl<'a, T, C: Context> Readable<'a, C> for OrderedFloat where T: Readable<'a, C>, { fn read_from>(reader: &mut R) -> Result { T::read_from(reader).map(OrderedFloat) } fn minimum_bytes_needed() -> usize { T::minimum_bytes_needed() } } impl<'a, T: FloatCore, C: Context> Readable<'a, C> for NotNan where T: Readable<'a, C>, { fn read_from>(reader: &mut R) -> Result { let value: T = reader.read_value()?; Self::new(value).map_err(|error| { speedy::Error::custom(std::format!("failed to read NotNan: {}", error)).into() }) } fn minimum_bytes_needed() -> usize { T::minimum_bytes_needed() } } #[test] fn test_ordered_float() { let float = OrderedFloat(1.0f64); let buffer = float.write_to_vec().unwrap(); let deser_float: OrderedFloat = OrderedFloat::read_from_buffer(&buffer).unwrap(); assert_eq!(deser_float, float); } #[test] fn test_not_nan() { 
let float = NotNan(1.0f64); let buffer = float.write_to_vec().unwrap(); let deser_float: NotNan = NotNan::read_from_buffer(&buffer).unwrap(); assert_eq!(deser_float, float); } #[test] fn test_not_nan_with_nan() { let nan_buf = f64::nan().write_to_vec().unwrap(); let nan_err: Result, _> = NotNan::read_from_buffer(&nan_buf); assert!(nan_err.is_err()); } } #[cfg(feature = "borsh")] mod impl_borsh { extern crate borsh; use super::{NotNan, OrderedFloat}; use num_traits::float::FloatCore; impl borsh::BorshSerialize for OrderedFloat where T: borsh::BorshSerialize, { #[inline] fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { ::serialize(&self.0, writer) } } impl borsh::BorshDeserialize for OrderedFloat where T: borsh::BorshDeserialize, { #[inline] fn deserialize_reader(reader: &mut R) -> borsh::io::Result { ::deserialize_reader(reader).map(Self) } } impl borsh::BorshSerialize for NotNan where T: borsh::BorshSerialize, { #[inline] fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { ::serialize(&self.0, writer) } } impl borsh::BorshDeserialize for NotNan where T: FloatCore + borsh::BorshDeserialize, { #[inline] fn deserialize_reader(reader: &mut R) -> borsh::io::Result { let float = ::deserialize_reader(reader)?; NotNan::new(float).map_err(|_| { borsh::io::Error::new( borsh::io::ErrorKind::InvalidData, "expected a non-NaN float", ) }) } } #[test] fn test_ordered_float() { let float = crate::OrderedFloat(1.0f64); let buffer = borsh::to_vec(&float).expect("failed to serialize value"); let deser_float: crate::OrderedFloat = borsh::from_slice(&buffer).expect("failed to deserialize value"); assert_eq!(deser_float, float); } #[test] fn test_not_nan() { let float = crate::NotNan(1.0f64); let buffer = borsh::to_vec(&float).expect("failed to serialize value"); let deser_float: crate::NotNan = borsh::from_slice(&buffer).expect("failed to deserialize value"); assert_eq!(deser_float, float); } } #[cfg(all(feature = "std", feature = "schemars"))] mod impl_schemars { extern crate schemars; use self::schemars::gen::SchemaGenerator; use self::schemars::schema::{InstanceType, Schema, SchemaObject}; use super::{NotNan, OrderedFloat}; macro_rules! 
primitive_float_impl { ($type:ty, $schema_name:literal) => { impl schemars::JsonSchema for $type { fn is_referenceable() -> bool { false } fn schema_name() -> std::string::String { std::string::String::from($schema_name) } fn json_schema(_: &mut SchemaGenerator) -> Schema { SchemaObject { instance_type: Some(InstanceType::Number.into()), format: Some(std::string::String::from($schema_name)), ..Default::default() } .into() } } }; } primitive_float_impl!(OrderedFloat, "float"); primitive_float_impl!(OrderedFloat, "double"); primitive_float_impl!(NotNan, "float"); primitive_float_impl!(NotNan, "double"); #[test] fn schema_generation_does_not_panic_for_common_floats() { { let schema = schemars::gen::SchemaGenerator::default() .into_root_schema_for::>(); assert_eq!( schema.schema.instance_type, Some(schemars::schema::SingleOrVec::Single(std::boxed::Box::new( schemars::schema::InstanceType::Number ))) ); assert_eq!( schema.schema.metadata.unwrap().title.unwrap(), std::string::String::from("float") ); } { let schema = schemars::gen::SchemaGenerator::default() .into_root_schema_for::>(); assert_eq!( schema.schema.instance_type, Some(schemars::schema::SingleOrVec::Single(std::boxed::Box::new( schemars::schema::InstanceType::Number ))) ); assert_eq!( schema.schema.metadata.unwrap().title.unwrap(), std::string::String::from("double") ); } { let schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::>(); assert_eq!( schema.schema.instance_type, Some(schemars::schema::SingleOrVec::Single(std::boxed::Box::new( schemars::schema::InstanceType::Number ))) ); assert_eq!( schema.schema.metadata.unwrap().title.unwrap(), std::string::String::from("float") ); } { let schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::>(); assert_eq!( schema.schema.instance_type, Some(schemars::schema::SingleOrVec::Single(std::boxed::Box::new( schemars::schema::InstanceType::Number ))) ); assert_eq!( schema.schema.metadata.unwrap().title.unwrap(), std::string::String::from("double") ); } } #[test] fn ordered_float_schema_match_primitive_schema() { { let of_schema = schemars::gen::SchemaGenerator::default() .into_root_schema_for::>(); let prim_schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::(); assert_eq!(of_schema, prim_schema); } { let of_schema = schemars::gen::SchemaGenerator::default() .into_root_schema_for::>(); let prim_schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::(); assert_eq!(of_schema, prim_schema); } { let of_schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::>(); let prim_schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::(); assert_eq!(of_schema, prim_schema); } { let of_schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::>(); let prim_schema = schemars::gen::SchemaGenerator::default().into_root_schema_for::(); assert_eq!(of_schema, prim_schema); } } } #[cfg(feature = "rand")] mod impl_rand { use super::{NotNan, OrderedFloat}; use rand::distributions::uniform::*; use rand::distributions::{Distribution, Open01, OpenClosed01, Standard}; use rand::Rng; macro_rules! impl_distribution { ($dist:ident, $($f:ty),+) => { $( impl Distribution> for $dist { fn sample(&self, rng: &mut R) -> NotNan<$f> { // 'rand' never generates NaN values in the Standard, Open01, or // OpenClosed01 distributions. Using 'new_unchecked' is therefore // safe. 
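                    // (Standard draws from [0, 1), Open01 from (0, 1), and OpenClosed01
                    // from (0, 1], so the sampled value is always a finite, non-NaN float.)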
unsafe { NotNan::new_unchecked(self.sample(rng)) } } } impl Distribution> for $dist { fn sample(&self, rng: &mut R) -> OrderedFloat<$f> { OrderedFloat(self.sample(rng)) } } )* } } impl_distribution! { Standard, f32, f64 } impl_distribution! { Open01, f32, f64 } impl_distribution! { OpenClosed01, f32, f64 } /// A sampler for a uniform distribution #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct UniformNotNan(UniformFloat); impl SampleUniform for NotNan { type Sampler = UniformNotNan; } impl SampleUniform for NotNan { type Sampler = UniformNotNan; } impl PartialEq for UniformNotNan where UniformFloat: PartialEq, { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } } /// A sampler for a uniform distribution #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct UniformOrdered(UniformFloat); impl SampleUniform for OrderedFloat { type Sampler = UniformOrdered; } impl SampleUniform for OrderedFloat { type Sampler = UniformOrdered; } impl PartialEq for UniformOrdered where UniformFloat: PartialEq, { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } } macro_rules! impl_uniform_sampler { ($f:ty) => { impl UniformSampler for UniformNotNan<$f> { type X = NotNan<$f>; fn new(low: B1, high: B2) -> Self where B1: SampleBorrow + Sized, B2: SampleBorrow + Sized, { UniformNotNan(UniformFloat::<$f>::new(low.borrow().0, high.borrow().0)) } fn new_inclusive(low: B1, high: B2) -> Self where B1: SampleBorrow + Sized, B2: SampleBorrow + Sized, { UniformSampler::new(low, high) } fn sample(&self, rng: &mut R) -> Self::X { // UniformFloat.sample() will never return NaN. unsafe { NotNan::new_unchecked(self.0.sample(rng)) } } } impl UniformSampler for UniformOrdered<$f> { type X = OrderedFloat<$f>; fn new(low: B1, high: B2) -> Self where B1: SampleBorrow + Sized, B2: SampleBorrow + Sized, { UniformOrdered(UniformFloat::<$f>::new(low.borrow().0, high.borrow().0)) } fn new_inclusive(low: B1, high: B2) -> Self where B1: SampleBorrow + Sized, B2: SampleBorrow + Sized, { UniformSampler::new(low, high) } fn sample(&self, rng: &mut R) -> Self::X { OrderedFloat(self.0.sample(rng)) } } }; } impl_uniform_sampler! { f32 } impl_uniform_sampler! 
{ f64 } #[cfg(all(test, feature = "randtest"))] mod tests { use super::*; fn sample_fuzz() where Standard: Distribution>, Open01: Distribution>, OpenClosed01: Distribution>, Standard: Distribution>, Open01: Distribution>, OpenClosed01: Distribution>, T: crate::Float, { let mut rng = rand::thread_rng(); let f1: NotNan = rng.sample(Standard); let f2: NotNan = rng.sample(Open01); let f3: NotNan = rng.sample(OpenClosed01); let _: OrderedFloat = rng.sample(Standard); let _: OrderedFloat = rng.sample(Open01); let _: OrderedFloat = rng.sample(OpenClosed01); assert!(!f1.into_inner().is_nan()); assert!(!f2.into_inner().is_nan()); assert!(!f3.into_inner().is_nan()); } #[test] fn sampling_f32_does_not_panic() { sample_fuzz::(); } #[test] fn sampling_f64_does_not_panic() { sample_fuzz::(); } #[test] #[should_panic] fn uniform_sampling_panic_on_infinity_notnan() { let (low, high) = ( NotNan::new(0f64).unwrap(), NotNan::new(f64::INFINITY).unwrap(), ); let uniform = Uniform::new(low, high); let _ = uniform.sample(&mut rand::thread_rng()); } #[test] #[should_panic] fn uniform_sampling_panic_on_infinity_ordered() { let (low, high) = (OrderedFloat(0f64), OrderedFloat(f64::INFINITY)); let uniform = Uniform::new(low, high); let _ = uniform.sample(&mut rand::thread_rng()); } #[test] #[should_panic] fn uniform_sampling_panic_on_nan_ordered() { let (low, high) = (OrderedFloat(0f64), OrderedFloat(f64::NAN)); let uniform = Uniform::new(low, high); let _ = uniform.sample(&mut rand::thread_rng()); } } } #[cfg(feature = "proptest")] mod impl_proptest { use super::{NotNan, OrderedFloat}; use proptest::arbitrary::{Arbitrary, StrategyFor}; use proptest::num::{f32, f64}; use proptest::strategy::{FilterMap, Map, Strategy}; use std::convert::TryFrom; macro_rules! impl_arbitrary { ($($f:ident),+) => { $( impl Arbitrary for NotNan<$f> { type Strategy = FilterMap, fn(_: $f) -> Option>>; type Parameters = <$f as Arbitrary>::Parameters; fn arbitrary_with(params: Self::Parameters) -> Self::Strategy { <$f>::arbitrary_with(params) .prop_filter_map("filter nan values", |f| NotNan::try_from(f).ok()) } } impl Arbitrary for OrderedFloat<$f> { type Strategy = Map, fn(_: $f) -> OrderedFloat<$f>>; type Parameters = <$f as Arbitrary>::Parameters; fn arbitrary_with(params: Self::Parameters) -> Self::Strategy { <$f>::arbitrary_with(params).prop_map(|f| OrderedFloat::from(f)) } } )* } } impl_arbitrary! { f32, f64 } } #[cfg(feature = "arbitrary")] mod impl_arbitrary { use super::{FloatIsNan, NotNan, OrderedFloat}; use arbitrary::{Arbitrary, Unstructured}; use num_traits::FromPrimitive; macro_rules! impl_arbitrary { ($($f:ident),+) => { $( impl<'a> Arbitrary<'a> for NotNan<$f> { fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { let float: $f = u.arbitrary()?; match NotNan::new(float) { Ok(notnan_value) => Ok(notnan_value), Err(FloatIsNan) => { // If our arbitrary float input was a NaN (encoded by exponent = max // value), then replace it with a finite float, reusing the mantissa // bits. // // This means the output is not uniformly distributed among all // possible float values, but Arbitrary makes no promise that that // is true. // // An alternative implementation would be to return an // `arbitrary::Error`, but that is not as useful since it forces the // caller to retry with new random/fuzzed data; and the precendent of // `arbitrary`'s built-in implementations is to prefer the approach of // mangling the input bits to fit. 
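                            // `integer_decode` yields (mantissa, exponent, sign); rebuilding
                            // the float from `sign * mantissa` alone always produces a finite,
                            // integer-valued number, so the `NotNan::new(...).unwrap()` below
                            // cannot fail.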
let (mantissa, _exponent, sign) = num_traits::float::FloatCore::integer_decode(float); let revised_float = <$f>::from_i64( i64::from(sign) * mantissa as i64 ).unwrap(); // If this unwrap() fails, then there is a bug in the above code. Ok(NotNan::new(revised_float).unwrap()) } } } fn size_hint(depth: usize) -> (usize, Option) { <$f as Arbitrary>::size_hint(depth) } } impl<'a> Arbitrary<'a> for OrderedFloat<$f> { fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { let float: $f = u.arbitrary()?; Ok(OrderedFloat::from(float)) } fn size_hint(depth: usize) -> (usize, Option) { <$f as Arbitrary>::size_hint(depth) } } )* } } impl_arbitrary! { f32, f64 } } #[cfg(feature = "bytemuck")] mod impl_bytemuck { use super::{FloatCore, NotNan, OrderedFloat}; use bytemuck::{AnyBitPattern, CheckedBitPattern, NoUninit, Pod, Zeroable}; unsafe impl Zeroable for OrderedFloat {} // The zero bit pattern is indeed not a NaN bit pattern. unsafe impl Zeroable for NotNan {} unsafe impl Pod for OrderedFloat {} // `NotNan` can only implement `NoUninit` and not `Pod`, since not every bit pattern is // valid (NaN bit patterns are invalid). `NoUninit` guarantees that we can read any bit pattern // from the value, which is fine in this case. unsafe impl NoUninit for NotNan {} unsafe impl CheckedBitPattern for NotNan { type Bits = T; fn is_valid_bit_pattern(bits: &Self::Bits) -> bool { !bits.is_nan() } } #[test] fn test_not_nan_bit_pattern() { use bytemuck::checked::{try_cast, CheckedCastError}; let nan = f64::NAN; assert_eq!( try_cast::>(nan), Err(CheckedCastError::InvalidBitPattern), ); let pi = core::f64::consts::PI; assert!(try_cast::>(pi).is_ok()); } } ordered-float-4.2.2/tests/evil.rs000064400000000000000000000047331046102023000150360ustar 00000000000000use num_traits::float::FloatCore; use num_traits::{Num, NumCast, One, ToPrimitive, Zero}; use ordered_float::NotNan; use std::num::FpCategory; use std::ops::{Add, Div, Mul, Neg, Rem, Sub}; #[derive(Copy, Clone, PartialOrd, PartialEq)] struct EvilFloat(f32); impl Zero for EvilFloat { fn zero() -> Self { todo!() } fn is_zero(&self) -> bool { todo!() } } impl Add for EvilFloat { type Output = Self; fn add(self, _: Self) -> Self::Output { todo!() } } impl One for EvilFloat { fn one() -> Self { todo!() } } impl Mul for EvilFloat { type Output = Self; fn mul(self, _: Self) -> Self::Output { todo!() } } impl Sub for EvilFloat { type Output = Self; fn sub(self, _: Self) -> Self::Output { todo!() } } impl Div for EvilFloat { type Output = Self; fn div(self, _: Self) -> Self::Output { todo!() } } impl Rem for EvilFloat { type Output = Self; fn rem(self, _: Self) -> Self::Output { todo!() } } impl NumCast for EvilFloat { fn from(_: T) -> Option { todo!() } } impl ToPrimitive for EvilFloat { fn to_i64(&self) -> Option { todo!() } fn to_u64(&self) -> Option { todo!() } } impl Neg for EvilFloat { type Output = Self; fn neg(self) -> Self::Output { todo!() } } impl FloatCore for EvilFloat { fn is_nan(self) -> bool { false } fn infinity() -> Self { todo!() } fn neg_infinity() -> Self { todo!() } fn nan() -> Self { todo!() } fn neg_zero() -> Self { todo!() } fn min_value() -> Self { todo!() } fn min_positive_value() -> Self { todo!() } fn epsilon() -> Self { todo!() } fn max_value() -> Self { todo!() } fn classify(self) -> FpCategory { todo!() } fn to_degrees(self) -> Self { todo!() } fn to_radians(self) -> Self { todo!() } fn integer_decode(self) -> (u64, i16, i8) { todo!() } } impl Num for EvilFloat { type FromStrRadixErr = (); fn from_str_radix(_: &str, _: u32) -> Result { 
todo!() } } #[test] #[should_panic] fn test_cmp_panic() { let evil_value = NotNan::new(EvilFloat(f32::NAN)).unwrap(); let x = NotNan::new(EvilFloat(0.0)).unwrap(); let _ = evil_value.cmp(&x); } ordered-float-4.2.2/tests/test.rs000064400000000000000000000707471046102023000150660ustar 00000000000000#![allow(clippy::float_cmp, clippy::eq_op, clippy::op_ref)] extern crate num_traits; extern crate ordered_float; pub use num_traits::float::FloatCore; pub use num_traits::{Bounded, FloatConst, FromPrimitive, Num, One, Signed, ToPrimitive, Zero}; #[cfg(feature = "std")] pub use num_traits::{Float, Pow}; pub use ordered_float::*; pub use std::cmp::Ordering::*; pub use std::convert::TryFrom; pub use std::{f32, f64, panic}; pub use std::collections::hash_map::RandomState; pub use std::collections::HashSet; pub use std::hash::*; fn not_nan(x: T) -> NotNan { NotNan::new(x).unwrap() } #[test] fn test_total_order() { let numberline = [ (-f32::INFINITY, 0), (-1.0, 1), (-0.0, 2), (0.0, 2), (1.0, 3), (f32::INFINITY, 4), (f32::NAN, 5), (-f32::NAN, 5), ]; for &(fi, i) in &numberline { for &(fj, j) in &numberline { assert_eq!(OrderedFloat(fi) < OrderedFloat(fj), i < j); assert_eq!(OrderedFloat(fi) > OrderedFloat(fj), i > j); assert_eq!(OrderedFloat(fi) <= OrderedFloat(fj), i <= j); assert_eq!(OrderedFloat(fi) >= OrderedFloat(fj), i >= j); assert_eq!(OrderedFloat(fi) == OrderedFloat(fj), i == j); assert_eq!(OrderedFloat(fi) != OrderedFloat(fj), i != j); assert_eq!(OrderedFloat(fi).cmp(&OrderedFloat(fj)), i.cmp(&j)); } } } #[test] fn ordered_f32_compare_regular_floats() { assert_eq!(OrderedFloat(7.0f32).cmp(&OrderedFloat(7.0)), Equal); assert_eq!(OrderedFloat(8.0f32).cmp(&OrderedFloat(7.0)), Greater); assert_eq!(OrderedFloat(4.0f32).cmp(&OrderedFloat(7.0)), Less); } #[test] fn ordered_f32_compare_regular_floats_op() { assert!(OrderedFloat(7.0f32) == OrderedFloat(7.0)); assert!(OrderedFloat(7.0f32) <= OrderedFloat(7.0)); assert!(OrderedFloat(7.0f32) >= OrderedFloat(7.0)); assert!(OrderedFloat(8.0f32) > OrderedFloat(7.0)); assert!(OrderedFloat(8.0f32) >= OrderedFloat(7.0)); assert!(OrderedFloat(4.0f32) < OrderedFloat(7.0)); assert!(OrderedFloat(4.0f32) <= OrderedFloat(7.0)); } #[test] fn ordered_f32_compare_nan() { let f32_nan: f32 = FloatCore::nan(); assert_eq!( OrderedFloat(f32_nan).cmp(&OrderedFloat(FloatCore::nan())), Equal ); assert_eq!( OrderedFloat(f32_nan).cmp(&OrderedFloat(-100000.0f32)), Greater ); assert_eq!( OrderedFloat(-100.0f32).cmp(&OrderedFloat(FloatCore::nan())), Less ); } #[test] fn ordered_f32_compare_nan_op() { let f32_nan: OrderedFloat = OrderedFloat(FloatCore::nan()); assert!(f32_nan == f32_nan); assert!(f32_nan <= f32_nan); assert!(f32_nan >= f32_nan); assert!(f32_nan > OrderedFloat(-100000.0f32)); assert!(f32_nan >= OrderedFloat(-100000.0f32)); assert!(OrderedFloat(-100.0f32) < f32_nan); assert!(OrderedFloat(-100.0f32) <= f32_nan); assert!(f32_nan > OrderedFloat(::infinity())); assert!(f32_nan >= OrderedFloat(::infinity())); assert!(f32_nan > OrderedFloat(::neg_infinity())); assert!(f32_nan >= OrderedFloat(::neg_infinity())); } #[test] fn ordered_f64_compare_regular_floats() { assert_eq!(OrderedFloat(7.0f64).cmp(&OrderedFloat(7.0)), Equal); assert_eq!(OrderedFloat(8.0f64).cmp(&OrderedFloat(7.0)), Greater); assert_eq!(OrderedFloat(4.0f64).cmp(&OrderedFloat(7.0)), Less); } /// This code is not run, but successfully compiling it checks that the given bounds /// are *sufficient* to write code that is generic over float type. 
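/// For example, the `inputs.sort()` call below compiles only because
/// `OrderedFloat<T>` implements `Ord` whenever `T: num_traits::float::FloatCore`.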
fn _generic_code_can_use_float_core(inputs: &mut [OrderedFloat]) where T: num_traits::float::FloatCore, { inputs.sort(); } #[test] fn not_nan32_zero() { assert_eq!(NotNan::::zero(), 0.0f32); assert!(NotNan::::zero().is_zero()); } #[test] fn not_nan32_one() { assert_eq!(NotNan::::one(), 1.0f32) } #[test] fn not_nan32_bounded() { assert_eq!(NotNan::::min_value(), ::min_value()); assert_eq!(NotNan::::max_value(), ::max_value()); } #[test] fn not_nan32_from_primitive() { assert_eq!(NotNan::::from_i8(42i8), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u8(42u8), Some(not_nan(42.0))); assert_eq!(NotNan::::from_i16(42i16), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u16(42u16), Some(not_nan(42.0))); assert_eq!(NotNan::::from_i32(42i32), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u32(42u32), Some(not_nan(42.0))); assert_eq!(NotNan::::from_i64(42i64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u64(42u64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_isize(42isize), Some(not_nan(42.0))); assert_eq!(NotNan::::from_usize(42usize), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f32(42f32), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f32(42f32), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(42f64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(42f64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f32(FloatCore::nan()), None); assert_eq!(NotNan::::from_f64(FloatCore::nan()), None); } #[test] fn not_nan32_to_primitive() { let x = not_nan(42.0f32); assert_eq!(x.to_u8(), Some(42u8)); assert_eq!(x.to_i8(), Some(42i8)); assert_eq!(x.to_u16(), Some(42u16)); assert_eq!(x.to_i16(), Some(42i16)); assert_eq!(x.to_u32(), Some(42u32)); assert_eq!(x.to_i32(), Some(42i32)); assert_eq!(x.to_u64(), Some(42u64)); assert_eq!(x.to_i64(), Some(42i64)); assert_eq!(x.to_usize(), Some(42usize)); assert_eq!(x.to_isize(), Some(42isize)); assert_eq!(x.to_f32(), Some(42f32)); assert_eq!(x.to_f32(), Some(42f32)); assert_eq!(x.to_f64(), Some(42f64)); assert_eq!(x.to_f64(), Some(42f64)); } #[test] fn not_nan32_num() { assert_eq!(NotNan::::from_str_radix("42.0", 10).unwrap(), 42.0f32); assert!(NotNan::::from_str_radix("NaN", 10).is_err()); } #[test] fn not_nan32_signed() { assert_eq!(not_nan(42f32).abs(), 42f32); assert_eq!(not_nan(-42f32).abs(), 42f32); assert_eq!(not_nan(50f32).abs_sub(¬_nan(8f32)), 42f32); assert_eq!(not_nan(8f32).abs_sub(¬_nan(50f32)), 0f32); } #[test] fn not_nan32_num_cast() { assert_eq!( as num_traits::NumCast>::from(42).unwrap(), 42f32 ); assert_eq!( as num_traits::NumCast>::from(::nan()), None ); } #[test] fn ordered_f64_compare_nan() { let f64_nan: f64 = FloatCore::nan(); assert_eq!( OrderedFloat(f64_nan).cmp(&OrderedFloat(FloatCore::nan())), Equal ); assert_eq!( OrderedFloat(f64_nan).cmp(&OrderedFloat(-100000.0f64)), Greater ); assert_eq!( OrderedFloat(-100.0f64).cmp(&OrderedFloat(FloatCore::nan())), Less ); } #[test] fn ordered_f64_compare_regular_floats_op() { assert!(OrderedFloat(7.0) == OrderedFloat(7.0)); assert!(OrderedFloat(7.0) <= OrderedFloat(7.0)); assert!(OrderedFloat(7.0) >= OrderedFloat(7.0)); assert!(OrderedFloat(8.0) > OrderedFloat(7.0)); assert!(OrderedFloat(8.0) >= OrderedFloat(7.0)); assert!(OrderedFloat(4.0) < OrderedFloat(7.0)); assert!(OrderedFloat(4.0) <= OrderedFloat(7.0)); } #[test] fn ordered_f64_compare_nan_op() { let f64_nan: OrderedFloat = OrderedFloat(FloatCore::nan()); assert!(f64_nan == f64_nan); assert!(f64_nan <= f64_nan); assert!(f64_nan >= f64_nan); assert!(f64_nan > OrderedFloat(-100000.0)); assert!(f64_nan >= OrderedFloat(-100000.0)); 
assert!(OrderedFloat(-100.0) < f64_nan); assert!(OrderedFloat(-100.0) <= f64_nan); assert!(f64_nan > OrderedFloat(::infinity())); assert!(f64_nan >= OrderedFloat(::infinity())); assert!(f64_nan > OrderedFloat(::neg_infinity())); assert!(f64_nan >= OrderedFloat(::neg_infinity())); } #[test] fn not_nan32_compare_regular_floats() { assert_eq!(not_nan(7.0f32).cmp(¬_nan(7.0)), Equal); assert_eq!(not_nan(8.0f32).cmp(¬_nan(7.0)), Greater); assert_eq!(not_nan(4.0f32).cmp(¬_nan(7.0)), Less); } #[test] fn not_nan32_fail_when_constructing_with_nan() { let f32_nan: f32 = FloatCore::nan(); assert!(NotNan::new(f32_nan).is_err()); } #[test] fn not_nan32_calculate_correctly() { assert_eq!(*(not_nan(5.0f32) + not_nan(4.0f32)), 5.0f32 + 4.0f32); assert_eq!(*(not_nan(5.0f32) + 4.0f32), 5.0f32 + 4.0f32); assert_eq!(*(not_nan(5.0f32) - not_nan(4.0f32)), 5.0f32 - 4.0f32); assert_eq!(*(not_nan(5.0f32) - 4.0f32), 5.0f32 - 4.0f32); assert_eq!(*(not_nan(5.0f32) * not_nan(4.0f32)), 5.0f32 * 4.0f32); assert_eq!(*(not_nan(5.0f32) * 4.0f32), 5.0f32 * 4.0f32); assert_eq!(*(not_nan(8.0f32) / not_nan(4.0f32)), 8.0f32 / 4.0f32); assert_eq!(*(not_nan(8.0f32) / 4.0f32), 8.0f32 / 4.0f32); assert_eq!(*(not_nan(8.0f32) % not_nan(4.0f32)), 8.0f32 % 4.0f32); assert_eq!(*(not_nan(8.0f32) % 4.0f32), 8.0f32 % 4.0f32); assert_eq!(*(-not_nan(1.0f32)), -1.0f32); assert!(panic::catch_unwind(|| not_nan(0.0f32) + f32::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f32) - f32::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f32) * f32::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f32) / f32::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f32) % f32::NAN).is_err()); let mut number = not_nan(5.0f32); number += not_nan(4.0f32); assert_eq!(*number, 9.0f32); number -= not_nan(4.0f32); assert_eq!(*number, 5.0f32); number *= not_nan(4.0f32); assert_eq!(*number, 20.0f32); number /= not_nan(4.0f32); assert_eq!(*number, 5.0f32); number %= not_nan(4.0f32); assert_eq!(*number, 1.0f32); number = not_nan(5.0f32); number += 4.0f32; assert_eq!(*number, 9.0f32); number -= 4.0f32; assert_eq!(*number, 5.0f32); number *= 4.0f32; assert_eq!(*number, 20.0f32); number /= 4.0f32; assert_eq!(*number, 5.0f32); number %= 4.0f32; assert_eq!(*number, 1.0f32); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f32); tmp += f32::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f32); tmp -= f32::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f32); tmp *= f32::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f32); tmp /= f32::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f32); tmp %= f32::NAN; }) .is_err()); } #[test] fn not_nan64_compare_regular_floats() { assert_eq!(not_nan(7.0f64).cmp(¬_nan(7.0)), Equal); assert_eq!(not_nan(8.0f64).cmp(¬_nan(7.0)), Greater); assert_eq!(not_nan(4.0f64).cmp(¬_nan(7.0)), Less); } #[test] fn not_nan64_fail_when_constructing_with_nan() { let f64_nan: f64 = FloatCore::nan(); assert!(NotNan::new(f64_nan).is_err()); } #[test] fn not_nan64_calculate_correctly() { assert_eq!(*(not_nan(5.0f64) + not_nan(4.0f64)), 5.0f64 + 4.0f64); assert_eq!(*(not_nan(5.0f64) + 4.0f64), 5.0f64 + 4.0f64); assert_eq!(*(not_nan(5.0f64) - not_nan(4.0f64)), 5.0f64 - 4.0f64); assert_eq!(*(not_nan(5.0f64) - 4.0f64), 5.0f64 - 4.0f64); assert_eq!(*(not_nan(5.0f64) * not_nan(4.0f64)), 5.0f64 * 4.0f64); assert_eq!(*(not_nan(5.0f64) * 4.0f64), 5.0f64 * 4.0f64); assert_eq!(*(not_nan(8.0f64) / 
not_nan(4.0f64)), 8.0f64 / 4.0f64); assert_eq!(*(not_nan(8.0f64) / 4.0f64), 8.0f64 / 4.0f64); assert_eq!(*(not_nan(8.0f64) % not_nan(4.0f64)), 8.0f64 % 4.0f64); assert_eq!(*(not_nan(8.0f64) % 4.0f64), 8.0f64 % 4.0f64); assert_eq!(*(-not_nan(1.0f64)), -1.0f64); assert!(panic::catch_unwind(|| not_nan(0.0f64) + f64::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f64) - f64::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f64) * f64::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f64) / f64::NAN).is_err()); assert!(panic::catch_unwind(|| not_nan(0.0f64) % f64::NAN).is_err()); let mut number = not_nan(5.0f64); number += not_nan(4.0f64); assert_eq!(*number, 9.0f64); number -= not_nan(4.0f64); assert_eq!(*number, 5.0f64); number *= not_nan(4.0f64); assert_eq!(*number, 20.0f64); number /= not_nan(4.0f64); assert_eq!(*number, 5.0f64); number %= not_nan(4.0f64); assert_eq!(*number, 1.0f64); number = not_nan(5.0f64); number += 4.0f64; assert_eq!(*number, 9.0f64); number -= 4.0f64; assert_eq!(*number, 5.0f64); number *= 4.0f64; assert_eq!(*number, 20.0f64); number /= 4.0f64; assert_eq!(*number, 5.0f64); number %= 4.0f64; assert_eq!(*number, 1.0f64); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f64); tmp += f64::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f64); tmp -= f64::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f64); tmp *= f64::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f64); tmp /= f64::NAN; }) .is_err()); assert!(panic::catch_unwind(|| { let mut tmp = not_nan(0.0f64); tmp %= f64::NAN; }) .is_err()); } #[test] fn not_nan64_zero() { assert_eq!(NotNan::::zero(), not_nan(0.0f64)); assert!(NotNan::::zero().is_zero()); } #[test] fn not_nan64_one() { assert_eq!(NotNan::::one(), not_nan(1.0f64)) } #[test] fn not_nan64_bounded() { assert_eq!(NotNan::::min_value(), ::min_value()); assert_eq!(NotNan::::max_value(), ::max_value()); } #[test] fn not_nan64_from_primitive() { assert_eq!(NotNan::::from_i8(42i8), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u8(42u8), Some(not_nan(42.0))); assert_eq!(NotNan::::from_i16(42i16), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u16(42u16), Some(not_nan(42.0))); assert_eq!(NotNan::::from_i32(42i32), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u32(42u32), Some(not_nan(42.0))); assert_eq!(NotNan::::from_i64(42i64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_u64(42u64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_isize(42isize), Some(not_nan(42.0))); assert_eq!(NotNan::::from_usize(42usize), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(42f64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(42f64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(42f64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(42f64), Some(not_nan(42.0))); assert_eq!(NotNan::::from_f64(FloatCore::nan()), None); assert_eq!(NotNan::::from_f64(FloatCore::nan()), None); } #[test] fn not_nan64_to_primitive() { let x = not_nan(42.0f64); assert_eq!(x.to_u8(), Some(42u8)); assert_eq!(x.to_i8(), Some(42i8)); assert_eq!(x.to_u16(), Some(42u16)); assert_eq!(x.to_i16(), Some(42i16)); assert_eq!(x.to_u32(), Some(42u32)); assert_eq!(x.to_i32(), Some(42i32)); assert_eq!(x.to_u64(), Some(42u64)); assert_eq!(x.to_i64(), Some(42i64)); assert_eq!(x.to_usize(), Some(42usize)); assert_eq!(x.to_isize(), Some(42isize)); assert_eq!(x.to_f64(), Some(42f64)); assert_eq!(x.to_f64(), Some(42f64)); assert_eq!(x.to_f64(), Some(42f64)); 
assert_eq!(x.to_f64(), Some(42f64)); } #[test] fn not_nan64_num() { assert_eq!( NotNan::::from_str_radix("42.0", 10).unwrap(), not_nan(42.0f64) ); assert!(NotNan::::from_str_radix("NaN", 10).is_err()); } #[test] fn not_nan64_signed() { assert_eq!(not_nan(42f64).abs(), not_nan(42f64)); assert_eq!(not_nan(-42f64).abs(), not_nan(42f64)); assert_eq!(not_nan(50f64).abs_sub(¬_nan(8f64)), not_nan(42f64)); assert_eq!(not_nan(8f64).abs_sub(¬_nan(50f64)), not_nan(0f64)); } #[test] fn not_nan64_num_cast() { assert_eq!( as num_traits::NumCast>::from(42), Some(not_nan(42f64)) ); assert_eq!( as num_traits::NumCast>::from(::nan()), None ); } #[test] fn hash_zero_and_neg_zero_to_the_same_hc_ordered_float64() { let state = RandomState::new(); let mut h1 = state.build_hasher(); let mut h2 = state.build_hasher(); OrderedFloat::from(0f64).hash(&mut h1); OrderedFloat::from(-0f64).hash(&mut h2); assert_eq!(h1.finish(), h2.finish()); } #[test] fn hash_zero_and_neg_zero_to_the_same_hc_not_nan32() { let state = RandomState::new(); let mut h1 = state.build_hasher(); let mut h2 = state.build_hasher(); NotNan::try_from(0f32).unwrap().hash(&mut h1); NotNan::try_from(-0f32).unwrap().hash(&mut h2); assert_eq!(h1.finish(), h2.finish()); } #[test] fn hash_different_nans_to_the_same_hc() { let state = RandomState::new(); let mut h1 = state.build_hasher(); let mut h2 = state.build_hasher(); OrderedFloat::from(::nan()).hash(&mut h1); OrderedFloat::from(-::nan()).hash(&mut h2); assert_eq!(h1.finish(), h2.finish()); } #[test] fn hash_inf_and_neg_inf_to_different_hcs() { let state = RandomState::new(); let mut h1 = state.build_hasher(); let mut h2 = state.build_hasher(); OrderedFloat::from(f64::INFINITY).hash(&mut h1); OrderedFloat::from(f64::NEG_INFINITY).hash(&mut h2); assert!(h1.finish() != h2.finish()); } #[test] fn hash_is_good_for_whole_numbers() { let state = RandomState::new(); let limit = 10000; let mut set = ::std::collections::HashSet::with_capacity(limit); for i in 0..limit { let mut h = state.build_hasher(); OrderedFloat::from(i as f64).hash(&mut h); set.insert(h.finish()); } // This allows 100 collisions, which is far too // many, but should guard against transient issues // that will result from using RandomState let pct_unique = set.len() as f64 / limit as f64; assert!(0.99f64 < pct_unique, "percent-unique={}", pct_unique); } #[test] fn hash_is_good_for_fractional_numbers() { let state = RandomState::new(); let limit = 10000; let mut set = ::std::collections::HashSet::with_capacity(limit); for i in 0..limit { let mut h = state.build_hasher(); OrderedFloat::from(i as f64 * (1f64 / limit as f64)).hash(&mut h); set.insert(h.finish()); } // This allows 100 collisions, which is far too // many, but should guard against transient issues // that will result from using RandomState let pct_unique = set.len() as f64 / limit as f64; assert!(0.99f64 < pct_unique, "percent-unique={}", pct_unique); } #[test] #[should_panic] fn test_add_fails_on_nan() { let a = not_nan(f32::INFINITY); let b = not_nan(f32::NEG_INFINITY); let _c = a + b; } #[test] #[should_panic] fn test_add_fails_on_nan_ref() { let a = not_nan(f32::INFINITY); let b = not_nan(f32::NEG_INFINITY); let _c = a + &b; } #[test] #[should_panic] fn test_add_fails_on_nan_ref_ref() { let a = not_nan(f32::INFINITY); let b = not_nan(f32::NEG_INFINITY); let _c = &a + &b; } #[test] #[should_panic] fn test_add_fails_on_nan_t_ref() { let a = not_nan(f32::INFINITY); let b = f32::NEG_INFINITY; let _c = a + &b; } #[test] #[should_panic] fn test_add_fails_on_nan_ref_t_ref() { let a 
= not_nan(f32::INFINITY); let b = f32::NEG_INFINITY; let _c = &a + &b; } #[test] #[should_panic] fn test_add_fails_on_nan_ref_t() { let a = not_nan(f32::INFINITY); let b = f32::NEG_INFINITY; let _c = &a + b; } #[test] #[should_panic] fn test_add_assign_fails_on_nan_ref() { let mut a = not_nan(f32::INFINITY); let b = not_nan(f32::NEG_INFINITY); a += &b; } #[test] #[should_panic] fn test_add_assign_fails_on_nan_t_ref() { let mut a = not_nan(f32::INFINITY); let b = f32::NEG_INFINITY; a += &b; } #[test] #[should_panic] fn test_add_assign_fails_on_nan_t() { let mut a = not_nan(f32::INFINITY); let b = f32::NEG_INFINITY; a += b; } #[test] fn add() { assert_eq!(not_nan(0.0) + not_nan(0.0), 0.0); assert_eq!(not_nan(0.0) + ¬_nan(0.0), 0.0); assert_eq!(¬_nan(0.0) + not_nan(0.0), 0.0); assert_eq!(¬_nan(0.0) + ¬_nan(0.0), 0.0); assert_eq!(not_nan(0.0) + 0.0, 0.0); assert_eq!(not_nan(0.0) + &0.0, 0.0); assert_eq!(¬_nan(0.0) + 0.0, 0.0); assert_eq!(¬_nan(0.0) + &0.0, 0.0); assert_eq!(OrderedFloat(0.0) + OrderedFloat(0.0), 0.0); assert_eq!(OrderedFloat(0.0) + &OrderedFloat(0.0), 0.0); assert_eq!(&OrderedFloat(0.0) + OrderedFloat(0.0), 0.0); assert_eq!(OrderedFloat(0.0) + 0.0, 0.0); assert_eq!(OrderedFloat(0.0) + &0.0, 0.0); assert_eq!(&OrderedFloat(0.0) + 0.0, 0.0); assert_eq!(&OrderedFloat(0.0) + &0.0, 0.0); } #[test] fn ordered_f32_neg() { assert_eq!(OrderedFloat(-7.0f32), -OrderedFloat(7.0f32)); } #[test] fn ordered_f64_neg() { assert_eq!(OrderedFloat(-7.0f64), -OrderedFloat(7.0f64)); } #[test] #[should_panic] fn test_sum_fails_on_nan() { let a = not_nan(f32::INFINITY); let b = not_nan(f32::NEG_INFINITY); let _c: NotNan<_> = [a, b].iter().sum(); } #[test] #[should_panic] fn test_product_fails_on_nan() { let a = not_nan(f32::INFINITY); let b = not_nan(0f32); let _c: NotNan<_> = [a, b].iter().product(); } #[test] fn not_nan64_sum_product() { let a = not_nan(2138.1237); let b = not_nan(132f64); let c = not_nan(5.1); assert_eq!( std::iter::empty::>().sum::>(), NotNan::new(0f64).unwrap() ); assert_eq!([a].iter().sum::>(), a); assert_eq!([a, b].iter().sum::>(), a + b); assert_eq!([a, b, c].iter().sum::>(), a + b + c); assert_eq!( std::iter::empty::>().product::>(), NotNan::new(1f64).unwrap() ); assert_eq!([a].iter().product::>(), a); assert_eq!([a, b].iter().product::>(), a * b); assert_eq!([a, b, c].iter().product::>(), a * b * c); } #[test] fn not_nan_usage_in_const_context() { const A: NotNan = unsafe { NotNan::new_unchecked(111f32) }; assert_eq!(A, NotNan::new(111f32).unwrap()); } #[test] fn not_nan_panic_safety() { let catch_op = |mut num, op: fn(&mut NotNan<_>)| { let mut num_ref = panic::AssertUnwindSafe(&mut num); let _ = panic::catch_unwind(move || op(&mut num_ref)); num }; assert!(!catch_op(not_nan(f32::INFINITY), |a| *a += f32::NEG_INFINITY).is_nan()); assert!(!catch_op(not_nan(f32::INFINITY), |a| *a -= f32::INFINITY).is_nan()); assert!(!catch_op(not_nan(0.0), |a| *a *= f32::INFINITY).is_nan()); assert!(!catch_op(not_nan(0.0), |a| *a /= 0.0).is_nan()); assert!(!catch_op(not_nan(0.0), |a| *a %= 0.0).is_nan()); } #[test] fn from_ref() { let f = 1.0f32; let o: &OrderedFloat = (&f).into(); assert_eq!(*o, 1.0f32); let mut f = 1.0f64; let o: &OrderedFloat = (&f).into(); assert_eq!(*o, 1.0f64); let o: &mut OrderedFloat = (&mut f).into(); assert_eq!(*o, 1.0f64); *o = OrderedFloat(2.0); assert_eq!(*o, 2.0f64); assert_eq!(f, 2.0f64); } macro_rules! 
test_float_const_method { ($type:ident < $inner:ident >, $method:ident) => { assert_eq!($type::<$inner>::$method().into_inner(), $inner::$method()) }; } macro_rules! test_float_const_methods { ($type:ident < $inner:ident >) => { test_float_const_method!($type<$inner>, E); test_float_const_method!($type<$inner>, FRAC_1_PI); test_float_const_method!($type<$inner>, FRAC_1_SQRT_2); test_float_const_method!($type<$inner>, FRAC_2_PI); test_float_const_method!($type<$inner>, FRAC_2_SQRT_PI); test_float_const_method!($type<$inner>, FRAC_PI_2); test_float_const_method!($type<$inner>, FRAC_PI_3); test_float_const_method!($type<$inner>, FRAC_PI_4); test_float_const_method!($type<$inner>, FRAC_PI_6); test_float_const_method!($type<$inner>, FRAC_PI_8); test_float_const_method!($type<$inner>, LN_10); test_float_const_method!($type<$inner>, LN_2); test_float_const_method!($type<$inner>, LOG10_E); test_float_const_method!($type<$inner>, LOG2_E); test_float_const_method!($type<$inner>, PI); test_float_const_method!($type<$inner>, SQRT_2); }; } #[test] fn float_consts_equal_inner() { test_float_const_methods!(OrderedFloat); test_float_const_methods!(OrderedFloat); test_float_const_methods!(NotNan); test_float_const_methods!(NotNan); } #[cfg(feature = "std")] macro_rules! test_pow_ord { ($type:ident < $inner:ident >) => { assert_eq!($type::<$inner>::from(3.0).pow(2i8), OrderedFloat(9.0)); assert_eq!($type::<$inner>::from(3.0).pow(2i16), OrderedFloat(9.0)); assert_eq!($type::<$inner>::from(3.0).pow(2i32), OrderedFloat(9.0)); assert_eq!($type::<$inner>::from(3.0).pow(2u8), OrderedFloat(9.0)); assert_eq!($type::<$inner>::from(3.0).pow(2u16), OrderedFloat(9.0)); assert_eq!($type::<$inner>::from(3.0).pow(2f32), OrderedFloat(9.0)); }; } #[cfg(feature = "std")] macro_rules! test_pow_nn { ($type:ident < $inner:ident >) => { assert_eq!( $type::<$inner>::new(3.0).unwrap().pow(2i8), NotNan::new(9.0).unwrap() ); assert_eq!( $type::<$inner>::new(3.0).unwrap().pow(2u8), NotNan::new(9.0).unwrap() ); assert_eq!( $type::<$inner>::new(3.0).unwrap().pow(2i16), NotNan::new(9.0).unwrap() ); assert_eq!( $type::<$inner>::new(3.0).unwrap().pow(2u16), NotNan::new(9.0).unwrap() ); assert_eq!( $type::<$inner>::new(3.0).unwrap().pow(2i32), NotNan::new(9.0).unwrap() ); assert_eq!( $type::<$inner>::new(3.0).unwrap().pow(2f32), NotNan::new(9.0).unwrap() ); }; } #[cfg(feature = "std")] #[test] fn test_pow_works() { assert_eq!(OrderedFloat(3.0).pow(OrderedFloat(2.0)), OrderedFloat(9.0)); test_pow_ord!(OrderedFloat); test_pow_ord!(OrderedFloat); assert_eq!( NotNan::new(3.0).unwrap().pow(NotNan::new(2.0).unwrap()), NotNan::new(9.0).unwrap() ); test_pow_nn!(NotNan); test_pow_nn!(NotNan); // Only f64 have Pow impl by default, so checking those seperate from macro assert_eq!(OrderedFloat::::from(3.0).pow(2f64), OrderedFloat(9.0)); assert_eq!( NotNan::::new(3.0).unwrap().pow(2f64), NotNan::new(9.0).unwrap() ); } #[cfg(feature = "std")] #[test] #[should_panic] fn test_pow_fails_on_nan() { let a = not_nan(-1.0); let b = f32::NAN; a.pow(b); } #[cfg(feature = "arbitrary")] mod arbitrary_test { use super::{NotNan, OrderedFloat}; use arbitrary::{Arbitrary, Unstructured}; #[test] fn exhaustive() { // Exhaustively search all patterns of sign and exponent bits plus a few mantissa bits. for high_bytes in 0..=u16::MAX { let [h1, h2] = high_bytes.to_be_bytes(); // Each of these should not // * panic, // * return an error, or // * need more bytes than given. 
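            // (Each `arbitrary()` call consumes exactly `size_of::<f32>()` or
            // `size_of::<f64>()` bytes, which is why 4- and 8-byte buffers are enough;
            // see `size_hints` below.)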
            let n32: NotNan<f32> = Unstructured::new(&[h1, h2, h1, h2])
                .arbitrary()
                .expect("NotNan failure");
            let n64: NotNan<f64> = Unstructured::new(&[h1, h2, h1, h2, h1, h2, h1, h2])
                .arbitrary()
                .expect("NotNan failure");
            let _: OrderedFloat<f32> = Unstructured::new(&[h1, h2, h1, h2])
                .arbitrary()
                .expect("OrderedFloat failure");
            let _: OrderedFloat<f64> = Unstructured::new(&[h1, h2, h1, h2, h1, h2, h1, h2])
                .arbitrary()
                .expect("OrderedFloat failure");

            // Check for violation of NotNan's property of never containing a NaN.
            assert!(!n32.into_inner().is_nan());
            assert!(!n64.into_inner().is_nan());
        }
    }

    #[test]
    fn size_hints() {
        assert_eq!(NotNan::<f32>::size_hint(0), (4, Some(4)));
        assert_eq!(NotNan::<f64>::size_hint(0), (8, Some(8)));
        assert_eq!(OrderedFloat::<f32>::size_hint(0), (4, Some(4)));
        assert_eq!(OrderedFloat::<f64>::size_hint(0), (8, Some(8)));
    }
}
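
// A small usage sketch: because `OrderedFloat` is totally ordered, plain floats
// (including NaN) can serve as keys in ordered collections such as `BTreeSet`,
// with NaN sorting above positive infinity as in `test_total_order`.
#[test]
fn ordered_float_usable_as_btreeset_key() {
    use std::collections::BTreeSet;

    let mut set = BTreeSet::new();
    set.insert(OrderedFloat(f32::NAN));
    set.insert(OrderedFloat(f32::INFINITY));
    set.insert(OrderedFloat(1.0f32));
    set.insert(OrderedFloat(f32::NEG_INFINITY));

    let sorted: Vec<OrderedFloat<f32>> = set.into_iter().collect();
    assert_eq!(sorted[0], OrderedFloat(f32::NEG_INFINITY));
    assert_eq!(sorted[1], OrderedFloat(1.0f32));
    assert_eq!(sorted[2], OrderedFloat(f32::INFINITY));
    assert!(sorted[3].is_nan());
}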