bencher-0.1.5/.gitignore010064400017510001751000000000251322425607000133340ustar0000000000000000/Cargo.lock /target/ bencher-0.1.5/.travis.yml010064400017510001751000000004441302510434500134560ustar0000000000000000language: rust sudo: false # run builds for all the trains (and more) rust: - 1.10.0 - stable - beta - nightly # the main build script: - | cargo build && cargo test && cargo test --release && cargo doc && cargo bench branches: only: - master bencher-0.1.5/Cargo.toml.orig010064400017510001751000000013271322425612500142420ustar0000000000000000[package] authors = ["bluss", "The Rust Project Developers"] name = "bencher" version = "0.1.5" license = "MIT/Apache-2.0" repository = "https://github.com/bluss/bencher/" documentation = "https://docs.rs/bencher/" description = "A port of the libtest (unstable Rust) benchmark runner to Rust stable releases. Supports running benchmarks and filtering based on the name. Benchmark execution works exactly the same way and no more (caveat: black_box is still missing!)." keywords = ["benchmark"] categories = ["development-tools::profiling", "rust-patterns"] [lib] name = "bencher" path = "lib.rs" bench = false [[bench]] name = "example" harness = false [dependencies] [package.metadata.release] no-dev-version = true bencher-0.1.5/Cargo.toml0000644000000023350000000000000105050ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "bencher" version = "0.1.5" authors = ["bluss", "The Rust Project Developers"] description = "A port of the libtest (unstable Rust) benchmark runner to Rust stable releases. Supports running benchmarks and filtering based on the name. Benchmark execution works exactly the same way and no more (caveat: black_box is still missing!)." documentation = "https://docs.rs/bencher/" keywords = ["benchmark"] categories = ["development-tools::profiling", "rust-patterns"] license = "MIT/Apache-2.0" repository = "https://github.com/bluss/bencher/" [package.metadata.release] no-dev-version = true [lib] name = "bencher" path = "lib.rs" bench = false [[bench]] name = "example" harness = false [dependencies] bencher-0.1.5/LICENSE-APACHE010064400017510001751000000251371302510434500132770ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. bencher-0.1.5/LICENSE-MIT010064400017510001751000000020231302510434500127740ustar0000000000000000Copyright (c) 2015 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. bencher-0.1.5/README.rst010064400017510001751000000035121322425611100130330ustar0000000000000000 A port of the libtest (unstable Rust) benchmark runner to Rust stable releases. Supports running benchmarks and filtering based on the name. Benchmark execution works exactly the same way and no more (Warning: black_box is not working perfectly!). Please read the `API documentation here`__ (it includes a usage example). __ https://docs.rs/bencher/ |build_status|_ |crates|_ .. |build_status| image:: https://travis-ci.org/bluss/bencher.svg?branch=master .. _build_status: https://travis-ci.org/bluss/bencher .. |crates| image:: https://meritbadge.herokuapp.com/bencher .. _crates: https://crates.io/crates/bencher Recent Changes -------------- - 0.1.5 - Support trailing commas in the macros by @tbu- - 0.1.4 - Add crates.io categories - 0.1.3 - Fix doc URL - Fix clippy warnings (by @llogiq) - 0.1.2 - Remove unused components (speeds up build time of the crate) - 0.1.1 - Add a provisional implementation of ``black_box``. It's not as good as the original version. (Since reproducibility is key, we will use the same implementation on both stable and nightly.) - Add example for how to set up this to run with ``cargo bench`` on stable. This crate is itself an example of that, see ``Cargo.toml`` and ``benches/`` - 0.1.0 - Initial release Authors ------- Principal original authors of the benchmark and statistics code in the Rust project are: + Brian Anderson + Graydon Hoare Very very many have contributed to lib.rs and stats.rs however, so author credit is due to: + The Rust Project Developers License ------- Dual-licensed just like the Rust project. Licensed under the Apache License, Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 or the MIT license http://opensource.org/licenses/MIT, at your option. This file may not be copied, modified, or distributed except according to those terms. bencher-0.1.5/benches/example.rs010064400017510001751000000006601322425606200147620ustar0000000000000000 // Also look in Cargo.toml how to use a benchmark setup with harness = false #[macro_use] extern crate bencher; use bencher::Bencher; fn a(bench: &mut Bencher) { bench.iter(|| { (0..1000).fold(0, |x, y| x + y) }) } fn b(bench: &mut Bencher) { const N: usize = 1024; bench.iter(|| { vec![0u8; N] }); bench.bytes = N as u64; } benchmark_group!(benches, a, b); benchmark_main!(benches); bencher-0.1.5/lib.rs010064400017510001751000000510061313660640000124630ustar0000000000000000// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 or the MIT license // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Simplified stable-compatible benchmark runner. //! //! Almost all user code will only be interested in `Bencher` and the //! macros that are used to describe benchmarker functions and //! the benchmark runner. //! //! NOTE: There's no proper `black_box` yet in this stable port of the //! benchmark runner, only a workaround implementation. It may not work //! exactly like the upstream `test::black_box`. //! //! 
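//! Because the `black_box` here is only a workaround, one common pattern is
//! to also pass benchmark *inputs* through it so the optimizer cannot
//! constant-fold the measured work away. A minimal sketch (the fold is just
//! an illustrative workload, not part of this crate's API):
//!
//! ```ignore
//! use bencher::{Bencher, black_box};
//!
//! fn fold_opaque(bench: &mut Bencher) {
//!     let n = black_box(1000u64);
//!     bench.iter(|| (0..n).fold(0u64, |acc, x| acc + x));
//! }
//! ```
//!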
One way to use this crate is to use it as dev-dependency and setup //! cargo to compile a file in `benches/` that runs without the testing harness. //! //! In Cargo.toml: //! //! ```ignore //! [[bench]] //! name = "example" //! harness = false //! ``` //! //! In benches/example.rs: //! //! ``` //! #[macro_use] //! extern crate bencher; //! //! use bencher::Bencher; //! //! fn a(bench: &mut Bencher) { //! bench.iter(|| { //! (0..1000).fold(0, |x, y| x + y) //! }) //! } //! //! fn b(bench: &mut Bencher) { //! const N: usize = 1024; //! bench.iter(|| { //! vec![0u8; N] //! }); //! //! bench.bytes = N as u64; //! } //! //! benchmark_group!(benches, a, b); //! benchmark_main!(benches); //! //! # #[cfg(never)] //! # fn main() { } //! ``` //! //! Use `cargo bench` as usual. A command line argument can be used to filter //! which benchmarks to run. pub use self::TestFn::*; use self::TestResult::*; use self::TestEvent::*; use self::NamePadding::*; use self::OutputLocation::*; use std::borrow::Cow; use std::cmp; use std::fmt; use std::fs::File; use std::io::prelude::*; use std::io; use std::iter::repeat; use std::mem::forget; use std::path::PathBuf; use std::ptr; use std::time::{Instant, Duration}; pub mod stats; mod macros; // The name of a test. By convention this follows the rules for rust // paths; i.e. it should be a series of identifiers separated by double // colons. This way if some test runner wants to arrange the tests // hierarchically it may. pub type TestName = Cow<'static, str>; #[derive(Clone, Copy, PartialEq, Eq)] enum NamePadding { PadOnRight, } impl TestDesc { fn padded_name(&self, column_count: usize, align: NamePadding) -> String { let mut name = self.name.to_string(); let fill = column_count.saturating_sub(name.len()); let pad = repeat(" ").take(fill).collect::(); match align { PadOnRight => { name.push_str(&pad); name } } } } /// Represents a benchmark function. pub trait TDynBenchFn: Send { fn run(&self, harness: &mut Bencher); } // A function that runs a test. If the function returns successfully, // the test succeeds; if the function panics then the test fails. We // may need to come up with a more clever definition of test in order // to support isolation of tests into threads. pub enum TestFn { StaticBenchFn(fn(&mut Bencher)), DynBenchFn(Box), } impl TestFn { fn padding(&self) -> NamePadding { match *self { StaticBenchFn(..) | DynBenchFn(..) => PadOnRight, } } } impl fmt::Debug for TestFn { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(match *self { StaticBenchFn(..) => "StaticBenchFn(..)", DynBenchFn(..) => "DynBenchFn(..)", }) } } /// Manager of the benchmarking runs. /// /// This is fed into functions marked with `#[bench]` to allow for /// set-up & tear-down before running a piece of code repeatedly via a /// call to `iter`. #[derive(Copy, Clone)] pub struct Bencher { iterations: u64, dur: Duration, pub bytes: u64, } // The definition of a single test. A test runner will run a list of // these. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct TestDesc { pub name: TestName, pub ignore: bool, } #[derive(Clone)] pub struct TestPaths { pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs pub base: PathBuf, // e.g., compile-test, auxiliary pub relative_dir: PathBuf, // e.g., foo/bar } #[derive(Debug)] pub struct TestDescAndFn { pub desc: TestDesc, pub testfn: TestFn, } #[derive(Default)] pub struct TestOpts { pub filter: Option, pub run_ignored: bool, pub logfile: Option, pub quiet: bool, pub test_threads: Option, } #[derive(Clone, PartialEq)] pub struct BenchSamples { ns_iter_summ: stats::Summary, mb_s: usize, } #[derive(Clone, PartialEq)] enum TestResult { TrIgnored, TrBench(BenchSamples), } unsafe impl Send for TestResult {} enum OutputLocation { Raw(T), } struct ConsoleTestState { log_out: Option, out: OutputLocation, quiet: bool, total: usize, passed: usize, failed: usize, ignored: usize, measured: usize, failures: Vec<(TestDesc, Vec)>, max_name_len: usize, // number of columns to fill when aligning names } impl ConsoleTestState<()> { pub fn new(opts: &TestOpts) -> io::Result> { let log_out = match opts.logfile { Some(ref path) => Some(try!(File::create(path))), None => None, }; let out = Raw(io::stdout()); Ok(ConsoleTestState { out: out, log_out: log_out, quiet: opts.quiet, total: 0, passed: 0, failed: 0, ignored: 0, measured: 0, failures: Vec::new(), max_name_len: 0, }) } } impl ConsoleTestState { pub fn write_ignored(&mut self) -> io::Result<()> { self.write_short_result("ignored", "i") } pub fn write_bench(&mut self) -> io::Result<()> { self.write_pretty("bench") } pub fn write_short_result(&mut self, verbose: &str, quiet: &str) -> io::Result<()> { if self.quiet { self.write_pretty(quiet) } else { try!(self.write_pretty(verbose)); self.write_plain("\n") } } pub fn write_pretty(&mut self, word: &str) -> io::Result<()> { match self.out { Raw(ref mut stdout) => { try!(stdout.write_all(word.as_bytes())); stdout.flush() } } } pub fn write_plain(&mut self, s: &str) -> io::Result<()> { match self.out { Raw(ref mut stdout) => { try!(stdout.write_all(s.as_bytes())); stdout.flush() } } } pub fn write_run_start(&mut self, len: usize) -> io::Result<()> { self.total = len; let noun = if len != 1 { "tests" } else { "test" }; self.write_plain(&format!("\nrunning {} {}\n", len, noun)) } pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> { if self.quiet && align != PadOnRight { Ok(()) } else { let name = test.padded_name(self.max_name_len, align); self.write_plain(&format!("test {} ... 
", name)) } } pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> { match *result { TrIgnored => self.write_ignored(), TrBench(ref bs) => { try!(self.write_bench()); self.write_plain(&format!(": {}\n", fmt_bench_samples(bs))) } } } pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> { match self.log_out { None => Ok(()), Some(ref mut o) => { let s = format!("{} {}\n", match *result { TrIgnored => "ignored".to_owned(), TrBench(ref bs) => fmt_bench_samples(bs), }, test.name); o.write_all(s.as_bytes()) } } } pub fn write_failures(&mut self) -> io::Result<()> { try!(self.write_plain("\nfailures:\n")); let mut failures = Vec::new(); let mut fail_out = String::new(); for &(ref f, ref stdout) in &self.failures { failures.push(f.name.to_string()); if !stdout.is_empty() { fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name)); let output = String::from_utf8_lossy(stdout); fail_out.push_str(&output); fail_out.push_str("\n"); } } if !fail_out.is_empty() { try!(self.write_plain("\n")); try!(self.write_plain(&fail_out)); } try!(self.write_plain("\nfailures:\n")); failures.sort(); for name in &failures { try!(self.write_plain(&format!(" {}\n", name))); } Ok(()) } pub fn write_run_finish(&mut self) -> io::Result { assert_eq!(self.passed + self.failed + self.ignored + self.measured, self.total); let success = self.failed == 0; if !success { try!(self.write_failures()); } try!(self.write_plain("\ntest result: ")); if success { // There's no parallelism at this point so it's safe to use color try!(self.write_pretty("ok")); } else { try!(self.write_pretty("FAILED")); } let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n", self.passed, self.failed, self.ignored, self.measured); try!(self.write_plain(&s)); Ok(success) } } // Format a number with thousands separators fn fmt_thousands_sep(mut n: usize, sep: char) -> String { use std::fmt::Write; let mut output = String::new(); let mut trailing = false; for &pow in &[9, 6, 3, 0] { let base = 10_usize.pow(pow); if pow == 0 || trailing || n / base != 0 { if !trailing { output.write_fmt(format_args!("{}", n / base)).unwrap(); } else { output.write_fmt(format_args!("{:03}", n / base)).unwrap(); } if pow != 0 { output.push(sep); } trailing = true; } n %= base; } output } pub fn fmt_bench_samples(bs: &BenchSamples) -> String { use std::fmt::Write; let mut output = String::new(); let median = bs.ns_iter_summ.median as usize; let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; output.write_fmt(format_args!("{:>11} ns/iter (+/- {})", fmt_thousands_sep(median, ','), fmt_thousands_sep(deviation, ','))) .unwrap(); if bs.mb_s != 0 { output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap(); } output } // A simple console test runner pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { fn callback(event: &TestEvent, st: &mut ConsoleTestState) -> io::Result<()> { match (*event).clone() { TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()), TeWait(ref test, padding) => st.write_test_start(test, padding), TeResult(test, result, _) => { try!(st.write_log(&test, &result)); try!(st.write_result(&result)); match result { TrIgnored => st.ignored += 1, TrBench(_) => { st.measured += 1 } } Ok(()) } } } let mut st = try!(ConsoleTestState::new(opts)); fn len_if_padded(t: &TestDescAndFn) -> usize { match t.testfn.padding() { PadOnRight => t.desc.name.len(), } } if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) { let n = &t.desc.name; 
st.max_name_len = n.len(); } try!(run_tests(opts, tests, |x| callback(&x, &mut st))); st.write_run_finish() } #[test] fn should_sort_failures_before_printing_them() { let test_a = TestDesc { name: Cow::from("a"), ignore: false, }; let test_b = TestDesc { name: Cow::from("b"), ignore: false, }; let mut st = ConsoleTestState { log_out: None, out: Raw(Vec::new()), quiet: false, total: 0, passed: 0, failed: 0, ignored: 0, measured: 0, max_name_len: 10, failures: vec![(test_b, Vec::new()), (test_a, Vec::new())], }; st.write_failures().unwrap(); let s = match st.out { Raw(ref m) => String::from_utf8_lossy(&m[..]), }; let apos = s.find("a").unwrap(); let bpos = s.find("b").unwrap(); assert!(apos < bpos); } #[derive(Clone)] enum TestEvent { TeFiltered(Vec), TeWait(TestDesc, NamePadding), TeResult(TestDesc, TestResult, Vec), } type MonitorMsg = (TestDesc, TestResult, Vec); fn run_tests(opts: &TestOpts, tests: Vec, mut callback: F) -> io::Result<()> where F: FnMut(TestEvent) -> io::Result<()> { let filtered_tests = filter_tests(opts, tests); let filtered_descs = filtered_tests.iter() .map(|t| t.desc.clone()) .collect(); try!(callback(TeFiltered(filtered_descs))); let filtered_benchs_and_metrics = filtered_tests; // All benchmarks run at the end, in serial. // (this includes metric fns) for b in filtered_benchs_and_metrics { try!(callback(TeWait(b.desc.clone(), b.testfn.padding()))); let (test, result, stdout) = run_test(opts, false, b); try!(callback(TeResult(test, result, stdout))); } Ok(()) } fn filter_tests(opts: &TestOpts, tests: Vec) -> Vec { let mut filtered = tests; // Remove tests that don't match the test filter filtered = match opts.filter { None => filtered, Some(ref filter) => { filtered.into_iter() .filter(|test| test.desc.name.contains(&filter[..])) .collect() } }; // Maybe pull out the ignored test and unignore them filtered = if !opts.run_ignored { filtered } else { fn filter(test: TestDescAndFn) -> Option { if test.desc.ignore { let TestDescAndFn {desc, testfn} = test; Some(TestDescAndFn { desc: TestDesc { ignore: false, ..desc }, testfn: testfn, }) } else { None } } filtered.into_iter().filter_map(filter).collect() }; // Sort the tests alphabetically filtered.sort_by(|t1, t2| t1.desc.name.cmp(&t2.desc.name)); filtered } fn run_test(_opts: &TestOpts, force_ignore: bool, test: TestDescAndFn) -> MonitorMsg { let TestDescAndFn {desc, testfn} = test; if force_ignore || desc.ignore { return (desc, TrIgnored, Vec::new()); } match testfn { DynBenchFn(bencher) => { let bs = ::bench::benchmark(|harness| bencher.run(harness)); (desc, TrBench(bs), Vec::new()) } StaticBenchFn(benchfn) => { let bs = ::bench::benchmark(|harness| benchfn(harness)); (desc, TrBench(bs), Vec::new()) } } } // Benchmarking // FIXME: We don't have black_box in stable rust /// NOTE: We don't have a proper black box in stable Rust. This is /// a workaround implementation, that may have a too big performance overhead, /// depending on operation, or it may fail to properly avoid having code /// optimized out. It is good enough that it is used by default. /// /// A function that is opaque to the optimizer, to allow benchmarks to /// pretend to use outputs to assist in avoiding dead-code /// elimination. pub fn black_box(dummy: T) -> T { unsafe { let ret = ptr::read_volatile(&dummy); forget(dummy); ret } } impl Bencher { /// Callback for benchmark functions to run in their body. 
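    ///
    /// The closure's return value is routed through `black_box` by the
    /// runner, so the computed result itself is not optimized away.
    ///
    /// A minimal sketch of a benchmark body (the allocation is just an
    /// illustrative workload):
    ///
    /// ```ignore
    /// fn alloc_1024(bench: &mut bencher::Bencher) {
    ///     bench.iter(|| vec![0u8; 1024]);
    /// }
    /// ```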
pub fn iter(&mut self, mut inner: F) where F: FnMut() -> T { let start = Instant::now(); let k = self.iterations; for _ in 0..k { black_box(inner()); } self.dur = start.elapsed(); } pub fn ns_elapsed(&mut self) -> u64 { self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64) } pub fn ns_per_iter(&mut self) -> u64 { if self.iterations == 0 { 0 } else { self.ns_elapsed() / cmp::max(self.iterations, 1) } } pub fn bench_n(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) { self.iterations = n; f(self); } // This is a more statistics-driven benchmark algorithm pub fn auto_bench(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) { // Initial bench run to get ballpark figure. let mut n = 1; self.bench_n(n, |x| f(x)); // Try to estimate iter count for 1ms falling back to 1m // iterations if first run took < 1ns. if self.ns_per_iter() == 0 { n = 1_000_000; } else { n = 1_000_000 / cmp::max(self.ns_per_iter(), 1); } // if the first run took more than 1ms we don't want to just // be left doing 0 iterations on every loop. The unfortunate // side effect of not being able to do as many runs is // automatically handled by the statistical analysis below // (i.e. larger error bars). if n == 0 { n = 1; } let mut total_run = Duration::new(0, 0); let samples: &mut [f64] = &mut [0.0_f64; 50]; loop { let loop_start = Instant::now(); for p in &mut *samples { self.bench_n(n, |x| f(x)); *p = self.ns_per_iter() as f64; } stats::winsorize(samples, 5.0); let summ = stats::Summary::new(samples); for p in &mut *samples { self.bench_n(5 * n, |x| f(x)); *p = self.ns_per_iter() as f64; } stats::winsorize(samples, 5.0); let summ5 = stats::Summary::new(samples); let loop_run = loop_start.elapsed(); // If we've run for 100ms and seem to have converged to a // stable median. if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 && summ.median - summ5.median < summ5.median_abs_dev { return summ5; } total_run += loop_run; // Longest we ever run for is 3s. if total_run > Duration::from_secs(3) { return summ5; } // If we overflow here just return the results so far. We check a // multiplier of 10 because we're about to multiply by 2 and the // next iteration of the loop will also multiply by 5 (to calculate // the summ5 result) n = match n.checked_mul(10) { Some(_) => n * 2, None => return summ5, }; } } } pub mod bench { use std::cmp; use std::time::Duration; use super::{Bencher, BenchSamples}; pub fn benchmark(f: F) -> BenchSamples where F: FnMut(&mut Bencher) { let mut bs = Bencher { iterations: 0, dur: Duration::new(0, 0), bytes: 0, }; let ns_iter_summ = bs.auto_bench(f); let ns_iter = cmp::max(ns_iter_summ.median as u64, 1); let mb_s = bs.bytes * 1000 / ns_iter; BenchSamples { ns_iter_summ: ns_iter_summ, mb_s: mb_s as usize, } } pub fn run_once(f: F) where F: FnOnce(&mut Bencher) { let mut bs = Bencher { iterations: 0, dur: Duration::new(0, 0), bytes: 0, }; bs.bench_n(1, f); } } bencher-0.1.5/macros.rs010064400017510001751000000034661322425607000132120ustar0000000000000000 /// Defines a function called `$group_name` that returns the test description /// values for the listed functions `$function`. #[macro_export] macro_rules! 
benchmark_group { ($group_name:ident, $($function:path),+) => { pub fn $group_name() -> ::std::vec::Vec<$crate::TestDescAndFn> { use $crate::{TestDescAndFn, TestFn, TestDesc}; use std::borrow::Cow; let mut benches = ::std::vec::Vec::new(); $( benches.push(TestDescAndFn { desc: TestDesc { name: Cow::from(stringify!($function)), ignore: false, }, testfn: TestFn::StaticBenchFn($function), }); )+ benches } }; ($group_name:ident, $($function:path,)+) => { benchmark_group!($group_name, $($function),+); }; } /// Define a `fn main()` that will run all benchmarks defined by the groups /// in `$group_name`. /// /// The main function will read the first argument from the console and use /// it to filter the benchmarks to run. #[macro_export] macro_rules! benchmark_main { ($($group_name:path),+) => { fn main() { use $crate::TestOpts; use $crate::run_tests_console; let mut test_opts = TestOpts::default(); // check to see if we should filter: if let Some(arg) = ::std::env::args().skip(1).find(|arg| *arg != "--bench") { test_opts.filter = Some(arg); } let mut benches = Vec::new(); $( benches.extend($group_name()); )+ run_tests_console(&test_opts, benches).unwrap(); } }; ($($group_name:path,)+) => { benchmark_main!($($group_name),+); }; } bencher-0.1.5/stats.rs010064400017510001751000000712511302510434500130550ustar0000000000000000// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 or the MIT license // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(missing_docs)] #![allow(deprecated)] // Float use std::cmp::Ordering::{self, Equal, Greater, Less}; use std::mem; fn local_cmp(x: f64, y: f64) -> Ordering { // arbitrarily decide that NaNs are larger than everything. if y.is_nan() { Less } else if x.is_nan() { Greater } else if x < y { Less } else if x == y { Equal } else { Greater } } fn local_sort(v: &mut [f64]) { v.sort_by(|x: &f64, y: &f64| local_cmp(*x, *y)); } /// Trait that provides simple descriptive statistics on a univariate set of numeric samples. pub trait Stats { /// Sum of the samples. /// /// Note: this method sacrifices performance at the altar of accuracy /// Depends on IEEE-754 arithmetic guarantees. See proof of correctness at: /// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric Predicates"] /// (http://www.cs.cmu.edu/~quake-papers/robust-arithmetic.ps) fn sum(&self) -> f64; /// Minimum value of the samples. fn min(&self) -> f64; /// Maximum value of the samples. fn max(&self) -> f64; /// Arithmetic mean (average) of the samples: sum divided by sample-count. /// /// See: https://en.wikipedia.org/wiki/Arithmetic_mean fn mean(&self) -> f64; /// Median of the samples: value separating the lower half of the samples from the higher half. /// Equal to `self.percentile(50.0)`. /// /// See: https://en.wikipedia.org/wiki/Median fn median(&self) -> f64; /// Variance of the samples: bias-corrected mean of the squares of the differences of each /// sample from the sample mean. Note that this calculates the _sample variance_ rather than the /// population variance, which is assumed to be unknown. It therefore corrects the `(n-1)/n` /// bias that would appear if we calculated a population variance, by dividing by `(n-1)` rather /// than `n`. 
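    ///
    /// Concretely, for samples `x_1 .. x_n` with mean `m`, this computes
    /// `sum((x_i - m)^2) / (n - 1)` (plain-text notation for the formula the
    /// slice implementation below uses).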
/// /// See: https://en.wikipedia.org/wiki/Variance fn var(&self) -> f64; /// Standard deviation: the square root of the sample variance. /// /// Note: this is not a robust statistic for non-normal distributions. Prefer the /// `median_abs_dev` for unknown distributions. /// /// See: https://en.wikipedia.org/wiki/Standard_deviation fn std_dev(&self) -> f64; /// Standard deviation as a percent of the mean value. See `std_dev` and `mean`. /// /// Note: this is not a robust statistic for non-normal distributions. Prefer the /// `median_abs_dev_pct` for unknown distributions. fn std_dev_pct(&self) -> f64; /// Scaled median of the absolute deviations of each sample from the sample median. This is a /// robust (distribution-agnostic) estimator of sample variability. Use this in preference to /// `std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled /// by the constant `1.4826` to allow its use as a consistent estimator for the standard /// deviation. /// /// See: http://en.wikipedia.org/wiki/Median_absolute_deviation fn median_abs_dev(&self) -> f64; /// Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`. fn median_abs_dev_pct(&self) -> f64; /// Percentile: the value below which `pct` percent of the values in `self` fall. For example, /// percentile(95.0) will return the value `v` such that 95% of the samples `s` in `self` /// satisfy `s <= v`. /// /// Calculated by linear interpolation between closest ranks. /// /// See: http://en.wikipedia.org/wiki/Percentile fn percentile(&self, pct: f64) -> f64; /// Quartiles of the sample: three values that divide the sample into four equal groups, each /// with 1/4 of the data. The middle value is the median. See `median` and `percentile`. This /// function may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but /// is otherwise equivalent. /// /// See also: https://en.wikipedia.org/wiki/Quartile fn quartiles(&self) -> (f64, f64, f64); /// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th /// percentile (3rd quartile). See `quartiles`. /// /// See also: https://en.wikipedia.org/wiki/Interquartile_range fn iqr(&self) -> f64; } /// Extracted collection of all the summary statistics of a sample set. #[derive(Clone, PartialEq)] #[allow(missing_docs)] pub struct Summary { pub sum: f64, pub min: f64, pub max: f64, pub mean: f64, pub median: f64, pub var: f64, pub std_dev: f64, pub std_dev_pct: f64, pub median_abs_dev: f64, pub median_abs_dev_pct: f64, pub quartiles: (f64, f64, f64), pub iqr: f64, } impl Summary { /// Construct a new summary of a sample set. pub fn new(samples: &[f64]) -> Summary { Summary { sum: samples.sum(), min: samples.min(), max: samples.max(), mean: samples.mean(), median: samples.median(), var: samples.var(), std_dev: samples.std_dev(), std_dev_pct: samples.std_dev_pct(), median_abs_dev: samples.median_abs_dev(), median_abs_dev_pct: samples.median_abs_dev_pct(), quartiles: samples.quartiles(), iqr: samples.iqr(), } } } impl Stats for [f64] { // FIXME #11059 handle NaN, inf and overflow fn sum(&self) -> f64 { let mut partials = vec![]; for &x in self { let mut x = x; let mut j = 0; // This inner loop applies `hi`/`lo` summation to each // partial so that the list of partial sums remains exact. for i in 0..partials.len() { let mut y: f64 = partials[i]; if x.abs() < y.abs() { mem::swap(&mut x, &mut y); } // Rounded `x+y` is stored in `hi` with round-off stored in // `lo`. 
Together `hi+lo` are exactly equal to `x+y`. let hi = x + y; let lo = y - (hi - x); if lo != 0.0 { partials[j] = lo; j += 1; } x = hi; } if j >= partials.len() { partials.push(x); } else { partials[j] = x; partials.truncate(j + 1); } } let zero: f64 = 0.0; partials.iter().fold(zero, |p, q| p + *q) } fn min(&self) -> f64 { assert!(!self.is_empty()); self.iter().fold(self[0], |p, q| p.min(*q)) } fn max(&self) -> f64 { assert!(!self.is_empty()); self.iter().fold(self[0], |p, q| p.max(*q)) } fn mean(&self) -> f64 { assert!(!self.is_empty()); self.sum() / (self.len() as f64) } fn median(&self) -> f64 { self.percentile(50 as f64) } fn var(&self) -> f64 { if self.len() < 2 { 0.0 } else { let mean = self.mean(); let mut v: f64 = 0.0; for s in self { let x = *s - mean; v += x * x; } // NB: this is _supposed to be_ len-1, not len. If you // change it back to len, you will be calculating a // population variance, not a sample variance. let denom = (self.len() - 1) as f64; v / denom } } fn std_dev(&self) -> f64 { self.var().sqrt() } fn std_dev_pct(&self) -> f64 { let hundred = 100 as f64; (self.std_dev() / self.mean()) * hundred } fn median_abs_dev(&self) -> f64 { let med = self.median(); let abs_devs: Vec = self.iter().map(|&v| (med - v).abs()).collect(); // This constant is derived by smarter statistics brains than me, but it is // consistent with how R and other packages treat the MAD. let number = 1.4826; abs_devs.median() * number } fn median_abs_dev_pct(&self) -> f64 { let hundred = 100 as f64; (self.median_abs_dev() / self.median()) * hundred } fn percentile(&self, pct: f64) -> f64 { let mut tmp = self.to_vec(); local_sort(&mut tmp); percentile_of_sorted(&tmp, pct) } fn quartiles(&self) -> (f64, f64, f64) { let mut tmp = self.to_vec(); local_sort(&mut tmp); let first = 25f64; let a = percentile_of_sorted(&tmp, first); let secound = 50f64; let b = percentile_of_sorted(&tmp, secound); let third = 75f64; let c = percentile_of_sorted(&tmp, third); (a, b, c) } fn iqr(&self) -> f64 { let (a, _, c) = self.quartiles(); c - a } } // Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using // linear interpolation. If samples are not sorted, return nonsensical value. fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 { assert!(!sorted_samples.is_empty()); if sorted_samples.len() == 1 { return sorted_samples[0]; } let zero: f64 = 0.0; assert!(zero <= pct); let hundred = 100f64; assert!(pct <= hundred); if pct == hundred { return sorted_samples[sorted_samples.len() - 1]; } let length = (sorted_samples.len() - 1) as f64; let rank = (pct / hundred) * length; let lrank = rank.floor(); let d = rank - lrank; let n = lrank as usize; let lo = sorted_samples[n]; let hi = sorted_samples[n + 1]; lo + (hi - lo) * d } /// Winsorize a set of samples, replacing values above the `100-pct` percentile /// and below the `pct` percentile with those percentiles themselves. This is a /// way of minimizing the effect of outliers, at the cost of biasing the sample. /// It differs from trimming in that it does not change the number of samples, /// just changes the values of those that are outliers. 
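///
/// A small worked sketch using the percentile interpolation in this module
/// (illustrative numbers only): `winsorize(&mut [1.0, 2.0, 3.0, 100.0], 25.0)`
/// clamps the low end up to the 25th percentile (1.75) and the high end down
/// to the 75th percentile (27.25), leaving `[1.75, 2.0, 3.0, 27.25]`.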
/// /// See: http://en.wikipedia.org/wiki/Winsorising pub fn winsorize(samples: &mut [f64], pct: f64) { let mut tmp = samples.to_vec(); local_sort(&mut tmp); let lo = percentile_of_sorted(&tmp, pct); let hundred = 100 as f64; let hi = percentile_of_sorted(&tmp, hundred - pct); for samp in samples { if *samp > hi { *samp = hi } else if *samp < lo { *samp = lo } } } // Test vectors generated from R, using the script src/etc/stat-test-vectors.r. #[cfg(test)] mod tests { use stats::Stats; use stats::Summary; use std::f64; use std::io::prelude::*; use std::io; macro_rules! assert_approx_eq { ($a:expr, $b:expr) => ({ let (a, b) = (&$a, &$b); assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b); }) } fn check(samples: &[f64], summ: &Summary) { let summ2 = Summary::new(samples); let mut w = io::sink(); let w = &mut w; (write!(w, "\n")).unwrap(); assert_eq!(summ.sum, summ2.sum); assert_eq!(summ.min, summ2.min); assert_eq!(summ.max, summ2.max); assert_eq!(summ.mean, summ2.mean); assert_eq!(summ.median, summ2.median); // We needed a few more digits to get exact equality on these // but they're within float epsilon, which is 1.0e-6. assert_approx_eq!(summ.var, summ2.var); assert_approx_eq!(summ.std_dev, summ2.std_dev); assert_approx_eq!(summ.std_dev_pct, summ2.std_dev_pct); assert_approx_eq!(summ.median_abs_dev, summ2.median_abs_dev); assert_approx_eq!(summ.median_abs_dev_pct, summ2.median_abs_dev_pct); assert_eq!(summ.quartiles, summ2.quartiles); assert_eq!(summ.iqr, summ2.iqr); } #[test] fn test_min_max_nan() { let xs = &[1.0, 2.0, f64::NAN, 3.0, 4.0]; let summary = Summary::new(xs); assert_eq!(summary.min, 1.0); assert_eq!(summary.max, 4.0); } #[test] fn test_norm2() { let val = &[958.0000000000, 924.0000000000]; let summ = &Summary { sum: 1882.0000000000, min: 924.0000000000, max: 958.0000000000, mean: 941.0000000000, median: 941.0000000000, var: 578.0000000000, std_dev: 24.0416305603, std_dev_pct: 2.5549022912, median_abs_dev: 25.2042000000, median_abs_dev_pct: 2.6784484591, quartiles: (932.5000000000, 941.0000000000, 949.5000000000), iqr: 17.0000000000, }; check(val, summ); } #[test] fn test_norm10narrow() { let val = &[966.0000000000, 985.0000000000, 1110.0000000000, 848.0000000000, 821.0000000000, 975.0000000000, 962.0000000000, 1157.0000000000, 1217.0000000000, 955.0000000000]; let summ = &Summary { sum: 9996.0000000000, min: 821.0000000000, max: 1217.0000000000, mean: 999.6000000000, median: 970.5000000000, var: 16050.7111111111, std_dev: 126.6914010938, std_dev_pct: 12.6742097933, median_abs_dev: 102.2994000000, median_abs_dev_pct: 10.5408964451, quartiles: (956.7500000000, 970.5000000000, 1078.7500000000), iqr: 122.0000000000, }; check(val, summ); } #[test] fn test_norm10medium() { let val = &[954.0000000000, 1064.0000000000, 855.0000000000, 1000.0000000000, 743.0000000000, 1084.0000000000, 704.0000000000, 1023.0000000000, 357.0000000000, 869.0000000000]; let summ = &Summary { sum: 8653.0000000000, min: 357.0000000000, max: 1084.0000000000, mean: 865.3000000000, median: 911.5000000000, var: 48628.4555555556, std_dev: 220.5186059170, std_dev_pct: 25.4846418487, median_abs_dev: 195.7032000000, median_abs_dev_pct: 21.4704552935, quartiles: (771.0000000000, 911.5000000000, 1017.2500000000), iqr: 246.2500000000, }; check(val, summ); } #[test] fn test_norm10wide() { let val = &[505.0000000000, 497.0000000000, 1591.0000000000, 887.0000000000, 1026.0000000000, 136.0000000000, 1580.0000000000, 940.0000000000, 754.0000000000, 1433.0000000000]; let summ = &Summary { sum: 
9349.0000000000, min: 136.0000000000, max: 1591.0000000000, mean: 934.9000000000, median: 913.5000000000, var: 239208.9888888889, std_dev: 489.0899599142, std_dev_pct: 52.3146817750, median_abs_dev: 611.5725000000, median_abs_dev_pct: 66.9482758621, quartiles: (567.2500000000, 913.5000000000, 1331.2500000000), iqr: 764.0000000000, }; check(val, summ); } #[test] fn test_norm25verynarrow() { let val = &[991.0000000000, 1018.0000000000, 998.0000000000, 1013.0000000000, 974.0000000000, 1007.0000000000, 1014.0000000000, 999.0000000000, 1011.0000000000, 978.0000000000, 985.0000000000, 999.0000000000, 983.0000000000, 982.0000000000, 1015.0000000000, 1002.0000000000, 977.0000000000, 948.0000000000, 1040.0000000000, 974.0000000000, 996.0000000000, 989.0000000000, 1015.0000000000, 994.0000000000, 1024.0000000000]; let summ = &Summary { sum: 24926.0000000000, min: 948.0000000000, max: 1040.0000000000, mean: 997.0400000000, median: 998.0000000000, var: 393.2066666667, std_dev: 19.8294393937, std_dev_pct: 1.9888308788, median_abs_dev: 22.2390000000, median_abs_dev_pct: 2.2283567134, quartiles: (983.0000000000, 998.0000000000, 1013.0000000000), iqr: 30.0000000000, }; check(val, summ); } #[test] fn test_exp10a() { let val = &[23.0000000000, 11.0000000000, 2.0000000000, 57.0000000000, 4.0000000000, 12.0000000000, 5.0000000000, 29.0000000000, 3.0000000000, 21.0000000000]; let summ = &Summary { sum: 167.0000000000, min: 2.0000000000, max: 57.0000000000, mean: 16.7000000000, median: 11.5000000000, var: 287.7888888889, std_dev: 16.9643416875, std_dev_pct: 101.5828843560, median_abs_dev: 13.3434000000, median_abs_dev_pct: 116.0295652174, quartiles: (4.2500000000, 11.5000000000, 22.5000000000), iqr: 18.2500000000, }; check(val, summ); } #[test] fn test_exp10b() { let val = &[24.0000000000, 17.0000000000, 6.0000000000, 38.0000000000, 25.0000000000, 7.0000000000, 51.0000000000, 2.0000000000, 61.0000000000, 32.0000000000]; let summ = &Summary { sum: 263.0000000000, min: 2.0000000000, max: 61.0000000000, mean: 26.3000000000, median: 24.5000000000, var: 383.5666666667, std_dev: 19.5848580967, std_dev_pct: 74.4671410520, median_abs_dev: 22.9803000000, median_abs_dev_pct: 93.7971428571, quartiles: (9.5000000000, 24.5000000000, 36.5000000000), iqr: 27.0000000000, }; check(val, summ); } #[test] fn test_exp10c() { let val = &[71.0000000000, 2.0000000000, 32.0000000000, 1.0000000000, 6.0000000000, 28.0000000000, 13.0000000000, 37.0000000000, 16.0000000000, 36.0000000000]; let summ = &Summary { sum: 242.0000000000, min: 1.0000000000, max: 71.0000000000, mean: 24.2000000000, median: 22.0000000000, var: 458.1777777778, std_dev: 21.4050876611, std_dev_pct: 88.4507754589, median_abs_dev: 21.4977000000, median_abs_dev_pct: 97.7168181818, quartiles: (7.7500000000, 22.0000000000, 35.0000000000), iqr: 27.2500000000, }; check(val, summ); } #[test] fn test_exp25() { let val = &[3.0000000000, 24.0000000000, 1.0000000000, 19.0000000000, 7.0000000000, 5.0000000000, 30.0000000000, 39.0000000000, 31.0000000000, 13.0000000000, 25.0000000000, 48.0000000000, 1.0000000000, 6.0000000000, 42.0000000000, 63.0000000000, 2.0000000000, 12.0000000000, 108.0000000000, 26.0000000000, 1.0000000000, 7.0000000000, 44.0000000000, 25.0000000000, 11.0000000000]; let summ = &Summary { sum: 593.0000000000, min: 1.0000000000, max: 108.0000000000, mean: 23.7200000000, median: 19.0000000000, var: 601.0433333333, std_dev: 24.5161851301, std_dev_pct: 103.3565983562, median_abs_dev: 19.2738000000, median_abs_dev_pct: 101.4410526316, quartiles: (6.0000000000, 
19.0000000000, 31.0000000000), iqr: 25.0000000000, }; check(val, summ); } #[test] fn test_binom25() { let val = &[18.0000000000, 17.0000000000, 27.0000000000, 15.0000000000, 21.0000000000, 25.0000000000, 17.0000000000, 24.0000000000, 25.0000000000, 24.0000000000, 26.0000000000, 26.0000000000, 23.0000000000, 15.0000000000, 23.0000000000, 17.0000000000, 18.0000000000, 18.0000000000, 21.0000000000, 16.0000000000, 15.0000000000, 31.0000000000, 20.0000000000, 17.0000000000, 15.0000000000]; let summ = &Summary { sum: 514.0000000000, min: 15.0000000000, max: 31.0000000000, mean: 20.5600000000, median: 20.0000000000, var: 20.8400000000, std_dev: 4.5650848842, std_dev_pct: 22.2037202539, median_abs_dev: 5.9304000000, median_abs_dev_pct: 29.6520000000, quartiles: (17.0000000000, 20.0000000000, 24.0000000000), iqr: 7.0000000000, }; check(val, summ); } #[test] fn test_pois25lambda30() { let val = &[27.0000000000, 33.0000000000, 34.0000000000, 34.0000000000, 24.0000000000, 39.0000000000, 28.0000000000, 27.0000000000, 31.0000000000, 28.0000000000, 38.0000000000, 21.0000000000, 33.0000000000, 36.0000000000, 29.0000000000, 37.0000000000, 32.0000000000, 34.0000000000, 31.0000000000, 39.0000000000, 25.0000000000, 31.0000000000, 32.0000000000, 40.0000000000, 24.0000000000]; let summ = &Summary { sum: 787.0000000000, min: 21.0000000000, max: 40.0000000000, mean: 31.4800000000, median: 32.0000000000, var: 26.5933333333, std_dev: 5.1568724372, std_dev_pct: 16.3814245145, median_abs_dev: 5.9304000000, median_abs_dev_pct: 18.5325000000, quartiles: (28.0000000000, 32.0000000000, 34.0000000000), iqr: 6.0000000000, }; check(val, summ); } #[test] fn test_pois25lambda40() { let val = &[42.0000000000, 50.0000000000, 42.0000000000, 46.0000000000, 34.0000000000, 45.0000000000, 34.0000000000, 49.0000000000, 39.0000000000, 28.0000000000, 40.0000000000, 35.0000000000, 37.0000000000, 39.0000000000, 46.0000000000, 44.0000000000, 32.0000000000, 45.0000000000, 42.0000000000, 37.0000000000, 48.0000000000, 42.0000000000, 33.0000000000, 42.0000000000, 48.0000000000]; let summ = &Summary { sum: 1019.0000000000, min: 28.0000000000, max: 50.0000000000, mean: 40.7600000000, median: 42.0000000000, var: 34.4400000000, std_dev: 5.8685603004, std_dev_pct: 14.3978417577, median_abs_dev: 5.9304000000, median_abs_dev_pct: 14.1200000000, quartiles: (37.0000000000, 42.0000000000, 45.0000000000), iqr: 8.0000000000, }; check(val, summ); } #[test] fn test_pois25lambda50() { let val = &[45.0000000000, 43.0000000000, 44.0000000000, 61.0000000000, 51.0000000000, 53.0000000000, 59.0000000000, 52.0000000000, 49.0000000000, 51.0000000000, 51.0000000000, 50.0000000000, 49.0000000000, 56.0000000000, 42.0000000000, 52.0000000000, 51.0000000000, 43.0000000000, 48.0000000000, 48.0000000000, 50.0000000000, 42.0000000000, 43.0000000000, 42.0000000000, 60.0000000000]; let summ = &Summary { sum: 1235.0000000000, min: 42.0000000000, max: 61.0000000000, mean: 49.4000000000, median: 50.0000000000, var: 31.6666666667, std_dev: 5.6273143387, std_dev_pct: 11.3913245723, median_abs_dev: 4.4478000000, median_abs_dev_pct: 8.8956000000, quartiles: (44.0000000000, 50.0000000000, 52.0000000000), iqr: 8.0000000000, }; check(val, summ); } #[test] fn test_unif25() { let val = &[99.0000000000, 55.0000000000, 92.0000000000, 79.0000000000, 14.0000000000, 2.0000000000, 33.0000000000, 49.0000000000, 3.0000000000, 32.0000000000, 84.0000000000, 59.0000000000, 22.0000000000, 86.0000000000, 76.0000000000, 31.0000000000, 29.0000000000, 11.0000000000, 41.0000000000, 53.0000000000, 
45.0000000000, 44.0000000000, 98.0000000000, 98.0000000000, 7.0000000000]; let summ = &Summary { sum: 1242.0000000000, min: 2.0000000000, max: 99.0000000000, mean: 49.6800000000, median: 45.0000000000, var: 1015.6433333333, std_dev: 31.8691595957, std_dev_pct: 64.1488719719, median_abs_dev: 45.9606000000, median_abs_dev_pct: 102.1346666667, quartiles: (29.0000000000, 45.0000000000, 79.0000000000), iqr: 50.0000000000, }; check(val, summ); } #[test] fn test_sum_f64s() { assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999); } #[test] fn test_sum_f64_between_ints_that_sum_to_0() { assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2); } }