tiny-bench-0.3.0/.cargo_vcs_info.json0000644000000001500000000000100130700ustar { "git": { "sha1": "b13eece21ebf18266112f229b0be5913dd1273ad" }, "path_in_vcs": "tiny-bench" }tiny-bench-0.3.0/Cargo.lock0000644000000002320000000000100110440ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "tiny-bench" version = "0.3.0" tiny-bench-0.3.0/Cargo.toml0000644000000023160000000000100110740ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "tiny-bench" version = "0.3.0" authors = ["Embark "] description = "A tiny benchmarking library" homepage = "https://github.com/EmbarkStudios/tiny-bench" readme = "README.md" keywords = ["benchmarking"] license = "MIT OR Apache-2.0" repository = "https://github.com/EmbarkStudios/tiny-bench" resolver = "1" [[example]] name = "time_iterator" required-features = ["timer"] [[example]] name = "time_loop" required-features = ["timer"] [[example]] name = "bad_sort" required-features = ["bench"] [[example]] name = "bench_compare" required-features = ["bench"] [[bench]] name = "benchmark" harness = false [dependencies] [features] bench = [] default = [ "timer", "bench", ] timer = [] tiny-bench-0.3.0/Cargo.toml.orig000064400000000000000000000015031046102023000145520ustar 00000000000000[package] name = "tiny-bench" version = "0.3.0" edition = "2021" authors = ["Embark "] license = "MIT OR Apache-2.0" description = "A tiny benchmarking library" readme = "../README.md" homepage = "https://github.com/EmbarkStudios/tiny-bench" repository = "https://github.com/EmbarkStudios/tiny-bench" keywords = ["benchmarking"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] default = ["timer", "bench"] timer = [] bench = [] [dependencies] [[bench]] name = "benchmark" harness = false [[example]] name = "time_iterator" required-features = ["timer"] [[example]] name = "time_loop" required-features = ["timer"] [[example]] name = "bad_sort" required-features = ["bench"] [[example]] name = "bench_compare" required-features = ["bench"] tiny-bench-0.3.0/README.md000064400000000000000000000170451046102023000131520ustar 00000000000000
# `🛠 tiny-bench` **A tiny benchmarking library** [![Embark](https://img.shields.io/badge/embark-open%20source-blueviolet.svg)](https://embark.dev) [![Embark](https://img.shields.io/badge/discord-ark-%237289da.svg?logo=discord)](https://discord.gg/dAuKfZS) [![Crates.io](https://img.shields.io/crates/v/tiny-bench.svg)](https://crates.io/crates/tiny-bench) [![Docs](https://docs.rs/tiny-bench/badge.svg)](https://docs.rs/tiny-bench) [![dependency status](https://deps.rs/repo/github/EmbarkStudios/tiny-bench/status.svg)](https://deps.rs/repo/github/EmbarkStudios/tiny-bench) [![Build status](https://github.com/EmbarkStudios/tiny-bench/workflows/CI/badge.svg)](https://github.com/EmbarkStudios/tiny-bench/actions)
## The library

A benchmarking and timing library inspired by [Criterion](https://github.com/bheisler/criterion.rs).
Inspired in this case means copying the things that Criterion does well (and I do mean ctrl-c), like statistical analysis of results, trimming that down, and leaving much of the customizability out.

[Criterion](https://github.com/bheisler/criterion.rs) is MIT-licensed; please see the license at that repo or [here](tiny-bench/src/benching/criterion/CRITERION-LICENSE-MIT).

## Primary goals

* Reliable results
* Fast build
* No dependencies
* Simple code that anyone can read, understand, and modify

## Purpose

Sometimes you just need some back-of-the-envelope calculations of how long something takes.
This library aims to fulfill that need and not much else.

The aim of the benchmarking is to be accurate enough to deliver reliable benchmarks with a minimal footprint, so that you can easily get a sense of whether you're going down a bad path.

The aim of the timing is to provide something that lets you figure out the same thing, with the caveat of not being as reliable.
It times some code so that you can get a sense of how much time pieces of your code take to run.

## Caveats

This library does not aim to provide production-grade analysis tooling.
It just prints data to stdout to guide you.
If you need advanced analysis, [Criterion](https://github.com/bheisler/criterion.rs) has tooling better suited to that.
If you need to find where your application spends its time, [flamegraph](https://github.com/flamegraph-rs/flamegraph) may be better suited for that.
If you need to track single pieces of your application while it's running, [Tracing](https://github.com/tokio-rs/tracing) may be better suited for that.
Lastly, if you want an even smaller benchmarking library, check out [benchmark-simple](https://github.com/jedisct1/rust-benchmark-simple).

## Unimplemented

There are a few statistical measures that would be nice to have but are limited by the methods used by this library.
Since it potentially runs billions of iterations, calculating statistics that require seeing every individual iteration, such as the median, standard deviation, and percentiles, is not feasible without caching data to disk.
Therefore, measures like variance or median are prefixed with "sample", as they are not derived from individual iteration times but from a comparison between samples.

There is no arg-parsing or bench-matching in this library, so you can't filter which benches run from the `cargo bench` command line.
Instead, the user puts the different benches into functions and adds or removes those functions from the bench target's `main`, as sketched below.
The reason for this is that arg-parsing and bench-matching libraries are heavyweight and would likely require some macros to select which benches to run, which decreases readability and understandability.
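A bench target's `main` then simply calls the bench functions directly, much like this crate's own `benches/benchmark.rs` does. The sketch below is illustrative only: the labels, function names, and closure bodies are made up, and it assumes a `[[bench]]` target with `harness = false` like the one declared in this crate's `Cargo.toml`.

```Rust
use tiny_bench::black_box;

fn main() {
    // Comment a call out to skip that bench on the next run.
    bench_vec_push();
    bench_sum();
}

fn bench_vec_push() {
    tiny_bench::bench_labeled("vec_push", || {
        let mut v = Vec::new();
        for i in 0..black_box(1_000) {
            v.push(i);
        }
        black_box(v);
    });
}

fn bench_sum() {
    tiny_bench::bench_labeled("sum", || {
        let sum: i64 = (0..black_box(1_000i64)).sum();
        black_box(sum);
    });
}
```

Since each bench gets its own label, results are persisted per label and compared against the previous run the next time that bench executes.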
## Examples

### Getting a hint of what parts of your application take time

"I have this iterator, and I'd like to get some sense of how long it takes to complete"

```Rust
use std::time::Duration;
use tiny_bench::Timeable;

pub fn main() {
    let v = (0..100)
        .map(|a| {
            my_expensive_call();
            a
        })
        .timed()
        .max();
    assert_eq!(99, v.unwrap());
    // prints:
    // anonymous [100.0 iterations in 512.25ms]:
    // elapsed [min mean max]: [5.06ms 5.12ms 5.20ms]
}

fn my_expensive_call() {
    std::thread::sleep(Duration::from_millis(5));
}
```

"I have this loop that has side effects, and I'd like to time its execution"

```Rust
use tiny_bench::run_timed_from_iterator;

fn main() {
    let generator = 0..100;
    let mut spooky_calculation = 0;
    let results = run_timed_from_iterator(generator, |i| {
        spooky_calculation += i;
    });
    results.pretty_print();
    assert_eq!(4950, spooky_calculation);
}
```

### More involved comparisons

"My algorithm is pretty stupid, but I'm only sorting vectors with a max-length of 5, so maybe it doesn't matter in the grand scheme of things"

```Rust
use tiny_bench::BenchmarkConfig;

fn main() {
    let v = vec![10, 5, 3, 8, 7, 5];
    tiny_bench::bench_with_configuration(&BenchmarkConfig::default(), || {
        let sorted = bad_sort(v.clone());
        assert_eq!(vec![3, 5, 5, 7, 8, 10], sorted);
    });
    // Prints:
    // anonymous [2.5M iterations in 4.99s with 100.0 samples]:
    // elapsed [min mean max]: [2.14µs 2.01µs 2.14µs]
}

fn bad_sort(mut v: Vec<u32>) -> Vec<u32> {
    let mut sorted = Vec::with_capacity(v.len());
    while !v.is_empty() {
        let mut min_val = u32::MAX;
        let mut min_index = 0;
        for i in 0..v.len() {
            if v[i] < min_val {
                min_index = i;
                min_val = v[i];
            }
        }
        sorted.push(min_val);
        v.remove(min_index);
    }
    sorted
}
```

"I'd like to compare different implementations with each other"

```Rust
use tiny_bench::black_box;

fn main() {
    // Results are compared by label
    let label = "compare_functions";
    tiny_bench::bench_labeled(label, my_slow_function);
    tiny_bench::bench_labeled(label, my_faster_function);
    // prints:
    // compare_functions [30.3 thousand iterations in 5.24s with 100.0 samples]:
    // elapsed [min mean max]: [246.33µs 175.51µs 246.33µs]
    // compare_functions [60.6 thousand iterations in 5.24s with 100.0 samples]:
    // elapsed [min mean max]: [87.67µs 86.42µs 87.67µs]
    // change [min mean max]: [-49.6111% -50.7620% -64.4102%] (p = 0.00)
}

fn my_slow_function() {
    let mut num_iters = 0;
    for _ in 0..10_000 {
        num_iters += black_box(1);
    }
    assert_eq!(10_000, black_box(num_iters))
}

fn my_faster_function() {
    let mut num_iters = 0;
    for _ in 0..5_000 {
        num_iters += black_box(1);
    }
    assert_eq!(5_000, black_box(num_iters))
}
```

## Contribution

[![Contributor Covenant](https://img.shields.io/badge/contributor%20covenant-v1.4-ff69b4.svg)](CODE_OF_CONDUCT.md)

We welcome community contributions to this project.

Please read our [Contributor Guide](CONTRIBUTING.md) for more information on how to get started.
Please also read our [Contributor Terms](CONTRIBUTING.md#contributor-terms) before you make any contributions.

Any contribution intentionally submitted for inclusion in an Embark Studios project shall comply with the Rust standard licensing model (MIT OR Apache 2.0) and therefore be dual licensed as described below, without any additional terms or conditions:

### License

This contribution is dual licensed under EITHER OF

* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)

at your option.

For clarity, "your" refers to Embark or any other licensee/user of the contribution.
tiny-bench-0.3.0/benches/benchmark.rs000064400000000000000000000025551046102023000156020ustar 00000000000000use std::time::Duration; use tiny_bench::{black_box, BenchmarkConfig}; fn main() { bench_test_one(); bench_test_two(); bench_test_three(); bench_test_four(); } fn bench_test_one() { tiny_bench::bench_labeled("test one", || { let mut v: Vec = Vec::with_capacity(10_000); for i in 0..black_box(10_000) { v.push(black_box(i)); } let mut sum = 0; for i in black_box(v) { sum += black_box(i); } assert!(sum >= black_box(1)); }); } fn bench_test_two() { tiny_bench::bench_with_setup_labeled( "test two", || { std::thread::sleep(Duration::from_micros(1)); let mut v: Vec = Vec::with_capacity(10_000); for i in 0..10_000 { v.push(black_box(i)); } v }, |v| { let mut sum = 0; for i in black_box(v) { sum += black_box(i); } assert!(sum >= black_box(1)); }, ); } fn bench_test_three() { tiny_bench::bench_labeled("test three, empty", || {}); } fn bench_test_four() { tiny_bench::bench_with_configuration_labeled( "test four, max_it", &BenchmarkConfig { max_iterations: Some(5000), ..BenchmarkConfig::default() }, || {}, ); } tiny-bench-0.3.0/examples/bad_sort.rs000064400000000000000000000013131046102023000156430ustar 00000000000000use tiny_bench::BenchmarkConfig; fn main() { let v = vec![10, 5, 3, 8, 7, 5]; tiny_bench::bench_with_configuration(&BenchmarkConfig::default(), || { let sorted = bad_sort(v.clone()); assert_eq!(vec![3, 5, 5, 7, 8, 10], sorted); }); } #[allow(clippy::needless_range_loop)] fn bad_sort(mut v: Vec) -> Vec { let mut sorted = Vec::with_capacity(v.len()); while !v.is_empty() { let mut min_val = u32::MAX; let mut min_index = 0; for i in 0..v.len() { if v[i] < min_val { min_index = i; min_val = v[i]; } } sorted.push(min_val); v.remove(min_index); } sorted } tiny-bench-0.3.0/examples/bench_compare.rs000064400000000000000000000010151046102023000166320ustar 00000000000000use tiny_bench::black_box; fn main() { let label = "compare_functions"; tiny_bench::bench_labeled(label, my_slow_function); tiny_bench::bench_labeled(label, my_faster_function); } fn my_slow_function() { let mut num_iters = 0; for _ in 0..10_000 { num_iters += black_box(1); } assert_eq!(10_000, black_box(num_iters)); } fn my_faster_function() { let mut num_iters = 0; for _ in 0..5_000 { num_iters += black_box(1); } assert_eq!(5_000, black_box(num_iters)); } tiny-bench-0.3.0/examples/time_iterator.rs000064400000000000000000000004671046102023000167260ustar 00000000000000use std::time::Duration; use tiny_bench::Timeable; pub fn main() { let v = (0..100) .map(|a| { my_expensive_call(); a }) .timed() .max(); assert_eq!(99, v.unwrap()); } fn my_expensive_call() { std::thread::sleep(Duration::from_millis(5)); } tiny-bench-0.3.0/examples/time_loop.rs000064400000000000000000000004421046102023000160370ustar 00000000000000use tiny_bench::run_timed_from_iterator; fn main() { let generator = 0..100; let mut spooky_calculation = 0; let results = run_timed_from_iterator(generator, |i| { spooky_calculation += i; }); results.pretty_print(); assert_eq!(4950, spooky_calculation); } tiny-bench-0.3.0/src/benching/mod.rs000064400000000000000000000231311046102023000153550ustar 00000000000000use crate::output::analysis::criterion::calculate_iterations; use crate::output::{ fallback_to_anonymous_on_invalid_label, fmt_num, fmt_time, wrap_bold_green, wrap_high_intensity_white, Output, }; use crate::{black_box, BenchmarkConfig}; use std::time::{Duration, Instant}; /// Will run the closure and print statistics from the benchmarking to stdout. 
/// Will persist results under the anonymous label which is shared, making comparisons impossible
/// if running more than one (different) benchmark on the same project, i.e. benching two different
/// functions
/// ```no_run
/// use tiny_bench::bench;
/// bench(|| {
///     // Some code that should be benched
/// })
/// ```
pub fn bench<T, F: FnMut() -> T>(closure: F) {
    bench_with_configuration(&BenchmarkConfig::default(), closure);
}

/// Will run the closure with a label, running with a label enables comparisons for subsequent runs.
/// ```no_run
/// use tiny_bench::bench_labeled;
/// bench_labeled("my_benchmark", || {
///     // Some code that should be benched
/// })
/// ```
pub fn bench_labeled<T, F: FnMut() -> T>(label: &'static str, closure: F) {
    bench_with_configuration_labeled(label, &BenchmarkConfig::default(), closure);
}

/// Will run the benchmark with the supplied configuration
/// ```no_run
/// use std::time::Duration;
/// use tiny_bench::{bench_with_configuration, BenchmarkConfig};
/// bench_with_configuration(&BenchmarkConfig {
///     measurement_time: Duration::from_secs(10),
///     ..BenchmarkConfig::default()
/// }, || {
///     // Some code that should be benched
/// })
/// ```
pub fn bench_with_configuration<T, F: FnMut() -> T>(cfg: &BenchmarkConfig, closure: F) {
    bench_with_configuration_labeled("anonymous", cfg, closure);
}

/// Will run the benchmark with the supplied configuration and a label
/// ```no_run
/// use tiny_bench::{bench_with_configuration_labeled, BenchmarkConfig};
/// bench_with_configuration_labeled("my_benchmark", &BenchmarkConfig::default(), || {
///     // Some code that should be benched
/// })
/// ```
pub fn bench_with_configuration_labeled<T, F: FnMut() -> T>(
    label: &'static str,
    cfg: &BenchmarkConfig,
    mut closure: F,
) {
    let label = fallback_to_anonymous_on_invalid_label(label);
    println!(
        "{} warming up for {}",
        wrap_bold_green(label),
        wrap_high_intensity_white(&fmt_time(cfg.warm_up_time.as_nanos() as f64))
    );
    let wu = run_warm_up(&mut closure, cfg.warm_up_time);
    let mean_execution_time = wu.elapsed.as_nanos() as f64 / wu.iterations as f64;
    let sample_size = cfg.num_samples as u64;
    let (iters, total_iters) =
        calculate_iters_and_total_iters(cfg, mean_execution_time, sample_size);
    println!(
        "{} mean warm up execution time {} running {} iterations",
        wrap_bold_green(label),
        wrap_high_intensity_white(&fmt_time(mean_execution_time)),
        wrap_high_intensity_white(&fmt_num(total_iters as f64))
    );
    let sampling_data = run(iters, closure);
    if cfg.dump_results_to_disk {
        crate::output::ComparedStdout.dump_sampling_data(label, &sampling_data, cfg, total_iters);
    } else {
        crate::output::SimpleStdout.dump_sampling_data(label, &sampling_data, cfg, total_iters);
    }
}

fn calculate_iters_and_total_iters(
    cfg: &BenchmarkConfig,
    mean_execution_time: f64,
    sample_size: u64,
) -> (Vec<u64>, u128) {
    if let Some(max_it) = cfg.max_iterations {
        (vec![max_it], u128::from(max_it))
    } else {
        let iters = calculate_iterations(mean_execution_time, sample_size, cfg.measurement_time);
        let mut total_iters = 0u128;
        for count in iters.iter().copied() {
            total_iters = total_iters.saturating_add(u128::from(count));
        }
        (iters, total_iters)
    }
}

fn run<T, F: FnMut() -> T>(sample_sizes: Vec<u64>, mut closure: F) -> SamplingData {
    let times = sample_sizes
        .iter()
        .copied()
        .map(|it_count| {
            let start = Instant::now();
            for _ in 0..it_count {
                black_box((closure)());
            }
            start.elapsed().as_nanos()
        })
        .collect();
    SamplingData {
        samples: sample_sizes,
        times,
    }
}

/// Fitting if some setup for the benchmark is required, and that setup should not be timed.
/// The setup will be run prior to each benchmarking run.
/// ```no_run /// use tiny_bench::{bench_with_configuration_labeled, BenchmarkConfig}; /// bench_with_configuration_labeled("my_benchmark", &BenchmarkConfig::default(), || { /// // Some code that should be benched /// }) /// ``` pub fn bench_with_setup T, S: FnMut() -> R>(setup: S, closure: F) { bench_with_setup_configuration_labeled( "anonymous", &BenchmarkConfig::default(), setup, closure, ); } /// Run bench with setup and a label /// ```no_run /// use std::time::Duration; /// use tiny_bench::{bench_with_setup_labeled, BenchmarkConfig}; /// bench_with_setup_labeled("my_benchmark", || std::thread::sleep(Duration::from_micros(5)), |_| { /// // Some code that should be benched /// }) /// ``` pub fn bench_with_setup_labeled T, S: FnMut() -> R>( label: &'static str, setup: S, closure: F, ) { bench_with_setup_configuration_labeled(label, &BenchmarkConfig::default(), setup, closure); } /// Run bench with setup and configuration /// ```no_run /// use std::time::Duration; /// use tiny_bench::{bench_with_setup_configuration, BenchmarkConfig}; /// bench_with_setup_configuration(&BenchmarkConfig::default(), || std::thread::sleep(Duration::from_micros(5)), |_| { /// // Some code that should be benched /// }) /// ``` pub fn bench_with_setup_configuration T, S: FnMut() -> R>( cfg: &BenchmarkConfig, setup: S, closure: F, ) { bench_with_setup_configuration_labeled("anonymous", cfg, setup, closure); } /// Run bench with setup, configuration, and a label /// ```no_run /// use std::time::Duration; /// use tiny_bench::{bench_with_setup_configuration_labeled, BenchmarkConfig}; /// bench_with_setup_configuration_labeled("my_benchmark", &BenchmarkConfig::default(), || std::thread::sleep(Duration::from_micros(5)), |_| { /// // Some code that should be benched /// }) /// ``` pub fn bench_with_setup_configuration_labeled T, S: FnMut() -> R>( label: &'static str, cfg: &BenchmarkConfig, mut setup: S, mut closure: F, ) { let label = fallback_to_anonymous_on_invalid_label(label); let mut wu_routine = || { let input = (setup)(); (closure)(input); }; println!( "{} warming up for {}", wrap_bold_green(label), wrap_high_intensity_white(&fmt_time(cfg.warm_up_time.as_nanos() as f64)) ); let wu = run_warm_up(&mut wu_routine, cfg.warm_up_time); let mean_execution_time = wu.elapsed.as_nanos() as f64 / wu.iterations as f64; let sample_size = cfg.num_samples as u64; let (iters, total_iters) = calculate_iters_and_total_iters(cfg, mean_execution_time, sample_size); println!( "{} mean warm up execution time {} running {} iterations", wrap_bold_green(label), wrap_high_intensity_white(&fmt_time(mean_execution_time)), wrap_high_intensity_white(&fmt_num(total_iters as f64)) ); let sampling_data = run_with_setup(iters, setup, closure); if cfg.dump_results_to_disk { crate::output::ComparedStdout.dump_sampling_data(label, &sampling_data, cfg, total_iters); } else { crate::output::SimpleStdout.dump_sampling_data(label, &sampling_data, cfg, total_iters); } } fn run_with_setup T, S: FnMut() -> R>( sample_sizes: Vec, mut setup: S, mut closure: F, ) -> SamplingData { let times = sample_sizes .iter() .copied() .map(|it_count| { let mut elapsed = Duration::ZERO; for _ in 0..it_count { let input = (setup)(); let start = Instant::now(); black_box((closure)(input)); elapsed += Instant::now().duration_since(start); } elapsed.as_nanos() }) .collect(); SamplingData { samples: sample_sizes, times, } } fn run_warm_up T>(closure: &mut F, warmup_time: Duration) -> WarmupResults { let mut elapsed = Duration::ZERO; let mut iterations = 0u128; let mut 
run_iterations = 1u64; loop { let start = Instant::now(); for _ in 0..run_iterations { closure(); } elapsed += start.elapsed(); iterations += u128::from(run_iterations); run_iterations = run_iterations.wrapping_mul(2); if elapsed >= warmup_time { return WarmupResults { iterations, elapsed, }; } } } #[derive(Debug)] struct WarmupResults { iterations: u128, elapsed: Duration, } #[derive(Debug)] #[cfg(feature = "bench")] #[cfg_attr(test, derive(Eq, PartialEq))] pub(crate) struct SamplingData { pub(crate) samples: Vec, pub(crate) times: Vec, } #[cfg(test)] mod tests { use super::*; use std::time::Duration; #[test] fn benches() { let closure = || { let mut sum = 0; for _ in 0..100 { sum += black_box(1); } assert_eq!(black_box(100), sum); }; let cfg = BenchmarkConfig { measurement_time: Duration::from_millis(10), warm_up_time: Duration::from_millis(5), ..BenchmarkConfig::default() }; bench_with_configuration(&cfg, closure); } } tiny-bench-0.3.0/src/error.rs000064400000000000000000000006631046102023000141570ustar 00000000000000use std::fmt::{Display, Formatter}; pub(crate) type Result = std::result::Result; #[derive(Debug)] pub(crate) struct Error { msg: String, } impl Error { pub(crate) fn new(msg: impl Into) -> Self { Self { msg: msg.into() } } } impl Display for Error { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_str(&self.msg) } } impl std::error::Error for Error {} tiny-bench-0.3.0/src/lib.rs000064400000000000000000000024351046102023000135730ustar 00000000000000#![warn(clippy::pedantic)] #![allow(clippy::cast_precision_loss)] #![allow(clippy::cast_possible_truncation)] #![allow(clippy::module_name_repetitions)] #![allow(clippy::cast_sign_loss)] #![deny(missing_docs)] //! `tiny-bench`, a tiny benchmarking library. //! The crate is divided into two sections, benchmarking and timing. //! Benchmarking provides tools to measure code execution, show statistics about that execution, //! and compare those statistics to previous runs. //! Timing provides tools to time code. Timing how long a closure runs, or how long an iterator runs. #[cfg(feature = "bench")] pub(crate) mod benching; #[cfg(feature = "bench")] pub use benching::{ bench, bench_labeled, bench_with_configuration, bench_with_configuration_labeled, bench_with_setup, bench_with_setup_configuration, bench_with_setup_configuration_labeled, bench_with_setup_labeled, }; #[cfg(feature = "bench")] pub use output::analysis::criterion::{black_box, BenchmarkConfig}; #[cfg(any(feature = "bench", feature = "timer"))] mod error; #[cfg(any(feature = "bench", feature = "timer"))] pub(crate) mod output; #[cfg(feature = "timer")] pub(crate) mod timing; #[cfg(feature = "timer")] pub use timing::{ run_timed, run_timed_from_iterator, run_timed_times, Timeable, TimedIterator, TimingData, }; tiny-bench-0.3.0/src/output/analysis/criterion/CRITERION-LICENSE-MIT000064400000000000000000000020421046102023000226220ustar 00000000000000Copyright (c) 2014 Jorge Aparicio Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. tiny-bench-0.3.0/src/output/analysis/criterion/mod.rs000064400000000000000000000147561046102023000207760ustar 00000000000000use crate::output::analysis::random::Rng; use crate::output::wrap_yellow; use std::time::Duration; /// Everything in this module is more or less copied from [criterion.rs](https://github.com/bheisler/criterion.rs) /// with some rewrites to make it fit, the license is included in this file's directory /// A function that is opaque to the optimizer, used to prevent the compiler from /// optimizing away computations in a benchmark. /// /// This variant is stable-compatible, but it may cause some performance overhead /// or fail to prevent code from being eliminated. /// /// ``` /// use tiny_bench::black_box; /// for i in 0..10 { /// assert_eq!(i, black_box(i)); /// } /// ``` #[allow(unsafe_code)] pub fn black_box(dummy: T) -> T { unsafe { let ret = std::ptr::read_volatile(&dummy); std::mem::forget(dummy); ret } } /// Struct containing all of the configuration options for a benchmark. pub struct BenchmarkConfig { /// How long the bench 'should' run, `num_samples` is prioritized so benching will take /// longer to be able to collect `num_samples` if the code to be benched is slower /// than this time limit allowed. pub measurement_time: Duration, /// How many resamples should be done pub num_resamples: usize, /// Recommended at least 50, above 100 /// doesn't seem to yield a significantly different result pub num_samples: usize, /// How long the bench should warm up pub warm_up_time: Duration, /// Puts results in target/tiny-bench/label/.. if target can be found. /// used for comparing previous runs pub dump_results_to_disk: bool, /// Sets a hard ceiling on max iterations, overriding the heuristic calculations for iteration /// count. A rule of thumb; if this is used, the results are unlikely to be statistically /// significant. pub max_iterations: Option, } impl Default for BenchmarkConfig { fn default() -> Self { BenchmarkConfig { measurement_time: Duration::from_secs(5), num_resamples: 100_000, num_samples: 100, warm_up_time: Duration::from_secs(3), dump_results_to_disk: true, max_iterations: None, } } } pub(crate) fn calculate_iterations( warmup_mean_execution_time: f64, num_samples: u64, target_time: Duration, ) -> Vec { let met = warmup_mean_execution_time; let m_ns = target_time.as_nanos(); // Solve: [d + 2*d + 3*d + ... 
+ n*d] * met = m_ns let total_runs = num_samples * (num_samples + 1) / 2; let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1); let expected_nanoseconds = total_runs as f64 * d as f64 * met; if d == 1 { let actual_time = Duration::from_nanos(expected_nanoseconds as u64); println!( "{} You may wish to increase target time to {:.1?} or lower the requested number of samples", wrap_yellow(&format!( "Unable to complete {num_samples} samples in {target_time:.1?}" )), actual_time ); } (1..=num_samples).map(|a| a * d).collect() } pub(crate) fn calculate_t_value(sample_a: &[f64], sample_b: &[f64]) -> f64 { let a_mean = calculate_mean(sample_a); let b_mean = calculate_mean(sample_b); let a_var = calculate_variance(sample_a, a_mean); let b_var = calculate_variance(sample_b, b_mean); let a_len = sample_a.len() as f64; let b_len = sample_b.len() as f64; let mean_diff = a_mean - b_mean; let d = (a_var / a_len + b_var / b_len).sqrt(); mean_diff / d } pub(crate) fn calculate_mean(a: &[f64]) -> f64 { a.iter().sum::() / a.len() as f64 } pub(crate) fn calculate_variance(sample: &[f64], mean: f64) -> f64 { let sum = sample .iter() .copied() .map(|val| (val - mean).powi(2)) .sum::(); sum / (sample.len() as f64 - 1f64) // use n - 1 when measuring variance from a sample } pub(crate) fn resample(sample_a: &[f64], sample_b: &[f64], times: usize) -> Vec { let a_len = sample_a.len(); let mut combined = Vec::with_capacity(a_len + sample_b.len()); combined.extend_from_slice(sample_a); combined.extend_from_slice(sample_b); let mut rng = Rng::new(); let combined_len = combined.len(); let mut distributions = Vec::new(); for _ in 0..times { let mut sample = Vec::with_capacity(combined_len); for _ in 0..combined_len { let index = (rng.next() % combined.len() as u64) as usize; sample.push(combined[index]); } let sample_a = Vec::from(&sample[..a_len]); let sample_b = Vec::from(&sample[a_len..]); let t = calculate_t_value(&sample_a, &sample_b); distributions.push(t); } distributions } pub(crate) fn calculate_p_value(total_t: f64, distribution: &[f64]) -> f64 { let hits = distribution.iter().filter(|x| x < &&total_t).count(); let tails = 2; // I don't know what this is, Two-tailed significance testing something something let min = std::cmp::min(hits, distribution.len() - hits); (min * tails) as f64 / distribution.len() as f64 } #[inline] pub(crate) fn calculate_median(sample: &mut Vec) -> f64 { sample.sort_by(f64::total_cmp); sample.get(sample.len() / 2).copied().unwrap_or_default() } pub(crate) struct SamplingDataSimpleAnalysis { pub(crate) elapsed: u128, pub(crate) min: f64, pub(crate) max: f64, pub(crate) average: f64, pub(crate) median: f64, pub(crate) variance: f64, pub(crate) stddev: f64, pub(crate) per_sample_average: Vec, } #[cfg(test)] mod tests { use crate::output::analysis::criterion::{ calculate_mean, calculate_t_value, calculate_variance, }; #[test] fn calculates_mean() { let data = vec![46.0, 69.0, 32.0, 60.0, 52.0, 41.0]; assert!(calculate_mean(&data) - 50.0 < 0.0000_001); } #[test] fn calculates_variance() { let data = vec![46.0, 69.0, 32.0, 60.0, 52.0, 41.0]; assert!(calculate_variance(&data, 50.0) - 177.2 < 0.00001); } #[test] fn calculate_t() { let sample_a = vec![19.7, 20.4, 19.6, 17.8, 18.5, 18.9, 18.3, 18.9, 19.5, 21.95]; let sample_b = vec![ 28.3, 26.7, 20.1, 23.3, 25.2, 22.1, 17.7, 27.6, 20.6, 13.7, 23.2, 17.5, 20.6, 18.0, 23.9, 21.6, 24.3, 20.4, 23.9, 13.3, ]; assert!(calculate_t_value(&sample_a, &sample_b).abs() - 2.24787 < 0.0001); } } 
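
// A worked example of the iteration schedule solved for above. This is an
// illustrative sketch only; the numbers below are assumed inputs, not measured data.
// With a warm-up mean of 100ns per iteration, 100 samples and a 5s target:
// total_runs = 100 * 101 / 2 = 5050, d = ceil(5e9 / 100 / 5050) = 9901,
// so the samples run 9_901, 19_802, ..., 990_100 iterations,
// totalling 50_000_050 iterations (roughly 5.0s of expected measurement time).
#[cfg(test)]
mod iteration_schedule_sketch {
    use super::calculate_iterations;
    use std::time::Duration;

    #[test]
    fn schedule_grows_linearly_to_fill_target_time() {
        let iters = calculate_iterations(100.0, 100, Duration::from_secs(5));
        assert_eq!(100, iters.len());
        // Sample `a` runs `a * d` iterations, so consecutive samples differ by `d`.
        let d = iters[0];
        assert_eq!(9_901, d);
        assert!(iters.windows(2).all(|w| w[1] - w[0] == d));
        assert_eq!(50_000_050u64, iters.iter().sum::<u64>());
    }
}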
tiny-bench-0.3.0/src/output/analysis/mod.rs000064400000000000000000000002331046102023000167610ustar 00000000000000#[cfg(feature = "bench")] pub(crate) mod criterion; #[cfg(feature = "bench")] pub(crate) mod random; #[cfg(feature = "bench")] pub(crate) mod sample_data; tiny-bench-0.3.0/src/output/analysis/random.rs000064400000000000000000000024701046102023000174670ustar 00000000000000use std::time::{SystemTime, UNIX_EPOCH}; /// [LCG](https://en.wikipedia.org/wiki/Linear_congruential_generator) /// Choosing same constants as glibc here const MOD: u128 = 2u128.pow(48); const A: u128 = 25_214_903_917; const C: u128 = 11; pub(crate) struct Rng { seed: u64, } impl Rng { pub(crate) fn new() -> Self { let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); Rng { // And maybe check for overflows. Note: No we're good until about year 2554 seed: seed.as_nanos() as u64, } } pub(crate) fn next(&mut self) -> u64 { self.seed = ((A * u128::from(self.seed) + C) % MOD) as u64; self.seed } } #[cfg(test)] mod tests { use super::*; use std::collections::hash_map::Entry; use std::collections::HashMap; #[test] fn test_lcg() { let mut rng = Rng::new(); let mut distr = HashMap::new(); let test = 10_000; let range = 10; for _ in 0..test { let v = rng.next() % range; match distr.entry(v) { Entry::Vacant(v) => { v.insert(1); } Entry::Occupied(mut o) => { *o.get_mut() += 1; } } } eprintln!("{distr:?}"); } } tiny-bench-0.3.0/src/output/analysis/sample_data.rs000064400000000000000000000026161046102023000204630ustar 00000000000000use crate::benching::SamplingData; use crate::output::analysis::criterion::{ calculate_median, calculate_variance, SamplingDataSimpleAnalysis, }; pub(crate) fn simple_analyze_sampling_data( sampling_data: &SamplingData, ) -> SamplingDataSimpleAnalysis { let mut min = f64::MAX; let mut max = 0f64; let mut total = 0f64; let mut total_elapsed = 0; let mut sample_averages = Vec::with_capacity(sampling_data.samples.len()); for (num_samples, elapsed_nanos) in sampling_data .samples .iter() .copied() .zip(sampling_data.times.iter().copied()) { let sample_average = elapsed_nanos as f64 / num_samples as f64; sample_averages.push(sample_average); if sample_average < min { min = sample_average; } if sample_average > max { max = sample_average; } total += sample_average; total_elapsed += elapsed_nanos; } let median = calculate_median(&mut sample_averages); let total_average = total / sampling_data.samples.len() as f64; let variance = calculate_variance(&sample_averages, total_average); let stddev = variance.sqrt(); SamplingDataSimpleAnalysis { elapsed: total_elapsed, min, max, average: total_average, median, variance, stddev, per_sample_average: sample_averages, } } tiny-bench-0.3.0/src/output/disk.rs000064400000000000000000000151661046102023000153240ustar 00000000000000#[cfg(feature = "bench")] use crate::benching::SamplingData; use crate::error::{Error, Result}; use crate::output::{wrap_high_insensity_red, wrap_yellow}; #[cfg(feature = "timer")] use crate::timing::TimingData; use std::ffi::OsStr; use std::io::ErrorKind; use std::path::PathBuf; #[cfg(feature = "timer")] const CURRENT_RESULTS: &str = "current-results"; #[cfg(feature = "timer")] const OLD_RESULTS: &str = "old-results"; #[cfg(feature = "bench")] const CURRENT_SAMPLE: &str = "current-sample"; #[cfg(feature = "bench")] const OLD_SAMPLE: &str = "old-sample"; #[cfg(feature = "timer")] pub(crate) fn try_read_last_results(label: &'static str) -> Result> { let maybe_data = try_read(label, CURRENT_RESULTS)?; if let Some(data) = maybe_data { 
Ok(Some(crate::output::ser::try_de_timing_data(&data)?)) } else { Ok(None) } } #[cfg(feature = "timer")] pub(crate) fn try_write_results(label: &'static str, data: TimingData) { if let Err(e) = try_write( label, &crate::output::ser::ser_timing_data(data), CURRENT_RESULTS, OLD_RESULTS, ) { println!( "{} {e}", wrap_high_insensity_red("Failed to write timing data, cause") ); } } #[cfg(feature = "bench")] pub(crate) fn try_write_last_simpling(label: &'static str, data: &SamplingData) { if let Err(e) = try_write( label, &crate::output::ser::ser_sampling_data(data), CURRENT_SAMPLE, OLD_SAMPLE, ) { println!( "{} {e}", wrap_high_insensity_red("Failed to write sampling data, cause:") ); } } fn try_write( label: &'static str, data: &[u8], current_file_name: &str, old_file_name: &'static str, ) -> Result<()> { if label.contains(std::path::is_separator) { return Err(Error::new(format!( "Label {label} contains a path separator, cannot write to disk." ))); } let parent_dir = find_or_create_result_parent_dir(label)?; std::fs::create_dir_all(&parent_dir).map_err(|e| { Error::new(format!( "Failed to create output directory {parent_dir:?}, cause {e}, will not write results" )) })?; let latest_persisted = parent_dir.join(current_file_name); if std::fs::metadata(&latest_persisted).is_ok() { let old_file = parent_dir.join(old_file_name); if let Err(e) = std::fs::rename(&latest_persisted, &old_file) { println!( "{} from {latest_persisted:?} to {old_file:?}, cause {e}, will try to overwrite.", wrap_yellow("Failed to move old sample") ); } } std::fs::write(&latest_persisted, data).map_err(|e| { Error::new(format!( "Failed to write benchmark-data to {latest_persisted:?}, cause {e}" )) }) } fn try_read(label: &'static str, current_file_name: &'static str) -> Result>> { if label.contains(std::path::is_separator) { return Err(Error::new(format!( "Label {label} contains a path separator, cannot read old data from disk." 
))); } let parent_dir = find_or_create_result_parent_dir(label)?; let latest_persisted_path = parent_dir.join(current_file_name); match std::fs::read(&latest_persisted_path) { Ok(bytes) => Ok(Some(bytes)), Err(e) => match e.kind() { ErrorKind::NotFound => Ok(None), _ => Err(Error::new(format!( "Failed to read file at {latest_persisted_path:?}, cause: {e}" ))), }, } } #[cfg(feature = "bench")] pub(crate) fn try_read_last_simpling(label: &'static str) -> Result> { let maybe_data = try_read(label, CURRENT_SAMPLE)?; if let Some(data) = maybe_data { Ok(Some(crate::output::ser::try_de_sampling_data(&data)?)) } else { Ok(None) } } fn find_or_create_result_parent_dir(label: &'static str) -> Result { let target = find_target()?; let pb = PathBuf::from(&target); let target_buf = std::fs::metadata(&pb).map_err(|e| { Error::new(format!( "Failed to check metadata for target dir {target:?}, cause {e}" )) })?; if !target_buf.is_dir() { return Err(Error::new(format!( "Expected target directory {pb:?} is not a directory" ))); } let all_results_dir = pb.join("simple-bench"); let result_parent_dir = all_results_dir.join(label); std::fs::create_dir_all(&result_parent_dir).map_err(|e| { Error::new(format!( "Failed to create output directory {result_parent_dir:?}, cause {e}" )) })?; Ok(result_parent_dir) } fn find_target() -> Result { let exe = std::env::current_exe().map_err(|e| { Error::new(format!( "Failed to get this executable's directory from environment, cause {e}" )) })?; let mut cur = exe.as_path(); let target_os_str = OsStr::new("target"); while let Some(parent) = cur.parent() { let last = parent .components() .last() .ok_or_else(|| Error::new("Could not find target directory to place output"))?; if last.as_os_str() == target_os_str { return Ok(parent.to_path_buf()); } cur = parent; } Err(Error::new( "Could not find target directory to place output", )) } #[cfg(test)] mod tests { use super::*; #[test] #[cfg(feature = "timer")] fn can_dump_and_read_results() { let label = "label"; let rd1 = TimingData { min_nanos: 0, max_nanos: 5, elapsed: 10, iterations: 15, }; try_write_results(label, rd1); assert_eq!(rd1, try_read_last_results(label).unwrap().unwrap()); let rd2 = TimingData { min_nanos: 100, max_nanos: 105, elapsed: 110, iterations: 115, }; try_write_results(label, rd2); assert_eq!(rd2, try_read_last_results(label).unwrap().unwrap()); } #[test] #[cfg(feature = "bench")] fn can_dump_and_read_samples() { let label = "label"; let s1 = SamplingData { samples: vec![1, 2, 3, 4, 5], times: vec![6, 7, 8, 9, 10], }; try_write_last_simpling(label, &s1); assert_eq!(s1, try_read_last_simpling(label).unwrap().unwrap()); let s2 = SamplingData { samples: vec![5, 4, 3, 2, 1], times: vec![10, 9, 8, 7, 6], }; try_write_last_simpling(label, &s2); assert_eq!(s2, try_read_last_simpling(label).unwrap().unwrap()); } } tiny-bench-0.3.0/src/output/mod.rs000064400000000000000000000347541046102023000151550ustar 00000000000000pub(crate) mod analysis; pub(crate) mod disk; pub(crate) mod ser; #[cfg(feature = "bench")] use crate::benching::SamplingData; #[cfg(feature = "bench")] use crate::output::analysis::criterion::{ calculate_p_value, calculate_t_value, resample, BenchmarkConfig, SamplingDataSimpleAnalysis, }; #[cfg(feature = "bench")] use crate::output::analysis::sample_data::simple_analyze_sampling_data; #[cfg(feature = "timer")] use crate::timing::TimingData; /// Percentage increase which is deemed to be big enough to matter. 
/// Only used for highlighting output #[cfg(feature = "timer")] const TIMING_NOISE_THRESHOLD: f64 = 5.0; /// Percentage increase which is deemed to be big enough to matter. /// Only used for highlighting output #[cfg(feature = "bench")] const NOISE_THRESHOLD: f64 = 1.0; /// p-value under which a result is deemed significant enough to matter. /// Only used for highlighting output #[cfg(feature = "bench")] const SIGNIFICANCE_LEVEL: f64 = 0.05; #[cfg(feature = "timer")] pub(crate) struct LabeledOutput { label: &'static str, out: Output, } #[cfg(feature = "timer")] impl LabeledOutput { pub(crate) fn new(label: &'static str, out: O) -> Self { Self { label, out } } } #[cfg(feature = "timer")] impl LabeledOutput where O: Output, { pub(crate) fn dump(&self, data: TimingData) { self.out.dump_timing_data(self.label, data); } } pub(crate) trait Output { #[cfg(feature = "timer")] fn dump_timing_data(&self, label: &'static str, data: TimingData); #[cfg(feature = "bench")] fn dump_sampling_data( &self, label: &'static str, sampling_data: &SamplingData, cfg: &BenchmarkConfig, total_iters: u128, ); } /// Just prints the results straight to stdout pub struct SimpleStdout; impl Output for SimpleStdout { #[cfg(feature = "timer")] fn dump_timing_data(&self, label: &'static str, data: TimingData) { print_timer_header(label, &data); let mean = data.elapsed as f64 / data.iterations as f64; timer_print_elapsed(data.min_nanos as f64, mean, data.max_nanos as f64); } #[cfg(feature = "bench")] fn dump_sampling_data( &self, label: &'static str, sampling_data: &SamplingData, cfg: &BenchmarkConfig, total_iters: u128, ) { let analysis = simple_analyze_sampling_data(sampling_data); print_sample_header(label, total_iters, analysis.elapsed, cfg.num_samples as u64); print_analysis(&analysis); } } /// Checks if there has previously been any results dumped to target and compares with those pub struct ComparedStdout; impl Output for ComparedStdout { #[cfg(feature = "timer")] fn dump_timing_data(&self, label: &'static str, data: TimingData) { let mean = data.elapsed as f64 / data.iterations as f64; let maybe_old = disk::try_read_last_results(label); print_timer_header(label, &data); timer_print_elapsed(data.min_nanos as f64, mean, data.max_nanos as f64); match maybe_old { Ok(Some(old)) => { let min_change = (data.min_nanos as f64 / old.min_nanos as f64 - 1f64) * 100f64; let max_change = (data.max_nanos as f64 / old.max_nanos as f64 - 1f64) * 100f64; let mean_change = (mean / (old.elapsed as f64 / old.iterations as f64) - 1f64) * 100f64; let mean_comparison = if mean_change >= TIMING_NOISE_THRESHOLD { MeanComparison::new(mean_change, Comparison::Better) } else if mean_change <= -TIMING_NOISE_THRESHOLD { MeanComparison::new(mean_change, Comparison::Worse) } else { MeanComparison::new(mean_change, Comparison::Same) }; print_cmp( min_change, &mean_comparison, max_change, "p=? 
single sample", ); } Err(e) => { println!( "{}, cause {e}", wrap_high_insensity_red("Failed to read last results") ); } _ => {} } disk::try_write_results(label, data); } #[cfg(feature = "bench")] fn dump_sampling_data( &self, label: &'static str, sampling_data: &SamplingData, cfg: &BenchmarkConfig, total_iters: u128, ) { let analysis = simple_analyze_sampling_data(sampling_data); print_sample_header(label, total_iters, analysis.elapsed, cfg.num_samples as u64); print_analysis(&analysis); match disk::try_read_last_simpling(label) { Ok(Some(last)) => { let old_analysis = simple_analyze_sampling_data(&last); let min_change = (analysis.min / old_analysis.min - 1f64) * 100f64; let max_change = (analysis.max / old_analysis.max - 1f64) * 100f64; let mean_change = (analysis.average / old_analysis.average - 1f64) * 100f64; let t = calculate_t_value( &analysis.per_sample_average, &old_analysis.per_sample_average, ); let t_distribution = resample( &analysis.per_sample_average, &old_analysis.per_sample_average, cfg.num_resamples, ); let p = calculate_p_value(t, &t_distribution); let mean_change = if mean_change.abs() >= NOISE_THRESHOLD && p <= SIGNIFICANCE_LEVEL { if mean_change > 0.0 { MeanComparison::new(mean_change, Comparison::Worse) } else if mean_change < 0.0 { MeanComparison::new(mean_change, Comparison::Better) } else { MeanComparison::new(mean_change, Comparison::Same) } } else { MeanComparison::new(mean_change, Comparison::Same) }; print_cmp(min_change, &mean_change, max_change, &format!("p = {p:.2}")); } Err(e) => { println!( "{}, cause {e}", wrap_high_insensity_red("Failed to read last sample") ); } _ => {} } disk::try_write_last_simpling(label, sampling_data); } } #[cfg(feature = "timer")] pub(crate) fn print_timer_header(label: &'static str, data: &TimingData) { println!( "{} [{} iterations in {}]:", wrap_bold_green(label), fmt_num(data.iterations as f64), fmt_time(data.elapsed as f64) ); } #[cfg(feature = "bench")] pub(crate) fn print_sample_header( label: &'static str, total_iterations: u128, total_elapsed: u128, num_samples: u64, ) { println!( "{} [{} iterations in {} with {} samples]:", wrap_bold_green(label), fmt_num(total_iterations as f64), fmt_time(total_elapsed as f64), fmt_num(num_samples as f64) ); } #[cfg(feature = "bench")] pub(crate) fn print_analysis(analysis: &SamplingDataSimpleAnalysis) { // Variance has the unit T-squared, println!( "\telapsed\t[{} {} {}]:\t[{} {} {}] (sample data: med = {}, var = {}², stddev = {})", wrap_gray("min"), wrap_high_intensity_white("mean"), wrap_gray("max"), wrap_gray(&fmt_time(analysis.min)), wrap_high_intensity_white(&fmt_time(analysis.average)), wrap_gray(&fmt_time(analysis.max)), fmt_time(analysis.median), fmt_time(analysis.variance), fmt_time(analysis.stddev), ); } #[cfg(feature = "timer")] pub(crate) fn timer_print_elapsed(min: f64, mean: f64, max: f64) { // Variance has the unit T-squared, println!( "\telapsed\t[{} {} {}]:\t[{} {} {}]", wrap_gray("min"), wrap_high_intensity_white("mean"), wrap_gray("max"), wrap_gray(&fmt_time(min)), wrap_high_intensity_white(&fmt_time(mean)), wrap_gray(&fmt_time(max)), ); } pub(crate) struct MeanComparison { mean: f64, comparison: Comparison, } impl MeanComparison { pub(crate) fn new(mean: f64, comparison: Comparison) -> Self { Self { mean, comparison } } pub(crate) fn format(&self) -> String { match self.comparison { Comparison::Worse => wrap_high_insensity_red(&fmt_change(self.mean)), Comparison::Same => wrap_high_intensity_white(&fmt_change(self.mean)), Comparison::Better => 
wrap_high_intensity_green(&fmt_change(self.mean)), } } } pub enum Comparison { Worse, Same, Better, } pub(crate) fn print_cmp(min: f64, mean: &MeanComparison, max: f64, reliability_comment: &str) { println!( "\tchange\t[{} {} {}]:\t[{} {} {}] ({reliability_comment})", wrap_gray("min"), wrap_high_intensity_white("mean"), wrap_gray("max"), wrap_gray(&fmt_change(min)), mean.format(), wrap_gray(&fmt_change(max)), ); } const NANO_LIMIT: f64 = 1000f64; const MICRO_LIMIT: f64 = NANO_LIMIT * 1000f64; const MILLI_LIMIT: f64 = MICRO_LIMIT * 1000f64; pub(crate) fn wrap_bold_green(text: &str) -> String { format!("\x1b[1;32m{text}\x1b[0m") } pub(crate) fn wrap_high_intensity_green(text: &str) -> String { format!("\x1b[0;92m{text}\x1b[0m") } pub(crate) fn wrap_yellow(text: &str) -> String { format!("\x1b[0;93m{text}\x1b[0m") } pub(crate) fn wrap_high_insensity_red(text: &str) -> String { format!("\x1b[0;91m{text}\x1b[0m") } pub(crate) fn wrap_gray(text: &str) -> String { format!("\x1b[0;37m{text}\x1b[0m") } pub(crate) fn wrap_high_intensity_white(text: &str) -> String { format!("\x1b[0;97m{text}\x1b[0m") } pub(crate) fn fmt_time(time: f64) -> String { // Nanos if time < NANO_LIMIT { format!("{time:.2}ns") } else if time < MICRO_LIMIT { format!("{:.2}µs", time / NANO_LIMIT) } else if time < MILLI_LIMIT { format!("{:.2}ms", time / MICRO_LIMIT) } else { format!("{:.2}s", time / MILLI_LIMIT) } } fn fmt_change(change: f64) -> String { format!("{change:.4}%") } pub(crate) fn fmt_num(num: f64) -> String { if num < NANO_LIMIT { format!("{num:.1}") } else if num < MICRO_LIMIT { format!("{:.1} thousand", num / NANO_LIMIT) } else if num < MILLI_LIMIT { format!("{:.1}M", num / MICRO_LIMIT) } else { format!("{:.1}B", num / MILLI_LIMIT) } } /// Some illegal filename symbols, not meant to be exhaustive but good enough const ILLEGAL: [char; 10] = [ // Linux '/', '\0', // Windows ':', '<', '>', '"', '\\', '|', '?', '*', ]; #[cfg_attr(test, derive(Eq, PartialEq, Debug))] pub(crate) enum LabelValidationResult { Valid, Invalid(&'static str), } pub(crate) fn fallback_to_anonymous_on_invalid_label(label: &'static str) -> &'static str { if let LabelValidationResult::Invalid(reason) = validate_label(label) { println!( "{} falling back to 'anonymous'.", wrap_high_insensity_red(reason) ); "anonymous" } else { label } } fn validate_label(label: &'static str) -> LabelValidationResult { for ch in ILLEGAL { if label.contains(ch) { return LabelValidationResult::Invalid("Label contains illegal character {ch}"); } } for ch in 0..32 { let ascii_ctrl = char::from(ch); if label.contains(ascii_ctrl) { return LabelValidationResult::Invalid( "Label contains illegal ascii-control character number {ch}", ); } } if label.ends_with('.') { return LabelValidationResult::Invalid("Label cannot end with dot"); } if label.ends_with(' ') { return LabelValidationResult::Invalid("Label cannot end with a space"); } LabelValidationResult::Valid } #[cfg(test)] mod tests { use crate::output::{fmt_change, fmt_num, fmt_time, validate_label, LabelValidationResult}; #[test] fn validates_label() { assert_eq!(LabelValidationResult::Valid, validate_label("Hello!")); assert_eq!( LabelValidationResult::Valid, validate_label("Some,weird_name_but.okay.png") ); assert!(matches!( validate_label("."), LabelValidationResult::Invalid(_) )); assert!(matches!( validate_label("hello!."), LabelValidationResult::Invalid(_) )); assert!(matches!( validate_label("hello! 
"), LabelValidationResult::Invalid(_) )); assert!(matches!( validate_label("bad/label"), LabelValidationResult::Invalid(_) )); assert!(matches!( validate_label("bad:label"), LabelValidationResult::Invalid(_) )); assert!(matches!( validate_label("bad>label"), LabelValidationResult::Invalid(_) )); assert!(matches!( validate_label("bad Vec { let mut v = Vec::with_capacity(16 * 4); v.extend_from_slice(&run_data.min_nanos.to_le_bytes()); v.extend_from_slice(&run_data.max_nanos.to_le_bytes()); v.extend_from_slice(&run_data.elapsed.to_le_bytes()); v.extend_from_slice(&run_data.iterations.to_le_bytes()); v } #[cfg(feature = "timer")] pub(crate) fn try_de_timing_data(buf: &[u8]) -> Result { if buf.len() != 64 { return Err(Error::new(format!( "Unexpected buffer len for serialized timing data, expected 64 but got {}", buf.len() ))); } // Since the buffer length is fine we're good here. let min_nanos = u128::from_le_bytes(buf[0..16].try_into().ok().unwrap()); let max_nanos = u128::from_le_bytes(buf[16..32].try_into().ok().unwrap()); let elapsed = u128::from_le_bytes(buf[32..48].try_into().ok().unwrap()); let iterations = u128::from_le_bytes(buf[48..64].try_into().ok().unwrap()); Ok(TimingData { min_nanos, max_nanos, elapsed, iterations, }) } #[cfg(feature = "bench")] pub(crate) fn ser_sampling_data(sampling_data: &SamplingData) -> Vec { let mut v = Vec::new(); let len = sampling_data.samples.len() as u64; v.extend_from_slice(&len.to_le_bytes()); for sample in &sampling_data.samples { v.extend_from_slice(&sample.to_le_bytes()); } for time in &sampling_data.times { v.extend_from_slice(&time.to_le_bytes()); } v } #[cfg(feature = "bench")] pub(crate) fn try_de_sampling_data(buf: &[u8]) -> Result { let buf_len = buf.len(); if buf_len < 8 { return Err(Error::new(format!( "Found malformed serialized data, length too short {buf_len}" ))); } // No risk of going out of bounds yet. let len = u64::from_le_bytes(buf[..8].try_into().unwrap()); let mut samples = Vec::with_capacity(len as usize); let mut times = Vec::with_capacity(len as usize); let expected_total_len = 8 + len * 16 + len * 8; if buf_len as u64 != expected_total_len { return Err(Error::new(format!("Found malformed serialized data, unexpected length. 
Expected {expected_total_len} found {buf_len}"))); } for i in 0..len { let sample_value_offset = (8 + i * 8) as usize; samples.push(u64::from_le_bytes( buf[sample_value_offset..sample_value_offset + 8] .try_into() .ok() .unwrap(), )); let times_value_offset = (8 + len * 8 + i * 16) as usize; times.push(u128::from_le_bytes( buf[times_value_offset..times_value_offset + 16] .try_into() .ok() .unwrap(), )); } Ok(SamplingData { samples, times }) } #[cfg(test)] mod tests { #[test] #[cfg(feature = "timer")] fn can_ser_de_timing() { let min_nanos = 0; let max_nanos = u128::MAX; let elapsed = 555_555; let iterations = 99_959_599_959; let rd = super::TimingData { min_nanos, max_nanos, elapsed, iterations, }; assert_eq!( rd, super::try_de_timing_data(&super::ser_timing_data(rd)).unwrap() ); } #[test] #[cfg(feature = "bench")] fn can_ser_de_sampling() { let sampling = super::SamplingData { samples: vec![5, 6, 7, 8, 9, 10], times: vec![15, 16, 17, 18, 19, 20], }; assert_eq!( sampling, super::try_de_sampling_data(&super::ser_sampling_data(&sampling)).unwrap() ); } } tiny-bench-0.3.0/src/timing/mod.rs000064400000000000000000000175721046102023000151030ustar 00000000000000use crate::output; use crate::output::{ fallback_to_anonymous_on_invalid_label, ComparedStdout, LabeledOutput, Output, SimpleStdout, }; use std::time::{Duration, Instant}; /// The simplest possible timed function that just runs some `FnMut` closure and returns the time it took /// ``` /// use std::time::Duration; /// use tiny_bench::run_timed; /// let time = run_timed(|| std::thread::sleep(Duration::from_micros(5))); /// assert!(time.as_micros() >= 5); /// ``` pub fn run_timed T>(mut closure: F) -> Duration { let start = Instant::now(); (closure)(); Instant::now().duration_since(start) } /// Runs some closure `n` times and returns the data gathered /// ``` /// use std::time::Duration; /// use tiny_bench::run_timed_times; /// let data = run_timed_times(100, || std::thread::sleep(Duration::from_micros(1))); /// data.pretty_print(); /// ``` pub fn run_timed_times T>(iterations: usize, mut closure: F) -> TimingData { let mut elapsed = Duration::ZERO; let mut min_nanos = u128::MAX; let mut max_nanos = 0; for _ in 0..iterations { let start = Instant::now(); closure(); let run_elapsed = Instant::now().duration_since(start); let run_elapsed_nanos = run_elapsed.as_nanos(); if run_elapsed_nanos < min_nanos { min_nanos = run_elapsed_nanos; } if run_elapsed_nanos > max_nanos { max_nanos = run_elapsed_nanos; } elapsed += run_elapsed; } TimingData { iterations: iterations as u128, min_nanos, max_nanos, elapsed: elapsed.as_nanos(), } } /// Drains an iterator and calls the closure with the yielded value, timing the closure's execution. 
/// ``` /// use std::time::Duration; /// use tiny_bench::run_timed_from_iterator; /// let it = (0..100); /// let mut v = Vec::with_capacity(100); /// let mut counted_iterations = 0; /// let data = run_timed_from_iterator(it, |i| { /// v.push(i); /// counted_iterations += 1; /// }); /// assert_eq!(100, v.len()); /// assert_eq!(100, counted_iterations); /// data.pretty_print(); /// ``` pub fn run_timed_from_iterator T, It>( iterator: It, mut closure: F, ) -> TimingData where It: Iterator, { let mut elapsed = Duration::ZERO; let mut min_nanos = u128::MAX; let mut max_nanos = 0; let mut iterations = 0; for v in iterator { let start = Instant::now(); closure(v); let run_elapsed = Instant::now().duration_since(start); let run_elapsed_nanos = run_elapsed.as_nanos(); if run_elapsed_nanos < min_nanos { min_nanos = run_elapsed_nanos; } if run_elapsed_nanos > max_nanos { max_nanos = run_elapsed_nanos; } elapsed += run_elapsed; iterations += 1; } TimingData { iterations, min_nanos, max_nanos, elapsed: elapsed.as_nanos(), } } /// Data collected after a timed run #[derive(Copy, Clone, Debug)] #[cfg(feature = "timer")] #[cfg_attr(test, derive(Eq, PartialEq))] pub struct TimingData { /// The last amount of time elapsed for an iteration pub min_nanos: u128, /// The most amount of time elapsed for an iteration pub max_nanos: u128, /// The total elapsed time for all iterations combined pub elapsed: u128, /// How many iterations were ran pub iterations: u128, } #[cfg(feature = "timer")] impl TimingData { /// Print the data with pretty colors to stdout pub fn pretty_print(&self) { output::print_timer_header("anonymous", self); output::timer_print_elapsed( self.min_nanos as f64, self.elapsed as f64 / self.iterations as f64, self.max_nanos as f64, ); } } /// A trait for allowing iterators to be used as timers pub trait Timeable: Sized where It: Iterator, { /// Time this iterator with an anonymous label /// ``` /// use tiny_bench::Timeable; /// let v: Vec = (0..100) /// .timed() /// .collect(); /// // Prints results when the iterator has been drained /// assert_eq!(100, v.len()); /// ``` fn timed(self) -> TimedIterator { self.timed_labeled("anonymous") } /// Time this iterator with a specified label /// ``` /// use tiny_bench::Timeable; /// let v: Vec = (0..100) /// .timed_labeled("my_iterator_test") /// .collect(); /// // Prints results when the iterator has been drained /// assert_eq!(100, v.len()); /// ``` fn timed_labeled(self, label: &'static str) -> TimedIterator; /// Time this iterator with an anonymous label and persist the result so that other anonymous /// time results will be compared with it when they run next fn timed_persisted(self) -> TimedIterator { self.timed_persisted_labeled("anonymous") } /// Time this iterator with a custom label to separate different runs for comparison fn timed_persisted_labeled(self, label: &'static str) -> TimedIterator; } impl Timeable for It where It: Iterator, { fn timed_labeled(self, label: &'static str) -> TimedIterator { TimedIterator::new( self, LabeledOutput::new(fallback_to_anonymous_on_invalid_label(label), SimpleStdout), ) } fn timed_persisted_labeled(self, label: &'static str) -> TimedIterator { TimedIterator::new( self, LabeledOutput::new( fallback_to_anonymous_on_invalid_label(label), ComparedStdout, ), ) } } /// An iterator that wraps another iterator and times each call to `next` pub struct TimedIterator where It: Iterator, { inner: It, iterations: u128, min_nanos: u128, max_nanos: u128, elapsed: Duration, out: LabeledOutput, } impl TimedIterator where It: 
Iterator, { fn new(inner: It, out: LabeledOutput) -> Self { TimedIterator { inner, iterations: 0, min_nanos: u128::MAX, max_nanos: 0, elapsed: Duration::ZERO, out, } } } impl Iterator for TimedIterator where It: Iterator, O: Output, { type Item = T; fn next(&mut self) -> Option { let start = Instant::now(); let maybe_item = self.inner.next(); let run_elapsed = Instant::now().duration_since(start); if let Some(item) = maybe_item { let run_elapsed_nanos = run_elapsed.as_nanos(); if run_elapsed_nanos < self.min_nanos { self.min_nanos = run_elapsed_nanos; } if run_elapsed_nanos > self.max_nanos { self.max_nanos = run_elapsed_nanos; } self.elapsed += run_elapsed; self.iterations += 1; Some(item) } else { self.out.dump(TimingData { min_nanos: self.min_nanos, max_nanos: self.max_nanos, elapsed: self.elapsed.as_nanos(), iterations: self.iterations, }); None } } } #[cfg(test)] #[cfg(feature = "timer")] mod tests { use crate::timing::Timeable; #[test] fn time_iterator() { let _v: Vec = (0..100).timed().chain(0..10_000).timed().collect(); } #[test] fn time_persisted_iterator() { for _ in 0..2 { let _v: Vec = (0..1_000_000).timed_persisted().collect(); } } #[test] fn time_persisted_labled() { for _ in 0..2 { let _v: Vec = (0..1_000_000).timed_persisted_labeled("my_test").collect(); } } }