pax_global_header00006660000000000000000000000064142614067120014515gustar00rootroot0000000000000052 comment=f9ccd05caad4d068679074c3bc33c3f712ff2343 criterion.rs-0.3.6/000077500000000000000000000000001426140671200141445ustar00rootroot00000000000000criterion.rs-0.3.6/.editorconfig000066400000000000000000000002031426140671200166140ustar00rootroot00000000000000root = true [*] end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true charset = utf-8 indent_style = space criterion.rs-0.3.6/.github/000077500000000000000000000000001426140671200155045ustar00rootroot00000000000000criterion.rs-0.3.6/.github/workflows/000077500000000000000000000000001426140671200175415ustar00rootroot00000000000000criterion.rs-0.3.6/.github/workflows/audit.yml000066400000000000000000000004401426140671200213700ustar00rootroot00000000000000name: Security audit on: push: paths: - '**/Cargo.toml' - '**/Cargo.lock' jobs: security_audit: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} criterion.rs-0.3.6/.github/workflows/ci.yaml000066400000000000000000000020321426140671200210150ustar00rootroot00000000000000on: push: branches: - master pull_request: branches: - master name: tests jobs: ci: runs-on: ubuntu-latest strategy: matrix: rust: - stable - beta - 1.49.0 # MSRV steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: ${{ matrix.rust }} override: true components: rustfmt, clippy - uses: Swatinem/rust-cache@v1 - uses: actions-rs/cargo@v1 with: command: build args: --features stable - uses: actions-rs/cargo@v1 with: command: test args: --features stable - uses: actions-rs/cargo@v1 if: ${{ matrix.rust == 'stable' }} with: command: fmt args: --all -- --check - uses: actions-rs/cargo@v1 if: ${{ matrix.rust != '1.40.0' }} # 1.40 has horrible lints. with: command: clippy args: -- -D warnings criterion.rs-0.3.6/.gitignore000066400000000000000000000000501426140671200161270ustar00rootroot00000000000000.criterion Cargo.lock target **/.*.sw* criterion.rs-0.3.6/CHANGELOG.md000066400000000000000000000533471426140671200157710ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). ## [Unreleased] ## [0.3.6] - 2022-07-06 ### Changed - MSRV bumped to 1.49 - Symbol for microseconds changed from ASCII 'us' to unicode 'µs' - Documentation fixes - Clippy fixes ## [0.3.5] - 2021-07-26 ### Fixed - Corrected `Criterion.toml` in the book. - Corrected configuration typo in the book. ### Changed - Bump plotters dependency to always include a bug-fix. - MSRV bumped to 1.46. ## [0.3.4] - 2021-01-24 ### Added - Added support for benchmarking async functions - Added `with_output_color` for enabling or disabling CLI output coloring programmatically. ### Fixed - Criterion.rs will now give a clear error message in case of benchmarks that take zero time. - Added some extra code to ensure that every sample has at least one iteration. - Added a notice to the `--help` output regarding "unrecognized option" errors. - Increased opacity on violin charts. - Fixed violin chart X axis not starting at zero in the plotters backend. - Criterion.rs will now automatically detect the right output directory. ### Deprecated - `Criterion::can_plot` is no longer useful and is deprecated pending deletion in 0.4.0. 
- `Benchmark` and `ParameterizedBenchmark` were already hidden from documentation, but are now formally deprecated pending deletion in 0.4.0. Callers should use `BenchmarkGroup` instead. - `Criterion::bench_function_over_inputs`, `Criterion::bench_functions`, and `Criterion::bench` were already hidden from documentation, but are now formally deprecated pending deletion in 0.4.0. Callers should use `BenchmarkGroup` instead. - Three new optional features have been added; "html_reports", "csv_output" and "cargo_bench_support". These features currently do nothing except disable a warning message at runtime, but in version 0.4.0 they will be used to enable HTML report generation, CSV file generation, and the ability to run in cargo-bench (as opposed to [cargo-criterion]). "cargo_bench_support" is enabled by default, but "html_reports" and "csv_output" are not. If you use Criterion.rs' HTML reports, it is recommended to switch to [cargo-criterion]. If you use CSV output, it is recommended to switch to [cargo-criterion] and use the `--message-format=json` option for machine-readable output instead. A warning message will be printed at the start of benchmark runs which do not have "html_reports" or "cargo_bench_support" enabled, but because CSV output is not widely used it has no warning. [cargo-criterion]: https://github.com/bheisler/cargo-criterion ## [0.3.3] - 2020-06-29 ### Added - Added `CRITERION_HOME` environment variable to set the directory for Criterion to store its results and charts in. - Added support for [cargo-criterion]. The long-term goal here is to remove code from Criterion-rs itself to improve compile times, as well as to add features to `cargo-criterion` that are difficult to implement in Criterion-rs. - Add sampling mode option for benchmarks. This allows the user to change how Criterion.rs chooses the iteration counts in each sample. By default, nothing will change for most benchmarks, but very slow benchmarks will now run fewer iterations to fit in the desired number of samples. This affects the statistics and plots generated. ### Changed - The serialization format for some of the files has changed. This may cause your first benchmark run after updating to produce errors, but they're harmless and will go away after running the benchmarks once. ### Fixed - Fixed a bug where the current measurement was not shown on the relative regression plot. - Fixed rare panic in the plotters backend. - Panic with a clear error message (rather than panicking messily later on) when the user sets the group or function name to the empty string. - Escape single quotes in benchmark names when generating Gnuplot scripts. ## [0.3.2] - 2020-04-26 ### Added - Added `?Sized` bound to benchmark parameter types, which allows dynamically sized types like `&str` and `&[T]` to be used as benchmark parameters. - Added the `--output-format ` command-line option. If `--output-format bencher` is passed, Criterion.rs will print its measurements in a format similar to that used by the `bencher` crate or unstable `libtest` benchmarks, and using similar statistical measurements as well. Though this provides less information than the default format, it may be useful for supporting tools which parse this output format. - Added `--nocapture` argument. This argument does nothing, but prevents Criterion.rs from exiting when running tests or benchmarks and allowing stdout output from other tests. ### Fixed - Fixed panic when environment variables contains non-UTF8 characters. 
- Fixed panic when `CRITERION_DEBUG` or `CRITERION_TARGET_DIR` environment variables contain non-UTF8 characters. ## [0.3.1] - 2020-01-25 ### Added - Added new plotting backend using the `plotters` crate. Implementation generously provided by Hao Hou, author of the `plotters` crate. - Added `--plotting-backend` command-line option to select the plotting backend. The existing gnuplot backend will be used by default when available, and the plotters backend will be used when gnuplot is not available or when requested. - Added `Criterion::plotting_backend()` function to configure the plotting backend in code. - Added `--load-baseline` command-line option to load a baseline for comparison rather than measuring the current code - Benchmark filters can now be regular expressions. ### Fixed - Fixed `fibonacci` functions. - Fixed `#[criterion]` benchmarks ignoring the command-line options. - Fixed incorrect scaling of the violin plots. - Don't print the recommended sample count if it's the same as the configured sample count. - Fix potential panic when `nresamples` is set too low. Also added a warning against setting `nresamples` too low. - Fixed issue where a slow outer closure would cause Criterion.rs to calculate the wrong estimated time and number of iterations in the warm-up phase. ## [0.3.0] - 2019-08-25 ### Added - Added support for plugging in custom measurements (eg. processor counters) into Criterion.rs' measurement and analysis. - Added support for plugging in instrumentation for internal profilers such as `cpuprofiler` which must be explicitly started and stopped within the profiled process. - Added the `BenchmarkGroup` type, which supersedes `ParameterizedBenchmark`, `Benchmark`, `Criterion::bench_functions`, `Criterion::bench_function_over_inputs`, and `Criterion::bench`. `BenchmarkGroup` performs the same function as all of the above, but is cleaner to use and more powerful and flexible. All of these types/functions are now soft-deprecated (meaning they're hidden from the documentation and should not be used in new code). They will be fully deprecated at some point in the 0.3.* series and removed in 0.4.0. - `iter_custom` - a "timing loop" that allows the caller to perform their own measurements. This is useful for complex measurements that don't fit into the usual mode of calling a lambda in a loop. - If the benchmark cannot be completed in approximately the requested measurement time, Criterion.rs will now print a suggested measurement time and sample size that would work. - Two new fields, `throughput_num` and `throughput_type` have been added to the `raw.csv` file. - Added command-line options to set the defaults for warm-up time, measurement-time, etc. ### Changed - The `raw.csv` file format has been changed slightly. The `sample_time_nanos` field has been split into `sample_measured_value` and `unit` fields to accommodate custom measurements. - Throughput has been expanded from u32 to u64 to accommodate very large input sizes. ### Fixed - Fixed possible invalid file name error on Windows - Fixed potential case where data for two different benchmarks would be stored in the same directory. ### Removed - Removed the `--measure-only` command-line argument; it was deprecated in favor of `--profile-time` in 0.2.6. - External program benchmarks have been removed; they were deprecated in 0.2.6. The new `iter_custom` timing loop can be used as a substitute; see `benches/external_process.rs` for an example of this. ### Deprecated - The `--test` argument is now deprecated. 
To test benchmarks, use `cargo test --benches`. ## [0.2.11] - 2019-04-08 ### Added - Enabled automatic text-coloring on Windows. ### Fixed - Fixed panic caused by outdated files after benchmark names or types were changed. - Reduced timing overhead of `Criterion::iter_batched/iter_batched_ref`. ## [0.2.10] - 2019-02-09 ### Added - Added `iter_batched/iter_batched_ref` timing loops, which allow for setup (like `iter_with_setup/iter_with_large_setup`) and exclude drop (like `iter_with_large_drop`) but measure the runtime more accurately, use less memory and are more flexible. ### Deprecated - `iter_with_setup/iter_with_large_setup` are now deprecated in favor of `iter_batched`. ## [0.2.9] - 2019-01-24 ### Changed - Criterion.rs no longer depends on the default features of the `rand-core` crate. This fixes some downstream crates which use `rand` in a `no_std` context. ## [0.2.8] - 2019-01-20 ### Changed - Criterion.rs now uses `rayon` internally instead of manual `unsafe` code built with thread-scoped. - Replaced handlebars templates with [TinyTemplate](https://github.com/bheisler/TinyTemplate) - Merged `criterion-stats` crate into `criterion` crate. `criterion-stats` will no longer receive updates. - Replaced or removed various other dependencies to reduce the size of Criterion.rs' dependency tree. ## [0.2.7] - 2018-12-29 ### Fixed - Fixed version numbers to prevent incompatibilities between `criterion` and `criterion-stats` crates. ## [0.2.6] - 2018-12-27 - Yanked ### Added - Added `--list` command line option, which lists the benchmarks but does not run them, to match `cargo test -- --list`. - Added README/CONTRIBUTING/LICENSE files to sub-crates. - Displays change in throughput in the command-line and HTML output as well as change in iteration time. - Benchmarks with multiple functions and multiple values will now generate a per-value summary report file in addition to the existing per-function one. - Added a `--profile-time` command-line argument which disables reporting and analysis and instead simply iterates each benchmark for approximately the given number of seconds. This supersedes the (now-deprecated) `--measure-only` argument. ### Fixed - Functions passed to `Bencher::iter_with_large_setup` can now return output. This is necessary to prevent the compiler from optimizing away the benchmark. This is technically a breaking change - that function requires a new type parameter. It's so unlikely to break existing code that I decided not to delay this for a breaking-change release. - Reduced measurement overhead for the `iter_with_large_setup` and `iter_with_drop` methods. - `criterion_group` and `criterion_main` macros no longer require the `Criterion` struct to be explicitly imported. - Don't panic when `gnuplot --version` fails. - Criterion.rs macros no longer require user to `use criterion::Criterion;` - Criterion.rs no longer initializes a logger, meaning that it will no longer conflict with user code which does. - Criterion.rs no longer fails to parse gnuplot version numbers like `gnuplot 5.2 patchlevel 5a (Gentoo revision r0)` - Criterion.rs no longer prints an error message that gnuplot couldn't be found when chart generation is disabled (either by `Criterion::without_plots`, `--noplot` or disabling the HTML reports feature) - Benchmark names are now automatically truncated to 100 characters and a number may be added to make them unique. This fixes a problem where gnuplot would crash if the title was extremely long, and also improves the general usability of Criterion.rs. 
### Changed - Changed timing model of `iter_with_large_setup` to exclude time spent dropping values returned by the routine. Time measurements taken with 0.2.6 using these methods may differ from those taken with 0.2.5. - Benchmarks with multiple functions and multiple values will now appear as a table rather than a tree in the benchmark index. This is to accommodate the new per-value summary reports. ### Deprecated - Deprecated the `--measure-only` command-line-argument in favor of `--profile-time`. This will be removed in 0.3.0. - External-program benchmarks are now deprecated. They will be removed in 0.3.0. - The `html_reports` cargo feature is now deprecated. This feature will become non-optional in 0.3.0. - Sample sizes less than 10 are deprecated and will be disallowed in 0.3.0. - This is not an exhaustive list - the full scope of changes in 0.3.0 is not yet determined. There may be breaking changes that are not listed here. ## [0.2.5] - 2018-08-27 ### Fixed - Fixed links from generated report files to documentation. - Fixed formatting for very large percentage changes (>1000%) - Sorted the benchmarks in the index report by name - Fixed case where benchmark ID with special characters would cause Criterion.rs to open the wrong file and log an error message. - Fixed case where running `cargo clean; cargo bench -- ` would cause Criterion.rs to log an error message. - Fixed a GNUplot error message when sample size is very small. - Fixed several cases where Criterion.rs would generate invalid path names. - Fixed a bug where Criterion.rs would print an error if run with a filter that allowed no benchmarks and a clean target directory. - Fixed bug where some benchmarks didn't appear in the benchmark index report. - Criterion.rs now honors the `CARGO_TARGET_DIR` environment variable. ### Added - Criterion.rs will generate a chart showing the effects of changes in input (or input size) for all benchmarks with numeric inputs or throughput, not just for those which compare multiple functions. ## [0.2.4] 2018-07-08 ### Added - Added a pair of flags, `--save-baseline` and `--baseline`, which change how benchmark results are stored and compared. This is useful for working against a fixed baseline(eg. comparing progress on an optimization feature branch to the commit it forked from). Default behavior of Criterion.rs is now `--save-baseline base` which emulates the previous, user facing behavior. - `--save-baseline` saves the benchmark results under the provided name. - `--baseline` compares the results to a saved baseline. If the baseline does not exist for a benchmark, an error is given. - Added user-guide documentation for baselines, throughput measurements and plot configuration. - Added a flag, `--test`, which causes Criterion to execute the benchmarks once without measuring or reporting the results. This is useful for checking that the benchmarks run successfully in a CI setting. - Added a `raw.csv` file to the output which contains a stable, machine-readable representation of the measurements taken by benchmarks. This enables users to perform their own analysis or keep historical information without depending on private implementation details. ### Fixed - The `sample_size` method on the `Criterion`, `Benchmark` and `ParameterizedBenchmark` structs has been changed to panic if the sample size is less than 2. Other parts of the code require this and will panic if the sample size is 1, so this is not considered to be a breaking change. 
- API documentation has been updated to show more-complete examples. - Certain characters will now be replaced with underscores when creating benchmark directory paths, to avoid generating invalid or unexpected paths. ## [0.2.3] - 2018-04-14 ### Fixed - Criterion.rs will now panic with a clear error message if the user attempts to run a benchmark which doesn't call the `Bencher::iter` function or a related function, rather than failing in an uncontrolled manner later. - Fixed broken links in some more summary reports. ### Added - Added a `--measure-only` argument which causes the benchmark executable to run the warmup and measurement and then move on to the next benchmark without analyzing or saving data. This is useful to prevent Criterion.rs' analysis code from appearing in profile data when profiling benchmarks. - Added an index report file at "target/criterion/report/index.html" which links to the other reports for easy navigation. ## [0.2.2] - 2018-03-25 ### Fixed - Fixed broken links in some summary reports. - Work around apparent rustc bug in >= 1.24.0. ## [0.2.1] - 2018-02-24 ### Added - HTML reports are now a default Cargo feature. If you wish to disable HTML reports, disable Criterion.rs' default features. Doing so will allow compatibility with older Rust versions such as 1.20. If you wish to continue using HTML reports, you don't need to do anything. - Added a summary report for benchmarks that compare multiple functions or different inputs. ### Changed - The plots and HTML reports are now generated in a `report` folder. ### Fixed - Underscores in benchmark names will no longer cause subscripted characters to appear in generated plots. ## [0.2.0] - 2018-02-05 ### Added - Added `Criterion.bench` function, which accepts either a `Benchmark` or `ParameterizedBenchmark`. These new structures allow for custom per-benchmark configuration as well as more complex benchmark grouping (eg. comparing a Rust function against an external program over a range of inputs) which was not possible previously. - Criterion.rs can now report the throughput of the benchmarked code in units of bytes or elements per second. See the `Benchmark.throughput` and `ParameterizedBenchmark.throughput` functions for further details. - Criterion.rs now generates a basic HTML report for each benchmark. - Added `--noplot` command line option to disable plot generation. ### Changed - The builder methods on the Criterion struct now take and return self by value for easier chaining. Functions which configure a Criterion structure will need to be updated accordingly, or will need to be changed to work with the `Benchmark` or `ParameterizedBenchmark` types to do per-benchmark configuration instead. - The closures taken by `Criterion.bench_*` must now have a `'static` lifetime. This means that you may need to change your closures from `|bencher| {...}` to `move |bencher| {...}`. - `Criterion.bench_functions` now takes `I` as an input parameter, not `&I`. - Input values must now implement `Debug` rather than `Display`. - The generated plots are stored in `target/criterion` rather than `.criterion`. ### Removed - The hidden `criterion::ConfidenceInterval` and`criterion::Estimate` types are no longer publicly accessible. - The `Criterion.summarize` function has been removed. ### Fixed - Fixed the relative mean and median reports. - Fixed panic while summarizing benchmarks. ## [0.1.2] - 2018-01-12 ### Changed - Criterion.rs is now stable-compatible! - Criterion.rs now includes its own stable-compatible `black_box` function. 
Some benchmarks may now be affected by dead-code-elimination where they previously weren't and may have to be updated. - Criterion.rs now uses `serde` to save results. Existing results files will be automatically removed when benchmarks are run. - Redesigned the command-line output to highlight the important information and reduce noise. ### Added - Running benchmarks with the variable "CRITERION_DEBUG" in the environment will cause Criterion.rs to generate extra debug output and save the gnuplot scripts alongside the generated plots. ### Fixed - Don't panic on IO errors or gnuplot failures - Fix generation of invalid gnuplot scripts when benchmarking over inputs and inputs include values <= 0. - Bug where benchmarks would run one sample fewer than was configured. ### Removed - Generated plots will no longer use log-scale. ## [0.1.1] - 2017-12-12 ### Added - A changelog file. - Added a chapter to the book on how Criterion.rs collects and analyzes data. - Added macro rules to generate a test harness for use with `cargo bench`. Benchmarks defined without these macros should continue to work. - New contribution guidelines - Criterion.rs can selectively run benchmarks. See the Command-line page for more details ## 0.1.0 - 2017-12-02 ### Added - Initial release on Crates.io. [Unreleased]: https://github.com/bheisler/criterion.rs/compare/0.3.6...HEAD [0.1.1]: https://github.com/bheisler/criterion.rs/compare/0.1.0...0.1.1 [0.1.2]: https://github.com/bheisler/criterion.rs/compare/0.1.1...0.1.2 [0.2.0]: https://github.com/bheisler/criterion.rs/compare/0.1.2...0.2.0 [0.2.1]: https://github.com/bheisler/criterion.rs/compare/0.2.0...0.2.1 [0.2.2]: https://github.com/bheisler/criterion.rs/compare/0.2.1...0.2.2 [0.2.3]: https://github.com/bheisler/criterion.rs/compare/0.2.2...0.2.3 [0.2.4]: https://github.com/bheisler/criterion.rs/compare/0.2.3...0.2.4 [0.2.5]: https://github.com/bheisler/criterion.rs/compare/0.2.4...0.2.5 [0.2.6]: https://github.com/bheisler/criterion.rs/compare/0.2.5...0.2.6 [0.2.7]: https://github.com/bheisler/criterion.rs/compare/0.2.6...0.2.7 [0.2.8]: https://github.com/bheisler/criterion.rs/compare/0.2.7...0.2.8 [0.2.9]: https://github.com/bheisler/criterion.rs/compare/0.2.8...0.2.9 [0.2.10]: https://github.com/bheisler/criterion.rs/compare/0.2.9...0.2.10 [0.2.11]: https://github.com/bheisler/criterion.rs/compare/0.2.10...0.2.11 [0.3.0]: https://github.com/bheisler/criterion.rs/compare/0.2.11...0.3.0 [0.3.1]: https://github.com/bheisler/criterion.rs/compare/0.3.0...0.3.1 [0.3.2]: https://github.com/bheisler/criterion.rs/compare/0.3.1...0.3.2 [0.3.3]: https://github.com/bheisler/criterion.rs/compare/0.3.2...0.3.3 [0.3.4]: https://github.com/bheisler/criterion.rs/compare/0.3.3...0.3.4 [0.3.5]: https://github.com/bheisler/criterion.rs/compare/0.3.4...0.3.5 [0.3.5]: https://github.com/bheisler/criterion.rs/compare/0.3.5...0.3.6 criterion.rs-0.3.6/CONTRIBUTING.md000066400000000000000000000064521426140671200164040ustar00rootroot00000000000000# Contributing to Criterion.rs ## Ideas, Experiences and Questions The easiest way to contribute to Criterion.rs is to use it and report your experiences, ask questions and contribute ideas. We'd love to hear your thoughts on how to make Criterion.rs better, or your comments on why you are or are not currently using it. 
Issues, ideas, requests and questions should be posted on the issue tracker at: https://github.com/bheisler/criterion.rs/issues ## A Note on Dependency Updates Criterion.rs does not accept pull requests to update dependencies unless specifically requested by the maintaner(s). Dependencies are updated manually by the maintainer(s) before each new release. ## Code Pull requests are welcome, though please raise an issue for discussion first if none exists. We're happy to assist new contributors. If you're not sure what to work on, try checking the [Beginner label](https://github.com/bheisler/criterion.rs/issues?q=is%3Aissue+is%3Aopen+label%3ABeginner) To make changes to the code, fork the repo and clone it: `git clone git@github.com:your-username/criterion.rs.git` You'll probably want to install [gnuplot](http://www.gnuplot.info/) as well. See the gnuplot website for installation instructions. Then make your changes to the code. When you're done, run the tests: ``` cargo test --all cargo bench ``` It's a good idea to run clippy and fix any warnings as well: ``` rustup component add clippy-preview cargo clippy --all ``` Finally, run Rustfmt to maintain a common code style: ``` rustup component add rustfmt-preview cargo fmt --all ``` Don't forget to update the CHANGELOG.md file and any appropriate documentation. Once you're finished, push to your fork and submit a pull request. We try to respond to new issues and pull requests quickly, so if there hasn't been any response for more than a few days feel free to ping @bheisler. Some things that will increase the chance that your pull request is accepted: * Write tests * Clearly document public methods * Write a good commit message ## Branches * PRs with breaking changes are made against the unreleased branch. e.g. branch version-0.4 * PRs without breaking changes are made against the master branch. If you're not sure which branch to use just start with master, as this can be changed during review. When it is time to release the unreleased branch, a PR is made from the unreleased branch to master. e.g. https://github.com/bheisler/criterion.rs/pull/496 ## Github Labels Criterion.rs uses a simple set of labels to track issues. Most important are the difficulty labels: * Beginner - Suitable for people new to Criterion.rs, or even new to Rust in general * Intermediate - More challenging, likely involves some non-trivial design decisions and/or knowledge of Criterion.rs' internals * Bigger Project - Large and/or complex project such as designing a complex new feature Additionally, there are a few other noteworthy labels: * Breaking Change - Fixing this will have to wait until the next breaking-change release * Bug - Something isn't working right * Enhancement - Request to add a new feature or otherwise improve Criterion.rs in some way ## Code of Conduct We follow the [Rust Code of Conduct](http://www.rust-lang.org/conduct.html). 
criterion.rs-0.3.6/Cargo.toml000066400000000000000000000063721426140671200161040ustar00rootroot00000000000000[package] authors = [ "Jorge Aparicio ", "Brook Heisler ", ] name = "criterion" version = "0.3.6" edition = "2018" description = "Statistics-driven micro-benchmarking library" homepage = "https://bheisler.github.io/criterion.rs/book/index.html" repository = "https://github.com/bheisler/criterion.rs" readme = "README.md" keywords = ["criterion", "benchmark"] categories = ["development-tools::profiling"] license = "Apache-2.0/MIT" exclude = ["book/*"] [dependencies] lazy_static = "1.4" criterion-plot = { path = "plot", version = "0.4.4" } itertools = "0.10" serde = "1.0" serde_json = "1.0" serde_derive = "1.0" serde_cbor = "0.11" atty = "~0.2.6" clap = { version = "2.34", default-features = false } csv = "1.1" walkdir = "2.3" tinytemplate = "1.1" cast = "0.3" num-traits = { version = "0.2", default-features = false } oorandom = "11.1" rayon = "1.3" regex = { version = "1.3", default-features = false, features = ["std"] } futures = { version = "0.3", default_features = false, optional = true } smol = { version = "1.2", default-features = false, optional = true } tokio = { version = "1.0", default-features = false, features = ["rt"], optional = true } async-std = { version = "1.9", optional = true } [dependencies.plotters] version = "^0.3.1" default-features = false features = ["svg_backend", "area_series", "line_series"] [dev-dependencies] tempfile = "3.2.0" approx = "0.5.0" quickcheck = { version = "1.0", default-features = false } rand = "0.8" futures = { version = "0.3", default_features = false, features = ["executor"] } [badges] maintenance = { status = "passively-maintained" } [features] stable = ["async_futures", "async_smol", "async_tokio", "async_std"] default = ["cargo_bench_support"] # Enable use of the nightly-only test::black_box function to discourage compiler optimizations. real_blackbox = [] # Enable async/await support async = ["futures"] # These features enable built-in support for running async benchmarks on each different async # runtime. async_futures = ["futures/executor", "async"] async_smol = ["smol", "async"] async_tokio = ["tokio", "async"] async_std = ["async-std", "async"] # This feature _currently_ does nothing except disable a warning message, but in 0.4.0 it will be # required in order to have Criterion.rs generate its own plots (as opposed to using cargo-criterion) html_reports = [] # This feature _currently_ does nothing except disable a warning message, but in 0.4.0 it will be # required in order to have Criterion.rs be usable outside of cargo-criterion. cargo_bench_support = [] # This feature _currently_ does nothing, but in 0.4.0 it will be # required in order to have Criterion.rs generate CSV files. This feature is deprecated in favor of # cargo-criterion's --message-format=json option. csv_output = [] [workspace] exclude = ["cargo-criterion"] [[bench]] name = "bench_main" harness = false [lib] bench = false # Enable all of the async runtimes for the docs.rs output [package.metadata.docs.rs] features = ["async_futures", "async_smol", "async_std", "async_tokio"] criterion.rs-0.3.6/LICENSE-APACHE000066400000000000000000000251371426140671200161000ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
criterion.rs-0.3.6/LICENSE-MIT000066400000000000000000000020421426140671200155760ustar00rootroot00000000000000Copyright (c) 2014 Jorge Aparicio Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. criterion.rs-0.3.6/README.md000066400000000000000000000144001426140671200154220ustar00rootroot00000000000000

# Criterion.rs

Statistics-driven Microbenchmarking in Rust
Getting Started | User Guide | Master API Docs | Released API Docs | Changelog
Criterion.rs helps you write fast code by detecting and measuring performance improvements or regressions, even small ones, quickly and accurately. You can optimize with confidence, knowing how each change affects the performance of your code. ## Table of Contents - [Table of Contents](#table-of-contents) - [Features](#features) - [Quickstart](#quickstart) - [Goals](#goals) - [Contributing](#contributing) - [Compatibility Policy](#compatibility-policy) - [Maintenance](#maintenance) - [License](#license) - [Related Projects](#related-projects) - [Criterion.rs Extensions](#criterionrs-extensions) ### Features - __Statistics__: Statistical analysis detects if, and by how much, performance has changed since the last benchmark run - __Charts__: Uses [gnuplot](http://www.gnuplot.info/) to generate detailed graphs of benchmark results - __Stable-compatible__: Benchmark your code without installing nightly Rust ### Quickstart In order to generate plots, you must have [gnuplot](http://www.gnuplot.info/) installed. See the gnuplot website for installation instructions. See [Compatibility Policy](#compatibility-policy) for details on the minimum supported Rust version. To start with Criterion.rs, add the following to your `Cargo.toml` file: ```toml [dev-dependencies] criterion = "0.3" [[bench]] name = "my_benchmark" harness = false ``` Next, define a benchmark by creating a file at `$PROJECT/benches/my_benchmark.rs` with the following contents: ```rust use criterion::{black_box, criterion_group, criterion_main, Criterion}; fn fibonacci(n: u64) -> u64 { match n { 0 => 1, 1 => 1, n => fibonacci(n-1) + fibonacci(n-2), } } fn criterion_benchmark(c: &mut Criterion) { c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20)))); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches); ``` Finally, run this benchmark with `cargo bench`. You should see output similar to the following: ``` Running target/release/deps/example-423eedc43b2b3a93 fib 20 time: [26.029 us 26.251 us 26.505 us] Found 11 outliers among 99 measurements (11.11%) 6 (6.06%) high mild 5 (5.05%) high severe ``` See the [Getting Started](https://bheisler.github.io/criterion.rs/book/getting_started.html) guide for more details. ### Goals The primary goal of Criterion.rs is to provide a powerful and statistically rigorous tool for measuring the performance of code, preventing performance regressions and accurately measuring optimizations. Additionally, it should be as programmer-friendly as possible and make it easy to create reliable, useful benchmarks, even for programmers without an advanced background in statistics. ### Contributing First, thank you for contributing. One great way to contribute to Criterion.rs is to use it for your own benchmarking needs and report your experiences, file and comment on issues, etc. Code or documentation improvements in the form of pull requests are also welcome. If you're not sure what to work on, try checking the [Beginner label](https://github.com/bheisler/criterion.rs/issues?q=is%3Aissue+is%3Aopen+label%3ABeginner). If your issues or pull requests have no response after a few days, feel free to ping me (@bheisler). For more details, see the [CONTRIBUTING.md file](https://github.com/bheisler/criterion.rs/blob/master/CONTRIBUTING.md). ### Compatibility Policy Criterion.rs supports the last three stable minor releases of Rust. At time of writing, this means Rust 1.50 or later. Older versions may work, but are not guaranteed. 
Currently, the oldest version of Rust believed to work is 1.49. Future versions of Criterion.rs may break support for such old versions, and this will not be considered a breaking change. If you require Criterion.rs to work on old versions of Rust, you will need to stick to a specific patch version of Criterion.rs. ### Maintenance Criterion.rs was originally created by Jorge Aparicio (@japaric) and is currently being maintained by Brook Heisler (@bheisler). ### License Criterion.rs is dual licensed under the Apache 2.0 license and the MIT license. ### Related Projects - [bencher](https://github.com/bluss/bencher) - A port of the libtest benchmark runner to stable Rust - [criterion](http://www.serpentine.com/criterion/) - The Haskell microbenchmarking library that inspired Criterion.rs - [cargo-benchcmp](https://github.com/BurntSushi/cargo-benchcmp) - Cargo subcommand to compare the output of two libtest or bencher benchmark runs - [cargo-flamegraph](https://github.com/ferrous-systems/flamegraph) - Cargo subcommand to profile an executable and produce a flamegraph ### Criterion.rs Extensions - [criterion-cycles-per-byte](https://crates.io/crates/criterion-cycles-per-byte) - A custom-measurement plugin that counts the number of CPU cycles used by the benchmark - [criterion-perf-events](https://crates.io/crates/criterion-perf-events) - A custom-measurement plugin that counts perf events created by the benchmark criterion.rs-0.3.6/appveyor.yml000066400000000000000000000016611426140671200165400ustar00rootroot00000000000000environment: matrix: - TARGET: x86_64-pc-windows-msvc GNUPLOT: yes - TARGET: x86_64-pc-windows-msvc GNUPLOT: no cache: - 'C:\Users\appveyor\.cargo' install: - curl -sSf -o rustup-init.exe https://win.rustup.rs/ - rustup-init.exe -y --default-host %TARGET% --default-toolchain stable - SET PATH=%PATH%;C:\Users\appveyor\.cargo\bin - rustc -Vv - cargo -V - ps: if (${env:GNUPLOT} -eq "yes") { Start-FileDownload "https://sourceforge.net/projects/gnuplot/files/gnuplot/4.6.7/gp467-win64-setup.exe"; } - if %GNUPLOT%==yes gp467-win64-setup.exe /VERYSILENT /NORESTART - if %GNUPLOT%==yes SET PATH=%PATH%;C:\Program Files\gnuplot\bin build: false test_script: - cargo build --release - cargo test --all --release - cargo build --benches --all --release # Disable benchmarking until performance can be improved. 
# - cargo bench - cargo doc --release --all --no-deps branches: only: - master criterion.rs-0.3.6/bencher_compat/000077500000000000000000000000001426140671200171155ustar00rootroot00000000000000criterion.rs-0.3.6/bencher_compat/CONTRIBUTING.md000077700000000000000000000000001426140671200240052../CONTRIBUTING.mdustar00rootroot00000000000000criterion.rs-0.3.6/bencher_compat/Cargo.toml000066400000000000000000000012521426140671200210450ustar00rootroot00000000000000[package] name = "criterion_bencher_compat" version = "0.3.4" authors = ["Brook Heisler "] edition = "2018" description = "Drop-in replacement for commonly-used parts of Bencher" homepage = "https://bheisler.github.io/criterion.rs/book/index.html" repository = "https://github.com/bheisler/criterion.rs" readme = "README.md" keywords = ["criterion", "benchmark"] categories = ["development-tools::profiling"] license = "Apache-2.0/MIT" [dependencies] criterion = { version = "0.3.4", path = "..", default-features = false } [features] real_blackbox = ["criterion/real_blackbox"] default = [] [[bench]] name = "bencher_example" harness = false [workspace]criterion.rs-0.3.6/bencher_compat/LICENSE-APACHE000077700000000000000000000000001426140671200231732../LICENSE-APACHEustar00rootroot00000000000000criterion.rs-0.3.6/bencher_compat/LICENSE-MIT000077700000000000000000000000001426140671200224132../LICENSE-MITustar00rootroot00000000000000criterion.rs-0.3.6/bencher_compat/README.md000066400000000000000000000020351426140671200203740ustar00rootroot00000000000000# `criterion-bencher-compat` This crate is a shim that can be used to easily convert most `bencher` benchmarks to [Criterion.rs] benchmarks. ## License This project is licensed under either of * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) ([LICENSE-APACHE](LICENSE-APACHE)) * [MIT License](http://opensource.org/licenses/MIT) ([LICENSE-MIT](LICENSE-MIT)) at your option. ## Contributing We welcome all people who want to contribute. Please see the [contributing instructions] for more information. Contributions in any form (issues, pull requests, etc.) to this project must adhere to Rust's [Code of Conduct]. Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
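## Example

To make the conversion described at the top of this README concrete, here is a minimal sketch of a converted benchmark, modelled on the `bencher_example` benchmark shipped with this crate (the function name and body are illustrative):

```rust
#[macro_use]
extern crate criterion_bencher_compat;
use criterion_bencher_compat::Bencher;

// A bencher-style benchmark function: the signature is unchanged from the
// `bencher` crate, only the crate being imported changes.
fn sum_1000(bench: &mut Bencher) {
    bench.iter(|| (0..1000).fold(0, |x, y| x + y));
}

benchmark_group!(benches, sum_1000);
benchmark_main!(benches);
```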
[Code of Conduct]: https://www.rust-lang.org/en-US/conduct.html [Criterion.rs]: https://github.com/bheisler/criterion.rs [contributing instructions]: CONTRIBUTING.md criterion.rs-0.3.6/bencher_compat/benches/000077500000000000000000000000001426140671200205245ustar00rootroot00000000000000criterion.rs-0.3.6/bencher_compat/benches/bencher_example.rs000066400000000000000000000006261426140671200242170ustar00rootroot00000000000000#[macro_use] extern crate criterion_bencher_compat; use criterion_bencher_compat::Bencher; fn a(bench: &mut Bencher) { bench.iter(|| { (0..1000).fold(0, |x, y| x + y) }) } fn b(bench: &mut Bencher) { const N: usize = 1024; bench.iter(|| { vec![0u8; N] }); bench.bytes = N as u64; } benchmark_group!(benches, a, b); benchmark_main!(benches);criterion.rs-0.3.6/bencher_compat/src/000077500000000000000000000000001426140671200177045ustar00rootroot00000000000000criterion.rs-0.3.6/bencher_compat/src/lib.rs000066400000000000000000000034431426140671200210240ustar00rootroot00000000000000extern crate criterion; pub use criterion::Criterion; pub use criterion::black_box; use criterion::measurement::WallTime; /// Stand-in for `bencher::Bencher` which uses Criterion.rs to perform the benchmark instead. pub struct Bencher<'a, 'b> { pub bytes: u64, pub bencher: &'a mut ::criterion::Bencher<'b, WallTime>, } impl<'a, 'b> Bencher<'a, 'b> { /// Callback for benchmark functions to run to perform the benchmark pub fn iter(&mut self, inner: F) where F: FnMut() -> T { self.bencher.iter(inner); } } /// Stand-in for `bencher::benchmark_group!` which performs benchmarks using Criterion.rs instead. #[macro_export] macro_rules! benchmark_group { ($group_name:ident, $($function:path),+) => { pub fn $group_name() { use $crate::Criterion; let mut criterion: Criterion = Criterion::default().configure_from_args(); $( criterion.bench_function(stringify!($function), |b| { let mut wrapped = $crate::Bencher { bytes: 0, bencher: b, }; $function(&mut wrapped); }); )+ } }; ($group_name:ident, $($function:path,)+) => { benchmark_group!($group_name, $($function),+); }; } /// Stand-in for `bencher::benchmark_main!` which performs benchmarks using Criterion.rs instead. #[macro_export] macro_rules! benchmark_main { ($($group_name:path),+) => { fn main() { $( $group_name(); )+ $crate::Criterion::default() .configure_from_args() .final_summary(); } }; ($($group_name:path,)+) => { benchmark_main!($($group_name),+); }; }criterion.rs-0.3.6/benches/000077500000000000000000000000001426140671200155535ustar00rootroot00000000000000criterion.rs-0.3.6/benches/bench_main.rs000066400000000000000000000010641426140671200202050ustar00rootroot00000000000000use criterion::criterion_main; mod benchmarks; criterion_main! 
{ benchmarks::compare_functions::fibonaccis, benchmarks::external_process::benches, benchmarks::iter_with_large_drop::benches, benchmarks::iter_with_large_setup::benches, benchmarks::iter_with_setup::benches, benchmarks::with_inputs::benches, benchmarks::special_characters::benches, benchmarks::measurement_overhead::benches, benchmarks::custom_measurement::benches, benchmarks::sampling_mode::benches, benchmarks::async_measurement_overhead::benches, } criterion.rs-0.3.6/benches/benchmarks/000077500000000000000000000000001426140671200176705ustar00rootroot00000000000000criterion.rs-0.3.6/benches/benchmarks/async_measurement_overhead.rs000066400000000000000000000035751426140671200256470ustar00rootroot00000000000000use criterion::{async_executor::FuturesExecutor, criterion_group, BatchSize, Criterion}; fn some_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("async overhead"); group.bench_function("iter", |b| b.to_async(FuturesExecutor).iter(|| async { 1 })); group.bench_function("iter_with_setup", |b| { b.to_async(FuturesExecutor) .iter_with_setup(|| (), |_| async { 1 }) }); group.bench_function("iter_with_large_setup", |b| { b.to_async(FuturesExecutor) .iter_with_large_setup(|| (), |_| async { 1 }) }); group.bench_function("iter_with_large_drop", |b| { b.to_async(FuturesExecutor) .iter_with_large_drop(|| async { 1 }) }); group.bench_function("iter_batched_small_input", |b| { b.to_async(FuturesExecutor) .iter_batched(|| (), |_| async { 1 }, BatchSize::SmallInput) }); group.bench_function("iter_batched_large_input", |b| { b.to_async(FuturesExecutor) .iter_batched(|| (), |_| async { 1 }, BatchSize::LargeInput) }); group.bench_function("iter_batched_per_iteration", |b| { b.to_async(FuturesExecutor) .iter_batched(|| (), |_| async { 1 }, BatchSize::PerIteration) }); group.bench_function("iter_batched_ref_small_input", |b| { b.to_async(FuturesExecutor) .iter_batched_ref(|| (), |_| async { 1 }, BatchSize::SmallInput) }); group.bench_function("iter_batched_ref_large_input", |b| { b.to_async(FuturesExecutor) .iter_batched_ref(|| (), |_| async { 1 }, BatchSize::LargeInput) }); group.bench_function("iter_batched_ref_per_iteration", |b| { b.to_async(FuturesExecutor).iter_batched_ref( || (), |_| async { 1 }, BatchSize::PerIteration, ) }); group.finish(); } criterion_group!(benches, some_benchmark); criterion.rs-0.3.6/benches/benchmarks/compare_functions.rs000066400000000000000000000041321426140671200237540ustar00rootroot00000000000000#![allow(deprecated)] use criterion::{criterion_group, BenchmarkId, Criterion, Fun, ParameterizedBenchmark}; fn fibonacci_slow(n: u64) -> u64 { match n { 0 | 1 => 1, n => fibonacci_slow(n - 1) + fibonacci_slow(n - 2), } } fn fibonacci_fast(n: u64) -> u64 { let mut a = 0; let mut b = 1; match n { 0 => b, _ => { for _ in 0..n { let c = a + b; a = b; b = c; } b } } } fn compare_fibonaccis(c: &mut Criterion) { let fib_slow = Fun::new("Recursive", |b, i| b.iter(|| fibonacci_slow(*i))); let fib_fast = Fun::new("Iterative", |b, i| b.iter(|| fibonacci_fast(*i))); let functions = vec![fib_slow, fib_fast]; c.bench_functions("Fibonacci", functions, 20); } fn compare_fibonaccis_builder(c: &mut Criterion) { c.bench( "Fibonacci2", ParameterizedBenchmark::new( "Recursive", |b, i| b.iter(|| fibonacci_slow(*i)), vec![20u64, 21u64], ) .with_function("Iterative", |b, i| b.iter(|| fibonacci_fast(*i))), ); } fn compare_fibonaccis_group(c: &mut Criterion) { let mut group = c.benchmark_group("Fibonacci3"); for i in 20..=21 { group.bench_with_input(BenchmarkId::new("Recursive", i), 
&i, |b, i| { b.iter(|| fibonacci_slow(*i)) }); group.bench_with_input(BenchmarkId::new("Iterative", i), &i, |b, i| { b.iter(|| fibonacci_fast(*i)) }); } group.finish() } fn compare_looped(c: &mut Criterion) { use criterion::black_box; c.bench( "small", ParameterizedBenchmark::new("unlooped", |b, i| b.iter(|| i + 10), vec![10]).with_function( "looped", |b, i| { b.iter(|| { for _ in 0..10_000 { black_box(i + 10); } }) }, ), ); } criterion_group!( fibonaccis, compare_fibonaccis, compare_fibonaccis_builder, compare_fibonaccis_group, compare_looped ); criterion.rs-0.3.6/benches/benchmarks/custom_measurement.rs000066400000000000000000000062171426140671200241630ustar00rootroot00000000000000use criterion::{ black_box, criterion_group, measurement::{Measurement, ValueFormatter}, Criterion, Throughput, }; use std::time::{Duration, Instant}; struct HalfSecFormatter; impl ValueFormatter for HalfSecFormatter { fn format_value(&self, value: f64) -> String { // The value will be in nanoseconds so we have to convert to half-seconds. format!("{} s/2", value * 2f64 * 10f64.powi(-9)) } fn format_throughput(&self, throughput: &Throughput, value: f64) -> String { match *throughput { Throughput::Bytes(bytes) => { format!("{} b/s/2", (bytes as f64) / (value * 2f64 * 10f64.powi(-9))) } Throughput::Elements(elems) => format!( "{} elem/s/2", (elems as f64) / (value * 2f64 * 10f64.powi(-9)) ), } } fn scale_values(&self, _typical: f64, values: &mut [f64]) -> &'static str { for val in values { *val *= 2f64 * 10f64.powi(-9); } "s/2" } fn scale_throughputs( &self, _typical: f64, throughput: &Throughput, values: &mut [f64], ) -> &'static str { match *throughput { Throughput::Bytes(bytes) => { for val in values { *val = (bytes as f64) / (*val * 2f64 * 10f64.powi(-9)) } "b/s/2" } Throughput::Elements(elems) => { for val in values { *val = (elems as f64) / (*val * 2f64 * 10f64.powi(-9)) } "elem/s/2" } } } fn scale_for_machines(&self, values: &mut [f64]) -> &'static str { for val in values { *val *= 2f64 * 10f64.powi(-9); } "s/2" } } const NANOS_PER_SEC: u64 = 1_000_000_000; /// Silly "measurement" that is really just wall-clock time reported in half-seconds. struct HalfSeconds; impl Measurement for HalfSeconds { type Intermediate = Instant; type Value = Duration; fn start(&self) -> Self::Intermediate { Instant::now() } fn end(&self, i: Self::Intermediate) -> Self::Value { i.elapsed() } fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { *v1 + *v2 } fn zero(&self) -> Self::Value { Duration::from_secs(0) } fn to_f64(&self, val: &Self::Value) -> f64 { let nanos = val.as_secs() * NANOS_PER_SEC + u64::from(val.subsec_nanos()); nanos as f64 } fn formatter(&self) -> &dyn ValueFormatter { &HalfSecFormatter } } fn fibonacci_slow(n: u64) -> u64 { match n { 0 | 1 => 1, n => fibonacci_slow(n - 1) + fibonacci_slow(n - 2), } } fn fibonacci_cycles(criterion: &mut Criterion) { criterion.bench_function("fibonacci_custom_measurement", |bencher| { bencher.iter(|| fibonacci_slow(black_box(10))) }); } fn alternate_measurement() -> Criterion { Criterion::default().with_measurement(HalfSeconds) } criterion_group! 
{ name = benches; config = alternate_measurement(); targets = fibonacci_cycles } criterion.rs-0.3.6/benches/benchmarks/external_process.py000066400000000000000000000011331426140671200236200ustar00rootroot00000000000000import time import sys def fibonacci(n): if n == 0 or n == 1: return 1 return fibonacci(n - 1) + fibonacci(n - 2) MILLIS = 1000 MICROS = MILLIS * 1000 NANOS = MICROS * 1000 def benchmark(): depth = int(sys.argv[1]) for line in sys.stdin: iters = int(line.strip()) # Setup start = time.perf_counter() for x in range(iters): fibonacci(depth) end = time.perf_counter() # Teardown delta = end - start nanos = int(delta * NANOS) print("%d" % nanos) sys.stdout.flush() benchmark() criterion.rs-0.3.6/benches/benchmarks/external_process.rs000066400000000000000000000034061426140671200236210ustar00rootroot00000000000000use criterion::{criterion_group, Criterion}; use std::{ io::{BufRead, BufReader, Write}, process::{Command, Stdio}, str::FromStr, time::Duration, }; fn create_command() -> Command { let mut command = Command::new("python3"); command .arg("benches/benchmarks/external_process.py") .arg("10"); command } #[allow(deprecated)] fn python_fibonacci(c: &mut Criterion) { let has_python3 = Command::new("python3") .arg("--version") .stdout(Stdio::null()) .stderr(Stdio::null()) .output() .is_ok(); if has_python3 { let process = create_command() .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .expect("Unable to start python process"); let mut stdin = process .stdin .expect("Unable to get stdin for child process"); let stdout = process .stdout .expect("Unable to get stdout for child process"); let mut stdout = BufReader::new(stdout); c.bench_function("fibonacci-python", |b| { b.iter_custom(|iters| { writeln!(stdin, "{}", iters) .expect("Unable to send iteration count to child process"); let mut line = String::new(); stdout .read_line(&mut line) .expect("Unable to read time from child process"); let nanoseconds: u64 = u64::from_str(line.trim()).expect("Unable to parse time from child process"); Duration::from_nanos(nanoseconds) }) }); // Ensure that your child process terminates itself gracefully! } } criterion_group!(benches, python_fibonacci); criterion.rs-0.3.6/benches/benchmarks/iter_with_large_drop.rs000066400000000000000000000015301426140671200244310ustar00rootroot00000000000000#![allow(deprecated)] use criterion::{criterion_group, Benchmark, Criterion, Throughput}; use std::time::Duration; const SIZE: usize = 1024 * 1024; fn large_drop(c: &mut Criterion) { c.bench( "iter_with_large_drop", Benchmark::new("large_drop", |b| { let v: Vec<_> = (0..SIZE).map(|i| i as u8).collect(); b.iter_with_large_drop(|| v.clone()); }) .throughput(Throughput::Bytes(SIZE as u64)), ); } fn small_drop(c: &mut Criterion) { c.bench( "iter_with_large_drop", Benchmark::new("small_drop", |b| { b.iter_with_large_drop(|| SIZE); }), ); } fn short_warmup() -> Criterion { Criterion::default().warm_up_time(Duration::new(1, 0)) } criterion_group! { name = benches; config = short_warmup(); targets = large_drop, small_drop } criterion.rs-0.3.6/benches/benchmarks/iter_with_large_setup.rs000066400000000000000000000017771426140671200246420ustar00rootroot00000000000000#![allow(deprecated)] use criterion::{criterion_group, Benchmark, Criterion, Throughput}; use std::time::Duration; const SIZE: usize = 1024 * 1024; fn large_setup(c: &mut Criterion) { c.bench( "iter_with_large_setup", Benchmark::new("large_setup", |b| { // NOTE: iter_with_large_setup is deprecated. Use iter_batched instead. 
b.iter_with_large_setup(|| (0..SIZE).map(|i| i as u8).collect::<Vec<u8>>(), |v| v) }) .throughput(Throughput::Bytes(SIZE as u64)), ); } fn small_setup(c: &mut Criterion) { c.bench( "iter_with_large_setup", Benchmark::new("small_setup", |b| { // NOTE: iter_with_large_setup is deprecated. Use iter_batched instead. b.iter_with_large_setup(|| SIZE, |size| size) }), ); } fn short_warmup() -> Criterion { Criterion::default().warm_up_time(Duration::new(1, 0)) } criterion_group! { name = benches; config = short_warmup(); targets = large_setup, small_setup } criterion.rs-0.3.6/benches/benchmarks/iter_with_setup.rs000066400000000000000000000004351426140671200234560ustar00rootroot00000000000000use criterion::{criterion_group, Criterion}; const SIZE: usize = 1024 * 1024; fn setup(c: &mut Criterion) { c.bench_function("iter_with_setup", |b| { b.iter_with_setup(|| (0..SIZE).map(|i| i as u8).collect::<Vec<u8>>(), |v| v) }); } criterion_group!(benches, setup); criterion.rs-0.3.6/benches/benchmarks/measurement_overhead.rs000066400000000000000000000024671426140671200244510ustar00rootroot00000000000000use criterion::{criterion_group, BatchSize, Criterion}; fn some_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("overhead"); group.bench_function("iter", |b| b.iter(|| 1)); group.bench_function("iter_with_setup", |b| b.iter_with_setup(|| (), |_| 1)); group.bench_function("iter_with_large_setup", |b| { b.iter_with_large_setup(|| (), |_| 1) }); group.bench_function("iter_with_large_drop", |b| b.iter_with_large_drop(|| 1)); group.bench_function("iter_batched_small_input", |b| { b.iter_batched(|| (), |_| 1, BatchSize::SmallInput) }); group.bench_function("iter_batched_large_input", |b| { b.iter_batched(|| (), |_| 1, BatchSize::LargeInput) }); group.bench_function("iter_batched_per_iteration", |b| { b.iter_batched(|| (), |_| 1, BatchSize::PerIteration) }); group.bench_function("iter_batched_ref_small_input", |b| { b.iter_batched_ref(|| (), |_| 1, BatchSize::SmallInput) }); group.bench_function("iter_batched_ref_large_input", |b| { b.iter_batched_ref(|| (), |_| 1, BatchSize::LargeInput) }); group.bench_function("iter_batched_ref_per_iteration", |b| { b.iter_batched_ref(|| (), |_| 1, BatchSize::PerIteration) }); group.finish(); } criterion_group!(benches, some_benchmark); criterion.rs-0.3.6/benches/benchmarks/mod.rs000066400000000000000000000010611426140671200210130ustar00rootroot00000000000000pub mod compare_functions; pub mod custom_measurement; pub mod external_process; pub mod iter_with_large_drop; pub mod iter_with_large_setup; pub mod iter_with_setup; pub mod measurement_overhead; pub mod sampling_mode; pub mod special_characters; pub mod with_inputs; #[cfg(feature = "async_futures")] pub mod async_measurement_overhead; #[cfg(not(feature = "async_futures"))] pub mod async_measurement_overhead { use criterion::{criterion_group, Criterion}; fn some_benchmark(_c: &mut Criterion) {} criterion_group!(benches, some_benchmark); } criterion.rs-0.3.6/benches/benchmarks/sampling_mode.rs000066400000000000000000000014151426140671200230550ustar00rootroot00000000000000use criterion::{criterion_group, Criterion, SamplingMode}; use std::thread::sleep; use std::time::Duration; fn sampling_mode_tests(c: &mut Criterion) { let mut group = c.benchmark_group("sampling_mode"); group.sampling_mode(SamplingMode::Auto); group.bench_function("Auto", |bencher| { bencher.iter(|| sleep(Duration::from_millis(0))) }); group.sampling_mode(SamplingMode::Linear); group.bench_function("Linear", |bencher| { bencher.iter(||
sleep(Duration::from_millis(0))) }); group.sampling_mode(SamplingMode::Flat); group.bench_function("Flat", |bencher| { bencher.iter(|| sleep(Duration::from_millis(10))) }); group.finish(); } criterion_group!(benches, sampling_mode_tests,); criterion.rs-0.3.6/benches/benchmarks/special_characters.rs000066400000000000000000000004221426140671200240530ustar00rootroot00000000000000use criterion::{criterion_group, Criterion}; fn some_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("\"*group/\""); group.bench_function("\"*benchmark/\" '", |b| b.iter(|| 1 + 1)); group.finish(); } criterion_group!(benches, some_benchmark); criterion.rs-0.3.6/benches/benchmarks/with_inputs.rs000066400000000000000000000010621426140671200226120ustar00rootroot00000000000000use std::iter; use criterion::{criterion_group, BenchmarkId, Criterion, Throughput}; fn from_elem(c: &mut Criterion) { static KB: usize = 1024; let mut group = c.benchmark_group("from_elem"); for size in [KB, 2 * KB, 4 * KB, 8 * KB, 16 * KB].iter() { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { b.iter(|| iter::repeat(0u8).take(size).collect::<Vec<u8>>()); }); } group.finish(); } criterion_group!(benches, from_elem); criterion.rs-0.3.6/book/000077500000000000000000000000001426140671200150765ustar00rootroot00000000000000criterion.rs-0.3.6/book/.gitignore000066400000000000000000000000061426140671200170620ustar00rootroot00000000000000/book criterion.rs-0.3.6/book/book.toml000066400000000000000000000003561426140671200167310ustar00rootroot00000000000000[book] title = "Criterion.rs Documentation" description = "User Guide and Other Prose Documentation For Criterion.rs" author = "Brook Heisler" [output.html] [output.linkcheck] #follow-web-links = true exclude = [ 'crates\.io' ]criterion.rs-0.3.6/book/src/000077500000000000000000000000001426140671200156655ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/SUMMARY.md000066400000000000000000000031141426140671200173430ustar00rootroot00000000000000# Summary - [Criterion.rs](./criterion_rs.md) - [Getting Started](./getting_started.md) - [User Guide](./user_guide/user_guide.md) - [Migrating from libtest](./user_guide/migrating_from_libtest.md) - [Command-Line Output](./user_guide/command_line_output.md) - [Command-Line Options](./user_guide/command_line_options.md) - [HTML Report](./user_guide/html_report.md) - [Plots & Graphs](./user_guide/plots_and_graphs.md) - [Benchmarking With Inputs](./user_guide/benchmarking_with_inputs.md) - [Advanced Configuration](./user_guide/advanced_configuration.md) - [Comparing Functions](./user_guide/comparing_functions.md) - [CSV Output](./user_guide/csv_output.md) - [Known Limitations](./user_guide/known_limitations.md) - [Bencher Compatibility Layer](./user_guide/bencher_compatibility.md) - [Timing Loops](./user_guide/timing_loops.md) - [Custom Measurements](./user_guide/custom_measurements.md) - [Profiling](./user_guide/profiling.md) - [Custom Test Framework](./user_guide/custom_test_framework.md) - [Benchmarking async functions](./user_guide/benchmarking_async.md) - [cargo-criterion](./cargo_criterion/cargo_criterion.md) - [Configuring cargo-criterion](./cargo_criterion/configuring_cargo_criterion.md) - [External Tools](./cargo_criterion/external_tools.md) - [Iai](./iai/iai.md) - [Getting Started with Iai](./iai/getting_started.md) - [Comparison to Criterion.rs](./iai/comparison.md) - [Analysis Process](./analysis.md) - [Frequently Asked Questions](./faq.md) - [Migrating from 0.2.*
to 0.3.*](./migrating_0_2_to_0_3.md)criterion.rs-0.3.6/book/src/analysis.md000066400000000000000000000156631426140671200200450ustar00rootroot00000000000000# Analysis Process # This page details the data collection and analysis process used by Criterion.rs. This is a bit more advanced than the user guide; it is assumed the reader is somewhat familiar with statistical concepts. In particular, the reader should know what bootstrap sampling means. So, without further ado, let's start with a general overview. Each benchmark in Criterion.rs goes through four phases: * Warmup - The routine is executed repeatedly to fill the CPU and OS caches and (if applicable) give the JIT time to compile the code * Measurement - The routine is executed repeatedly and the execution times are recorded * Analysis - The recorded samples are analyzed and distilled into meaningful statistics, which are then reported to the user * Comparison - The performance of the current run is compared to the stored data from the last run to determine whether it has changed, and if so by how much ## Warmup ## The first step in the process is warmup. In this phase, the routine is executed repeatedly to give the OS, CPU and JIT time to adapt to the new workload. This helps prevent things like cold caches and JIT compilation time from throwing off the measurements later. The warmup period is controlled by the `warm_up_time` value in the Criterion struct. The warmup period is quite simple. The routine is executed once, then twice, four times and so on until the total accumulated execution time is greater than the configured warm up time. The number of iterations that were completed during this period is recorded, along with the elapsed time. ## Measurement ## The measurement phase is when Criterion.rs collects the performance data that will be analyzed and used in later stages. This phase is mainly controlled by the `measurement_time` value in the Criterion struct. The measurements are done in a number of samples (see the `sample_size` parameter). Each sample consists of one or more (typically many) iterations of the routine. The elapsed time between the beginning and the end of the iterations, divided by the number of iterations, gives an estimate of the time taken by each iteration. As measurement progresses, the sample iteration counts are increased. Suppose that the first sample contains 10 iterations. The second sample will contain 20, the third will contain 30 and so on. More formally, the iteration counts are calculated like so: `iterations = [d, 2d, 3d, ... Nd]` Where `N` is the total number of samples and `d` is a factor, calculated from the rough estimate of iteration time measured during the warmup period, which is used to scale the number of iterations to meet the configured measurement time. Note that `d` cannot be less than 1, and therefore the actual measurment time may exceed the configured measurement time if the iteration time is large or the configured measurement time is small. Note that Criterion.rs does not measure each individual iteration, only the complete sample. The resulting samples are stored for use in later stages. The sample data is also written to the local disk so that it can be used in the comparison phase of future benchmark runs. ## Analysis ## During this phase Criterion.rs calculates useful statistics from the samples collected during the measurement phase. ### Outlier Classification ### The first step in analysis is outlier classification. 
Each sample is classified using a modified version of Tukey's Method, which will be summarized here. First, the interquartile range (IQR) is calculated from the difference between the 25th and 75th percentile. In Tukey's Method, values less than (25th percentile - 1.5 * IQR) or greater than (75th percentile + 1.5 * IQR) are considered outliers. Criterion.rs creates additional fences at (25pct - 3 * IQR) and (75pct + 3 * IQR); values outside that range are considered severe outliers. Outlier classification is important because the analysis method used to estimate the average iteration time is sensitive to outliers. Thus, when Criterion.rs detects outliers, a warning is printed to inform the user that the benchmark may be less reliable. Additionally, a plot is generated showing which data points are considered outliers, where the fences are, etc. Note, however, that outlier samples are _not_ dropped from the data, and are used in the following analysis steps along with all other samples. ### Linear Regression ### The samples collected from a good benchmark should form a rough line when plotted on a chart showing the number of iterations and the time for each sample. The slope of that line gives an estimate of the time per iteration. A single estimate is difficult to interpret, however, since it contains no context. A confidence interval is generally more helpful. In order to generate a confidence interval, a large number of bootstrap samples are generated from the measured samples. A line is fitted to each of the bootstrap samples, and the result is a statistical distribution of slopes that gives a reliable confidence interval around the single estimate calculated from the measured samples. This resampling process is repeated to generate the mean, standard deviation, median and median absolute deviation of the measured iteration times as well. All of this information is printed to the user and charts are generated. Finally, if there are saved statistics from a previous run, the two benchmark runs are compared. ## Comparison ## In the comparison phase, the statistics calculated from the current benchmark run are compared against those saved by the previous run to determine if the performance has changed in the meantime, and if so, by how much. Once again, Criterion.rs generates many bootstrap samples, based on the measured samples from the two runs. The new and old bootstrap samples are compared and their T score is calculated using a T-test. The fraction of the bootstrapped T scores which are more extreme than the T score calculated by comparing the two measured samples gives the probability that the observed difference between the two sets of samples is merely by chance. Thus, if that probability is very low or zero, Criterion.rs can be confident that there is truly a difference in execution time between the two samples. In that case, the mean and median differences are bootstrapped and printed for the user, and the entire process begins again with the next benchmark. This process can be extremely sensitive to changes, especially when combined with a small, highly deterministic benchmark routine. In these circumstances even very small changes (eg. differences in the load from background processes) can change the measurements enough that the comparison process detects an optimization or regression. Since these sorts of unpredictable fluctuations are rarely of interest while benchmarking, there is also a configurable noise threshold. 
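To make the comparison step concrete, here is a rough sketch of how the bootstrapped p-value and the noise threshold combine into a final verdict. This is illustration only, not Criterion.rs' actual code; the function and parameter names are invented for the example, and the hard-coded values mirror Criterion.rs' defaults (0.05 significance level, 1% noise threshold), both of which are configurable.

```rust
/// Illustrative sketch only; the real logic lives inside Criterion.rs.
/// `p_value` is the probability (from the bootstrapped t-test described
/// above) that the observed difference is due to chance, and
/// `relative_change` is the measured difference between the two runs.
fn classify_change(p_value: f64, relative_change: f64) -> &'static str {
    let significance_level = 0.05; // assumed default
    let noise_threshold = 0.01; // assumed default (1%)

    if p_value >= significance_level {
        // The observed difference is plausibly just random noise.
        "no change detected"
    } else if relative_change.abs() < noise_threshold {
        // Statistically significant, but smaller than the noise threshold.
        "change within noise threshold; ignored"
    } else if relative_change < 0.0 {
        "performance has improved"
    } else {
        "performance has regressed"
    }
}
```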
Optimizations or regressions within (for example) +-1% are considered noise and ignored. It is best to benchmark on a quiet computer where possible to minimize this noise, but it is not always possible to eliminate it entirely.criterion.rs-0.3.6/book/src/cargo_criterion/000077500000000000000000000000001426140671200210365ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/cargo_criterion/cargo_criterion.md000066400000000000000000000023641426140671200245360ustar00rootroot00000000000000# cargo-criterion cargo-criterion is an experimental Cargo extension which can act as a replacement for `cargo bench`. The long-term goal for cargo-criterion is to handle all of the statistical analysis and report generation in a single tool. Then, the code for that can be removed from Criterion.rs (or made optional), reducing benchmark compilation and linking time. Since it manages the whole lifecycle of a benchmark run, `cargo-criterion` is also in a good position to provide features that would be difficult to implement in Criterion.rs itself. Currently, `cargo-criterion` provides most of the same features as running Criterion.rs benchmarks in `cargo bench`, but with some differences: * `cargo-criterion` does not currently support baselines * `cargo-criterion` is more configurable than Criterion.rs * `cargo-criterion` supports machine-readable output using `--message-format=json` `cargo-criterion` is stable, and you can install it with the following command: `cargo install cargo-criterion` Once installed, you can run your benchmarks with: `cargo criterion` If you encounter any issues or have any suggestions for future features, please raise an issue at [the GitHub repository](https://github.com/bheisler/cargo-criterion). criterion.rs-0.3.6/book/src/cargo_criterion/configuring_cargo_criterion.md000066400000000000000000000037671426140671200271400ustar00rootroot00000000000000# Configuring cargo-criterion cargo-criterion can be configured by placing a `Criterion.toml` file in your crate, alongside your `Cargo.toml`. The available settings are documented below: ```toml # This is used to override the directory where cargo-criterion saves # its data and generates reports. criterion_home = "./target/criterion" # This is used to configure the format of cargo-criterion's command-line output. # Options are: # criterion: Prints confidence intervals for measurement and throughput, and # indicates whether a change was detected from the previous run. The default. # quiet: Like criterion, but does not indicate changes. Useful for simply # presenting output numbers, eg. on a library's README. # verbose: Like criterion, but prints additional statistics. # bencher: Emulates the output format of the bencher crate and nightly-only # libtest benchmarks. output_format = "criterion" # This is used to configure the plotting backend used by cargo-criterion. # Options are "gnuplot" and "plotters", or "auto", which will use gnuplot if it's # available or plotters if it isn't. plotting_backend = "auto" # The colors table allows users to configure the colors used by the charts # cargo-criterion generates. [colors] # These are used in many charts to compare the current measurement against # the previous one. current_sample = {r = 31, g = 120, b = 180} previous_sample = {r = 7, g = 26, b = 28} # These are used by the full PDF chart to highlight which samples were outliers. 
not_an_outlier = {r = 31, g = 120, b = 180} mild_outlier = {r = 5, g = 127, b = 0} severe_outlier = {r = 7, g = 26, b = 28} # These are used for the line chart to compare multiple different functions. comparison_colors = [ {r = 8, g = 34, b = 34}, {r = 6, g = 139, b = 87}, {r = 0, g = 139, b = 139}, {r = 5, g = 215, b = 0}, {r = 0, g = 0, b = 139}, {r = 0, g = 20, b = 60}, {r = 9, g = 0, b = 139}, {r = 0, g = 255, b = 127}, ] ```criterion.rs-0.3.6/book/src/cargo_criterion/external_tools.md000066400000000000000000000101161426140671200244210ustar00rootroot00000000000000# External Tools cargo-criterion provides a machine-readable output stream which other tools can consume to collect information about the Criterion.rs benchmarks. To enable this output stream, pass the `--message-format` argument when running cargo-criterion. ## JSON messages When passing `--message-format=json` cargo-criterion will output information about: * Benchmarks, including the basic statistics about the measurements * Benchmark groups The output goes to stdout, with one JSON object per line. The `reason` field distinguishes different kinds of messages. Additional messages or fields may be added to the output in the future. ### Benchmark Complete Messages The "benchmark-complete" message includes the measurements and basic statistics from a single Criterion.rs benchmark. The message format is as follows: ```json { /* The "reason" indicates which kind of message this is. */ "reason": "benchmark-complete", /* The id is the identifier of this benchmark */ "id": "norm", /* Path to the directory containing the report for this benchmark */ "report_directory": "target/criterion/reports/norm", /* List of integer iteration counts */ "iteration_count": [ 30, /* ... */ 3000 ], /* List of floating point measurements (eg. time, CPU cycles) taken from the benchmark */ "measured_values": [ 124200.0, /* ... */ 9937100.0 ], /* The unit associated with measured_values. */ "unit": "ns", /* The throughput value associated with this benchmark. This can be used to calculate throughput rates, eg. in bytes or elements per second. */ "throughput": [ { "per_iteration": 1024, "unit": "elements" } ], /* Confidence intervals for the basic statistics that cargo-criterion computes. */ /* "typical" is either the slope (if available) or the mean (if not). It makes a good general-purpose estimate of the typical performance of a function. */ "typical": { "estimate": 3419.4923993891925, "lower_bound": 3375.24221103098, "upper_bound": 3465.458469579234, "unit": "ns" }, "mean": { "estimate": 3419.5340743105917, "lower_bound": 3374.4765622217083, "upper_bound": 3474.096214164006, "unit": "ns" }, "median": { "estimate": 3362.8249818445897, "lower_bound": 3334.259259259259, "upper_bound": 3387.5146198830407, "unit": "ns" }, "median_abs_dev": { "estimate": 130.7846461816652, "lower_bound": 96.55619525548211, "upper_bound": 161.1643711235156, "unit": "ns" }, /* Note that not all benchmarks can measure the slope, so it may be missing. 
*/ "slope": { "estimate": 3419.4923993891925, "lower_bound": 3375.24221103098, "upper_bound": 3465.458469579234, "unit": "ns" }, /* "change" contains some additional statistics about the difference between this run and the last */ "change": { /* Percentage differences in the mean & median values */ "mean": { "estimate": 0.014278477848724602, "lower_bound": -0.01790259435189548, "upper_bound": 0.03912764721581533, "unit": "%" }, "median": { "estimate": 0.012211662837601445, "lower_bound": -0.0005448009516478807, "upper_bound": 0.024243170768727857, "unit": "%" }, /* Indicates whether cargo-criterion found a statistically-significant change. Values are NoChange, Improved, or Regressed */ "change": "NoChange" } } ``` ### Group Complete Messages When a benchmark group is completed, cargo-criterion emits a "group-complete" message containing some information about the group. ```json { "reason": "group-complete", /* The name of the benchmark group */ "group_name": "throughput", /* List of the benchmark IDs in this group */ "benchmarks": [ "throughput/Bytes", "throughput/Bytes", "throughput/Elem" ], /* Path to the directory that contains the report for this group */ "report_directory": "target/criterion/reports/throughput" } ```criterion.rs-0.3.6/book/src/criterion_rs.md000066400000000000000000000027171426140671200207200ustar00rootroot00000000000000# Criterion.rs # Criterion.rs is a statistics-driven micro-benchmarking tool. It is a Rust port of [Haskell's Criterion](https://hackage.haskell.org/package/criterion) library. Criterion.rs benchmarks collect and store statistical information from run to run and can automatically detect performance regressions as well as measuring optimizations. Criterion.rs is free and open source. You can find the source on [GitHub](https://github.com/bheisler/criterion.rs). Issues and feature requests can be posted on [the issue tracker](https://github.com/bheisler/criterion.rs/issues). ## API Docs ## In addition to this book, you may also wish to read [the API documentation](http://bheisler.github.io/criterion.rs/criterion/). ## License ## Criterion.rs is dual-licensed under the [Apache 2.0](https://github.com/bheisler/criterion.rs/blob/master/LICENSE-APACHE) and the [MIT](https://github.com/bheisler/criterion.rs/blob/master/LICENSE-MIT) licenses. ## Debug Output ## To enable debug output in Criterion.rs, define the environment variable `CRITERION_DEBUG`. For example (in bash): ```bash CRITERION_DEBUG=1 cargo bench ``` This will enable extra debug output. If using gnuplot, Criterion.rs will also save the gnuplot scripts alongside the generated plot files. When raising issues with Criterion.rs (especially when reporting issues with the plot generation) please run your benchmarks with this option enabled and provide the additional output and relevant gnuplot scripts.criterion.rs-0.3.6/book/src/faq.md000066400000000000000000000333041426140671200167610ustar00rootroot00000000000000## Frequently Asked Questions ### How Should I Run Criterion.rs Benchmarks In A CI Pipeline? You probably shouldn't (or, if you do, don't rely on the results). The virtualization used by Cloud-CI providers like Travis-CI and Github Actions introduces a great deal of noise into the benchmarking process, and Criterion.rs' statistical analysis can only do so much to mitigate that. This can result in the appearance of large changes in the measured performance even if the actual performance of the code is not changing. A better alternative is to use [Iai](https://github.com/bheisler/iai) instead. 
Iai runs benchmarks inside Cachegrind to directly count the instructions and memory accesses. Iai's measurements won't be thrown off by the virtual machine slowing down or pausing for a time, so it should be more reliable in virtualized environments. Whichever benchmarking tool you use, though, the process is basically the same. You'll need to: * Check out the main branch of your code * Build it and run the benchmarks once, to establish a baseline * Then switch to the pull request branch * Build it again and run the benchmarks a second time to compare against the baseline. ### `cargo bench` Gives "Unrecognized Option" Errors for Valid Command-line Options By default, Cargo implicitly adds a `libtest` benchmark harness to your crate when benchmarking, to handle any `#[bench]` functions, even if you have none. It compiles and runs this executable first, before any of the other benchmarks. Normally, this is fine - it detects that there are no `libtest` benchmarks to execute and exits, allowing Cargo to move on to the real benchmarks. Unfortunately, it checks the command-line arguments first, and panics when it finds one it doesn't understand. This causes Cargo to stop benchmarking early, and it never executes the Criterion.rs benchmarks. This will occur when running `cargo bench` with any argument that Criterion.rs supports but `libtest` does not. For example, `--verbose` and `--save-baseline` will cause this issue, while `--help` will not. There are two ways to work around this at present: You could run only your Criterion benchmark, like so: `cargo bench --bench my_benchmark -- --verbose` Note that `my_benchmark` here corresponds to the name of your benchmark in your `Cargo.toml` file. Another option is to disable benchmarks for your lib or app crate. For example, for library crates, you could add this to your `Cargo.toml` file: ```toml [lib] bench = false ``` If your crate produces one or more binaries as well as a library, you may need to add additional records to `Cargo.toml` like this: ```toml [[bin]] name = "my-binary" path = "src/bin/my-binary.rs" bench = false ``` This is because Cargo automatically discovers some kinds of binaries and it will enable the default benchmark harness for these as well. Of course, this only works if you define all of your benchmarks in the `benches` directory. See [Rust Issue #47241](https://github.com/rust-lang/rust/issues/47241) for more details. ### How Should I Benchmark Small Functions? Exactly the same way as you would benchmark any other function. It is sometimes suggested that benchmarks of small (nanosecond-scale) functions should iterate the function to be benchmarked many times internally to reduce the impact of measurement overhead. This is _not_ required with Criterion.rs, and it is not recommended. To see this, consider the following benchmark: ```rust fn compare_small(c: &mut Criterion) { use criterion::black_box; let mut group = c.benchmark_group("small"); group.bench_with_input("unlooped", &10, |b, i| b.iter(|| i + 10)); group.bench_with_input("looped", &10, |b, i| b.iter(|| { for _ in 0..10000 { black_box(i + 10); } })); group.finish(); } ``` This benchmark simply adds two numbers - just about the smallest function that could be performed.
On my computer, this produces the following output: ``` small/unlooped time: [270.00 ps 270.78 ps 271.56 ps] Found 2 outliers among 100 measurements (2.00%) 2 (2.00%) high severe small/looped time: [2.7051 us 2.7142 us 2.7238 us] Found 5 outliers among 100 measurements (5.00%) 3 (3.00%) high mild 2 (2.00%) high severe ``` 2.714 microseconds/10000 gives 271.4 picoseconds, or pretty much the same result. Interestingly, this is slightly more than one cycle at my 4th-gen Core i7's maximum clock frequency of 4.4 GHz, which shows how good the pipelining is on modern CPUs. Regardless, Criterion.rs is able to accurately measure functions all the way down to single instructions. See the [Analysis Process](./analysis.md) page for more details on how Criterion.rs performs its measurements, or see the [Timing Loops](./user_guide/timing_loops.md) page for details on choosing a timing loop to minimize measurement overhead. ### When Should I Use `criterion::black_box`? `black_box` is a function which prevents certain compiler optimizations. Benchmarks are often slightly artificial in nature and the compiler can take advantage of that to generate faster code when compiling the benchmarks than it would in real usage. In particular, it is common for benchmarked functions to be called with constant parameters, and in some cases rustc can evaluate the function entirely at compile time and replace the function call with a constant. This can produce unnaturally fast benchmarks that don't represent how some code would perform when called normally. Therefore, it's useful to black-box the constant input to prevent this optimization. However, you might have a function which you expect to be called with one or more constant parameters. In this case, you might want to write your benchmark to represent that scenario instead, and allow the compiler to optimize the constant parameters. For the most part, Criterion.rs handles this for you - if you use parameterized benchmarks, the parameters are automatically black-boxed by Criterion.rs so you don't need to do anything. If you're writing an un-parameterized benchmark of a function that takes an argument, however, this may be worth considering. ### Cargo Prints a Warning About Explicit [[bench]] Sections in Cargo.toml Currently, Cargo treats any `*.rs` file in the `benches` directory as a benchmark, unless there are one or more `[[bench]]` sections in the `Cargo.toml` file. In that case, the auto-discovery is disabled entirely. In Rust 2018 edition, Cargo will be changed so that `[[bench]]` no longer disables the auto-discovery. If your `benches` directory contains source files that are not benchmarks, this could break your build when you update, as Cargo will attempt to compile them as benchmarks and fail. There are two ways to prevent this breakage from happening. You can explicitly turn off the autodiscovery like so: ```toml [package] autobenches = false ``` The other option is to move those non-benchmark files to a subdirectory (eg. `benches/benchmark_code`) where they will no longer be detected as benchmarks. I would recommend the latter option. Note that a file which contains a `criterion_main!` is a valid benchmark and can safely stay where it is. ### I made a trivial change to my source and Criterion.rs reports a large change in performance. Why? Don't worry, Criterion.rs isn't broken and you (probably) didn't do anything wrong. The most common reason for this is that the optimizer just happened to optimize your function differently after the change.
Optimizing compiler backends such as LLVM (which is used by `rustc`) are often complex beasts full of hand-rolled pattern matching code that detects when a particular optimization is possible and tries to guess whether it would make the code faster. Unfortunately, despite all of the engineering work that goes into these compilers, it's pretty common for apparently-trivial changes to the source like changing the order of lines to be enough to cause these optimizers to act differently. On top of this, apparently-small changes like changing the type of a variable or calling a slightly different function (such as `unwrap` vs `expect`) actually have much larger impacts under the hood than the slight different in source text might suggest. If you want to learn more about this (and some proposals for improving this situation in the future), I like [this paper](https://blog.regehr.org/archives/1619) by Regehr et al. On a similar subject, it's important to remember that a benchmark is only ever an estimate of the true performance of your function. If the optimizer can have significant effects on performance in an artificial environment like a benchmark, what about when your function is inlined into a variety of different calling contexts? The optimizer will almost certainly make different decisions for each caller. One hopes that each specialized version will be faster, but that can't be guaranteed. In a world of optimizing compilers, the "true performance" of a function is a fuzzy thing indeed. If you're still sure that Criterion.rs is doing something wrong, file an issue describing the problem. ### I made _no_ change to my source and Criterion.rs reports a large change in performance. Why? Typically this happens because the benchmark environments aren't quite the same. There are a lot of factors that can influence benchmarks. Other processes might be using the CPU or memory. Battery-powered devices often have power-saving modes that clock down the CPU (and these sometimes appear in desktops as well). If your benchmarks are run inside a VM, there might be other VMs on the same physical machine competing for resources. However, sometimes this happens even with no change. It's important to remember that Criterion.rs detects regressions and improvements statistically. There is always a chance that you randomly get unusually fast or slow samples, enough that Criterion.rs detects it as a change even though no change has occurred. In very large benchmark suites you might expect to see several of these spurious detections each time you run the benchmarks. Unfortunately, this is a fundamental trade-off in statistics. In order to decrease the rate of false detections, you must also decrease the sensitivity to small changes. Conversely, to increase the sensitivity to small changes, you must also increase the chance of false detections. Criterion.rs has default settings that strike a generally-good balance between the two, but you can adjust the settings to suit your needs. ### When I run benchmark executables directly (without using Cargo) they just print "Success". Why? When Cargo runs benchmarks, it passes the `--bench` or `--test` command-line arguments to the benchmark executables. Criterion.rs looks for these arguments and tries to either run benchmarks or run in test mode. In particular, when you run `cargo test --benches` (run tests, including testing benchmarks) Cargo does not pass either of these arguments. This is perhaps strange, since `cargo bench --test` passes both `--bench` and `--test`. 
In any case, Criterion.rs benchmarks run in test mode when `--bench` is not present, or when `--bench` and `--test` are both present. ### My benchmark fails to compile with the error "use of undeclared type or module `` First, check the [Getting Started](https://bheisler.github.io/criterion.rs/book/getting_started.html) guide and ensure that the `[[bench]]` section of your Cargo.toml is set up correctly. If it's correct, read on. This can be caused by two different things. Most commonly, this problem happens when trying to benchmark a binary (as opposed to library) crate. Criterion.rs cannot be used to benchmark binary crates (see the [Known Limitations](https://bheisler.github.io/criterion.rs/book/user_guide/known_limitations.html) page for more details on why). The usual workaround is to structure your application as a library crate that implements most of the functionality of the application and a binary crate which acts as a thin wrapper around the library crate to provide things like a CLI. Then, you can create Criterion.rs benchmarks that depend on the library crate. Less often, the problem is that the library crate is configured to compile as a `cdylib`. In order to benchmark your crate with Criterion.rs, you will need to set your Cargo.toml to enable generating an `rlib` as well. ### How can I benchmark a part of a function? The short answer is - you can't, not accurately. The longer answer is below. When people ask me this, my first response is always "extract that part of the function into a new function, give it a name, and then benchmark _that_". It's sort of unsatisfying, but that is also the only way to get really accurate measurements of that piece of your code. You can always tag it with `#[inline(always)]` to tell rustc to inline it back into the original callsite in the final executable. The problem is that your system's clock is not infinitely precise; there is a certain (often surprisingly large) granularity to the clock time reported by `Instant::now`. That means that, if it were to measure each execution individually, Criterion.rs might see a sequence of times like "0ms, 0ms, 0ms, 0ms, 0ms, 5ms, 0ms..." for a function that takes 1ms. To mitigate this, Criterion.rs runs many iterations of your benchmark, to divide that jitter across each iteration. There would be no way to run such a timing loop on _part_ of your code, unless that part were already easy to factor out and put in a separate function anyway. Instead, you'd have to time each iteration individually, resulting in the maximum possible timing jitter. However, if you need to do this anyway, and you're OK with the reduced accuracy, you can use `Bencher::iter_custom` to measure your code however you want to. `iter_custom` exists to allow for complex cases like multi-threaded code or, yes, measuring part of a function. Just be aware that you're responsible for the accuracy of your measurements. criterion.rs-0.3.6/book/src/getting_started.md000066400000000000000000000142451426140671200214040ustar00rootroot00000000000000# Getting Started # This is a quick walkthrough for adding Criterion.rs benchmarks to an existing crate. 
I'll assume that we have a crate, `mycrate`, whose `lib.rs` contains the following code: ```rust #[inline] fn fibonacci(n: u64) -> u64 { match n { 0 => 1, 1 => 1, n => fibonacci(n-1) + fibonacci(n-2), } } ``` ### Step 1 - Add Dependency to Cargo.toml ### To enable Criterion.rs benchmarks, add the following to your `Cargo.toml` file: ```toml [dev-dependencies] criterion = "0.3" [[bench]] name = "my_benchmark" harness = false ``` This adds a development dependency on Criterion.rs, and declares a benchmark called `my_benchmark` without the standard benchmarking harness. It's important to disable the standard benchmark harness, because we'll later add our own and we don't want them to conflict. ### Step 2 - Add Benchmark ### As an example, we'll benchmark our implementation of the Fibonacci function. Create a benchmark file at `$PROJECT/benches/my_benchmark.rs` with the following contents (see the Details section below for an explanation of this code): ```rust use criterion::{black_box, criterion_group, criterion_main, Criterion}; use mycrate::fibonacci; pub fn criterion_benchmark(c: &mut Criterion) { c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20)))); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches); ``` ### Step 3 - Run Benchmark ### To run this benchmark, use the following command: `cargo bench` You should see output similar to this: ``` Running target/release/deps/example-423eedc43b2b3a93 Benchmarking fib 20 Benchmarking fib 20: Warming up for 3.0000 s Benchmarking fib 20: Collecting 100 samples in estimated 5.0658 s (188100 iterations) Benchmarking fib 20: Analyzing fib 20 time: [26.029 us 26.251 us 26.505 us] Found 11 outliers among 99 measurements (11.11%) 6 (6.06%) high mild 5 (5.05%) high severe slope [26.029 us 26.505 us] R^2 [0.8745662 0.8728027] mean [26.106 us 26.561 us] std. dev. [808.98 ns 1.4722 us] median [25.733 us 25.988 us] med. abs. dev. [234.09 ns 544.07 ns] ``` ### Details ### Let's go back and walk through that benchmark code in more detail. ```rust use criterion::{black_box, criterion_group, criterion_main, Criterion}; use mycrate::fibonacci; ``` First, we declare the criterion crate and import the [Criterion type](http://bheisler.github.io/criterion.rs/criterion/struct.Criterion.html). Criterion is the main type for the Criterion.rs library. It provides methods to configure and define groups of benchmarks. We also import `black_box`, which will be described later. In addition to this, we declare `mycrate` as an external crate and import our fibonacci function from it. Cargo compiles benchmarks (or at least, the ones in `/benches`) as if each one was a separate crate from the main crate. This means that we need to import our library crate as an external crate, and it means that we can only benchmark public functions. ```rust fn criterion_benchmark(c: &mut Criterion) { ``` Here we create a function to contain our benchmark code. The name of this function doesn't matter, but it should be clear and understandable. ```rust c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20)))); } ``` This is where the real work happens. The `bench_function` method defines a benchmark with a name and a closure. The name should be unique among all of the benchmarks for your project. The closure must accept one argument, a [Bencher](http://bheisler.github.io/criterion.rs/criterion/struct.Bencher.html). The bencher performs the benchmark - in this case, it simply calls our `fibonacci` function in a loop. 
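To build some intuition for what the `Bencher` is doing, here is a heavily simplified sketch of the kind of timing loop that `iter` runs. This is not Criterion.rs' actual implementation (the real one also handles iteration-count scaling, custom measurements, and so on), and the `time_iterations` name is invented for this example.

```rust
use criterion::black_box;
use std::time::{Duration, Instant};

// Simplified illustration of a timing loop; not Criterion.rs' real code.
fn time_iterations<O, R: FnMut() -> O>(iters: u64, mut routine: R) -> Duration {
    let start = Instant::now();
    for _ in 0..iters {
        // black_box prevents the compiler from optimizing the call away.
        black_box(routine());
    }
    // The elapsed time divided by `iters` estimates the per-iteration time.
    start.elapsed()
}
```

Criterion.rs runs loops like this with increasing iteration counts and then analyzes the collected samples; see the [Analysis Process](./analysis.md) page for details.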
There are a number of other ways to perform benchmarks, including the option to benchmark with arguments, and to compare the performance of two functions. See the API documentation for details on all of the different benchmarking options. Using the `black_box` function stops the compiler from constant-folding away the whole function and replacing it with a constant. You may recall that we marked the `fibonacci` function as `#[inline]`. This allows it to be inlined across different crates. Since the benchmarks are technically a separate crate, that means it can be inlined into the benchmark, improving performance. ```rust criterion_group!(benches, criterion_benchmark); criterion_main!(benches); ``` Here we invoke the `criterion_group!` [(link)](http://bheisler.github.io/criterion.rs/criterion/macro.criterion_group.html) macro to generate a benchmark group called benches, containing the `criterion_benchmark` function defined earlier. Finally, we invoke the `criterion_main!` [(link)](http://bheisler.github.io/criterion.rs/criterion/macro.criterion_main.html) macro to generate a main function which executes the `benches` group. See the API documentation for more information on these macros. ### Step 4 - Optimize ### This fibonacci function is quite inefficient. We can do better: ```rust fn fibonacci(n: u64) -> u64 { let mut a = 0; let mut b = 1; match n { 0 => b, _ => { for _ in 0..n { let c = a + b; a = b; b = c; } b } } } ``` Running the benchmark now produces output like this: ``` Running target/release/deps/example-423eedc43b2b3a93 Benchmarking fib 20 Benchmarking fib 20: Warming up for 3.0000 s Benchmarking fib 20: Collecting 100 samples in estimated 5.0000 s (13548862800 iterations) Benchmarking fib 20: Analyzing fib 20 time: [353.59 ps 356.19 ps 359.07 ps] change: [-99.999% -99.999% -99.999%] (p = 0.00 < 0.05) Performance has improved. Found 6 outliers among 99 measurements (6.06%) 4 (4.04%) high mild 2 (2.02%) high severe slope [353.59 ps 359.07 ps] R^2 [0.8734356 0.8722124] mean [356.57 ps 362.74 ps] std. dev. [10.672 ps 20.419 ps] median [351.57 ps 355.85 ps] med. abs. dev. [4.6479 ps 10.059 ps] ``` As you can see, Criterion is statistically confident that our optimization has made an improvement. If we introduce a performance regression, Criterion will instead print a message indicating this. criterion.rs-0.3.6/book/src/iai/000077500000000000000000000000001426140671200164275ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/iai/comparison.md000066400000000000000000000050741426140671200211310ustar00rootroot00000000000000### Comparison with Criterion-rs I intend Iai to be a complement to Criterion-rs, not a competitor. The two projects measure different things in different ways and have different pros, cons, and limitations, so for most projects the best approach is to use both. Here's an overview of the important differences: - **Temporary Con:** Right now, Iai is lacking many features of Criterion-rs, including reports and configuration of any kind. - The current intent is to add support to [Cargo-criterion] for configuring and reporting on Iai benchmarks. - **Pro:** Iai can reliably detect much smaller changes in performance than Criterion-rs can. - **Pro:** Iai can work reliably in noisy CI environments or even cloud CI providers like GitHub Actions or Travis-CI, where Criterion-rs cannot. - **Pro:** Iai also generates profile output from the benchmark without further effort. 
- **Pro:** Although Cachegrind adds considerable runtime overhead, running each benchmark exactly once is still usually faster than Criterion-rs' statistical measurements. - **Mixed:** Because Iai can detect such small changes, it may report performance differences from changes to the order of functions in memory and other compiler details. - **Con:** Iai's measurements merely correlate with wall-clock time (which is usually what you actually care about), where Criterion-rs measures it directly. - **Con:** Iai cannot exclude setup code from the measurements, where Criterion-rs can. - **Con:** Because Cachegrind does not measure system calls, IO time is not accurately measured. - **Con:** Because Iai runs the benchmark exactly once, it cannot measure variation in the performance such as might be caused by OS thread scheduling or hash-table randomization. - **Limitation:** Iai can only be used on platforms supported by Valgrind. Notably, this does not include Windows. For benchmarks that run in CI (especially if you're checking for performance regressions in pull requests on cloud CI) you should use Iai. For benchmarking on Windows or other platforms that Valgrind doesn't support, you should use Criterion-rs. For other cases, I would advise using both. Iai gives more precision and scales better to larger benchmarks, while Criterion-rs allows for excluding setup time and gives you more information about the actual time your code takes and how strongly that is affected by non-determinism like threading or hash-table randomization. If you absolutely need to pick one or the other though, Iai is probably the one to go with. [Cargo-criterion]: https://github.com/bheisler/cargo-criterioncriterion.rs-0.3.6/book/src/iai/getting_started.md000066400000000000000000000025211426140671200221400ustar00rootroot00000000000000## Getting Started Iai is designed to be similar in interface to Criterion.rs, so using it is easy. To get started, add the following to your Cargo.toml file: ```toml [dev-dependencies] iai = "0.1" [[bench]] name = "my_benchmark" harness = false ``` Next, define a benchmark by creating a file at `$PROJECT/benches/my_benchmark.rs` with the following contents: ```rust use iai::{black_box, main}; fn fibonacci(n: u64) -> u64 { match n { 0 => 1, 1 => 1, n => fibonacci(n-1) + fibonacci(n-2), } } fn iai_benchmark_short() -> u64 { fibonacci(black_box(10)) } fn iai_benchmark_long() -> u64 { fibonacci(black_box(30)) } iai::main!(iai_benchmark_short, iai_benchmark_long); ``` Finally, run this benchmark with `cargo bench`. You should see output similar to the following: ``` Running target/release/deps/test_regular_bench-8b173c29ce041afa bench_fibonacci_short Instructions: 1735 L1 Accesses: 2364 L2 Accesses: 1 RAM Accesses: 1 Estimated Cycles: 2404 bench_fibonacci_long Instructions: 26214735 L1 Accesses: 35638623 L2 Accesses: 2 RAM Accesses: 1 Estimated Cycles: 35638668 ``` criterion.rs-0.3.6/book/src/iai/iai.md000066400000000000000000000006351426140671200175170ustar00rootroot00000000000000# Iai # [Iai](https://github.com/bheisler/iai) is an experimental benchmarking harness that uses Cachegrind to perform extremely precise single-shot measurements of Rust code. It is intended as a complement to Criterion.rs; among other things, it's useful for reliable benchmarking in CI. 
## API Docs ## In addition to this book, you may also wish to read [the API documentation](https://docs.rs/iai/).criterion.rs-0.3.6/book/src/iteration_times.svg000066400000000000000000001053511426140671200216120ustar00rootroot00000000000000 [SVG chart, markup stripped: gnuplot plot "sampling_mode/Flat", x-axis "Sample", y-axis "Average Iteration Time (ms)"] criterion.rs-0.3.6/book/src/migrating_0_2_to_0_3.md000066400000000000000000000041651426140671200220010ustar00rootroot00000000000000## Migrating from 0.2.* to 0.3.* Criterion.rs took advantage of 0.3.0 being a breaking-change release to make a number of changes that will require changes to user code. These changes are documented here, along with the newer alternatives. ### `Benchmark`, `ParameterizedBenchmark`, `Criterion::bench_functions`, `Criterion::bench_function_over_inputs`, `Criterion::bench` are deprecated. In the interest of minimizing disruption, all of these functions still exist and still work. They are deliberately hidden from the documentation and should not be used in new code. At some point in the lifecycle of the 0.3.0 series these will be formally deprecated and will start producing deprecation warnings. They will be removed in 0.4.0. All of these types and functions have been superseded by the `BenchmarkGroup` type, which is cleaner to use as well as more powerful and flexible. ### `cargo bench -- --test` is deprecated. Use `cargo test --benches` instead. ### The format of the `raw.csv` file has changed to accommodate custom measurements. The `sample_time_nanos` field has been split into `sample_measured_value` and `unit`. For the default `WallTime` measurement, the `sample_measured_value` is the same as the `sample_time_nanos` was previously. ### External program benchmarks have been removed. These were deprecated in version 0.2.6, as they were not used widely enough to justify the extra maintenance work. It is still possible to benchmark external programs using the `iter_custom` timing loop, but it does require some extra work. Although it does require extra development effort on the part of the benchmark author, using `iter_custom` gives more flexibility in how the benchmark communicates with the external process and also allows benchmarks to work with custom measurements, which was not possible previously. For an example of benchmarking an external process, see the `benches/external_process.rs` benchmark in the Criterion.rs repository. ### Throughput has been expanded to `u64` Existing benchmarks with u32 Throughputs will need to be changed. Using u64 allows Throughput to scale up to much larger numbers of bytes/elements.criterion.rs-0.3.6/book/src/user_guide/000077500000000000000000000000001426140671200200205ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/advanced_configuration.md000066400000000000000000000141741426140671200250450ustar00rootroot00000000000000# Advanced Configuration Criterion.rs provides a number of configuration options for more-complex use cases. These options are documented here. ## Configuring Sample Count & Other Statistical Settings Criterion.rs allows the user to adjust certain statistical parameters. The most common way to set these is using the `BenchmarkGroup` structure - see the documentation for that structure for a list of which settings are available. ```rust use criterion::*; fn my_function() { ...
}

fn bench(c: &mut Criterion) {
    let mut group = c.benchmark_group("sample-size-example");
    // Configure Criterion.rs to detect smaller differences and increase sample size to improve
    // precision and counteract the resulting noise.
    group.significance_level(0.1).sample_size(500);
    group.bench_function("my-function", |b| b.iter(|| my_function()));
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```

It is also possible to change Criterion.rs' default values for these settings, by using the full form of the `criterion_group` macro:

```rust
use criterion::*;

fn my_function() { ... }

fn bench(c: &mut Criterion) {
    let mut group = c.benchmark_group("sample-size-example");
    group.bench_function("my-function", |b| b.iter(|| my_function()));
    group.finish();
}

criterion_group!{
    name = benches;
    // This can be any expression that returns a `Criterion` object.
    config = Criterion::default().significance_level(0.1).sample_size(500);
    targets = bench
}
criterion_main!(benches);
```

## Throughput Measurements

When benchmarking some types of code it is useful to measure the throughput as well as the iteration time, either in bytes per second or elements per second. Criterion.rs can estimate the throughput of a benchmark, but it needs to know how many bytes or elements each iteration will process. Throughput measurements are only supported when using the `BenchmarkGroup` structure; they are not available when using the simpler `bench_function` interface.

To measure throughput, use the `throughput` method on `BenchmarkGroup`, like so:

```rust
use criterion::*;

fn decode(bytes: &[u8]) {
    // Decode the bytes
    ...
}

fn bench(c: &mut Criterion) {
    let bytes : &[u8] = ...;
    let mut group = c.benchmark_group("throughput-example");
    group.throughput(Throughput::Bytes(bytes.len() as u64));
    group.bench_function("decode", |b| b.iter(|| decode(bytes)));
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```

For parameterized benchmarks, you can simply call the throughput function inside a loop:

```rust
use criterion::*;

type Element = ...;

fn encode(elements: &[Element]) {
    // Encode the elements
    ...
}

fn bench(c: &mut Criterion) {
    let elements_1 : &[Element] = ...;
    let elements_2 : &[Element] = ...;

    let mut group = c.benchmark_group("throughput-example");
    for (i, elements) in [elements_1, elements_2].iter().enumerate() {
        group.throughput(Throughput::Elements(elements.len() as u64));
        group.bench_with_input(format!("Encode {}", i), elements, |b, elems| {
            b.iter(|| encode(elems))
        });
    }
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```

Setting the throughput causes a throughput estimate to appear in the output:

```
alloc                   time:   [5.9846 ms 6.0192 ms 6.0623 ms]
                        thrpt:  [164.95 MiB/s 166.14 MiB/s 167.10 MiB/s]
```

## Chart Axis Scaling

By default, Criterion.rs generates plots using a linear-scale axis. When using parameterized benchmarks, it is common for the input sizes to scale exponentially in order to cover a wide range of possible inputs. In this situation, it may be easier to read the resulting plots with a logarithmic axis.

As with throughput measurements above, this option is only available when using the `BenchmarkGroup` structure.

```rust
use criterion::*;

fn do_a_thing(x: u64) {
    // Do something
    ...
}

fn bench(c: &mut Criterion) {
    let plot_config = PlotConfiguration::default()
        .summary_scale(AxisScale::Logarithmic);

    let mut group = c.benchmark_group("log_scale_example");
    group.plot_config(plot_config);

    for i in [1u64, 10u64, 100u64, 1000u64, 10000u64, 100000u64, 1000000u64].iter() {
        group.bench_with_input(BenchmarkId::from_parameter(i), i, |b, i| b.iter(|| do_a_thing(*i)));
    }
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```

Currently the axis scaling is the only option that can be set on the PlotConfiguration struct. More may be added in the future.

## Sampling Mode

By default, Criterion.rs scales well to handle benchmarks that execute in picoseconds up to benchmarks that execute in milliseconds. Benchmarks that take longer will work just fine, but they tend to take a long time to run. Previously, the only way to deal with this was to reduce the sample count.

In Criterion.rs 0.3.3, a new option was added to change the sampling mode to handle long-running benchmarks. The benchmark author can call `BenchmarkGroup::sampling_mode(SamplingMode)` to change the sampling mode.

Currently three options are available:

* `SamplingMode::Auto`, which chooses a sampling mode from the other options automatically. This is the default.
* `SamplingMode::Linear`, the original sampling mode intended for faster benchmarks.
* `SamplingMode::Flat`, intended for long-running benchmarks.

The Flat sampling mode does change some of the statistical analysis and the charts that are generated. It is not recommended to use Flat sampling except where necessary.

```rust
use criterion::*;
use std::time::Duration;

fn my_function() {
    ::std::thread::sleep(Duration::from_millis(10))
}

fn bench(c: &mut Criterion) {
    let mut group = c.benchmark_group("flat-sampling-example");
    group.sampling_mode(SamplingMode::Flat);
    group.bench_function("my-function", |b| b.iter(|| my_function()));
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```
criterion.rs-0.3.6/book/src/user_guide/bencher_compatibility.md000066400000000000000000000044011426140671200247000ustar00rootroot00000000000000# Bencher Compatibility Layer

Criterion.rs provides a small crate which can be used as a drop-in replacement for most common usages of `bencher` in order to make it easy for existing `bencher` users to try out Criterion.rs. This page shows an example of how to use this crate.

## Example

We'll start with the example benchmark from `bencher`:

```rust
use bencher::{benchmark_group, benchmark_main, Bencher};

fn a(bench: &mut Bencher) {
    bench.iter(|| {
        (0..1000).fold(0, |x, y| x + y)
    })
}

fn b(bench: &mut Bencher) {
    const N: usize = 1024;
    bench.iter(|| {
        vec![0u8; N]
    });
    bench.bytes = N as u64;
}

benchmark_group!(benches, a, b);
benchmark_main!(benches);
```

The first step is to edit the Cargo.toml file to replace the bencher dependency with `criterion_bencher_compat`:

Change:

```toml
[dev-dependencies]
bencher = "0.1"
```

To:

```toml
[dev-dependencies]
criterion_bencher_compat = "0.3"
```

Then we update the benchmark file itself to change:

```rust
use bencher::{benchmark_group, benchmark_main, Bencher};
```

To:

```rust
use criterion_bencher_compat as bencher;
use bencher::{benchmark_group, benchmark_main, Bencher};
```

That's all!
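Putting the two edits together, the fully converted file is just the original `bencher` example with the new import at the top; as described above, nothing else needs to change:

```rust
use criterion_bencher_compat as bencher;
use bencher::{benchmark_group, benchmark_main, Bencher};

fn a(bench: &mut Bencher) {
    // Same summation benchmark as the original bencher example.
    bench.iter(|| {
        (0..1000).fold(0, |x, y| x + y)
    })
}

fn b(bench: &mut Bencher) {
    const N: usize = 1024;
    // Same allocation benchmark, still reporting throughput in bytes.
    bench.iter(|| {
        vec![0u8; N]
    });
    bench.bytes = N as u64;
}

benchmark_group!(benches, a, b);
benchmark_main!(benches);
```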
Now just run `cargo bench`: ```text Running target/release/deps/bencher_example-d865087781455bd5 a time: [234.58 ps 237.68 ps 241.94 ps] Found 9 outliers among 100 measurements (9.00%) 4 (4.00%) high mild 5 (5.00%) high severe b time: [23.972 ns 24.218 ns 24.474 ns] Found 4 outliers among 100 measurements (4.00%) 4 (4.00%) high mild ``` ## Limitations `criterion_bencher_compat` does not implement the full API of the `bencher` crate, only the most commonly-used subset. If your benchmarks require parts of the `bencher` crate which are not supported, you may need to temporarily disable them while trying Criterion.rs. `criterion_bencher_compat` does not provide access to most of Criterion.rs' more advanced features. If the Criterion.rs benchmarks work well for you, it is recommended to convert your benchmarks to use the Criterion.rs interface directly. See [Migrating from libtest](./migrating_from_libtest.md) for more information on that. criterion.rs-0.3.6/book/src/user_guide/benchmarking_async.md000066400000000000000000000053241426140671200241730ustar00rootroot00000000000000## Benchmarking async functions As of version 0.3.4, Criterion.rs has optional support for benchmarking async functions. Benchmarking async functions works just like benchmarking regular functions, except that the caller must provide a futures executor to run the benchmark in. ### Example: ```rust use criterion::BenchmarkId; use criterion::Criterion; use criterion::{criterion_group, criterion_main}; // This is a struct that tells Criterion.rs to use the "futures" crate's current-thread executor use criterion::async_executor::FuturesExecutor; // Here we have an async function to benchmark async fn do_something(size: usize) { // Do something async with the size } fn from_elem(c: &mut Criterion) { let size: usize = 1024; c.bench_with_input(BenchmarkId::new("input_example", size), &size, |b, &s| { // Insert a call to `to_async` to convert the bencher to async mode. // The timing loops are the same as with the normal bencher. b.to_async(FuturesExecutor).iter(|| do_something(s)); }); } criterion_group!(benches, from_elem); criterion_main!(benches); ``` As can be seen in the code above, to benchmark async functions we must provide an async runtime to the bencher to run the benchmark in. The runtime structs are listed in the table below. ### Enabling Async Benchmarking To enable async benchmark support, Criterion.rs must be compiled with one or more of the following features, depending on which futures executor(s) you want to benchmark on. It is recommended to use the same executor that you would use in production. If your executor is not listed here, you can implement the `criterion::async_executor::AsyncExecutor` trait for it to add support, or send a pull request. | Crate | Feature | Executor Struct | | --------- | ----------------------------- | ----------------------------------------------------- | | Tokio | "async_tokio" | `tokio::runtime::Runtime`, `&tokio::runtime::Runtime` | | async-std | "async_std" (note underscore) | `AsyncStdExecutor` | | Smol | "async_smol" | `SmolExecutor` | | futures | "async_futures" | `FuturesExecutor` | | Other | "async" | | ### Considerations when benchmarking async functions Async functions naturally result in more measurement overhead than synchronous functions. 
It is recommended to prefer synchronous functions when benchmarking where possible, especially for small functions.

criterion.rs-0.3.6/book/src/user_guide/benchmarking_with_inputs.md000066400000000000000000000057601426140671200254370ustar00rootroot00000000000000# Benchmarking With Inputs

Criterion.rs can run benchmarks with one or more different input values to investigate how the performance behavior changes with different inputs.

## Benchmarking With One Input

If you only have one input to your function, you can use a simple interface on the `Criterion` struct to run that benchmark.

```rust
use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::{criterion_group, criterion_main};

fn do_something(size: usize) {
    // Do something with the size
}

fn from_elem(c: &mut Criterion) {
    let size: usize = 1024;

    c.bench_with_input(BenchmarkId::new("input_example", size), &size, |b, &s| {
        b.iter(|| do_something(s));
    });
}

criterion_group!(benches, from_elem);
criterion_main!(benches);
```

This is convenient in that it automatically passes the input through a `black_box` so that you don't need to call that directly. It also includes the size in the benchmark description.

## Benchmarking With A Range Of Values

Criterion.rs can compare the performance of a function over a range of inputs using a `BenchmarkGroup`.

```rust
use std::iter;

use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::Throughput;
use criterion::{criterion_group, criterion_main};

fn from_elem(c: &mut Criterion) {
    static KB: usize = 1024;

    let mut group = c.benchmark_group("from_elem");
    for size in [KB, 2 * KB, 4 * KB, 8 * KB, 16 * KB].iter() {
        group.throughput(Throughput::Bytes(*size as u64));
        group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
            b.iter(|| iter::repeat(0u8).take(size).collect::<Vec<u8>>());
        });
    }
    group.finish();
}

criterion_group!(benches, from_elem);
criterion_main!(benches);
```

In this example, we're benchmarking the time it takes to collect an iterator producing a sequence of N bytes into a Vec.

First, we create a benchmark group, which is a way of telling Criterion.rs that a set of benchmarks are all related. Criterion.rs will generate extra summary pages for benchmark groups. Then we simply iterate over a set of desired inputs; we could just as easily unroll this loop manually, generate inputs of a particular size, etc. Inside the loop, we call the `throughput` function, which informs Criterion.rs that the benchmark operates on `size` bytes per iteration. Criterion.rs will use this to estimate the number of bytes per second that our function can process.

Next we call `bench_with_input`, providing a unique benchmark ID (in this case it's just the size, but you could generate custom strings as needed), passing in the size and a lambda that takes the size and a `Bencher` and performs the actual measurement. Finally, we `finish` the benchmark group; this generates the summary pages for that group. It is recommended to call `finish` explicitly, but if you forget it will be called automatically when the group is dropped.

![Line Chart](./line.svg)

Here we can see that there is an approximately linear relationship between the length of an iterator and the time taken to collect it into a Vec.
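As noted above, the benchmark ID does not have to be numeric; any string that uniquely identifies the input will do. The sketch below uses descriptive labels for non-numeric inputs. The `parse` function, the group name, and the labels are made up purely for illustration:

```rust
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};

// Stand-in for the code under test: count whitespace-separated words.
fn parse(input: &str) -> usize {
    input.split_whitespace().count()
}

fn bench_named_inputs(c: &mut Criterion) {
    let mut group = c.benchmark_group("parse");
    let inputs = [("empty", ""), ("sentence", "the quick brown fox jumps")];
    for (label, text) in inputs.iter() {
        // Each input gets a descriptive label instead of a numeric parameter.
        group.bench_with_input(BenchmarkId::new("whitespace", label), text, |b, &text| {
            b.iter(|| parse(text))
        });
    }
    group.finish();
}

criterion_group!(benches, bench_named_inputs);
criterion_main!(benches);
```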
criterion.rs-0.3.6/book/src/user_guide/command_line.md000066400000000000000000000000261426140671200227650ustar00rootroot00000000000000# Command-Line Output
criterion.rs-0.3.6/book/src/user_guide/command_line_options.md000066400000000000000000000065221426140671200245470ustar00rootroot00000000000000# Command-Line Options

**Note: If `cargo bench` fails with an error message about an unknown argument, see [the FAQ](../faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options).**

Criterion.rs benchmarks accept a number of custom command-line parameters. This is a list of the most common options. Run `cargo bench -- -h` to see a full list.

* To filter benchmarks, use `cargo bench -- <filter>` where `<filter>` is a regular expression matching the benchmark ID. For example, running `cargo bench -- fib_20` would only run benchmarks whose ID contains the string `fib_20`, while `cargo bench -- fib_\d+` would also match `fib_300`.
* To print more detailed output, use `cargo bench -- --verbose`
* To disable colored output, use `cargo bench -- --color never`
* To disable plot generation, use `cargo bench -- --noplot`
* To iterate each benchmark for a fixed length of time without saving, analyzing or plotting the results, use `cargo bench -- --profile-time <time>`. This is useful when profiling the benchmarks. It reduces the amount of unrelated clutter in the profiling results and prevents Criterion.rs' normal dynamic sampling logic from greatly increasing the runtime of the benchmarks.
* To save a baseline, use `cargo bench -- --save-baseline <name>`. To compare against an existing baseline, use `cargo bench -- --baseline <name>`. For more on baselines, see below.
* To test that the benchmarks run successfully without performing the measurement or analysis (eg. in a CI setting), use `cargo test --benches`.
* To override the default plotting backend, use `cargo bench -- --plotting-backend gnuplot` or `cargo bench -- --plotting-backend plotters`. `gnuplot` is used by default if it is installed.
* To change the CLI output format, use `cargo bench -- --output-format <format>`. Supported output formats are:
  * `criterion` - Use Criterion's normal output format
  * `bencher` - An output format similar to the output produced by the `bencher` crate or nightly `libtest` benchmarks. Though this provides less information than the `criterion` format, it may be useful to support external tools that can parse this output.

## Baselines

By default, Criterion.rs will compare the measurements against the previous run (if any). Sometimes it's useful to keep a set of measurements around for several runs. For example, you might want to make multiple changes to the code while comparing against the master branch. For this situation, Criterion.rs supports custom baselines.

* `--save-baseline <name>` will compare against the named baseline, then overwrite it.
* `--baseline <name>` will compare against the named baseline without overwriting it.
* `--load-baseline <name>` will load the named baseline as the new data set rather than the previous baseline.

Using these options, you can manage multiple baseline measurements.
For instance, if you want to compare against a static reference point such as the master branch, you might run: ```sh git checkout master cargo bench -- --save-baseline master git checkout feature cargo bench -- --save-baseline feature git checkout optimizations # Some optimization work here # Measure again cargo bench # Now compare against the stored baselines without overwriting it or re-running the measurements cargo bench -- --load-baseline new --baseline master cargo bench -- --load-baseline new --baseline feature ``` criterion.rs-0.3.6/book/src/user_guide/command_line_output.md000066400000000000000000000173561426140671200244230ustar00rootroot00000000000000# Command-Line Output The output for this page was produced by running `cargo bench -- --verbose`. `cargo bench` omits some of this information. Note: If `cargo bench` fails with an error message about an unknown argument, see [the FAQ](../faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options). Every Criterion.rs benchmark calculates statistics from the measured iterations and produces a report like this: ``` Benchmarking alloc Benchmarking alloc: Warming up for 1.0000 s Benchmarking alloc: Collecting 100 samples in estimated 13.354 s (5050 iterations) Benchmarking alloc: Analyzing alloc time: [2.5094 ms 2.5306 ms 2.5553 ms] thrpt: [391.34 MiB/s 395.17 MiB/s 398.51 MiB/s] change: [-38.292% -37.342% -36.524%] (p = 0.00 < 0.05) Performance has improved. Found 8 outliers among 100 measurements (8.00%) 4 (4.00%) high mild 4 (4.00%) high severe slope [2.5094 ms 2.5553 ms] R^2 [0.8660614 0.8640630] mean [2.5142 ms 2.5557 ms] std. dev. [62.868 us 149.50 us] median [2.5023 ms 2.5262 ms] med. abs. dev. [40.034 us 73.259 us] ``` ## Warmup Every Criterion.rs benchmark iterates the benchmarked function automatically for a configurable warmup period (by default, for three seconds). For Rust function benchmarks, this is to warm up the processor caches and (if applicable) file system caches. ## Collecting Samples Criterion iterates the function to be benchmarked with a varying number of iterations to generate an estimate of the time taken by each iteration. The number of samples is configurable. It also prints an estimate of the time the sampling process will take based on the time per iteration during the warmup period. ## Time ``` time: [2.5094 ms 2.5306 ms 2.5553 ms] thrpt: [391.34 MiB/s 395.17 MiB/s 398.51 MiB/s] ``` This shows a confidence interval over the measured per-iteration time for this benchmark. The left and right values show the lower and upper bounds of the confidence interval respectively, while the center value shows Criterion.rs' best estimate of the time taken for each iteration of the benchmarked routine. The confidence level is configurable. A greater confidence level (eg. 99%) will widen the interval and thus provide the user with less information about the true slope. On the other hand, a lesser confidence interval (eg. 90%) will narrow the interval but then the user is less confident that the interval contains the true slope. 95% is generally a good balance. Criterion.rs performs [bootstrap resampling](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)) to generate these confidence intervals. The number of bootstrap samples is configurable, and defaults to 100,000. Optionally, Criterion.rs can also report the throughput of the benchmarked code in units of bytes or elements per second. 
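To illustrate the idea behind these intervals, the sketch below shows a naive bootstrap of the mean: resample the measured values with replacement, compute the mean of each resample, and take percentiles of the resulting means as the interval bounds. This is only a conceptual sketch with a toy xorshift generator, not Criterion.rs' actual analysis code:

```rust
/// Naive bootstrap confidence interval for the mean of `samples`.
/// `confidence` is e.g. 0.95 for a 95% interval.
fn bootstrap_mean_ci(samples: &[f64], resamples: usize, confidence: f64) -> (f64, f64) {
    assert!(!samples.is_empty() && resamples > 0);

    // Tiny xorshift generator so the sketch has no external dependencies;
    // a real implementation would use a proper RNG.
    let mut state: u64 = 0x9E37_79B9_7F4A_7C15;
    let mut means = Vec::with_capacity(resamples);

    for _ in 0..resamples {
        let mut sum = 0.0;
        for _ in 0..samples.len() {
            state ^= state << 13;
            state ^= state >> 7;
            state ^= state << 17;
            // Draw an index with replacement and accumulate that sample.
            let idx = (state % samples.len() as u64) as usize;
            sum += samples[idx];
        }
        means.push(sum / samples.len() as f64);
    }

    means.sort_by(|a, b| a.partial_cmp(b).unwrap());
    // For confidence = 0.95 this picks the 2.5th and 97.5th percentiles of the resampled means.
    let lower = ((1.0 - confidence) / 2.0 * (resamples - 1) as f64).round() as usize;
    let upper = ((1.0 + confidence) / 2.0 * (resamples - 1) as f64).round() as usize;
    (means[lower], means[upper])
}
```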
## Change When a Criterion.rs benchmark is run, it saves statistical information in the `target/criterion` directory. Subsequent executions of the benchmark will load this data and compare it with the current sample to show the effects of changes in the code. ``` change: [-38.292% -37.342% -36.524%] (p = 0.00 < 0.05) Performance has improved. ``` This shows a confidence interval over the difference between this run of the benchmark and the last one, as well as the probability that the measured difference could have occurred by chance. These lines will be omitted if no saved data could be read for this benchmark. The second line shows a quick summary. This line will indicate that the performance has improved or regressed if Criterion.rs has strong statistical evidence that this is the case. It may also indicate that the change was within the noise threshold. Criterion.rs attempts to reduce the effects of noise as much as possible, but differences in benchmark environment (eg. different load from other processes, memory usage, etc.) can influence the results. For highly-deterministic benchmarks, Criterion.rs can be sensitive enough to detect these small fluctuations, so benchmark results that overlap the range `+-noise_threshold` are assumed to be noise and considered insignificant. The noise threshold is configurable, and defaults to `+-2%`. Additional examples: ``` alloc time: [1.2421 ms 1.2540 ms 1.2667 ms] change: [+40.772% +43.934% +47.801%] (p = 0.00 < 0.05) Performance has regressed. ``` ``` alloc time: [1.2508 ms 1.2630 ms 1.2756 ms] change: [-1.8316% +0.9121% +3.4704%] (p = 0.52 > 0.05) No change in performance detected. ``` ``` benchmark time: [442.92 ps 453.66 ps 464.78 ps] change: [-0.7479% +3.2888% +7.5451%] (p = 0.04 > 0.05) Change within noise threshold. ``` ## Detecting Outliers ``` Found 8 outliers among 100 measurements (8.00%) 4 (4.00%) high mild 4 (4.00%) high severe ``` Criterion.rs attempts to detect unusually high or low samples and reports them as outliers. A large number of outliers suggests that the benchmark results are noisy and should be viewed with appropriate skepticism. In this case, you can see that there are some samples which took much longer than normal. This might be caused by unpredictable load on the computer running the benchmarks, thread or process scheduling, or irregularities in the time taken by the code being benchmarked. In order to ensure reliable results, benchmarks should be run on a quiet computer and should be designed to do approximately the same amount of work for each iteration. If this is not possible, consider increasing the measurement time to reduce the influence of outliers on the results at the cost of longer benchmarking period. Alternately, the warmup period can be extended (to ensure that any JIT compilers or similar are warmed up) or other iteration loops can be used to perform setup before each benchmark to prevent that from affecting the results. ## Additional Statistics ``` slope [2.5094 ms 2.5553 ms] R^2 [0.8660614 0.8640630] mean [2.5142 ms 2.5557 ms] std. dev. [62.868 us 149.50 us] median [2.5023 ms 2.5262 ms] med. abs. dev. [40.034 us 73.259 us] ``` This shows additional confidence intervals based on other statistics. Criterion.rs performs a linear regression to calculate the time per iteration. The first line shows the confidence interval of the slopes from the linear regressions, while the R^2 area shows the goodness-of-fit values for the lower and upper bounds of that confidence interval. 
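To make the slope and R^2 statistics more concrete, the sketch below fits `measured value ≈ slope × iteration count` through the origin by least squares and computes R^2 for that fit. It is a simplified illustration of the idea described above, not Criterion.rs' actual regression code:

```rust
/// Least-squares fit through the origin of per-sample measured values against
/// iteration counts, returning (slope, R^2).
fn slope_and_r_squared(iters: &[f64], times: &[f64]) -> (f64, f64) {
    let xy: f64 = iters.iter().zip(times).map(|(x, y)| x * y).sum();
    let xx: f64 = iters.iter().map(|x| x * x).sum();
    let slope = xy / xx;

    // R^2 = 1 - (residual sum of squares / total sum of squares).
    let mean_y = times.iter().sum::<f64>() / times.len() as f64;
    let ss_res: f64 = iters
        .iter()
        .zip(times)
        .map(|(x, y)| {
            let predicted = slope * x;
            (y - predicted) * (y - predicted)
        })
        .sum();
    let ss_tot: f64 = times.iter().map(|y| (y - mean_y) * (y - mean_y)).sum();

    (slope, 1.0 - ss_res / ss_tot)
}
```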
If the R^2 value is low, this may indicate the benchmark isn't doing the same amount of work on each iteration. You may wish to examine the plot output and consider improving the consistency of your benchmark routine. The second line shows confidence intervals on the mean and standard deviation of the per-iteration times (calculated naively). If std. dev. is large compared to the time values from above, the benchmarks are noisy. You may need to change your benchmark to reduce the noise. The median/med. abs. dev. line is similar to the mean/std. dev. line, except that it uses the median and [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation). As with the std. dev., if the med. abs. dev. is large, this indicates the benchmarks are noisy. ## A Note Of Caution Criterion.rs is designed to produce robust statistics when possible, but it can't account for everything. For example, the performance improvements and regressions listed in the above examples were created just by switching my laptop between battery power and wall power rather than changing the code under test. Care must be taken to ensure that benchmarks are performed under similar conditions in order to produce meaningful results. criterion.rs-0.3.6/book/src/user_guide/comparing_functions.md000066400000000000000000000052321426140671200244130ustar00rootroot00000000000000# Comparing Functions Criterion.rs can automatically benchmark multiple implementations of a function and produce summary graphs to show the differences in performance between them. First, lets create a comparison benchmark. We can even combine this with benchmarking over a range of inputs. ```rust use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId}; fn fibonacci_slow(n: u64) -> u64 { match n { 0 => 1, 1 => 1, n => fibonacci_slow(n-1) + fibonacci_slow(n-2), } } fn fibonacci_fast(n: u64) -> u64 { let mut a = 0; let mut b = 1; match n { 0 => b, _ => { for _ in 0..n { let c = a + b; a = b; b = c; } b } } } fn bench_fibs(c: &mut Criterion) { let mut group = c.benchmark_group("Fibonacci"); for i in [20u64, 21u64].iter() { group.bench_with_input(BenchmarkId::new("Recursive", i), i, |b, i| b.iter(|| fibonacci_slow(*i))); group.bench_with_input(BenchmarkId::new("Iterative", i), i, |b, i| b.iter(|| fibonacci_fast(*i))); } group.finish(); } criterion_group!(benches, bench_fibs); criterion_main!(benches); ``` These are the same two fibonacci functions from the [Getting Started](../getting_started.md) page. ```rust fn bench_fibs(c: &mut Criterion) { let mut group = c.benchmark_group("Fibonacci"); for i in [20u64, 21u64].iter() { group.bench_with_input(BenchmarkId::new("Recursive", i), i, |b, i| b.iter(|| fibonacci_slow(*i))); group.bench_with_input(BenchmarkId::new("Iterative", i), i, |b, i| b.iter(|| fibonacci_fast(*i))); } group.finish(); } ``` As in the earlier example of benchmarking over a range of inputs, we create a benchmark group and iterate over our inputs. To compare multiple functions, we simply call `bench_with_input` multiple times inside the loop. Criterion will generate a report for each individual benchmark/input pair, as well as summary reports for each benchmark (across all inputs) and each input (across all benchmarks), as well as an overall summary of the whole benchmark group. Naturally, the benchmark group could just as easily be used to benchmark non-parameterized functions as well. 
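For example, the two implementations above could be compared at a single fixed input like this; the group name, benchmark names, and the choice of input are arbitrary, and the snippet reuses the `fibonacci_slow` and `fibonacci_fast` functions defined earlier:

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn bench_fib_impls(c: &mut Criterion) {
    let mut group = c.benchmark_group("Fibonacci-20");
    // No inputs here: the two implementations are simply benchmarked side by side
    // under the same group, so Criterion.rs produces a combined summary report.
    group.bench_function("Recursive", |b| b.iter(|| fibonacci_slow(black_box(20))));
    group.bench_function("Iterative", |b| b.iter(|| fibonacci_fast(black_box(20))));
    group.finish();
}

criterion_group!(benches, bench_fib_impls);
criterion_main!(benches);
```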
## Violin Plot ![Violin Plot](./violin_plot.svg) The [Violin Plot](https://en.wikipedia.org/wiki/Violin_plot) shows the median times and the PDF of each implementation. ## Line Chart ![Line Chart](./lines.svg) The line chart shows a comparison of the different functions as the input or input size increases, which can be generated with `Criterion::benchmark_group`. criterion.rs-0.3.6/book/src/user_guide/csv_output.md000066400000000000000000000077101426140671200225620ustar00rootroot00000000000000# CSV Output NOTE: The CSV output is in the process of being deprecated. For machine-readable output, cargo-criterion's `--message-format=json` option is recommended instead - see [External Tools](../cargo_criterion/external_tools.html). CSV output will become an optional feature in Criterion.rs 0.4.0. Criterion.rs saves its measurements in several files, as shown below: ``` $BENCHMARK/ ├── base/ │ ├── raw.csv │ ├── estimates.json │ ├── sample.json │ └── tukey.json ├── change/ │ └── estimates.json ├── new/ │ ├── raw.csv │ ├── estimates.json │ ├── sample.json │ └── tukey.json ``` The JSON files are all considered private implementation details of Criterion.rs, and their structure may change at any time without warning. However, there is a need for some sort of stable and machine-readable output to enable projects like [lolbench](https://github.com/anp/lolbench) to keep historical data or perform additional analysis on the measurements. For this reason, Criterion.rs also writes the `raw.csv` file. The format of this file is expected to remain stable between different versions of Criterion.rs, so this file is suitable for external tools to depend on. The format of `raw.csv` is as follows: ``` group,function,value,throughput_num,throughput_type,sample_measured_value,unit,iteration_count Fibonacci,Iterative,,,,915000,ns,110740 Fibonacci,Iterative,,,,1964000,ns,221480 Fibonacci,Iterative,,,,2812000,ns,332220 Fibonacci,Iterative,,,,3767000,ns,442960 Fibonacci,Iterative,,,,4785000,ns,553700 Fibonacci,Iterative,,,,6302000,ns,664440 Fibonacci,Iterative,,,,6946000,ns,775180 Fibonacci,Iterative,,,,7815000,ns,885920 Fibonacci,Iterative,,,,9186000,ns,996660 Fibonacci,Iterative,,,,9578000,ns,1107400 Fibonacci,Iterative,,,,11206000,ns,1218140 ... ``` This data was taken with this benchmark code: ```rust fn compare_fibonaccis(c: &mut Criterion) { let mut group = c.benchmark_group("Fibonacci"); group.bench_with_input("Recursive", 20, |b, i| b.iter(|| fibonacci_slow(*i))); group.bench_with_input("Iterative", 20, |b, i| b.iter(|| fibonacci_fast(*i))); group.finish(); } ``` `raw.csv` contains the following columns: - `group` - This corresponds to the function group name, in this case "Fibonacci" as seen in the code above. This is the parameter given to the `Criterion::bench` functions. - `function` - This corresponds to the function name, in this case "Iterative". When comparing multiple functions, each function is given a different name. Otherwise, this will be the empty string. - `value` - This is the parameter passed to the benchmarked function when using parameterized benchmarks. In this case, there is no parameter so the value is the empty string. - `throughput_num` - This is the numeric value of the Throughput configured on the benchmark (if any) - `throughput_type` - "bytes" or "elements", corresponding to the variant of the Throughput configured on the benchmark (if any) - `iteration_count` - The number of times the benchmark was iterated for this sample. 
- `sample_measured_value` - The value of the measurement for this sample. Note that this is the measured value for the whole sample, not the time-per-iteration (see [Analysis Process](../analysis.md#measurement) for more detail). To calculate the time-per-iteration, use `sample_measured_value/iteration_count`. - `unit` - a string representing the unit for the measured value. For the default `WallTime` measurement this will be "ns", for nanoseconds. As you can see, this is the raw measurements taken by the Criterion.rs benchmark process. There is one record for each sample, and one file for each benchmark. The results of Criterion.rs' analysis of these measurements are not currently available in machine-readable form. If you need access to this information, please raise an issue describing your use case. criterion.rs-0.3.6/book/src/user_guide/custom_measurements.md000066400000000000000000000240511426140671200244460ustar00rootroot00000000000000# Custom Measurements By default, Criterion.rs measures the wall-clock time taken by the benchmarks. However, there are many other ways to measure the performance of a function, such as hardware performance counters or POSIX's CPU time. Since version 0.3.0, Criterion.rs has had support for plugging in alternate timing measurements. This page details how to define and use these custom measurements. Note that as of version 0.3.0, only timing measurements are supported, and only a single measurement can be used for one benchmark. These restrictions may be lifted in future versions. ### Defining Custom Measurements For developers who wish to use custom measurements provided by an existing crate, skip to ["Using Custom Measurements"](#using-custom-measurements) below. Custom measurements are defined by a pair of traits, both defined in `criterion::measurement`. #### Measurement First, we'll look at the main trait, `Measurement`. ```rust pub trait Measurement { type Intermediate; type Value: MeasuredValue; fn start(&self) -> Self::Intermediate; fn end(&self, i: Self::Intermediate) -> Self::Value; fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value; fn zero(&self) -> Self::Value; fn to_f64(&self, val: &Self::Value) -> f64; fn formatter(&self) -> &dyn ValueFormatter; } ``` The most important methods here are `start` and `end` and their associated types, `Intermediate` and `Value`. `start` is called to start a measurement and `end` is called to complete it. As an example, the `start` method of the wall-clock time measurement returns the value of the system clock at the moment that `start` is called. This starting time is then passed to the `end` function, which reads the system clock again and calculates the elapsed time between the two calls. This pattern - reading some system counter before and after the benchmark and reporting the difference - is a common way for code to measure performance. The next two functions, `add` and `zero` are pretty simple; Criterion.rs sometimes needs to be able to break up a sample into batches that are added together (eg. in `Bencher::iter_batched`) and so we need to have a way to calculate the sum of the measurements for each batch to get the overall value for the sample. `to_f64` is used to convert the measured value to an `f64` value so that Criterion can perform its analysis. As of 0.3.0, only a single value can be returned for analysis per benchmark. 
Since `f64` doesn't carry any unit information, the implementor should be careful to choose their units to avoid having extremely large or extremely small values that may have floating-point precision issues. For wall-clock time, we convert to nanoseconds. Finally, we have `formatter`, which just returns a trait-object reference to a `ValueFormatter` (more on this later). For our half-second measurement, this is all pretty straightforward; we're still measuring wall-clock time so we can just use `Instant` and `Duration` like `WallTime` does: ```rust /// Silly "measurement" that is really just wall-clock time reported in half-seconds. struct HalfSeconds; impl Measurement for HalfSeconds { type Intermediate = Instant; type Value = Duration; fn start(&self) -> Self::Intermediate { Instant::now() } fn end(&self, i: Self::Intermediate) -> Self::Value { i.elapsed() } fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { *v1 + *v2 } fn zero(&self) -> Self::Value { Duration::from_secs(0) } fn to_f64(&self, val: &Self::Value) -> f64 { let nanos = val.as_secs() * NANOS_PER_SEC + u64::from(val.subsec_nanos()); nanos as f64 } fn formatter(&self) -> &dyn ValueFormatter { &HalfSecFormatter } } ``` #### ValueFormatter The next trait is `ValueFormatter`, which defines how a measurement is displayed to the user. ```rust pub trait ValueFormatter { fn format_value(&self, value: f64) -> String {...} fn format_throughput(&self, throughput: &Throughput, value: f64) -> String {...} fn scale_values(&self, typical_value: f64, values: &mut [f64]) -> &'static str; fn scale_throughputs(&self, typical_value: f64, throughput: &Throughput, values: &mut [f64]) -> &'static str; fn scale_for_machines(&self, values: &mut [f64]) -> &'static str; } ``` All of these functions accept a value to format in f64 form; the values passed in will be in the same scale as the values returned from `to_f64`, but may not be the exact same values. That is, if `to_f64` returns values scaled to "thousands of cycles", the values passed to `format_value` and the other functions will be in the same units, but may be different numbers (eg. the mean of all sample times). Implementors should try to format the values in a way that will make sense to humans. "1,500,000 ns" is needlessly confusing while "1.5 ms" is much clearer. If you can, try to use SI prefixes to simplify the numbers. An easy way to do this is to have a series of conditionals like so: ```rust if ns < 1.0 { // ns = time in nanoseconds per iteration format!("{:>6} ps", ns * 1e3) } else if ns < 10f64.powi(3) { format!("{:>6} ns", ns) } else if ns < 10f64.powi(6) { format!("{:>6} us", ns / 1e3) } else if ns < 10f64.powi(9) { format!("{:>6} ms", ns / 1e6) } else { format!("{:>6} s", ns / 1e9) } ``` It's also a good idea to limit the amount of precision in floating-point output - after a few digits the numbers don't matter much anymore but add a lot of visual noise and make the results harder to interpret. For example, it's very unlikely that anyone cares about the difference between `10.2896653s` and `10.2896654s` - it's much more salient that their function takes "about 10.290 seconds per iteration". With that out of the way, `format_value` is pretty straightforward. `format_throughput` is also not too difficult; match on `Throughput::Bytes` or `Throughput::Elements` and generate an appropriate description. 
For wall-clock time, that would likely take the form of "bytes per second", but a measurement that read CPU performance counters might want to display throughput in terms of "cycles per byte". Note that default implementations of `format_value` and `format_throughput` are provided which use `scale_values` and `scale_throughputs`, but you can override them if you wish. `scale_values` is a bit more complex. This accepts a "typical" value chosen by Criterion.rs, and a mutable slice of values to scale. This function should choose an appropriate unit based on the typical value, and convert all values in the slice to that unit. It should also return a string representing the chosen unit. So, for our wall-clock times where the measured values are in nanoseconds, if we wanted to display plots in milliseconds we would multiply all of the input values by `10.0f64.powi(-6)` and return `"ms"`, because multiplying a value in nanoseconds by 10^-6 gives a value in milliseconds. `scale_throughputs` does the same thing, only it converts a slice of measured values to their corresponding scaled throughput values. `scale_for_machines` is similar to `scale_values`, except that it's used for generating machine-readable outputs. It does not accept a typical value, because this function should always return values in the same unit. Our half-second measurement formatter thus looks like this: ```rust struct HalfSecFormatter; impl ValueFormatter for HalfSecFormatter { fn format_value(&self, value: f64) -> String { // The value will be in nanoseconds so we have to convert to half-seconds. format!("{} s/2", value * 2f64 * 10f64.powi(-9)) } fn format_throughput(&self, throughput: &Throughput, value: f64) -> String { match *throughput { Throughput::Bytes(bytes) => format!( "{} b/s/2", f64::from(bytes) / (value * 2f64 * 10f64.powi(-9)) ), Throughput::Elements(elems) => format!( "{} elem/s/2", f64::from(elems) / (value * 2f64 * 10f64.powi(-9)) ), } } fn scale_values(&self, ns: f64, values: &mut [f64]) -> &'static str { for val in values { *val *= 2f64 * 10f64.powi(-9); } "s/2" } fn scale_throughputs( &self, _typical: f64, throughput: &Throughput, values: &mut [f64], ) -> &'static str { match *throughput { Throughput::Bytes(bytes) => { // Convert nanoseconds/iteration to bytes/half-second. for val in values { *val = (bytes as f64) / (*val * 2f64 * 10f64.powi(-9)) } "b/s/2" } Throughput::Elements(elems) => { for val in values { *val = (elems as f64) / (*val * 2f64 * 10f64.powi(-9)) } "elem/s/2" } } } fn scale_for_machines(&self, values: &mut [f64]) -> &'static str { // Convert values in nanoseconds to half-seconds. for val in values { *val *= 2f64 * 10f64.powi(-9); } "s/2" } } ``` ### Using Custom Measurements Once you (or an external crate) have defined a custom measurement, using it is relatively easy. You will need to override the `Criterion` struct (which defaults to `WallTime`) by providing your own measurement using the `with_measurement` function and overriding the default `Criterion` object configuration. Your benchmark functions will also have to declare the measurement type they work with. ```rust fn fibonacci_cycles(criterion: &mut Criterion) { // Use the criterion struct as normal here. } fn alternate_measurement() -> Criterion { Criterion::default().with_measurement(HalfSeconds) } criterion_group! 
{ name = benches; config = alternate_measurement(); targets = fibonacci_cycles } ``` criterion.rs-0.3.6/book/src/user_guide/custom_test_framework.md000066400000000000000000000055031426140671200247730ustar00rootroot00000000000000# Custom Test Framework Nightly versions of the rust compiler support custom test frameworks. Criterion.rs provides an experimental implementation of a custom test framework, meaning that you can use `#[criterion]` attributes to mark your benchmarks instead of the normal `criterion_group!/criterion_main!` macros. Right now this requires some unstable features, but at some point in the future `criterion_group!/criterion_main!` will be deprecated and `#[criterion]` will become the standard way to define a Criterion.rs benchmark. If you'd like to try this feature out early, see the documentation below. ## Using `#[criterion]` Since custom test frameworks are still unstable, you will need to be using a recent nightly compiler. Once that's installed, add the dependencies to your Cargo.toml: ```toml [dev-dependencies] criterion = "0.3" criterion-macro = "0.3" ``` Note that for `#[criterion]` benchmarks, we don't need to disable the normal testing harness as we do with regular Criterion.rs benchmarks. Let's take a look at an example benchmark (note that this example assumes you're using Rust 2018): ```rust #![feature(custom_test_frameworks)] #![test_runner(criterion::runner)] use criterion::{Criterion, black_box}; use criterion_macro::criterion; fn fibonacci(n: u64) -> u64 { match n { 0 | 1 => 1, n => fibonacci(n - 1) + fibonacci(n - 2), } } fn custom_criterion() -> Criterion { Criterion::default() .sample_size(50) } #[criterion] fn bench_simple(c: &mut Criterion) { c.bench_function("Fibonacci-Simple", |b| b.iter(|| fibonacci(black_box(10)))); } #[criterion(custom_criterion())] fn bench_custom(c: &mut Criterion) { c.bench_function("Fibonacci-Custom", |b| b.iter(|| fibonacci(black_box(20)))); } ``` The first thing to note is that we enable the `custom_test_framework` feature and declare that we want to use `criterion::runner` as the test runner. We also import `criterion_macro::criterion`, which is the `#[criterion]` macro itself. In future versions this will likely be re-exported from the `criterion` crate so that it can be imported from there, but for now we have to import it from `criterion_macro`. After that we define our old friend the Fibonacci function and the benchmarks. To create a benchmark with `#[criterion]` you simply attach the attribute to a function that accepts an `&mut Criterion`. To provide a custom Criterion object (to override default settings or similar) you can instead use `#[criterion()]` - here we're calling the `custom_criterion` function. And that's all there is to it! Keep in mind that in addition to being built on unstable compiler features, the API design for Criterion.rs and its test framework is still experimental. The macro subcrate will respect SemVer, but future breaking changes are quite likely. criterion.rs-0.3.6/book/src/user_guide/html_report.md000066400000000000000000000011061426140671200226770ustar00rootroot00000000000000# HTML Report Criterion.rs can generate an HTML report displaying the results of the benchmark under `target/criterion/report/index.html`. By default, the plots are generated using [gnuplot](http://www.gnuplot.info/) if it is available, or the [plotters](https://github.com/38/plotters) crate if it is not. The example below was generated using the gnuplot backend, but the charts generated by plotters are similar. 
To see an example report, [click here](html_report/report/index.html). For more details on the charts and statistics displayed, check the other pages of this book. criterion.rs-0.3.6/book/src/user_guide/html_report/000077500000000000000000000000001426140671200223575ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/000077500000000000000000000000001426140671200242345ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/000077500000000000000000000000001426140671200261705ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/base/000077500000000000000000000000001426140671200271025ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/base/benchmark.json000066400000000000000000000002721426140671200317300ustar00rootroot00000000000000{"group_id":"Fibonacci","function_id":"Iterative","value_str":null,"throughput":null,"full_id":"Fibonacci/Iterative","directory_name":"Fibonacci/Iterative","title":"Fibonacci/Iterative"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/base/estimates.json000066400000000000000000000017361426140671200320020ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.689789300216866,"upper_bound":4.760375336010672},"point_estimate":4.723173110355467,"standard_error":0.01796446321086466},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.654054197375762,"upper_bound":4.696694163445648},"point_estimate":4.670911442470985,"standard_error":0.01070564545929857},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.06450212056716723,"upper_bound":0.11854627378470874},"point_estimate":0.08299438165247179,"standard_error":0.013423727201506403},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.669716648112915,"upper_bound":4.75099122458243},"point_estimate":4.708418938929127,"standard_error":0.020796293177134745},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.1267373438799889,"upper_bound":0.2317502059547596},"point_estimate":0.1802894617505479,"standard_error":0.026984432976314865}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/base/raw.csv000066400000000000000000000074671426140671200304260ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci,Iterative,,1128000,209454 Fibonacci,Iterative,,1853000,418908 Fibonacci,Iterative,,2976000,628362 Fibonacci,Iterative,,3908000,837816 Fibonacci,Iterative,,4938000,1047270 Fibonacci,Iterative,,5885000,1256724 Fibonacci,Iterative,,7324000,1466178 Fibonacci,Iterative,,8286000,1675632 Fibonacci,Iterative,,8765000,1885086 Fibonacci,Iterative,,10310000,2094540 Fibonacci,Iterative,,10857000,2303994 Fibonacci,Iterative,,11678000,2513448 Fibonacci,Iterative,,12549000,2722902 Fibonacci,Iterative,,13873000,2932356 Fibonacci,Iterative,,14675000,3141810 Fibonacci,Iterative,,16283000,3351264 Fibonacci,Iterative,,20063000,3560718 Fibonacci,Iterative,,17803000,3770172 Fibonacci,Iterative,,20195000,3979626 Fibonacci,Iterative,,20436000,4189080 Fibonacci,Iterative,,21179000,4398534 Fibonacci,Iterative,,22198000,4607988 Fibonacci,Iterative,,22829000,4817442 Fibonacci,Iterative,,23683000,5026896 Fibonacci,Iterative,,24281000,5236350 Fibonacci,Iterative,,25393000,5445804 Fibonacci,Iterative,,26687000,5655258 Fibonacci,Iterative,,27278000,5864712 
Fibonacci,Iterative,,28180000,6074166 Fibonacci,Iterative,,29413000,6283620 Fibonacci,Iterative,,30601000,6493074 Fibonacci,Iterative,,31213000,6702528 Fibonacci,Iterative,,32171000,6911982 Fibonacci,Iterative,,33550000,7121436 Fibonacci,Iterative,,34156000,7330890 Fibonacci,Iterative,,35158000,7540344 Fibonacci,Iterative,,36981000,7749798 Fibonacci,Iterative,,36258000,7959252 Fibonacci,Iterative,,37167000,8168706 Fibonacci,Iterative,,37987000,8378160 Fibonacci,Iterative,,39330000,8587614 Fibonacci,Iterative,,40601000,8797068 Fibonacci,Iterative,,41696000,9006522 Fibonacci,Iterative,,42520000,9215976 Fibonacci,Iterative,,43247000,9425430 Fibonacci,Iterative,,45455000,9634884 Fibonacci,Iterative,,45675000,9844338 Fibonacci,Iterative,,49040000,10053792 Fibonacci,Iterative,,47454000,10263246 Fibonacci,Iterative,,50823000,10472700 Fibonacci,Iterative,,50484000,10682154 Fibonacci,Iterative,,49703000,10891608 Fibonacci,Iterative,,52361000,11101062 Fibonacci,Iterative,,54400000,11310516 Fibonacci,Iterative,,52225000,11519970 Fibonacci,Iterative,,53356000,11729424 Fibonacci,Iterative,,60309000,11938878 Fibonacci,Iterative,,57057000,12148332 Fibonacci,Iterative,,57279000,12357786 Fibonacci,Iterative,,58295000,12567240 Fibonacci,Iterative,,58897000,12776694 Fibonacci,Iterative,,59319000,12986148 Fibonacci,Iterative,,60526000,13195602 Fibonacci,Iterative,,61972000,13405056 Fibonacci,Iterative,,63944000,13614510 Fibonacci,Iterative,,70640000,13823964 Fibonacci,Iterative,,68548000,14033418 Fibonacci,Iterative,,66929000,14242872 Fibonacci,Iterative,,66643000,14452326 Fibonacci,Iterative,,70034000,14661780 Fibonacci,Iterative,,68525000,14871234 Fibonacci,Iterative,,69591000,15080688 Fibonacci,Iterative,,71544000,15290142 Fibonacci,Iterative,,72586000,15499596 Fibonacci,Iterative,,71582000,15709050 Fibonacci,Iterative,,74696000,15918504 Fibonacci,Iterative,,75331000,16127958 Fibonacci,Iterative,,75553000,16337412 Fibonacci,Iterative,,76436000,16546866 Fibonacci,Iterative,,87243000,16756320 Fibonacci,Iterative,,80738000,16965774 Fibonacci,Iterative,,79838000,17175228 Fibonacci,Iterative,,80746000,17384682 Fibonacci,Iterative,,81994000,17594136 Fibonacci,Iterative,,83289000,17803590 Fibonacci,Iterative,,84138000,18013044 Fibonacci,Iterative,,91807000,18222498 Fibonacci,Iterative,,84584000,18431952 Fibonacci,Iterative,,85760000,18641406 Fibonacci,Iterative,,87469000,18850860 Fibonacci,Iterative,,89370000,19060314 Fibonacci,Iterative,,96233000,19269768 Fibonacci,Iterative,,94821000,19479222 Fibonacci,Iterative,,98666000,19688676 Fibonacci,Iterative,,95124000,19898130 Fibonacci,Iterative,,92061000,20107584 Fibonacci,Iterative,,93890000,20317038 Fibonacci,Iterative,,96042000,20526492 Fibonacci,Iterative,,96633000,20735946 Fibonacci,Iterative,,94869000,20945400 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/base/sample.json000066400000000000000000000041411426140671200312560ustar00rootroot00000000000000[[209454.0,418908.0,628362.0,837816.0,1047270.0,1256724.0,1466178.0,1675632.0,1885086.0,2094540.0,2303994.0,2513448.0,2722902.0,2932356.0,3141810.0,3351264.0,3560718.0,3770172.0,3979626.0,4189080.0,4398534.0,4607988.0,4817442.0,5026896.0,5236350.0,5445804.0,5655258.0,5864712.0,6074166.0,6283620.0,6493074.0,6702528.0,6911982.0,7121436.0,7330890.0,7540344.0,7749798.0,7959252.0,8168706.0,8378160.0,8587614.0,8797068.0,9006522.0,9215976.0,9425430.0,9634884.0,9844338.0,10053792.0,10263246.0,10472700.0,10682154.0,10891608.0,11101062.0,11310516.0,11519970.0,11729424.0,11938878.0,12148332.0,12357786.0,12567240.0,12776694.0,12986148.0,13195602.0,13405056.0,13614510.0,13823964.0,14033418.0,14242872.0,14452326.0,14661780.0,14871234.0,15080688.0,15290142.0,15499596.0,15709050.0,15918504.0,16127958.0,16337412.0,16546866.0,16756320.0,16965774.0,17175228.0,17384682.0,17594136.0,17803590.0,18013044.0,18222498.0,18431952.0,18641406.0,18850860.0,19060314.0,19269768.0,19479222.0,19688676.0,19898130.0,20107584.0,20317038.0,20526492.0,20735946.0,20945400.0],[1128000.0,1853000.0,2976000.0,3908000.0,4938000.0,5885000.0,7324000.0,8286000.0,8765000.0,10310000.0,10857000.0,11678000.0,12549000.0,13873000.0,14675000.0,16283000.0,20063000.0,17803000.0,20195000.0,20436000.0,21179000.0,22198000.0,22829000.0,23683000.0,24281000.0,25393000.0,26687000.0,27278000.0,28180000.0,29413000.0,30601000.0,31213000.0,32171000.0,33550000.0,34156000.0,35158000.0,36981000.0,36258000.0,37167000.0,37987000.0,39330000.0,40601000.0,41696000.0,42520000.0,43247000.0,45455000.0,45675000.0,49040000.0,47454000.0,50823000.0,50484000.0,49703000.0,52361000.0,54400000.0,52225000.0,53356000.0,60309000.0,57057000.0,57279000.0,58295000.0,58897000.0,59319000.0,60526000.0,61972000.0,63944000.0,70640000.0,68548000.0,66929000.0,66643000.0,70034000.0,68525000.0,69591000.0,71544000.0,72586000.0,71582000.0,74696000.0,75331000.0,75553000.0,76436000.0,87243000.0,80738000.0,79838000.0,80746000.0,81994000.0,83289000.0,84138000.0,91807000.0,84584000.0,85760000.0,87469000.0,89370000.0,96233000.0,94821000.0,98666000.0,95124000.0,92061000.0,93890000.0,96042000.0,96633000.0,94869000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/base/tukey.json000066400000000000000000000001121426140671200311300ustar00rootroot00000000000000[4.2588340691769435,4.440709516666814,4.925710709973135,5.107586157463005]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/change/000077500000000000000000000000001426140671200274155ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/change/estimates.json000066400000000000000000000006361426140671200323130ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.008476297386567844,"upper_bound":0.013287745367767459},"point_estimate":0.002615814736259159,"standard_error":0.005556642598564102},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.006576235748351755,"upper_bound":0.0076476935685134695},"point_estimate":-0.00042408598385557106,"standard_error":0.00365794755998004}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/new/000077500000000000000000000000001426140671200267615ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/new/benchmark.json000066400000000
000000000000002721426140671200316070ustar00rootroot00000000000000{"group_id":"Fibonacci","function_id":"Iterative","value_str":null,"throughput":null,"full_id":"Fibonacci/Iterative","directory_name":"Fibonacci/Iterative","title":"Fibonacci/Iterative"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/new/estimates.json000066400000000000000000000017361426140671200316610ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.689789300216866,"upper_bound":4.760375336010672},"point_estimate":4.723173110355467,"standard_error":0.01796446321086466},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.654054197375762,"upper_bound":4.696694163445648},"point_estimate":4.670911442470985,"standard_error":0.01070564545929857},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.06450212056716723,"upper_bound":0.11854627378470874},"point_estimate":0.08299438165247179,"standard_error":0.013423727201506403},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.669716648112915,"upper_bound":4.75099122458243},"point_estimate":4.708418938929127,"standard_error":0.020796293177134745},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.1267373438799889,"upper_bound":0.2317502059547596},"point_estimate":0.1802894617505479,"standard_error":0.026984432976314865}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/new/raw.csv000066400000000000000000000074671426140671200303050ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci,Iterative,,1128000,209454 Fibonacci,Iterative,,1853000,418908 Fibonacci,Iterative,,2976000,628362 Fibonacci,Iterative,,3908000,837816 Fibonacci,Iterative,,4938000,1047270 Fibonacci,Iterative,,5885000,1256724 Fibonacci,Iterative,,7324000,1466178 Fibonacci,Iterative,,8286000,1675632 Fibonacci,Iterative,,8765000,1885086 Fibonacci,Iterative,,10310000,2094540 Fibonacci,Iterative,,10857000,2303994 Fibonacci,Iterative,,11678000,2513448 Fibonacci,Iterative,,12549000,2722902 Fibonacci,Iterative,,13873000,2932356 Fibonacci,Iterative,,14675000,3141810 Fibonacci,Iterative,,16283000,3351264 Fibonacci,Iterative,,20063000,3560718 Fibonacci,Iterative,,17803000,3770172 Fibonacci,Iterative,,20195000,3979626 Fibonacci,Iterative,,20436000,4189080 Fibonacci,Iterative,,21179000,4398534 Fibonacci,Iterative,,22198000,4607988 Fibonacci,Iterative,,22829000,4817442 Fibonacci,Iterative,,23683000,5026896 Fibonacci,Iterative,,24281000,5236350 Fibonacci,Iterative,,25393000,5445804 Fibonacci,Iterative,,26687000,5655258 Fibonacci,Iterative,,27278000,5864712 Fibonacci,Iterative,,28180000,6074166 Fibonacci,Iterative,,29413000,6283620 Fibonacci,Iterative,,30601000,6493074 Fibonacci,Iterative,,31213000,6702528 Fibonacci,Iterative,,32171000,6911982 Fibonacci,Iterative,,33550000,7121436 Fibonacci,Iterative,,34156000,7330890 Fibonacci,Iterative,,35158000,7540344 Fibonacci,Iterative,,36981000,7749798 Fibonacci,Iterative,,36258000,7959252 Fibonacci,Iterative,,37167000,8168706 Fibonacci,Iterative,,37987000,8378160 Fibonacci,Iterative,,39330000,8587614 Fibonacci,Iterative,,40601000,8797068 Fibonacci,Iterative,,41696000,9006522 Fibonacci,Iterative,,42520000,9215976 Fibonacci,Iterative,,43247000,9425430 Fibonacci,Iterative,,45455000,9634884 Fibonacci,Iterative,,45675000,9844338 Fibonacci,Iterative,,49040000,10053792 Fibonacci,Iterative,,47454000,10263246 Fibonacci,Iterative,,50823000,10472700 
Fibonacci,Iterative,,50484000,10682154 Fibonacci,Iterative,,49703000,10891608 Fibonacci,Iterative,,52361000,11101062 Fibonacci,Iterative,,54400000,11310516 Fibonacci,Iterative,,52225000,11519970 Fibonacci,Iterative,,53356000,11729424 Fibonacci,Iterative,,60309000,11938878 Fibonacci,Iterative,,57057000,12148332 Fibonacci,Iterative,,57279000,12357786 Fibonacci,Iterative,,58295000,12567240 Fibonacci,Iterative,,58897000,12776694 Fibonacci,Iterative,,59319000,12986148 Fibonacci,Iterative,,60526000,13195602 Fibonacci,Iterative,,61972000,13405056 Fibonacci,Iterative,,63944000,13614510 Fibonacci,Iterative,,70640000,13823964 Fibonacci,Iterative,,68548000,14033418 Fibonacci,Iterative,,66929000,14242872 Fibonacci,Iterative,,66643000,14452326 Fibonacci,Iterative,,70034000,14661780 Fibonacci,Iterative,,68525000,14871234 Fibonacci,Iterative,,69591000,15080688 Fibonacci,Iterative,,71544000,15290142 Fibonacci,Iterative,,72586000,15499596 Fibonacci,Iterative,,71582000,15709050 Fibonacci,Iterative,,74696000,15918504 Fibonacci,Iterative,,75331000,16127958 Fibonacci,Iterative,,75553000,16337412 Fibonacci,Iterative,,76436000,16546866 Fibonacci,Iterative,,87243000,16756320 Fibonacci,Iterative,,80738000,16965774 Fibonacci,Iterative,,79838000,17175228 Fibonacci,Iterative,,80746000,17384682 Fibonacci,Iterative,,81994000,17594136 Fibonacci,Iterative,,83289000,17803590 Fibonacci,Iterative,,84138000,18013044 Fibonacci,Iterative,,91807000,18222498 Fibonacci,Iterative,,84584000,18431952 Fibonacci,Iterative,,85760000,18641406 Fibonacci,Iterative,,87469000,18850860 Fibonacci,Iterative,,89370000,19060314 Fibonacci,Iterative,,96233000,19269768 Fibonacci,Iterative,,94821000,19479222 Fibonacci,Iterative,,98666000,19688676 Fibonacci,Iterative,,95124000,19898130 Fibonacci,Iterative,,92061000,20107584 Fibonacci,Iterative,,93890000,20317038 Fibonacci,Iterative,,96042000,20526492 Fibonacci,Iterative,,96633000,20735946 Fibonacci,Iterative,,94869000,20945400 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/new/sample.json000066400000000000000000000041411426140671200311350ustar00rootroot00000000000000[[209454.0,418908.0,628362.0,837816.0,1047270.0,1256724.0,1466178.0,1675632.0,1885086.0,2094540.0,2303994.0,2513448.0,2722902.0,2932356.0,3141810.0,3351264.0,3560718.0,3770172.0,3979626.0,4189080.0,4398534.0,4607988.0,4817442.0,5026896.0,5236350.0,5445804.0,5655258.0,5864712.0,6074166.0,6283620.0,6493074.0,6702528.0,6911982.0,7121436.0,7330890.0,7540344.0,7749798.0,7959252.0,8168706.0,8378160.0,8587614.0,8797068.0,9006522.0,9215976.0,9425430.0,9634884.0,9844338.0,10053792.0,10263246.0,10472700.0,10682154.0,10891608.0,11101062.0,11310516.0,11519970.0,11729424.0,11938878.0,12148332.0,12357786.0,12567240.0,12776694.0,12986148.0,13195602.0,13405056.0,13614510.0,13823964.0,14033418.0,14242872.0,14452326.0,14661780.0,14871234.0,15080688.0,15290142.0,15499596.0,15709050.0,15918504.0,16127958.0,16337412.0,16546866.0,16756320.0,16965774.0,17175228.0,17384682.0,17594136.0,17803590.0,18013044.0,18222498.0,18431952.0,18641406.0,18850860.0,19060314.0,19269768.0,19479222.0,19688676.0,19898130.0,20107584.0,20317038.0,20526492.0,20735946.0,20945400.0],[1128000.0,1853000.0,2976000.0,3908000.0,4938000.0,5885000.0,7324000.0,8286000.0,8765000.0,10310000.0,10857000.0,11678000.0,12549000.0,13873000.0,14675000.0,16283000.0,20063000.0,17803000.0,20195000.0,20436000.0,21179000.0,22198000.0,22829000.0,23683000.0,24281000.0,25393000.0,26687000.0,27278000.0,28180000.0,29413000.0,30601000.0,31213000.0,32171000.0,33550000.0,34156000.0,35158000.0,36981000.0,36258000.0,37167000.0,37987000.0,39330000.0,40601000.0,41696000.0,42520000.0,43247000.0,45455000.0,45675000.0,49040000.0,47454000.0,50823000.0,50484000.0,49703000.0,52361000.0,54400000.0,52225000.0,53356000.0,60309000.0,57057000.0,57279000.0,58295000.0,58897000.0,59319000.0,60526000.0,61972000.0,63944000.0,70640000.0,68548000.0,66929000.0,66643000.0,70034000.0,68525000.0,69591000.0,71544000.0,72586000.0,71582000.0,74696000.0,75331000.0,75553000.0,76436000.0,87243000.0,80738000.0,79838000.0,80746000.0,81994000.0,83289000.0,84138000.0,91807000.0,84584000.0,85760000.0,87469000.0,89370000.0,96233000.0,94821000.0,98666000.0,95124000.0,92061000.0,93890000.0,96042000.0,96633000.0,94869000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/new/tukey.json000066400000000000000000000001121426140671200310070ustar00rootroot00000000000000[4.2588340691769435,4.440709516666814,4.925710709973135,5.107586157463005]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/000077500000000000000000000000001426140671200275035ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/MAD.svg000066400000000000000000000704411426140671200306330ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.005 0.01 0.015 0.02 0.025 0.03 0.035 0.04 60 70 80 90 100 110 120 Density (a.u.) Average time (ps) Fibonacci/Iterative: MAD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/SD.svg000066400000000000000000000705261426140671200305440ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.002 0.004 0.006 0.008 0.01 0.012 0.014 0.016 120 140 160 180 200 220 240 Density (a.u.) 
Average time (ps) Fibonacci/Iterative: SD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/both/000077500000000000000000000000001426140671200304375ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/both/pdf.svg000066400000000000000000001132001426140671200317260ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 3 3.5 4 4.2 4.4 4.6 4.8 5 5.2 5.4 5.6 5.8 6 Density (a.u.) Average time (ns) Fibonacci/Iterative Base PDF Base PDF Base Mean Base Mean New PDF New PDF New Mean New Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/both/regression.svg000066400000000000000000000542141426140671200333460ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) Fibonacci/Iterative gnuplot_plot_1 gnuplot_plot_2 Base sample Base sample New sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/change/000077500000000000000000000000001426140671200307305ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/change/mean.svg000066400000000000000000000717571426140671200324120ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 -1 -0.5 0 0.5 1 1.5 Density (a.u.) Relative change (%) Fibonacci/Iterative: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/change/median.svg000066400000000000000000000726041426140671200327170ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 -0.8 -0.6 -0.4 -0.2 0 0.2 0.4 0.6 0.8 Density (a.u.) Relative change (%) Fibonacci/Iterative: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/change/t-test.svg000066400000000000000000000617321426140671200327020ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 Density t score Fibonacci/Iterative: Welch t test t distribution t distribution t statistic t statistic criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/index.html000066400000000000000000000244511426140671200315060ustar00rootroot00000000000000 Fibonacci/Iterative - Criterion.rs

Fibonacci/Iterative

PDF of Slope Regression

Additional Statistics:

Lower bound Estimate Upper bound
Slope 4.6697 ns 4.7084 ns 4.7510 ns
R² 0.8452726 0.8531667 0.8436334
Mean 4.6898 ns 4.7232 ns 4.7604 ns
Std. Dev. 126.74 ps 180.29 ps 231.75 ps
Median 4.6541 ns 4.6709 ns 4.6967 ns
MAD 64.502 ps 82.994 ps 118.55 ps

Additional Plots:

Understanding this report:

The plot on the left displays the average time per iteration for this benchmark. The shaded region shows the estimated probability of an iteration taking a certain amount of time, while the line shows the mean. Click on the plot for a larger view showing the outliers.

The plot on the right shows the linear regression calculated from the measurements. Each point represents a sample, though here it shows the total time for the sample rather than time per iteration. The line is the line of best fit for these measurements.

See the documentation for more details on the additional statistics.
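
As a rough illustration of where the Slope row above comes from: each sample records a total time for a growing iteration count, and the slope of a least-squares line through the origin over those (iterations, total time) points gives an estimated time per iteration. The sketch below shows only that point-estimate idea; the real report additionally bootstraps the measurements to produce the confidence interval shown in the table.

```rust
/// Least-squares slope of a line through the origin fitted to
/// (iteration_count, total_sample_time_ns) points, i.e. an estimated
/// time per iteration in nanoseconds. Point estimate only; the actual
/// report also derives a bootstrapped confidence interval.
fn slope_estimate(samples: &[(f64, f64)]) -> f64 {
    let num: f64 = samples.iter().map(|(x, y)| x * y).sum();
    let den: f64 = samples.iter().map(|(x, _)| x * x).sum();
    num / den
}

fn main() {
    // First few (iteration_count, sample_time_nanos) points from this
    // report's raw.csv, used purely for illustration.
    let samples = [
        (209_454.0, 1_128_000.0),
        (418_908.0, 1_853_000.0),
        (628_362.0, 2_976_000.0),
    ];
    println!("~{:.2} ns per iteration", slope_estimate(&samples));
}
```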

Change Since Previous Benchmark

Additional Statistics:

Lower bound Estimate Upper bound
Change in time -0.8476% +0.2616% +1.3288% (p = 0.65 > 0.05)
No change in performance detected.

Understanding this report:

The plot on the left shows the probability of the function taking a certain amount of time. The red curve represents the saved measurements from the last time this benchmark was run, while the blue curve shows the measurements from this run. The lines represent the mean time per iteration. Click on the plot for a larger view.

The plot on the right shows the two regressions. Again, the red line represents the previous measurement while the blue line shows the current measurement.

See the documentation for more details on the additional statistics.
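
The "p = 0.65 > 0.05" comparison and the noise threshold behind the verdict above are configurable. If the defaults are too strict or too lenient for a noisy machine, they can be adjusted when the benchmark group is defined — a sketch with assumed values, not the settings used for this report:

```rust
use criterion::{criterion_group, criterion_main, Criterion};

fn bench(_c: &mut Criterion) {
    // ... register benchmarks here ...
}

// Example values only: require p < 0.05 to call a change significant and
// ignore relative changes smaller than 2%.
fn configured() -> Criterion {
    Criterion::default()
        .significance_level(0.05)
        .noise_threshold(0.02)
}

criterion_group! {
    name = benches;
    config = configured();
    targets = bench
}
criterion_main!(benches);
```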

criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/mean.svg000066400000000000000000000671371426140671200311620ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 25 4.69 4.7 4.71 4.72 4.73 4.74 4.75 4.76 Density (a.u.) Average time (ns) Fibonacci/Iterative: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/median.svg000066400000000000000000000671171426140671200314750ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 4.65 4.66 4.67 4.68 4.69 4.7 Density (a.u.) Average time (ns) Fibonacci/Iterative: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/pdf.svg000066400000000000000000001154611426140671200310050ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 4.2 4.4 4.6 4.8 5 5.2 5.4 5.6 5.8 0 0.5 1 1.5 2 2.5 3 3.5 Iterations (x 106) Density (a.u.) Average time (ns) Fibonacci/Iterative PDF PDF Mean Mean "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 gnuplot_plot_9 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/pdf_small.svg000066400000000000000000000546131426140671200321760ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 3 3.5 4.2 4.4 4.6 4.8 5 5.2 5.4 5.6 5.8 Density (a.u.) Average time (ns) PDF Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/regression.svg000066400000000000000000001006121426140671200324040ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) Fibonacci/Iterative Sample Sample Linear regression Linear regression Confidence interval Confidence interval criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/regression_small.svg000066400000000000000000000755631426140671200336140ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) Sample Linear regression Confidence interval criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/relative_pdf_small.svg000066400000000000000000001072601426140671200340660ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 3 3.5 4 4.2 4.4 4.6 4.8 5 5.2 5.4 5.6 5.8 6 Density (a.u.) Average time (ns) Base PDF Base Mean New PDF New Mean relative_regression_small.svg000066400000000000000000000523221426140671200354140ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) gnuplot_plot_1 gnuplot_plot_2 Base sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Iterative/report/slope.svg000066400000000000000000000740461426140671200313610ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 16 18 20 4.67 4.68 4.69 4.7 4.71 4.72 4.73 4.74 4.75 4.76 Density (a.u.) 
Average time (ns) Fibonacci/Iterative: slope Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/000077500000000000000000000000001426140671200262035ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/base/000077500000000000000000000000001426140671200271155ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/base/benchmark.json000066400000000000000000000002721426140671200317430ustar00rootroot00000000000000{"group_id":"Fibonacci","function_id":"Recursive","value_str":null,"throughput":null,"full_id":"Fibonacci/Recursive","directory_name":"Fibonacci/Recursive","title":"Fibonacci/Recursive"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/base/estimates.json000066400000000000000000000017211426140671200320070ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19189.436993840445,"upper_bound":19387.95822974072},"point_estimate":19284.21673781045,"standard_error":50.61075953480153},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19065.189048239896,"upper_bound":19247.31663685152},"point_estimate":19158.025568181816,"standard_error":39.6634584646669},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":229.32252683175213,"upper_bound":391.0153776734743},"point_estimate":300.31279213429315,"standard_error":41.38531961197946},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19196.814266495523,"upper_bound":19436.155310861093},"point_estimate":19310.74450671244,"standard_error":61.247839919749566},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":380.9687888326143,"upper_bound":620.1314416037659},"point_estimate":508.78945388810087,"standard_error":61.16159006578181}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/base/raw.csv000066400000000000000000000067071426140671200304350ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci,Recursive,,1014000,52 Fibonacci,Recursive,,1920000,104 Fibonacci,Recursive,,3022000,156 Fibonacci,Recursive,,3931000,208 Fibonacci,Recursive,,4810000,260 Fibonacci,Recursive,,6087000,312 Fibonacci,Recursive,,6787000,364 Fibonacci,Recursive,,7899000,416 Fibonacci,Recursive,,8860000,468 Fibonacci,Recursive,,10044000,520 Fibonacci,Recursive,,10699000,572 Fibonacci,Recursive,,11781000,624 Fibonacci,Recursive,,12810000,676 Fibonacci,Recursive,,14182000,728 Fibonacci,Recursive,,15115000,780 Fibonacci,Recursive,,16370000,832 Fibonacci,Recursive,,17157000,884 Fibonacci,Recursive,,17945000,936 Fibonacci,Recursive,,20923000,988 Fibonacci,Recursive,,20542000,1040 Fibonacci,Recursive,,22006000,1092 Fibonacci,Recursive,,22484000,1144 Fibonacci,Recursive,,22785000,1196 Fibonacci,Recursive,,24408000,1248 Fibonacci,Recursive,,24666000,1300 Fibonacci,Recursive,,26059000,1352 Fibonacci,Recursive,,26710000,1404 Fibonacci,Recursive,,27641000,1456 Fibonacci,Recursive,,28874000,1508 Fibonacci,Recursive,,29353000,1560 Fibonacci,Recursive,,30890000,1612 Fibonacci,Recursive,,31883000,1664 Fibonacci,Recursive,,33753000,1716 Fibonacci,Recursive,,33380000,1768 Fibonacci,Recursive,,37843000,1820 Fibonacci,Recursive,,35925000,1872 Fibonacci,Recursive,,37060000,1924 Fibonacci,Recursive,,37824000,1976 Fibonacci,Recursive,,38198000,2028 
Fibonacci,Recursive,,39434000,2080 Fibonacci,Recursive,,41046000,2132 Fibonacci,Recursive,,40856000,2184 Fibonacci,Recursive,,42519000,2236 Fibonacci,Recursive,,43460000,2288 Fibonacci,Recursive,,44868000,2340 Fibonacci,Recursive,,44988000,2392 Fibonacci,Recursive,,46970000,2444 Fibonacci,Recursive,,48975000,2496 Fibonacci,Recursive,,49978000,2548 Fibonacci,Recursive,,51451000,2600 Fibonacci,Recursive,,55120000,2652 Fibonacci,Recursive,,51255000,2704 Fibonacci,Recursive,,52947000,2756 Fibonacci,Recursive,,53305000,2808 Fibonacci,Recursive,,54785000,2860 Fibonacci,Recursive,,55320000,2912 Fibonacci,Recursive,,59561000,2964 Fibonacci,Recursive,,57335000,3016 Fibonacci,Recursive,,58492000,3068 Fibonacci,Recursive,,59236000,3120 Fibonacci,Recursive,,59697000,3172 Fibonacci,Recursive,,61642000,3224 Fibonacci,Recursive,,62091000,3276 Fibonacci,Recursive,,62831000,3328 Fibonacci,Recursive,,65607000,3380 Fibonacci,Recursive,,72767000,3432 Fibonacci,Recursive,,67336000,3484 Fibonacci,Recursive,,68879000,3536 Fibonacci,Recursive,,69465000,3588 Fibonacci,Recursive,,69132000,3640 Fibonacci,Recursive,,71783000,3692 Fibonacci,Recursive,,72313000,3744 Fibonacci,Recursive,,73378000,3796 Fibonacci,Recursive,,73543000,3848 Fibonacci,Recursive,,74380000,3900 Fibonacci,Recursive,,75224000,3952 Fibonacci,Recursive,,76812000,4004 Fibonacci,Recursive,,77010000,4056 Fibonacci,Recursive,,79289000,4108 Fibonacci,Recursive,,85558000,4160 Fibonacci,Recursive,,79835000,4212 Fibonacci,Recursive,,81161000,4264 Fibonacci,Recursive,,81174000,4316 Fibonacci,Recursive,,84890000,4368 Fibonacci,Recursive,,88113000,4420 Fibonacci,Recursive,,86074000,4472 Fibonacci,Recursive,,85794000,4524 Fibonacci,Recursive,,86970000,4576 Fibonacci,Recursive,,87827000,4628 Fibonacci,Recursive,,88039000,4680 Fibonacci,Recursive,,95990000,4732 Fibonacci,Recursive,,92917000,4784 Fibonacci,Recursive,,96439000,4836 Fibonacci,Recursive,,93487000,4888 Fibonacci,Recursive,,94448000,4940 Fibonacci,Recursive,,95545000,4992 Fibonacci,Recursive,,97458000,5044 Fibonacci,Recursive,,101856000,5096 Fibonacci,Recursive,,97665000,5148 Fibonacci,Recursive,,99940000,5200 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/base/sample.json000066400000000000000000000033611426140671200312740ustar00rootroot00000000000000[[52.0,104.0,156.0,208.0,260.0,312.0,364.0,416.0,468.0,520.0,572.0,624.0,676.0,728.0,780.0,832.0,884.0,936.0,988.0,1040.0,1092.0,1144.0,1196.0,1248.0,1300.0,1352.0,1404.0,1456.0,1508.0,1560.0,1612.0,1664.0,1716.0,1768.0,1820.0,1872.0,1924.0,1976.0,2028.0,2080.0,2132.0,2184.0,2236.0,2288.0,2340.0,2392.0,2444.0,2496.0,2548.0,2600.0,2652.0,2704.0,2756.0,2808.0,2860.0,2912.0,2964.0,3016.0,3068.0,3120.0,3172.0,3224.0,3276.0,3328.0,3380.0,3432.0,3484.0,3536.0,3588.0,3640.0,3692.0,3744.0,3796.0,3848.0,3900.0,3952.0,4004.0,4056.0,4108.0,4160.0,4212.0,4264.0,4316.0,4368.0,4420.0,4472.0,4524.0,4576.0,4628.0,4680.0,4732.0,4784.0,4836.0,4888.0,4940.0,4992.0,5044.0,5096.0,5148.0,5200.0],[1014000.0,1920000.0,3022000.0,3931000.0,4810000.0,6087000.0,6787000.0,7899000.0,8860000.0,10044000.0,10699000.0,11781000.0,12810000.0,14182000.0,15115000.0,16370000.0,17157000.0,17945000.0,20923000.0,20542000.0,22006000.0,22484000.0,22785000.0,24408000.0,24666000.0,26059000.0,26710000.0,27641000.0,28874000.0,29353000.0,30890000.0,31883000.0,33753000.0,33380000.0,37843000.0,35925000.0,37060000.0,37824000.0,38198000.0,39434000.0,41046000.0,40856000.0,42519000.0,43460000.0,44868000.0,44988000.0,46970000.0,48975000.0,49978000.0,51451000.0,55120000.0,51255000.0,52947000.0,53305000.0,54785000.0,55320000.0,59561000.0,57335000.0,58492000.0,59236000.0,59697000.0,61642000.0,62091000.0,62831000.0,65607000.0,72767000.0,67336000.0,68879000.0,69465000.0,69132000.0,71783000.0,72313000.0,73378000.0,73543000.0,74380000.0,75224000.0,76812000.0,77010000.0,79289000.0,85558000.0,79835000.0,81161000.0,81174000.0,84890000.0,88113000.0,86074000.0,85794000.0,86970000.0,87827000.0,88039000.0,95990000.0,92917000.0,96439000.0,93487000.0,94448000.0,95545000.0,97458000.0,101856000.0,97665000.0,99940000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/base/tukey.json000066400000000000000000000001141426140671200311450ustar00rootroot00000000000000[17650.693357726508,18316.23397119015,20091.008940426538,20756.549553890185]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/change/000077500000000000000000000000001426140671200274305ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/change/estimates.json000066400000000000000000000006411426140671200323220ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.022760338155223825,"upper_bound":-0.002236127647050845},"point_estimate":-0.012258823084615678,"standard_error":0.005198574246008731},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.012665422281127547,"upper_bound":0.00006044555398099227},"point_estimate":-0.0056351510115123515,"standard_error":0.003336096822956214}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/new/000077500000000000000000000000001426140671200267745ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/new/benchmark.json000066400000000000000000000002721426140671200316220ustar00rootroot00000000000000{"group_id":"Fibonacci","function_id":"Recursive","value_str":null,"throughput":null,"full_id":"Fibonacci/Recursive","directory_name":"Fibonacci/Recursive","title":"Fibonacci/Recursive"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/new/estimates.json00006640000000000000000
0000017211426140671200316660ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19189.436993840445,"upper_bound":19387.95822974072},"point_estimate":19284.21673781045,"standard_error":50.61075953480153},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19065.189048239896,"upper_bound":19247.31663685152},"point_estimate":19158.025568181816,"standard_error":39.6634584646669},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":229.32252683175213,"upper_bound":391.0153776734743},"point_estimate":300.31279213429315,"standard_error":41.38531961197946},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19196.814266495523,"upper_bound":19436.155310861093},"point_estimate":19310.74450671244,"standard_error":61.247839919749566},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":380.9687888326143,"upper_bound":620.1314416037659},"point_estimate":508.78945388810087,"standard_error":61.16159006578181}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/new/raw.csv000066400000000000000000000067071426140671200303140ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci,Recursive,,1014000,52 Fibonacci,Recursive,,1920000,104 Fibonacci,Recursive,,3022000,156 Fibonacci,Recursive,,3931000,208 Fibonacci,Recursive,,4810000,260 Fibonacci,Recursive,,6087000,312 Fibonacci,Recursive,,6787000,364 Fibonacci,Recursive,,7899000,416 Fibonacci,Recursive,,8860000,468 Fibonacci,Recursive,,10044000,520 Fibonacci,Recursive,,10699000,572 Fibonacci,Recursive,,11781000,624 Fibonacci,Recursive,,12810000,676 Fibonacci,Recursive,,14182000,728 Fibonacci,Recursive,,15115000,780 Fibonacci,Recursive,,16370000,832 Fibonacci,Recursive,,17157000,884 Fibonacci,Recursive,,17945000,936 Fibonacci,Recursive,,20923000,988 Fibonacci,Recursive,,20542000,1040 Fibonacci,Recursive,,22006000,1092 Fibonacci,Recursive,,22484000,1144 Fibonacci,Recursive,,22785000,1196 Fibonacci,Recursive,,24408000,1248 Fibonacci,Recursive,,24666000,1300 Fibonacci,Recursive,,26059000,1352 Fibonacci,Recursive,,26710000,1404 Fibonacci,Recursive,,27641000,1456 Fibonacci,Recursive,,28874000,1508 Fibonacci,Recursive,,29353000,1560 Fibonacci,Recursive,,30890000,1612 Fibonacci,Recursive,,31883000,1664 Fibonacci,Recursive,,33753000,1716 Fibonacci,Recursive,,33380000,1768 Fibonacci,Recursive,,37843000,1820 Fibonacci,Recursive,,35925000,1872 Fibonacci,Recursive,,37060000,1924 Fibonacci,Recursive,,37824000,1976 Fibonacci,Recursive,,38198000,2028 Fibonacci,Recursive,,39434000,2080 Fibonacci,Recursive,,41046000,2132 Fibonacci,Recursive,,40856000,2184 Fibonacci,Recursive,,42519000,2236 Fibonacci,Recursive,,43460000,2288 Fibonacci,Recursive,,44868000,2340 Fibonacci,Recursive,,44988000,2392 Fibonacci,Recursive,,46970000,2444 Fibonacci,Recursive,,48975000,2496 Fibonacci,Recursive,,49978000,2548 Fibonacci,Recursive,,51451000,2600 Fibonacci,Recursive,,55120000,2652 Fibonacci,Recursive,,51255000,2704 Fibonacci,Recursive,,52947000,2756 Fibonacci,Recursive,,53305000,2808 Fibonacci,Recursive,,54785000,2860 Fibonacci,Recursive,,55320000,2912 Fibonacci,Recursive,,59561000,2964 Fibonacci,Recursive,,57335000,3016 Fibonacci,Recursive,,58492000,3068 Fibonacci,Recursive,,59236000,3120 Fibonacci,Recursive,,59697000,3172 Fibonacci,Recursive,,61642000,3224 Fibonacci,Recursive,,62091000,3276 Fibonacci,Recursive,,62831000,3328 Fibonacci,Recursive,,65607000,3380 Fibonacci,Recursive,,72767000,3432 
Fibonacci,Recursive,,67336000,3484 Fibonacci,Recursive,,68879000,3536 Fibonacci,Recursive,,69465000,3588 Fibonacci,Recursive,,69132000,3640 Fibonacci,Recursive,,71783000,3692 Fibonacci,Recursive,,72313000,3744 Fibonacci,Recursive,,73378000,3796 Fibonacci,Recursive,,73543000,3848 Fibonacci,Recursive,,74380000,3900 Fibonacci,Recursive,,75224000,3952 Fibonacci,Recursive,,76812000,4004 Fibonacci,Recursive,,77010000,4056 Fibonacci,Recursive,,79289000,4108 Fibonacci,Recursive,,85558000,4160 Fibonacci,Recursive,,79835000,4212 Fibonacci,Recursive,,81161000,4264 Fibonacci,Recursive,,81174000,4316 Fibonacci,Recursive,,84890000,4368 Fibonacci,Recursive,,88113000,4420 Fibonacci,Recursive,,86074000,4472 Fibonacci,Recursive,,85794000,4524 Fibonacci,Recursive,,86970000,4576 Fibonacci,Recursive,,87827000,4628 Fibonacci,Recursive,,88039000,4680 Fibonacci,Recursive,,95990000,4732 Fibonacci,Recursive,,92917000,4784 Fibonacci,Recursive,,96439000,4836 Fibonacci,Recursive,,93487000,4888 Fibonacci,Recursive,,94448000,4940 Fibonacci,Recursive,,95545000,4992 Fibonacci,Recursive,,97458000,5044 Fibonacci,Recursive,,101856000,5096 Fibonacci,Recursive,,97665000,5148 Fibonacci,Recursive,,99940000,5200 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/new/sample.json000066400000000000000000000033611426140671200311530ustar00rootroot00000000000000[[52.0,104.0,156.0,208.0,260.0,312.0,364.0,416.0,468.0,520.0,572.0,624.0,676.0,728.0,780.0,832.0,884.0,936.0,988.0,1040.0,1092.0,1144.0,1196.0,1248.0,1300.0,1352.0,1404.0,1456.0,1508.0,1560.0,1612.0,1664.0,1716.0,1768.0,1820.0,1872.0,1924.0,1976.0,2028.0,2080.0,2132.0,2184.0,2236.0,2288.0,2340.0,2392.0,2444.0,2496.0,2548.0,2600.0,2652.0,2704.0,2756.0,2808.0,2860.0,2912.0,2964.0,3016.0,3068.0,3120.0,3172.0,3224.0,3276.0,3328.0,3380.0,3432.0,3484.0,3536.0,3588.0,3640.0,3692.0,3744.0,3796.0,3848.0,3900.0,3952.0,4004.0,4056.0,4108.0,4160.0,4212.0,4264.0,4316.0,4368.0,4420.0,4472.0,4524.0,4576.0,4628.0,4680.0,4732.0,4784.0,4836.0,4888.0,4940.0,4992.0,5044.0,5096.0,5148.0,5200.0],[1014000.0,1920000.0,3022000.0,3931000.0,4810000.0,6087000.0,6787000.0,7899000.0,8860000.0,10044000.0,10699000.0,11781000.0,12810000.0,14182000.0,15115000.0,16370000.0,17157000.0,17945000.0,20923000.0,20542000.0,22006000.0,22484000.0,22785000.0,24408000.0,24666000.0,26059000.0,26710000.0,27641000.0,28874000.0,29353000.0,30890000.0,31883000.0,33753000.0,33380000.0,37843000.0,35925000.0,37060000.0,37824000.0,38198000.0,39434000.0,41046000.0,40856000.0,42519000.0,43460000.0,44868000.0,44988000.0,46970000.0,48975000.0,49978000.0,51451000.0,55120000.0,51255000.0,52947000.0,53305000.0,54785000.0,55320000.0,59561000.0,57335000.0,58492000.0,59236000.0,59697000.0,61642000.0,62091000.0,62831000.0,65607000.0,72767000.0,67336000.0,68879000.0,69465000.0,69132000.0,71783000.0,72313000.0,73378000.0,73543000.0,74380000.0,75224000.0,76812000.0,77010000.0,79289000.0,85558000.0,79835000.0,81161000.0,81174000.0,84890000.0,88113000.0,86074000.0,85794000.0,86970000.0,87827000.0,88039000.0,95990000.0,92917000.0,96439000.0,93487000.0,94448000.0,95545000.0,97458000.0,101856000.0,97665000.0,99940000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/new/tukey.json000066400000000000000000000001141426140671200310240ustar00rootroot00000000000000[17650.693357726508,18316.23397119015,20091.008940426538,20756.549553890185]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/000077500000000000000000000000001426140671200275165ustar00rootroot00000000000000criterion.rs-0.3.6/boo
k/src/user_guide/html_report/Fibonacci/Recursive/report/MAD.svg000066400000000000000000000713271426140671200306520ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.002 0.004 0.006 0.008 0.01 0.012 220 240 260 280 300 320 340 360 380 400 Density (a.u.) Average time (ns) Fibonacci/Recursive: MAD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/SD.svg000066400000000000000000000663631426140671200305630ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.001 0.002 0.003 0.004 0.005 0.006 0.007 400 450 500 550 600 Density (a.u.) Average time (ns) Fibonacci/Recursive: SD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/both/000077500000000000000000000000001426140671200304525ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/both/pdf.svg000066400000000000000000001116561426140671200317560ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0002 0.0004 0.0006 0.0008 0.001 0.0012 17 18 19 20 21 22 23 24 25 26 Density (a.u.) Average time (us) Fibonacci/Recursive Base PDF Base PDF Base Mean Base Mean New PDF New PDF New Mean New Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/both/regression.svg000066400000000000000000000466221426140671200333650ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) Fibonacci/Recursive gnuplot_plot_1 gnuplot_plot_2 Base sample Base sample New sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/change/000077500000000000000000000000001426140671200307435ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/change/mean.svg000066400000000000000000000715711426140671200324170ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 -2.5 -2 -1.5 -1 -0.5 0 Density (a.u.) Relative change (%) Fibonacci/Recursive: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/change/median.svg000066400000000000000000000725761426140671200327420ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 140 -1.4 -1.2 -1 -0.8 -0.6 -0.4 -0.2 0 Density (a.u.) 
Relative change (%) Fibonacci/Recursive: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/change/t-test.svg000066400000000000000000000611761426140671200327170ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 -5 -4 -3 -2 -1 0 1 2 3 4 5 Density t score Fibonacci/Recursive: Welch t test t distribution t distribution t statistic t statistic criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/index.html000066400000000000000000000244451426140671200315240ustar00rootroot00000000000000 Fibonacci/Recursive - Criterion.rs

Fibonacci/Recursive

PDF of Slope Regression

Additional Statistics:

Lower bound Estimate Upper bound
Slope 19.197 us 19.311 us 19.436 us
R² 0.9164758 0.9206081 0.9156058
Mean 19.189 us 19.284 us 19.388 us
Std. Dev. 380.97 ns 508.79 ns 620.13 ns
Median 19.065 us 19.158 us 19.247 us
MAD 229.32 ns 300.31 ns 391.02 ns

Additional Plots:

Understanding this report:

The plot on the left displays the average time per iteration for this benchmark. The shaded region shows the estimated probability of an iteration taking a certain amount of time, while the line shows the mean. Click on the plot for a larger view showing the outliers.

The plot on the right shows the linear regression calculated from the measurements. Each point represents a sample, though here it shows the total time for the sample rather than time per iteration. The line is the line of best fit for these measurements.

See the documentation for more details on the additional statistics.

Change Since Previous Benchmark

Additional Statistics:

Lower bound Estimate Upper bound
Change in time -2.2760% -1.2259% -0.2236% (p = 0.02 < 0.05)
Change within noise threshold.

Understanding this report:

The plot on the left shows the probability of the function taking a certain amount of time. The red curve represents the saved measurements from the last time this benchmark was run, while the blue curve shows the measurements from this run. The lines represent the mean time per iteration. Click on the plot for a larger view.

The plot on the right shows the two regressions. Again, the red line represents the previous measurement while the blue line shows the current measurement.

See the documentation for more details on the additional statistics.

criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/mean.svg000066400000000000000000000666601426140671200311750ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 1 2 3 4 5 6 7 8 19.2 19.25 19.3 19.35 19.4 Density (a.u.) Average time (us) Fibonacci/Recursive: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/median.svg000066400000000000000000000676701426140671200315140ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 16 18 19.05 19.1 19.15 19.2 19.25 Density (a.u.) Average time (us) Fibonacci/Recursive: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/pdf.svg000066400000000000000000001145421426140671200310170ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 1 2 3 4 5 18 18.5 19 19.5 20 20.5 21 21.5 0 0.0002 0.0004 0.0006 0.0008 0.001 0.0012 Iterations (x 103) Density (a.u.) Average time (us) Fibonacci/Recursive PDF PDF Mean Mean "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 gnuplot_plot_9 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/pdf_small.svg000066400000000000000000000526041426140671200322070ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0002 0.0004 0.0006 0.0008 0.001 18 18.5 19 19.5 20 20.5 21 21.5 Density (a.u.) Average time (us) PDF Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/regression.svg000066400000000000000000000732301426140671200324240ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) Fibonacci/Recursive Sample Sample Linear regression Linear regression Confidence interval Confidence interval criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/regression_small.svg000066400000000000000000000702051426140671200336130ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) Sample Linear regression Confidence interval criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/relative_pdf_small.svg000066400000000000000000001057761426140671200341130ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0002 0.0004 0.0006 0.0008 0.001 0.0012 17 18 19 20 21 22 23 24 25 26 Density (a.u.) Average time (us) Base PDF Base Mean New PDF New Mean relative_regression_small.svg000066400000000000000000000447331426140671200354360ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) gnuplot_plot_1 gnuplot_plot_2 Base sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/Recursive/report/slope.svg000066400000000000000000000670451426140671200313750ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 1 2 3 4 5 6 7 19.2 19.25 19.3 19.35 19.4 19.45 Density (a.u.) 
Average time (us) Fibonacci/Recursive: slope Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/report/000077500000000000000000000000001426140671200255475ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/report/index.html000066400000000000000000000072571426140671200275570ustar00rootroot00000000000000 Fibonacci Summary - Criterion.rs

Fibonacci

Violin Plot

Violin Plot

This chart shows the relationship between function/parameter and iteration time. The thickness of the shaded region indicates the probability that a measurement of the given function/parameter would take a particular length of time.
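
The two entries compared in this plot come from benchmarking both implementations under one group, so they share the "Fibonacci" prefix and appear together on this summary page. A minimal sketch of that layout follows; the Fibonacci implementations themselves are assumptions for illustration, not code taken from this report.

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

// Illustrative implementations; not taken from this report.
fn fib_recursive(n: u64) -> u64 {
    match n {
        0 | 1 => n,
        _ => fib_recursive(n - 1) + fib_recursive(n - 2),
    }
}

fn fib_iterative(n: u64) -> u64 {
    let (mut a, mut b) = (0u64, 1u64);
    for _ in 0..n {
        let next = a + b;
        a = b;
        b = next;
    }
    a
}

fn bench_fibonacci(c: &mut Criterion) {
    // Both benchmarks live under the "Fibonacci" group, which is what this
    // summary page and its violin plot compare.
    let mut group = c.benchmark_group("Fibonacci");
    group.bench_function("Recursive", |b| b.iter(|| fib_recursive(black_box(20))));
    group.bench_function("Iterative", |b| b.iter(|| fib_iterative(black_box(20))));
    group.finish();
}

criterion_group!(benches, bench_fibonacci);
criterion_main!(benches);
```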

Fibonacci/Recursive

PDF of Slope Regression

Fibonacci/Iterative

PDF of Slope Regression
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci/report/violin.svg000066400000000000000000001056641426140671200276040ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 Fibonacci/Iterative Fibonacci/Recursive 0 5 10 15 20 25 Input Average time (us) Fibonacci: Violin plot PDF PDF gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/000077500000000000000000000000001426140671200243165ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/20/000077500000000000000000000000001426140671200245375ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/20/report/000077500000000000000000000000001426140671200260525ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/20/report/index.html000066400000000000000000000074051426140671200300550ustar00rootroot00000000000000 Fibonacci2/20 Summary - Criterion.rs

Fibonacci2/20

Violin Plot

Violin Plot

This chart shows the relationship between function/parameter and iteration time. The thickness of the shaded region indicates the probability that a measurement of the given function/parameter would take a particular length of time.
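
Per-input summary pages like this one come from parameterized benchmarks: the same functions are registered once per input value, and each input gets its own comparison page (20 here, 21 on the next page). A sketch of that pattern, with the implementations assumed as in the earlier sketch:

```rust
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};

// Assumed implementations, for illustration only.
fn fib_recursive(n: u64) -> u64 {
    if n < 2 { n } else { fib_recursive(n - 1) + fib_recursive(n - 2) }
}

fn fib_iterative(n: u64) -> u64 {
    (0..n).fold((0u64, 1u64), |(a, b), _| (b, a + b)).0
}

fn bench_fibonacci2(c: &mut Criterion) {
    let mut group = c.benchmark_group("Fibonacci2");
    for &n in &[20u64, 21] {
        // Produces IDs such as "Fibonacci2/Recursive/20" and "Fibonacci2/Iterative/21".
        group.bench_with_input(BenchmarkId::new("Recursive", n), &n, |b, &n| {
            b.iter(|| fib_recursive(black_box(n)))
        });
        group.bench_with_input(BenchmarkId::new("Iterative", n), &n, |b, &n| {
            b.iter(|| fib_iterative(black_box(n)))
        });
    }
    group.finish();
}

criterion_group!(benches, bench_fibonacci2);
criterion_main!(benches);
```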

Fibonacci2/Recursive/20

PDF of Slope Regression

Fibonacci2/Iterative/20

PDF of Slope Regression
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/20/report/violin.svg000066400000000000000000001060301426140671200300730ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 Fibonacci2/Iterative/20 Fibonacci2/Recursive/20 0 5 10 15 20 25 Input Average time (us) Fibonacci2/20: Violin plot PDF PDF gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/21/000077500000000000000000000000001426140671200245405ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/21/report/000077500000000000000000000000001426140671200260535ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/21/report/index.html000066400000000000000000000074051426140671200300560ustar00rootroot00000000000000 Fibonacci2/21 Summary - Criterion.rs

Fibonacci2/21

Violin Plot

Violin Plot

This chart shows the relationship between function/parameter and iteration time. The thickness of the shaded region indicates the probability that a measurement of the given function/parameter would take a particular length of time.

Fibonacci2/Recursive/21

PDF of Slope Regression

Fibonacci2/Iterative/21

PDF of Slope Regression
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/21/report/violin.svg000066400000000000000000001066511426140671200301050ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 Fibonacci2/Iterative/21 Fibonacci2/Recursive/21 0 10 20 30 40 50 60 Input Average time (us) Fibonacci2/21: Violin plot PDF PDF gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/000077500000000000000000000000001426140671200262525ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/000077500000000000000000000000001426140671200264735ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/base/000077500000000000000000000000001426140671200274055ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/base/benchmark.json000066400000000000000000000003071426140671200322320ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Iterative","value_str":"20","throughput":null,"full_id":"Fibonacci2/Iterative/20","directory_name":"Fibonacci2/Iterative/20","title":"Fibonacci2/Iterative/20"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/base/estimates.json000066400000000000000000000017351426140671200323040ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.6865919326828935,"upper_bound":4.826877407001549},"point_estimate":4.749468876197933,"standard_error":0.03613337128527054},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.638498164085483,"upper_bound":4.691865443093111},"point_estimate":4.669174721938917,"standard_error":0.012867922635856397},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.0854887608102471,"upper_bound":0.14427215827465295},"point_estimate":0.1135694875778934,"standard_error":0.01556002340961474},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.647754164901754,"upper_bound":4.722266788312594},"point_estimate":4.682346166582889,"standard_error":0.019082129332533393},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.162066355229056,"upper_bound":0.5136271100210184},"point_estimate":0.36365648434229814,"standard_error":0.08846234196500716}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/base/raw.csv000066400000000000000000000101421426140671200307110ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Iterative,20,1166000,208873 Fibonacci2,Iterative,20,1932000,417746 Fibonacci2,Iterative,20,2996000,626619 Fibonacci2,Iterative,20,5685000,835492 Fibonacci2,Iterative,20,7115000,1044365 Fibonacci2,Iterative,20,7593000,1253238 Fibonacci2,Iterative,20,7346000,1462111 Fibonacci2,Iterative,20,7901000,1670984 Fibonacci2,Iterative,20,8987000,1879857 Fibonacci2,Iterative,20,9826000,2088730 Fibonacci2,Iterative,20,10585000,2297603 Fibonacci2,Iterative,20,11963000,2506476 Fibonacci2,Iterative,20,12412000,2715349 Fibonacci2,Iterative,20,13163000,2924222 Fibonacci2,Iterative,20,14269000,3133095 Fibonacci2,Iterative,20,15350000,3341968 Fibonacci2,Iterative,20,16702000,3550841 Fibonacci2,Iterative,20,17840000,3759714 Fibonacci2,Iterative,20,18894000,3968587 Fibonacci2,Iterative,20,19320000,4177460 Fibonacci2,Iterative,20,20102000,4386333 Fibonacci2,Iterative,20,21588000,4595206 Fibonacci2,Iterative,20,23392000,4804079 Fibonacci2,Iterative,20,25036000,5012952 
Fibonacci2,Iterative,20,25171000,5221825 Fibonacci2,Iterative,20,24842000,5430698 Fibonacci2,Iterative,20,25546000,5639571 Fibonacci2,Iterative,20,27354000,5848444 Fibonacci2,Iterative,20,27299000,6057317 Fibonacci2,Iterative,20,28462000,6266190 Fibonacci2,Iterative,20,29549000,6475063 Fibonacci2,Iterative,20,31418000,6683936 Fibonacci2,Iterative,20,32147000,6892809 Fibonacci2,Iterative,20,33277000,7101682 Fibonacci2,Iterative,20,34386000,7310555 Fibonacci2,Iterative,20,35007000,7519428 Fibonacci2,Iterative,20,34915000,7728301 Fibonacci2,Iterative,20,37223000,7937174 Fibonacci2,Iterative,20,38290000,8146047 Fibonacci2,Iterative,20,39056000,8354920 Fibonacci2,Iterative,20,39406000,8563793 Fibonacci2,Iterative,20,40285000,8772666 Fibonacci2,Iterative,20,42845000,8981539 Fibonacci2,Iterative,20,41972000,9190412 Fibonacci2,Iterative,20,43848000,9399285 Fibonacci2,Iterative,20,50742000,9608158 Fibonacci2,Iterative,20,45830000,9817031 Fibonacci2,Iterative,20,46980000,10025904 Fibonacci2,Iterative,20,47762000,10234777 Fibonacci2,Iterative,20,49210000,10443650 Fibonacci2,Iterative,20,51582000,10652523 Fibonacci2,Iterative,20,51838000,10861396 Fibonacci2,Iterative,20,50997000,11070269 Fibonacci2,Iterative,20,51762000,11279142 Fibonacci2,Iterative,20,52197000,11488015 Fibonacci2,Iterative,20,53447000,11696888 Fibonacci2,Iterative,20,57403000,11905761 Fibonacci2,Iterative,20,57539000,12114634 Fibonacci2,Iterative,20,58028000,12323507 Fibonacci2,Iterative,20,57343000,12532380 Fibonacci2,Iterative,20,58037000,12741253 Fibonacci2,Iterative,20,60101000,12950126 Fibonacci2,Iterative,20,61944000,13158999 Fibonacci2,Iterative,20,68086000,13367872 Fibonacci2,Iterative,20,62524000,13576745 Fibonacci2,Iterative,20,65417000,13785618 Fibonacci2,Iterative,20,65117000,13994491 Fibonacci2,Iterative,20,66069000,14203364 Fibonacci2,Iterative,20,65706000,14412237 Fibonacci2,Iterative,20,67553000,14621110 Fibonacci2,Iterative,20,69830000,14829983 Fibonacci2,Iterative,20,69831000,15038856 Fibonacci2,Iterative,20,71316000,15247729 Fibonacci2,Iterative,20,71747000,15456602 Fibonacci2,Iterative,20,72612000,15665475 Fibonacci2,Iterative,20,71733000,15874348 Fibonacci2,Iterative,20,72975000,16083221 Fibonacci2,Iterative,20,77074000,16292094 Fibonacci2,Iterative,20,79505000,16500967 Fibonacci2,Iterative,20,78109000,16709840 Fibonacci2,Iterative,20,77717000,16918713 Fibonacci2,Iterative,20,80616000,17127586 Fibonacci2,Iterative,20,84237000,17336459 Fibonacci2,Iterative,20,84682000,17545332 Fibonacci2,Iterative,20,81813000,17754205 Fibonacci2,Iterative,20,83985000,17963078 Fibonacci2,Iterative,20,83869000,18171951 Fibonacci2,Iterative,20,84556000,18380824 Fibonacci2,Iterative,20,85705000,18589697 Fibonacci2,Iterative,20,87984000,18798570 Fibonacci2,Iterative,20,95220000,19007443 Fibonacci2,Iterative,20,98072000,19216316 Fibonacci2,Iterative,20,89636000,19425189 Fibonacci2,Iterative,20,95019000,19634062 Fibonacci2,Iterative,20,90877000,19842935 Fibonacci2,Iterative,20,91169000,20051808 Fibonacci2,Iterative,20,94616000,20260681 Fibonacci2,Iterative,20,93363000,20469554 Fibonacci2,Iterative,20,94504000,20678427 Fibonacci2,Iterative,20,95764000,20887300 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/base/sample.json000066400000000000000000000041401426140671200315600ustar00rootroot00000000000000[[208873.0,417746.0,626619.0,835492.0,1044365.0,1253238.0,1462111.0,1670984.0,1879857.0,2088730.0,2297603.0,2506476.0,2715349.0,2924222.0,3133095.0,3341968.0,3550841.0,3759714.0,3968587.0,4177460.0,4386333.0,4595206.0,4804079.0,5012952.0,5221825.0,5430698.0,5639571.0,5848444.0,6057317.0,6266190.0,6475063.0,6683936.0,6892809.0,7101682.0,7310555.0,7519428.0,7728301.0,7937174.0,8146047.0,8354920.0,8563793.0,8772666.0,8981539.0,9190412.0,9399285.0,9608158.0,9817031.0,10025904.0,10234777.0,10443650.0,10652523.0,10861396.0,11070269.0,11279142.0,11488015.0,11696888.0,11905761.0,12114634.0,12323507.0,12532380.0,12741253.0,12950126.0,13158999.0,13367872.0,13576745.0,13785618.0,13994491.0,14203364.0,14412237.0,14621110.0,14829983.0,15038856.0,15247729.0,15456602.0,15665475.0,15874348.0,16083221.0,16292094.0,16500967.0,16709840.0,16918713.0,17127586.0,17336459.0,17545332.0,17754205.0,17963078.0,18171951.0,18380824.0,18589697.0,18798570.0,19007443.0,19216316.0,19425189.0,19634062.0,19842935.0,20051808.0,20260681.0,20469554.0,20678427.0,20887300.0],[1166000.0,1932000.0,2996000.0,5685000.0,7115000.0,7593000.0,7346000.0,7901000.0,8987000.0,9826000.0,10585000.0,11963000.0,12412000.0,13163000.0,14269000.0,15350000.0,16702000.0,17840000.0,18894000.0,19320000.0,20102000.0,21588000.0,23392000.0,25036000.0,25171000.0,24842000.0,25546000.0,27354000.0,27299000.0,28462000.0,29549000.0,31418000.0,32147000.0,33277000.0,34386000.0,35007000.0,34915000.0,37223000.0,38290000.0,39056000.0,39406000.0,40285000.0,42845000.0,41972000.0,43848000.0,50742000.0,45830000.0,46980000.0,47762000.0,49210000.0,51582000.0,51838000.0,50997000.0,51762000.0,52197000.0,53447000.0,57403000.0,57539000.0,58028000.0,57343000.0,58037000.0,60101000.0,61944000.0,68086000.0,62524000.0,65417000.0,65117000.0,66069000.0,65706000.0,67553000.0,69830000.0,69831000.0,71316000.0,71747000.0,72612000.0,71733000.0,72975000.0,77074000.0,79505000.0,78109000.0,77717000.0,80616000.0,84237000.0,84682000.0,81813000.0,83985000.0,83869000.0,84556000.0,85705000.0,87984000.0,95220000.0,98072000.0,89636000.0,95019000.0,90877000.0,91169000.0,94616000.0,93363000.0,94504000.0,95764000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/base/tukey.json000066400000000000000000000001121426140671200314330ustar00rootroot00000000000000[4.132309458681682,4.362581066556917,4.976638687557543,5.2069102954327775]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/change/000077500000000000000000000000001426140671200277205ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/change/estimates.json000066400000000000000000000006331426140671200326130ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.01663615203261628,"upper_bound":0.026814317862783554},"point_estimate":0.005073330296754053,"standard_error":0.0110369398141501},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.008705322837157768,"upper_bound":0.006029587119676716},"point_estimate":-0.0004724534880239384,"standard_error":0.0035696763203163576}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/new/000077500000000000000000000000001426140671200272645ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/new/benchmark
.json000066400000000000000000000003071426140671200321110ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Iterative","value_str":"20","throughput":null,"full_id":"Fibonacci2/Iterative/20","directory_name":"Fibonacci2/Iterative/20","title":"Fibonacci2/Iterative/20"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/new/estimates.json000066400000000000000000000017351426140671200321630ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.6865919326828935,"upper_bound":4.826877407001549},"point_estimate":4.749468876197933,"standard_error":0.03613337128527054},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.638498164085483,"upper_bound":4.691865443093111},"point_estimate":4.669174721938917,"standard_error":0.012867922635856397},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.0854887608102471,"upper_bound":0.14427215827465295},"point_estimate":0.1135694875778934,"standard_error":0.01556002340961474},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":4.647754164901754,"upper_bound":4.722266788312594},"point_estimate":4.682346166582889,"standard_error":0.019082129332533393},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.162066355229056,"upper_bound":0.5136271100210184},"point_estimate":0.36365648434229814,"standard_error":0.08846234196500716}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/new/raw.csv000066400000000000000000000101421426140671200305700ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Iterative,20,1166000,208873 Fibonacci2,Iterative,20,1932000,417746 Fibonacci2,Iterative,20,2996000,626619 Fibonacci2,Iterative,20,5685000,835492 Fibonacci2,Iterative,20,7115000,1044365 Fibonacci2,Iterative,20,7593000,1253238 Fibonacci2,Iterative,20,7346000,1462111 Fibonacci2,Iterative,20,7901000,1670984 Fibonacci2,Iterative,20,8987000,1879857 Fibonacci2,Iterative,20,9826000,2088730 Fibonacci2,Iterative,20,10585000,2297603 Fibonacci2,Iterative,20,11963000,2506476 Fibonacci2,Iterative,20,12412000,2715349 Fibonacci2,Iterative,20,13163000,2924222 Fibonacci2,Iterative,20,14269000,3133095 Fibonacci2,Iterative,20,15350000,3341968 Fibonacci2,Iterative,20,16702000,3550841 Fibonacci2,Iterative,20,17840000,3759714 Fibonacci2,Iterative,20,18894000,3968587 Fibonacci2,Iterative,20,19320000,4177460 Fibonacci2,Iterative,20,20102000,4386333 Fibonacci2,Iterative,20,21588000,4595206 Fibonacci2,Iterative,20,23392000,4804079 Fibonacci2,Iterative,20,25036000,5012952 Fibonacci2,Iterative,20,25171000,5221825 Fibonacci2,Iterative,20,24842000,5430698 Fibonacci2,Iterative,20,25546000,5639571 Fibonacci2,Iterative,20,27354000,5848444 Fibonacci2,Iterative,20,27299000,6057317 Fibonacci2,Iterative,20,28462000,6266190 Fibonacci2,Iterative,20,29549000,6475063 Fibonacci2,Iterative,20,31418000,6683936 Fibonacci2,Iterative,20,32147000,6892809 Fibonacci2,Iterative,20,33277000,7101682 Fibonacci2,Iterative,20,34386000,7310555 Fibonacci2,Iterative,20,35007000,7519428 Fibonacci2,Iterative,20,34915000,7728301 Fibonacci2,Iterative,20,37223000,7937174 Fibonacci2,Iterative,20,38290000,8146047 Fibonacci2,Iterative,20,39056000,8354920 Fibonacci2,Iterative,20,39406000,8563793 Fibonacci2,Iterative,20,40285000,8772666 Fibonacci2,Iterative,20,42845000,8981539 Fibonacci2,Iterative,20,41972000,9190412 Fibonacci2,Iterative,20,43848000,9399285 Fibonacci2,Iterative,20,50742000,9608158 
Fibonacci2,Iterative,20,45830000,9817031 Fibonacci2,Iterative,20,46980000,10025904 Fibonacci2,Iterative,20,47762000,10234777 Fibonacci2,Iterative,20,49210000,10443650 Fibonacci2,Iterative,20,51582000,10652523 Fibonacci2,Iterative,20,51838000,10861396 Fibonacci2,Iterative,20,50997000,11070269 Fibonacci2,Iterative,20,51762000,11279142 Fibonacci2,Iterative,20,52197000,11488015 Fibonacci2,Iterative,20,53447000,11696888 Fibonacci2,Iterative,20,57403000,11905761 Fibonacci2,Iterative,20,57539000,12114634 Fibonacci2,Iterative,20,58028000,12323507 Fibonacci2,Iterative,20,57343000,12532380 Fibonacci2,Iterative,20,58037000,12741253 Fibonacci2,Iterative,20,60101000,12950126 Fibonacci2,Iterative,20,61944000,13158999 Fibonacci2,Iterative,20,68086000,13367872 Fibonacci2,Iterative,20,62524000,13576745 Fibonacci2,Iterative,20,65417000,13785618 Fibonacci2,Iterative,20,65117000,13994491 Fibonacci2,Iterative,20,66069000,14203364 Fibonacci2,Iterative,20,65706000,14412237 Fibonacci2,Iterative,20,67553000,14621110 Fibonacci2,Iterative,20,69830000,14829983 Fibonacci2,Iterative,20,69831000,15038856 Fibonacci2,Iterative,20,71316000,15247729 Fibonacci2,Iterative,20,71747000,15456602 Fibonacci2,Iterative,20,72612000,15665475 Fibonacci2,Iterative,20,71733000,15874348 Fibonacci2,Iterative,20,72975000,16083221 Fibonacci2,Iterative,20,77074000,16292094 Fibonacci2,Iterative,20,79505000,16500967 Fibonacci2,Iterative,20,78109000,16709840 Fibonacci2,Iterative,20,77717000,16918713 Fibonacci2,Iterative,20,80616000,17127586 Fibonacci2,Iterative,20,84237000,17336459 Fibonacci2,Iterative,20,84682000,17545332 Fibonacci2,Iterative,20,81813000,17754205 Fibonacci2,Iterative,20,83985000,17963078 Fibonacci2,Iterative,20,83869000,18171951 Fibonacci2,Iterative,20,84556000,18380824 Fibonacci2,Iterative,20,85705000,18589697 Fibonacci2,Iterative,20,87984000,18798570 Fibonacci2,Iterative,20,95220000,19007443 Fibonacci2,Iterative,20,98072000,19216316 Fibonacci2,Iterative,20,89636000,19425189 Fibonacci2,Iterative,20,95019000,19634062 Fibonacci2,Iterative,20,90877000,19842935 Fibonacci2,Iterative,20,91169000,20051808 Fibonacci2,Iterative,20,94616000,20260681 Fibonacci2,Iterative,20,93363000,20469554 Fibonacci2,Iterative,20,94504000,20678427 Fibonacci2,Iterative,20,95764000,20887300 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/new/sample.json000066400000000000000000000041401426140671200314370ustar00rootroot00000000000000[[208873.0,417746.0,626619.0,835492.0,1044365.0,1253238.0,1462111.0,1670984.0,1879857.0,2088730.0,2297603.0,2506476.0,2715349.0,2924222.0,3133095.0,3341968.0,3550841.0,3759714.0,3968587.0,4177460.0,4386333.0,4595206.0,4804079.0,5012952.0,5221825.0,5430698.0,5639571.0,5848444.0,6057317.0,6266190.0,6475063.0,6683936.0,6892809.0,7101682.0,7310555.0,7519428.0,7728301.0,7937174.0,8146047.0,8354920.0,8563793.0,8772666.0,8981539.0,9190412.0,9399285.0,9608158.0,9817031.0,10025904.0,10234777.0,10443650.0,10652523.0,10861396.0,11070269.0,11279142.0,11488015.0,11696888.0,11905761.0,12114634.0,12323507.0,12532380.0,12741253.0,12950126.0,13158999.0,13367872.0,13576745.0,13785618.0,13994491.0,14203364.0,14412237.0,14621110.0,14829983.0,15038856.0,15247729.0,15456602.0,15665475.0,15874348.0,16083221.0,16292094.0,16500967.0,16709840.0,16918713.0,17127586.0,17336459.0,17545332.0,17754205.0,17963078.0,18171951.0,18380824.0,18589697.0,18798570.0,19007443.0,19216316.0,19425189.0,19634062.0,19842935.0,20051808.0,20260681.0,20469554.0,20678427.0,20887300.0],[1166000.0,1932000.0,2996000.0,5685000.0,7115000.0,7593000.0,7346000.0,7901000.0,8987000.0,9826000.0,10585000.0,11963000.0,12412000.0,13163000.0,14269000.0,15350000.0,16702000.0,17840000.0,18894000.0,19320000.0,20102000.0,21588000.0,23392000.0,25036000.0,25171000.0,24842000.0,25546000.0,27354000.0,27299000.0,28462000.0,29549000.0,31418000.0,32147000.0,33277000.0,34386000.0,35007000.0,34915000.0,37223000.0,38290000.0,39056000.0,39406000.0,40285000.0,42845000.0,41972000.0,43848000.0,50742000.0,45830000.0,46980000.0,47762000.0,49210000.0,51582000.0,51838000.0,50997000.0,51762000.0,52197000.0,53447000.0,57403000.0,57539000.0,58028000.0,57343000.0,58037000.0,60101000.0,61944000.0,68086000.0,62524000.0,65417000.0,65117000.0,66069000.0,65706000.0,67553000.0,69830000.0,69831000.0,71316000.0,71747000.0,72612000.0,71733000.0,72975000.0,77074000.0,79505000.0,78109000.0,77717000.0,80616000.0,84237000.0,84682000.0,81813000.0,83985000.0,83869000.0,84556000.0,85705000.0,87984000.0,95220000.0,98072000.0,89636000.0,95019000.0,90877000.0,91169000.0,94616000.0,93363000.0,94504000.0,95764000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/new/tukey.json000066400000000000000000000001121426140671200313120ustar00rootroot00000000000000[4.132309458681682,4.362581066556917,4.976638687557543,5.2069102954327775]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/000077500000000000000000000000001426140671200300065ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/MAD.svg000066400000000000000000000677521426140671200311510ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.005 0.01 0.015 0.02 0.025 0.03 80 90 100 110 120 130 140 150 Density (a.u.) Average time (ps) Fibonacci2/Iterative/20: MAD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/SD.svg000066400000000000000000000734351426140671200310510ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0005 0.001 0.0015 0.002 0.0025 0.003 0.0035 0.004 0.0045 0.005 150 200 250 300 350 400 450 500 550 Density (a.u.) 
Average time (ps) Fibonacci2/Iterative/20: SD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/both/000077500000000000000000000000001426140671200307425ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/both/pdf.svg000066400000000000000000001117241426140671200322420ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 3.5 4 4.5 5 5.5 6 6.5 7 7.5 8 8.5 Density (a.u.) Average time (ns) Fibonacci2/Iterative/20 Base PDF Base PDF Base Mean Base Mean New PDF New PDF New Mean New Mean regression.svg000066400000000000000000000542201426140671200335670ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/both Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) Fibonacci2/Iterative/20 gnuplot_plot_1 gnuplot_plot_2 Base sample Base sample New sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/change/000077500000000000000000000000001426140671200312335ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/change/mean.svg000066400000000000000000000717231426140671200327060ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 25 30 35 40 -2 -1 0 1 2 3 Density (a.u.) Relative change (%) Fibonacci2/Iterative/20: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/change/median.svg000066400000000000000000000741521426140671200332220ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 140 160 -1 -0.8 -0.6 -0.4 -0.2 0 0.2 0.4 0.6 Density (a.u.) Relative change (%) Fibonacci2/Iterative/20: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/change/t-test.svg000066400000000000000000000577061426140671200332130ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 -4 -3 -2 -1 0 1 2 3 4 Density t score Fibonacci2/Iterative/20: Welch t test t distribution t distribution t statistic t statistic criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/index.html000066400000000000000000000244611426140671200320120ustar00rootroot00000000000000 Fibonacci2/Iterative/20 - Criterion.rs

Fibonacci2/Iterative/20

PDF of Slope Regression

Additional Statistics:

Lower bound Estimate Upper bound
Slope 4.6478 ns 4.6823 ns 4.7223 ns
R² 0.8710333 0.8773257 0.8689652
Mean 4.6866 ns 4.7495 ns 4.8269 ns
Std. Dev. 162.07 ps 363.66 ps 513.63 ps
Median 4.6385 ns 4.6692 ns 4.6919 ns
MAD 85.489 ps 113.57 ps 144.27 ps

Additional Plots:

Understanding this report:

The plot on the left displays the average time per iteration for this benchmark. The shaded region shows the estimated probability of an iteration taking a certain amount of time, while the line shows the mean. Click on the plot for a larger view showing the outliers.

The plot on the right shows the linear regression calculated from the measurements. Each point represents a sample, though here it shows the total time for the sample rather than time per iteration. The line is the line of best fit for these measurements.

See the documentation for more details on the additional statistics.

Change Since Previous Benchmark

Additional Statistics:

Lower bound Estimate Upper bound
Change in time -1.6636% +0.5073% +2.6814% (p = 0.66 > 0.05)
No change in performance detected.

Understanding this report:

The plot on the left shows the probability of the function taking a certain amount of time. The red curve represents the saved measurements from the last time this benchmark was run, while the blue curve shows the measurements from this run. The lines represent the mean time per iteration. Click on the plot for a larger view.

The plot on the right shows the two regressions. Again, the red line represents the previous measurement while the blue line shows the current measurement.

See the documentation for more details on the additional statistics.

criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/mean.svg000066400000000000000000000705111426140671200314530ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 4.68 4.7 4.72 4.74 4.76 4.78 4.8 4.82 4.84 Density (a.u.) Average time (ns) Fibonacci2/Iterative/20: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/median.svg000066400000000000000000000663221426140671200317750ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 4.64 4.65 4.66 4.67 4.68 4.69 Density (a.u.) Average time (ns) Fibonacci2/Iterative/20: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/pdf.svg000066400000000000000000001117341426140671200313070ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 4.5 5 5.5 6 6.5 7 0 0.5 1 1.5 2 2.5 Iterations (x 106) Density (a.u.) Average time (ns) Fibonacci2/Iterative/20 PDF PDF Mean Mean "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 gnuplot_plot_9 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/pdf_small.svg000066400000000000000000000503301426140671200324710ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 4.5 5 5.5 6 6.5 7 Density (a.u.) Average time (ns) PDF Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/regression.svg000066400000000000000000001006171426140671200327140ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) Fibonacci2/Iterative/20 Sample Sample Linear regression Linear regression Confidence interval Confidence interval regression_small.svg000066400000000000000000000755631426140671200340400ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) Sample Linear regression Confidence interval relative_pdf_small.svg000066400000000000000000001057621426140671200343170ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 3.5 4 4.5 5 5.5 6 6.5 7 7.5 8 8.5 Density (a.u.) Average time (ns) Base PDF Base Mean New PDF New Mean relative_regression_small.svg000066400000000000000000000523221426140671200357170ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 0 5 10 15 20 25 Total sample time (ms) Iterations (x 106) gnuplot_plot_1 gnuplot_plot_2 Base sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/20/report/slope.svg000066400000000000000000000705261426140671200316630ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 25 4.64 4.65 4.66 4.67 4.68 4.69 4.7 4.71 4.72 4.73 Density (a.u.) 
Average time (ns) Fibonacci2/Iterative/20: slope Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/000077500000000000000000000000001426140671200264745ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/base/000077500000000000000000000000001426140671200274065ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/base/benchmark.json000066400000000000000000000003071426140671200322330ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Iterative","value_str":"21","throughput":null,"full_id":"Fibonacci2/Iterative/21","directory_name":"Fibonacci2/Iterative/21","title":"Fibonacci2/Iterative/21"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/base/estimates.json000066400000000000000000000017371426140671200323070ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5.127090641334778,"upper_bound":5.23123406288508},"point_estimate":5.174134237245827,"standard_error":0.026625233442313354},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5.083120068147297,"upper_bound":5.1395625941366525},"point_estimate":5.103754249805536,"standard_error":0.01309281554573903},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.07887020145424532,"upper_bound":0.1278232685069992},"point_estimate":0.10171122605107209,"standard_error":0.012656760168092964},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5.123323368994097,"upper_bound":5.244947819421853},"point_estimate":5.173405844184566,"standard_error":0.03146425257664682},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.1517623872609745,"upper_bound":0.3795325306839991},"point_estimate":0.26760216042211804,"standard_error":0.060472037630525534}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/base/raw.csv000066400000000000000000000101341426140671200307130ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Iterative,21,1119000,190695 Fibonacci2,Iterative,21,1874000,381390 Fibonacci2,Iterative,21,2919000,572085 Fibonacci2,Iterative,21,4173000,762780 Fibonacci2,Iterative,21,4799000,953475 Fibonacci2,Iterative,21,5894000,1144170 Fibonacci2,Iterative,21,6944000,1334865 Fibonacci2,Iterative,21,7846000,1525560 Fibonacci2,Iterative,21,8519000,1716255 Fibonacci2,Iterative,21,9606000,1906950 Fibonacci2,Iterative,21,10843000,2097645 Fibonacci2,Iterative,21,11325000,2288340 Fibonacci2,Iterative,21,12936000,2479035 Fibonacci2,Iterative,21,13535000,2669730 Fibonacci2,Iterative,21,14388000,2860425 Fibonacci2,Iterative,21,15331000,3051120 Fibonacci2,Iterative,21,16066000,3241815 Fibonacci2,Iterative,21,17011000,3432510 Fibonacci2,Iterative,21,18354000,3623205 Fibonacci2,Iterative,21,19302000,3813900 Fibonacci2,Iterative,21,20117000,4004595 Fibonacci2,Iterative,21,21040000,4195290 Fibonacci2,Iterative,21,22117000,4385985 Fibonacci2,Iterative,21,23321000,4576680 Fibonacci2,Iterative,21,23471000,4767375 Fibonacci2,Iterative,21,26713000,4958070 Fibonacci2,Iterative,21,26548000,5148765 Fibonacci2,Iterative,21,27755000,5339460 Fibonacci2,Iterative,21,29985000,5530155 Fibonacci2,Iterative,21,29471000,5720850 Fibonacci2,Iterative,21,30248000,5911545 Fibonacci2,Iterative,21,30141000,6102240 
Fibonacci2,Iterative,21,31757000,6292935 Fibonacci2,Iterative,21,33262000,6483630 Fibonacci2,Iterative,21,40260000,6674325 Fibonacci2,Iterative,21,35631000,6865020 Fibonacci2,Iterative,21,35490000,7055715 Fibonacci2,Iterative,21,37892000,7246410 Fibonacci2,Iterative,21,37583000,7437105 Fibonacci2,Iterative,21,39502000,7627800 Fibonacci2,Iterative,21,39762000,7818495 Fibonacci2,Iterative,21,39714000,8009190 Fibonacci2,Iterative,21,42862000,8199885 Fibonacci2,Iterative,21,43596000,8390580 Fibonacci2,Iterative,21,44104000,8581275 Fibonacci2,Iterative,21,44656000,8771970 Fibonacci2,Iterative,21,46213000,8962665 Fibonacci2,Iterative,21,46729000,9153360 Fibonacci2,Iterative,21,48263000,9344055 Fibonacci2,Iterative,21,56277000,9534750 Fibonacci2,Iterative,21,50566000,9725445 Fibonacci2,Iterative,21,52883000,9916140 Fibonacci2,Iterative,21,60071000,10106835 Fibonacci2,Iterative,21,52939000,10297530 Fibonacci2,Iterative,21,53094000,10488225 Fibonacci2,Iterative,21,55640000,10678920 Fibonacci2,Iterative,21,59387000,10869615 Fibonacci2,Iterative,21,55319000,11060310 Fibonacci2,Iterative,21,58941000,11251005 Fibonacci2,Iterative,21,59767000,11441700 Fibonacci2,Iterative,21,58409000,11632395 Fibonacci2,Iterative,21,61107000,11823090 Fibonacci2,Iterative,21,65693000,12013785 Fibonacci2,Iterative,21,61985000,12204480 Fibonacci2,Iterative,21,62882000,12395175 Fibonacci2,Iterative,21,62904000,12585870 Fibonacci2,Iterative,21,64141000,12776565 Fibonacci2,Iterative,21,65588000,12967260 Fibonacci2,Iterative,21,68292000,13157955 Fibonacci2,Iterative,21,67554000,13348650 Fibonacci2,Iterative,21,93788000,13539345 Fibonacci2,Iterative,21,75554000,13730040 Fibonacci2,Iterative,21,69606000,13920735 Fibonacci2,Iterative,21,71374000,14111430 Fibonacci2,Iterative,21,74600000,14302125 Fibonacci2,Iterative,21,72845000,14492820 Fibonacci2,Iterative,21,75053000,14683515 Fibonacci2,Iterative,21,75966000,14874210 Fibonacci2,Iterative,21,76851000,15064905 Fibonacci2,Iterative,21,77793000,15255600 Fibonacci2,Iterative,21,77913000,15446295 Fibonacci2,Iterative,21,80271000,15636990 Fibonacci2,Iterative,21,83609000,15827685 Fibonacci2,Iterative,21,80645000,16018380 Fibonacci2,Iterative,21,87953000,16209075 Fibonacci2,Iterative,21,83362000,16399770 Fibonacci2,Iterative,21,85058000,16590465 Fibonacci2,Iterative,21,85587000,16781160 Fibonacci2,Iterative,21,87348000,16971855 Fibonacci2,Iterative,21,87211000,17162550 Fibonacci2,Iterative,21,87387000,17353245 Fibonacci2,Iterative,21,89301000,17543940 Fibonacci2,Iterative,21,89970000,17734635 Fibonacci2,Iterative,21,92585000,17925330 Fibonacci2,Iterative,21,92048000,18116025 Fibonacci2,Iterative,21,95721000,18306720 Fibonacci2,Iterative,21,94016000,18497415 Fibonacci2,Iterative,21,95183000,18688110 Fibonacci2,Iterative,21,96695000,18878805 Fibonacci2,Iterative,21,97384000,19069500 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/base/sample.json000066400000000000000000000041321426140671200315620ustar00rootroot00000000000000[[190695.0,381390.0,572085.0,762780.0,953475.0,1144170.0,1334865.0,1525560.0,1716255.0,1906950.0,2097645.0,2288340.0,2479035.0,2669730.0,2860425.0,3051120.0,3241815.0,3432510.0,3623205.0,3813900.0,4004595.0,4195290.0,4385985.0,4576680.0,4767375.0,4958070.0,5148765.0,5339460.0,5530155.0,5720850.0,5911545.0,6102240.0,6292935.0,6483630.0,6674325.0,6865020.0,7055715.0,7246410.0,7437105.0,7627800.0,7818495.0,8009190.0,8199885.0,8390580.0,8581275.0,8771970.0,8962665.0,9153360.0,9344055.0,9534750.0,9725445.0,9916140.0,10106835.0,10297530.0,10488225.0,10678920.0,10869615.0,11060310.0,11251005.0,11441700.0,11632395.0,11823090.0,12013785.0,12204480.0,12395175.0,12585870.0,12776565.0,12967260.0,13157955.0,13348650.0,13539345.0,13730040.0,13920735.0,14111430.0,14302125.0,14492820.0,14683515.0,14874210.0,15064905.0,15255600.0,15446295.0,15636990.0,15827685.0,16018380.0,16209075.0,16399770.0,16590465.0,16781160.0,16971855.0,17162550.0,17353245.0,17543940.0,17734635.0,17925330.0,18116025.0,18306720.0,18497415.0,18688110.0,18878805.0,19069500.0],[1119000.0,1874000.0,2919000.0,4173000.0,4799000.0,5894000.0,6944000.0,7846000.0,8519000.0,9606000.0,10843000.0,11325000.0,12936000.0,13535000.0,14388000.0,15331000.0,16066000.0,17011000.0,18354000.0,19302000.0,20117000.0,21040000.0,22117000.0,23321000.0,23471000.0,26713000.0,26548000.0,27755000.0,29985000.0,29471000.0,30248000.0,30141000.0,31757000.0,33262000.0,40260000.0,35631000.0,35490000.0,37892000.0,37583000.0,39502000.0,39762000.0,39714000.0,42862000.0,43596000.0,44104000.0,44656000.0,46213000.0,46729000.0,48263000.0,56277000.0,50566000.0,52883000.0,60071000.0,52939000.0,53094000.0,55640000.0,59387000.0,55319000.0,58941000.0,59767000.0,58409000.0,61107000.0,65693000.0,61985000.0,62882000.0,62904000.0,64141000.0,65588000.0,68292000.0,67554000.0,93788000.0,75554000.0,69606000.0,71374000.0,74600000.0,72845000.0,75053000.0,75966000.0,76851000.0,77793000.0,77913000.0,80271000.0,83609000.0,80645000.0,87953000.0,83362000.0,85058000.0,85587000.0,87348000.0,87211000.0,87387000.0,89301000.0,89970000.0,92585000.0,92048000.0,95721000.0,94016000.0,95183000.0,96695000.0,97384000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/base/tukey.json000066400000000000000000000001111426140671200314330ustar00rootroot00000000000000[4.594302314156006,4.820086268701859,5.422176814157465,5.647960768703317]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/change/000077500000000000000000000000001426140671200277215ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/change/estimates.json000066400000000000000000000006361426140671200326170ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.040051731403618604,"upper_bound":0.001127823544505596},"point_estimate":-0.018478223494309165,"standard_error":0.010462179388781844},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.012998842533619262,"upper_bound":0.0012414406139817924},"point_estimate":-0.006523417239515439,"standard_error":0.003526083602316944}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/new/000077500000000000000000000000001426140671200272655ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/new/benchmark.jso
n000066400000000000000000000003071426140671200321120ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Iterative","value_str":"21","throughput":null,"full_id":"Fibonacci2/Iterative/21","directory_name":"Fibonacci2/Iterative/21","title":"Fibonacci2/Iterative/21"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/new/estimates.json000066400000000000000000000017371426140671200321660ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5.127090641334778,"upper_bound":5.23123406288508},"point_estimate":5.174134237245827,"standard_error":0.026625233442313354},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5.083120068147297,"upper_bound":5.1395625941366525},"point_estimate":5.103754249805536,"standard_error":0.01309281554573903},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.07887020145424532,"upper_bound":0.1278232685069992},"point_estimate":0.10171122605107209,"standard_error":0.012656760168092964},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5.123323368994097,"upper_bound":5.244947819421853},"point_estimate":5.173405844184566,"standard_error":0.03146425257664682},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":0.1517623872609745,"upper_bound":0.3795325306839991},"point_estimate":0.26760216042211804,"standard_error":0.060472037630525534}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/new/raw.csv000066400000000000000000000101341426140671200305720ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Iterative,21,1119000,190695 Fibonacci2,Iterative,21,1874000,381390 Fibonacci2,Iterative,21,2919000,572085 Fibonacci2,Iterative,21,4173000,762780 Fibonacci2,Iterative,21,4799000,953475 Fibonacci2,Iterative,21,5894000,1144170 Fibonacci2,Iterative,21,6944000,1334865 Fibonacci2,Iterative,21,7846000,1525560 Fibonacci2,Iterative,21,8519000,1716255 Fibonacci2,Iterative,21,9606000,1906950 Fibonacci2,Iterative,21,10843000,2097645 Fibonacci2,Iterative,21,11325000,2288340 Fibonacci2,Iterative,21,12936000,2479035 Fibonacci2,Iterative,21,13535000,2669730 Fibonacci2,Iterative,21,14388000,2860425 Fibonacci2,Iterative,21,15331000,3051120 Fibonacci2,Iterative,21,16066000,3241815 Fibonacci2,Iterative,21,17011000,3432510 Fibonacci2,Iterative,21,18354000,3623205 Fibonacci2,Iterative,21,19302000,3813900 Fibonacci2,Iterative,21,20117000,4004595 Fibonacci2,Iterative,21,21040000,4195290 Fibonacci2,Iterative,21,22117000,4385985 Fibonacci2,Iterative,21,23321000,4576680 Fibonacci2,Iterative,21,23471000,4767375 Fibonacci2,Iterative,21,26713000,4958070 Fibonacci2,Iterative,21,26548000,5148765 Fibonacci2,Iterative,21,27755000,5339460 Fibonacci2,Iterative,21,29985000,5530155 Fibonacci2,Iterative,21,29471000,5720850 Fibonacci2,Iterative,21,30248000,5911545 Fibonacci2,Iterative,21,30141000,6102240 Fibonacci2,Iterative,21,31757000,6292935 Fibonacci2,Iterative,21,33262000,6483630 Fibonacci2,Iterative,21,40260000,6674325 Fibonacci2,Iterative,21,35631000,6865020 Fibonacci2,Iterative,21,35490000,7055715 Fibonacci2,Iterative,21,37892000,7246410 Fibonacci2,Iterative,21,37583000,7437105 Fibonacci2,Iterative,21,39502000,7627800 Fibonacci2,Iterative,21,39762000,7818495 Fibonacci2,Iterative,21,39714000,8009190 Fibonacci2,Iterative,21,42862000,8199885 Fibonacci2,Iterative,21,43596000,8390580 Fibonacci2,Iterative,21,44104000,8581275 Fibonacci2,Iterative,21,44656000,8771970 
Fibonacci2,Iterative,21,46213000,8962665 Fibonacci2,Iterative,21,46729000,9153360 Fibonacci2,Iterative,21,48263000,9344055 Fibonacci2,Iterative,21,56277000,9534750 Fibonacci2,Iterative,21,50566000,9725445 Fibonacci2,Iterative,21,52883000,9916140 Fibonacci2,Iterative,21,60071000,10106835 Fibonacci2,Iterative,21,52939000,10297530 Fibonacci2,Iterative,21,53094000,10488225 Fibonacci2,Iterative,21,55640000,10678920 Fibonacci2,Iterative,21,59387000,10869615 Fibonacci2,Iterative,21,55319000,11060310 Fibonacci2,Iterative,21,58941000,11251005 Fibonacci2,Iterative,21,59767000,11441700 Fibonacci2,Iterative,21,58409000,11632395 Fibonacci2,Iterative,21,61107000,11823090 Fibonacci2,Iterative,21,65693000,12013785 Fibonacci2,Iterative,21,61985000,12204480 Fibonacci2,Iterative,21,62882000,12395175 Fibonacci2,Iterative,21,62904000,12585870 Fibonacci2,Iterative,21,64141000,12776565 Fibonacci2,Iterative,21,65588000,12967260 Fibonacci2,Iterative,21,68292000,13157955 Fibonacci2,Iterative,21,67554000,13348650 Fibonacci2,Iterative,21,93788000,13539345 Fibonacci2,Iterative,21,75554000,13730040 Fibonacci2,Iterative,21,69606000,13920735 Fibonacci2,Iterative,21,71374000,14111430 Fibonacci2,Iterative,21,74600000,14302125 Fibonacci2,Iterative,21,72845000,14492820 Fibonacci2,Iterative,21,75053000,14683515 Fibonacci2,Iterative,21,75966000,14874210 Fibonacci2,Iterative,21,76851000,15064905 Fibonacci2,Iterative,21,77793000,15255600 Fibonacci2,Iterative,21,77913000,15446295 Fibonacci2,Iterative,21,80271000,15636990 Fibonacci2,Iterative,21,83609000,15827685 Fibonacci2,Iterative,21,80645000,16018380 Fibonacci2,Iterative,21,87953000,16209075 Fibonacci2,Iterative,21,83362000,16399770 Fibonacci2,Iterative,21,85058000,16590465 Fibonacci2,Iterative,21,85587000,16781160 Fibonacci2,Iterative,21,87348000,16971855 Fibonacci2,Iterative,21,87211000,17162550 Fibonacci2,Iterative,21,87387000,17353245 Fibonacci2,Iterative,21,89301000,17543940 Fibonacci2,Iterative,21,89970000,17734635 Fibonacci2,Iterative,21,92585000,17925330 Fibonacci2,Iterative,21,92048000,18116025 Fibonacci2,Iterative,21,95721000,18306720 Fibonacci2,Iterative,21,94016000,18497415 Fibonacci2,Iterative,21,95183000,18688110 Fibonacci2,Iterative,21,96695000,18878805 Fibonacci2,Iterative,21,97384000,19069500 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/new/sample.json000066400000000000000000000041321426140671200314410ustar00rootroot00000000000000[[190695.0,381390.0,572085.0,762780.0,953475.0,1144170.0,1334865.0,1525560.0,1716255.0,1906950.0,2097645.0,2288340.0,2479035.0,2669730.0,2860425.0,3051120.0,3241815.0,3432510.0,3623205.0,3813900.0,4004595.0,4195290.0,4385985.0,4576680.0,4767375.0,4958070.0,5148765.0,5339460.0,5530155.0,5720850.0,5911545.0,6102240.0,6292935.0,6483630.0,6674325.0,6865020.0,7055715.0,7246410.0,7437105.0,7627800.0,7818495.0,8009190.0,8199885.0,8390580.0,8581275.0,8771970.0,8962665.0,9153360.0,9344055.0,9534750.0,9725445.0,9916140.0,10106835.0,10297530.0,10488225.0,10678920.0,10869615.0,11060310.0,11251005.0,11441700.0,11632395.0,11823090.0,12013785.0,12204480.0,12395175.0,12585870.0,12776565.0,12967260.0,13157955.0,13348650.0,13539345.0,13730040.0,13920735.0,14111430.0,14302125.0,14492820.0,14683515.0,14874210.0,15064905.0,15255600.0,15446295.0,15636990.0,15827685.0,16018380.0,16209075.0,16399770.0,16590465.0,16781160.0,16971855.0,17162550.0,17353245.0,17543940.0,17734635.0,17925330.0,18116025.0,18306720.0,18497415.0,18688110.0,18878805.0,19069500.0],[1119000.0,1874000.0,2919000.0,4173000.0,4799000.0,5894000.0,6944000.0,7846000.0,8519000.0,9606000.0,10843000.0,11325000.0,12936000.0,13535000.0,14388000.0,15331000.0,16066000.0,17011000.0,18354000.0,19302000.0,20117000.0,21040000.0,22117000.0,23321000.0,23471000.0,26713000.0,26548000.0,27755000.0,29985000.0,29471000.0,30248000.0,30141000.0,31757000.0,33262000.0,40260000.0,35631000.0,35490000.0,37892000.0,37583000.0,39502000.0,39762000.0,39714000.0,42862000.0,43596000.0,44104000.0,44656000.0,46213000.0,46729000.0,48263000.0,56277000.0,50566000.0,52883000.0,60071000.0,52939000.0,53094000.0,55640000.0,59387000.0,55319000.0,58941000.0,59767000.0,58409000.0,61107000.0,65693000.0,61985000.0,62882000.0,62904000.0,64141000.0,65588000.0,68292000.0,67554000.0,93788000.0,75554000.0,69606000.0,71374000.0,74600000.0,72845000.0,75053000.0,75966000.0,76851000.0,77793000.0,77913000.0,80271000.0,83609000.0,80645000.0,87953000.0,83362000.0,85058000.0,85587000.0,87348000.0,87211000.0,87387000.0,89301000.0,89970000.0,92585000.0,92048000.0,95721000.0,94016000.0,95183000.0,96695000.0,97384000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/new/tukey.json000066400000000000000000000001111426140671200313120ustar00rootroot00000000000000[4.594302314156006,4.820086268701859,5.422176814157465,5.647960768703317]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/000077500000000000000000000000001426140671200300075ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/MAD.svg000066400000000000000000000670511426140671200311420ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.005 0.01 0.015 0.02 0.025 0.03 0.035 80 90 100 110 120 130 Density (a.u.) Average time (ps) Fibonacci2/Iterative/21: MAD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/SD.svg000066400000000000000000000670421426140671200310470ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.001 0.002 0.003 0.004 0.005 0.006 0.007 150 200 250 300 350 400 Density (a.u.) 
Average time (ps) Fibonacci2/Iterative/21: SD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/both/000077500000000000000000000000001426140671200307435ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/both/pdf.svg000066400000000000000000001124321426140671200322400ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 4 4.5 5 5.5 6 6.5 7 7.5 8 8.5 9 9.5 Density (a.u.) Average time (ns) Fibonacci2/Iterative/21 Base PDF Base PDF Base Mean Base Mean New PDF New PDF New Mean New Mean regression.svg000066400000000000000000000561651426140671200336020ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/both Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 2 4 6 8 10 12 14 16 18 20 Total sample time (ms) Iterations (x 106) Fibonacci2/Iterative/21 gnuplot_plot_1 gnuplot_plot_2 Base sample Base sample New sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/change/000077500000000000000000000000001426140671200312345ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/change/mean.svg000066400000000000000000000710121426140671200326760ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 25 30 35 40 -4 -3 -2 -1 0 Density (a.u.) Relative change (%) Fibonacci2/Iterative/21: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/change/median.svg000066400000000000000000000724001426140671200332150ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 -1.4 -1.2 -1 -0.8 -0.6 -0.4 -0.2 0 0.2 Density (a.u.) Relative change (%) Fibonacci2/Iterative/21: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/change/t-test.svg000066400000000000000000000605061426140671200332040ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 -5 -4 -3 -2 -1 0 1 2 3 4 Density t score Fibonacci2/Iterative/21: Welch t test t distribution t distribution t statistic t statistic criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/index.html000066400000000000000000000244611426140671200320130ustar00rootroot00000000000000 Fibonacci2/Iterative/21 - Criterion.rs

Fibonacci2/Iterative/21

PDF of Slope Regression

Additional Statistics:

Lower bound Estimate Upper bound
Slope 5.1233 ns 5.1734 ns 5.2449 ns
R² 0.7146992 0.7217511 0.7075062
Mean 5.1271 ns 5.1741 ns 5.2312 ns
Std. Dev. 151.76 ps 267.60 ps 379.53 ps
Median 5.0831 ns 5.1038 ns 5.1396 ns
MAD 78.870 ps 101.71 ps 127.82 ps

Additional Plots:

Understanding this report:

The plot on the left displays the average time per iteration for this benchmark. The shaded region shows the estimated probability of an iteration taking a certain amount of time, while the line shows the mean. Click on the plot for a larger view showing the outliers.

The plot on the right shows the linear regression calculated from the measurements. Each point represents a sample, though here it shows the total time for the sample rather than time per iteration. The line is the line of best fit for these measurements.

See the documentation for more details on the additional statistics.

Change Since Previous Benchmark

Additional Statistics:

Lower bound Estimate Upper bound
Change in time -4.0052% -1.8478% +0.1128% (p = 0.08 > 0.05)
No change in performance detected.

Understanding this report:

The plot on the left shows the probability of the function taking a certain amount of time. The red curve represents the saved measurements from the last time this benchmark was run, while the blue curve shows the measurements from this run. The lines represent the mean time per iteration. Click on the plot for a larger view.

The plot on the right shows the two regressions. Again, the red line represents the previous measurement while the blue line shows the current measurement.

See the documentation for more details on the additional statistics.

criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/mean.svg000066400000000000000000000703411426140671200314550ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 16 5.12 5.14 5.16 5.18 5.2 5.22 5.24 Density (a.u.) Average time (ns) Fibonacci2/Iterative/21: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/median.svg000066400000000000000000000671361426140671200320020ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 5.08 5.09 5.1 5.11 5.12 5.13 5.14 Density (a.u.) Average time (ns) Fibonacci2/Iterative/21: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/pdf.svg000066400000000000000000001146021426140671200313050ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 16 18 5 5.5 6 6.5 7 0 0.5 1 1.5 2 2.5 Iterations (x 106) Density (a.u.) Average time (ns) Fibonacci2/Iterative/21 PDF PDF Mean Mean "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 gnuplot_plot_9 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/pdf_small.svg000066400000000000000000000503241426140671200324750ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 5 5.5 6 6.5 7 Density (a.u.) Average time (ns) PDF Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/regression.svg000066400000000000000000001025531426140671200327160ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 2 4 6 8 10 12 14 16 18 20 Total sample time (ms) Iterations (x 106) Fibonacci2/Iterative/21 Sample Sample Linear regression Linear regression Confidence interval Confidence interval regression_small.svg000066400000000000000000000775111426140671200340340ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 2 4 6 8 10 12 14 16 18 20 Total sample time (ms) Iterations (x 106) Sample Linear regression Confidence interval relative_pdf_small.svg000066400000000000000000001065401426140671200343130ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 4 4.5 5 5.5 6 6.5 7 7.5 8 8.5 9 9.5 Density (a.u.) Average time (ns) Base PDF Base Mean New PDF New Mean relative_regression_small.svg000066400000000000000000000542351426140671200357250ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 2 4 6 8 10 12 14 16 18 20 Total sample time (ms) Iterations (x 106) gnuplot_plot_1 gnuplot_plot_2 Base sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/21/report/slope.svg000066400000000000000000000676121426140671200316660ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 5.12 5.14 5.16 5.18 5.2 5.22 5.24 Density (a.u.) 
Average time (ns) Fibonacci2/Iterative/21: slope Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/report/000077500000000000000000000000001426140671200275655ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/report/index.html000066400000000000000000000077341426140671200315750ustar00rootroot00000000000000 Fibonacci2/Iterative Summary - Criterion.rs

Fibonacci2/Iterative

Violin Plot

Violin Plot

This chart shows the relationship between function/parameter and iteration time. The thickness of the shaded region indicates the probability that a measurement of the given function/parameter would take a particular length of time.

Line Chart

Line Chart

This chart shows the mean measured time for each function as the input (or the size of the input) increases.

Fibonacci2/Iterative/20

PDF of Slope Regression

Fibonacci2/Iterative/21

PDF of Slope Regression
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/report/lines.svg000066400000000000000000000434771426140671200314370ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 4.7 4.75 4.8 4.85 4.9 4.95 5 5.05 5.1 5.15 5.2 20 20.2 20.4 20.6 20.8 21 Average time (ns) Input Fibonacci2/Iterative: Comparison Iterative Iterative gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Iterative/report/violin.svg000066400000000000000000001107501426140671200316120ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 Fibonacci2/Iterative/21 Fibonacci2/Iterative/20 4 4.5 5 5.5 6 6.5 7 7.5 Input Average time (ns) Fibonacci2/Iterative: Violin plot PDF PDF gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/000077500000000000000000000000001426140671200262655ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/000077500000000000000000000000001426140671200265065ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/base/000077500000000000000000000000001426140671200274205ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/base/benchmark.json000066400000000000000000000003071426140671200322450ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Recursive","value_str":"20","throughput":null,"full_id":"Fibonacci2/Recursive/20","directory_name":"Fibonacci2/Recursive/20","title":"Fibonacci2/Recursive/20"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/base/estimates.json000066400000000000000000000017231426140671200323140ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19191.30467185672,"upper_bound":19418.699255912317},"point_estimate":19297.485357477723,"standard_error":58.13868311488075},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19076.168929110107,"upper_bound":19237.458193979932},"point_estimate":19165.752851799363,"standard_error":43.95280551430538},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":259.84492591682596,"upper_bound":425.66108491436256},"point_estimate":347.5011247055599,"standard_error":43.236702358169914},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19212.759462607886,"upper_bound":19378.68474490057},"point_estimate":19293.004740198474,"standard_error":42.309293617915046},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":391.91697128365684,"upper_bound":767.19906497255},"point_estimate":588.1424963995645,"standard_error":96.23588402421795}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/base/raw.csv000066400000000000000000000073611426140671200307350ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Recursive,20,1162000,52 Fibonacci2,Recursive,20,1978000,104 Fibonacci2,Recursive,20,2914000,156 Fibonacci2,Recursive,20,3926000,208 Fibonacci2,Recursive,20,4893000,260 Fibonacci2,Recursive,20,5911000,312 Fibonacci2,Recursive,20,6765000,364 Fibonacci2,Recursive,20,7817000,416 Fibonacci2,Recursive,20,8748000,468 Fibonacci2,Recursive,20,9732000,520 Fibonacci2,Recursive,20,11382000,572 Fibonacci2,Recursive,20,11810000,624 Fibonacci2,Recursive,20,12688000,676 Fibonacci2,Recursive,20,13856000,728 Fibonacci2,Recursive,20,14749000,780 Fibonacci2,Recursive,20,16070000,832 
Fibonacci2,Recursive,20,16620000,884 Fibonacci2,Recursive,20,20304000,936 Fibonacci2,Recursive,20,20782000,988 Fibonacci2,Recursive,20,20229000,1040 Fibonacci2,Recursive,20,21106000,1092 Fibonacci2,Recursive,20,22153000,1144 Fibonacci2,Recursive,20,23338000,1196 Fibonacci2,Recursive,20,23587000,1248 Fibonacci2,Recursive,20,24727000,1300 Fibonacci2,Recursive,20,25627000,1352 Fibonacci2,Recursive,20,27314000,1404 Fibonacci2,Recursive,20,29169000,1456 Fibonacci2,Recursive,20,29599000,1508 Fibonacci2,Recursive,20,29872000,1560 Fibonacci2,Recursive,20,30757000,1612 Fibonacci2,Recursive,20,31213000,1664 Fibonacci2,Recursive,20,32824000,1716 Fibonacci2,Recursive,20,36599000,1768 Fibonacci2,Recursive,20,34086000,1820 Fibonacci2,Recursive,20,35946000,1872 Fibonacci2,Recursive,20,37171000,1924 Fibonacci2,Recursive,20,37419000,1976 Fibonacci2,Recursive,20,38222000,2028 Fibonacci2,Recursive,20,39270000,2080 Fibonacci2,Recursive,20,41058000,2132 Fibonacci2,Recursive,20,41172000,2184 Fibonacci2,Recursive,20,42525000,2236 Fibonacci2,Recursive,20,43309000,2288 Fibonacci2,Recursive,20,44134000,2340 Fibonacci2,Recursive,20,45216000,2392 Fibonacci2,Recursive,20,46360000,2444 Fibonacci2,Recursive,20,49922000,2496 Fibonacci2,Recursive,20,50534000,2548 Fibonacci2,Recursive,20,50576000,2600 Fibonacci2,Recursive,20,50590000,2652 Fibonacci2,Recursive,20,52051000,2704 Fibonacci2,Recursive,20,54905000,2756 Fibonacci2,Recursive,20,53975000,2808 Fibonacci2,Recursive,20,54288000,2860 Fibonacci2,Recursive,20,58698000,2912 Fibonacci2,Recursive,20,56166000,2964 Fibonacci2,Recursive,20,58179000,3016 Fibonacci2,Recursive,20,58386000,3068 Fibonacci2,Recursive,20,59617000,3120 Fibonacci2,Recursive,20,61554000,3172 Fibonacci2,Recursive,20,61599000,3224 Fibonacci2,Recursive,20,62198000,3276 Fibonacci2,Recursive,20,63505000,3328 Fibonacci2,Recursive,20,64447000,3380 Fibonacci2,Recursive,20,70436000,3432 Fibonacci2,Recursive,20,67152000,3484 Fibonacci2,Recursive,20,69598000,3536 Fibonacci2,Recursive,20,71134000,3588 Fibonacci2,Recursive,20,70352000,3640 Fibonacci2,Recursive,20,70875000,3692 Fibonacci2,Recursive,20,71805000,3744 Fibonacci2,Recursive,20,72407000,3796 Fibonacci2,Recursive,20,72254000,3848 Fibonacci2,Recursive,20,73660000,3900 Fibonacci2,Recursive,20,80229000,3952 Fibonacci2,Recursive,20,76739000,4004 Fibonacci2,Recursive,20,77749000,4056 Fibonacci2,Recursive,20,78957000,4108 Fibonacci2,Recursive,20,81856000,4160 Fibonacci2,Recursive,20,80908000,4212 Fibonacci2,Recursive,20,82306000,4264 Fibonacci2,Recursive,20,84393000,4316 Fibonacci2,Recursive,20,86382000,4368 Fibonacci2,Recursive,20,84331000,4420 Fibonacci2,Recursive,20,85710000,4472 Fibonacci2,Recursive,20,86742000,4524 Fibonacci2,Recursive,20,88563000,4576 Fibonacci2,Recursive,20,87852000,4628 Fibonacci2,Recursive,20,88611000,4680 Fibonacci2,Recursive,20,92949000,4732 Fibonacci2,Recursive,20,92032000,4784 Fibonacci2,Recursive,20,93425000,4836 Fibonacci2,Recursive,20,95356000,4888 Fibonacci2,Recursive,20,95975000,4940 Fibonacci2,Recursive,20,97090000,4992 Fibonacci2,Recursive,20,96199000,5044 Fibonacci2,Recursive,20,96010000,5096 Fibonacci2,Recursive,20,99792000,5148 Fibonacci2,Recursive,20,99439000,5200 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/base/sample.json000066400000000000000000000033571426140671200316040ustar00rootroot00000000000000[[52.0,104.0,156.0,208.0,260.0,312.0,364.0,416.0,468.0,520.0,572.0,624.0,676.0,728.0,780.0,832.0,884.0,936.0,988.0,1040.0,1092.0,1144.0,1196.0,1248.0,1300.0,1352.0,1404.0,1456.0,1508.0,1560.0,1612.0,1664.0,1716.0,1768.0,1820.0,1872.0,1924.0,1976.0,2028.0,2080.0,2132.0,2184.0,2236.0,2288.0,2340.0,2392.0,2444.0,2496.0,2548.0,2600.0,2652.0,2704.0,2756.0,2808.0,2860.0,2912.0,2964.0,3016.0,3068.0,3120.0,3172.0,3224.0,3276.0,3328.0,3380.0,3432.0,3484.0,3536.0,3588.0,3640.0,3692.0,3744.0,3796.0,3848.0,3900.0,3952.0,4004.0,4056.0,4108.0,4160.0,4212.0,4264.0,4316.0,4368.0,4420.0,4472.0,4524.0,4576.0,4628.0,4680.0,4732.0,4784.0,4836.0,4888.0,4940.0,4992.0,5044.0,5096.0,5148.0,5200.0],[1162000.0,1978000.0,2914000.0,3926000.0,4893000.0,5911000.0,6765000.0,7817000.0,8748000.0,9732000.0,11382000.0,11810000.0,12688000.0,13856000.0,14749000.0,16070000.0,16620000.0,20304000.0,20782000.0,20229000.0,21106000.0,22153000.0,23338000.0,23587000.0,24727000.0,25627000.0,27314000.0,29169000.0,29599000.0,29872000.0,30757000.0,31213000.0,32824000.0,36599000.0,34086000.0,35946000.0,37171000.0,37419000.0,38222000.0,39270000.0,41058000.0,41172000.0,42525000.0,43309000.0,44134000.0,45216000.0,46360000.0,49922000.0,50534000.0,50576000.0,50590000.0,52051000.0,54905000.0,53975000.0,54288000.0,58698000.0,56166000.0,58179000.0,58386000.0,59617000.0,61554000.0,61599000.0,62198000.0,63505000.0,64447000.0,70436000.0,67152000.0,69598000.0,71134000.0,70352000.0,70875000.0,71805000.0,72407000.0,72254000.0,73660000.0,80229000.0,76739000.0,77749000.0,78957000.0,81856000.0,80908000.0,82306000.0,84393000.0,86382000.0,84331000.0,85710000.0,86742000.0,88563000.0,87852000.0,88611000.0,92949000.0,92032000.0,93425000.0,95356000.0,95975000.0,97090000.0,96199000.0,96010000.0,99792000.0,99439000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/base/tukey.json000066400000000000000000000001151426140671200314510ustar00rootroot00000000000000[17473.130693319836,18208.225265688256,20168.477458670714,20903.572031039137]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/change/000077500000000000000000000000001426140671200277335ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/change/estimates.json000066400000000000000000000006401426140671200326240ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.009653253885065695,"upper_bound":0.0061681645598758135},"point_estimate":-0.001797794950961995,"standard_error":0.004052872795417921},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.006595923278991724,"upper_bound":0.004313850100134742},"point_estimate":-0.00042394017927227523,"standard_error":0.002774008684617836}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/new/000077500000000000000000000000001426140671200272775ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/new/benchmark.json000066400000000000000000000003071426140671200321240ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Recursive","value_str":"20","throughput":null,"full_id":"Fibonacci2/Recursive/20","directory_name":"Fibonacci2/Recursive/20","title":"Fibonacci2/Recursive/20"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/ne
w/estimates.json000066400000000000000000000017231426140671200321730ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19191.30467185672,"upper_bound":19418.699255912317},"point_estimate":19297.485357477723,"standard_error":58.13868311488075},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19076.168929110107,"upper_bound":19237.458193979932},"point_estimate":19165.752851799363,"standard_error":43.95280551430538},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":259.84492591682596,"upper_bound":425.66108491436256},"point_estimate":347.5011247055599,"standard_error":43.236702358169914},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":19212.759462607886,"upper_bound":19378.68474490057},"point_estimate":19293.004740198474,"standard_error":42.309293617915046},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":391.91697128365684,"upper_bound":767.19906497255},"point_estimate":588.1424963995645,"standard_error":96.23588402421795}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/new/raw.csv000066400000000000000000000073611426140671200306140ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Recursive,20,1162000,52 Fibonacci2,Recursive,20,1978000,104 Fibonacci2,Recursive,20,2914000,156 Fibonacci2,Recursive,20,3926000,208 Fibonacci2,Recursive,20,4893000,260 Fibonacci2,Recursive,20,5911000,312 Fibonacci2,Recursive,20,6765000,364 Fibonacci2,Recursive,20,7817000,416 Fibonacci2,Recursive,20,8748000,468 Fibonacci2,Recursive,20,9732000,520 Fibonacci2,Recursive,20,11382000,572 Fibonacci2,Recursive,20,11810000,624 Fibonacci2,Recursive,20,12688000,676 Fibonacci2,Recursive,20,13856000,728 Fibonacci2,Recursive,20,14749000,780 Fibonacci2,Recursive,20,16070000,832 Fibonacci2,Recursive,20,16620000,884 Fibonacci2,Recursive,20,20304000,936 Fibonacci2,Recursive,20,20782000,988 Fibonacci2,Recursive,20,20229000,1040 Fibonacci2,Recursive,20,21106000,1092 Fibonacci2,Recursive,20,22153000,1144 Fibonacci2,Recursive,20,23338000,1196 Fibonacci2,Recursive,20,23587000,1248 Fibonacci2,Recursive,20,24727000,1300 Fibonacci2,Recursive,20,25627000,1352 Fibonacci2,Recursive,20,27314000,1404 Fibonacci2,Recursive,20,29169000,1456 Fibonacci2,Recursive,20,29599000,1508 Fibonacci2,Recursive,20,29872000,1560 Fibonacci2,Recursive,20,30757000,1612 Fibonacci2,Recursive,20,31213000,1664 Fibonacci2,Recursive,20,32824000,1716 Fibonacci2,Recursive,20,36599000,1768 Fibonacci2,Recursive,20,34086000,1820 Fibonacci2,Recursive,20,35946000,1872 Fibonacci2,Recursive,20,37171000,1924 Fibonacci2,Recursive,20,37419000,1976 Fibonacci2,Recursive,20,38222000,2028 Fibonacci2,Recursive,20,39270000,2080 Fibonacci2,Recursive,20,41058000,2132 Fibonacci2,Recursive,20,41172000,2184 Fibonacci2,Recursive,20,42525000,2236 Fibonacci2,Recursive,20,43309000,2288 Fibonacci2,Recursive,20,44134000,2340 Fibonacci2,Recursive,20,45216000,2392 Fibonacci2,Recursive,20,46360000,2444 Fibonacci2,Recursive,20,49922000,2496 Fibonacci2,Recursive,20,50534000,2548 Fibonacci2,Recursive,20,50576000,2600 Fibonacci2,Recursive,20,50590000,2652 Fibonacci2,Recursive,20,52051000,2704 Fibonacci2,Recursive,20,54905000,2756 Fibonacci2,Recursive,20,53975000,2808 Fibonacci2,Recursive,20,54288000,2860 Fibonacci2,Recursive,20,58698000,2912 Fibonacci2,Recursive,20,56166000,2964 Fibonacci2,Recursive,20,58179000,3016 Fibonacci2,Recursive,20,58386000,3068 Fibonacci2,Recursive,20,59617000,3120 
Fibonacci2,Recursive,20,61554000,3172 Fibonacci2,Recursive,20,61599000,3224 Fibonacci2,Recursive,20,62198000,3276 Fibonacci2,Recursive,20,63505000,3328 Fibonacci2,Recursive,20,64447000,3380 Fibonacci2,Recursive,20,70436000,3432 Fibonacci2,Recursive,20,67152000,3484 Fibonacci2,Recursive,20,69598000,3536 Fibonacci2,Recursive,20,71134000,3588 Fibonacci2,Recursive,20,70352000,3640 Fibonacci2,Recursive,20,70875000,3692 Fibonacci2,Recursive,20,71805000,3744 Fibonacci2,Recursive,20,72407000,3796 Fibonacci2,Recursive,20,72254000,3848 Fibonacci2,Recursive,20,73660000,3900 Fibonacci2,Recursive,20,80229000,3952 Fibonacci2,Recursive,20,76739000,4004 Fibonacci2,Recursive,20,77749000,4056 Fibonacci2,Recursive,20,78957000,4108 Fibonacci2,Recursive,20,81856000,4160 Fibonacci2,Recursive,20,80908000,4212 Fibonacci2,Recursive,20,82306000,4264 Fibonacci2,Recursive,20,84393000,4316 Fibonacci2,Recursive,20,86382000,4368 Fibonacci2,Recursive,20,84331000,4420 Fibonacci2,Recursive,20,85710000,4472 Fibonacci2,Recursive,20,86742000,4524 Fibonacci2,Recursive,20,88563000,4576 Fibonacci2,Recursive,20,87852000,4628 Fibonacci2,Recursive,20,88611000,4680 Fibonacci2,Recursive,20,92949000,4732 Fibonacci2,Recursive,20,92032000,4784 Fibonacci2,Recursive,20,93425000,4836 Fibonacci2,Recursive,20,95356000,4888 Fibonacci2,Recursive,20,95975000,4940 Fibonacci2,Recursive,20,97090000,4992 Fibonacci2,Recursive,20,96199000,5044 Fibonacci2,Recursive,20,96010000,5096 Fibonacci2,Recursive,20,99792000,5148 Fibonacci2,Recursive,20,99439000,5200 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/new/sample.json000066400000000000000000000033571426140671200314630ustar00rootroot00000000000000[[52.0,104.0,156.0,208.0,260.0,312.0,364.0,416.0,468.0,520.0,572.0,624.0,676.0,728.0,780.0,832.0,884.0,936.0,988.0,1040.0,1092.0,1144.0,1196.0,1248.0,1300.0,1352.0,1404.0,1456.0,1508.0,1560.0,1612.0,1664.0,1716.0,1768.0,1820.0,1872.0,1924.0,1976.0,2028.0,2080.0,2132.0,2184.0,2236.0,2288.0,2340.0,2392.0,2444.0,2496.0,2548.0,2600.0,2652.0,2704.0,2756.0,2808.0,2860.0,2912.0,2964.0,3016.0,3068.0,3120.0,3172.0,3224.0,3276.0,3328.0,3380.0,3432.0,3484.0,3536.0,3588.0,3640.0,3692.0,3744.0,3796.0,3848.0,3900.0,3952.0,4004.0,4056.0,4108.0,4160.0,4212.0,4264.0,4316.0,4368.0,4420.0,4472.0,4524.0,4576.0,4628.0,4680.0,4732.0,4784.0,4836.0,4888.0,4940.0,4992.0,5044.0,5096.0,5148.0,5200.0],[1162000.0,1978000.0,2914000.0,3926000.0,4893000.0,5911000.0,6765000.0,7817000.0,8748000.0,9732000.0,11382000.0,11810000.0,12688000.0,13856000.0,14749000.0,16070000.0,16620000.0,20304000.0,20782000.0,20229000.0,21106000.0,22153000.0,23338000.0,23587000.0,24727000.0,25627000.0,27314000.0,29169000.0,29599000.0,29872000.0,30757000.0,31213000.0,32824000.0,36599000.0,34086000.0,35946000.0,37171000.0,37419000.0,38222000.0,39270000.0,41058000.0,41172000.0,42525000.0,43309000.0,44134000.0,45216000.0,46360000.0,49922000.0,50534000.0,50576000.0,50590000.0,52051000.0,54905000.0,53975000.0,54288000.0,58698000.0,56166000.0,58179000.0,58386000.0,59617000.0,61554000.0,61599000.0,62198000.0,63505000.0,64447000.0,70436000.0,67152000.0,69598000.0,71134000.0,70352000.0,70875000.0,71805000.0,72407000.0,72254000.0,73660000.0,80229000.0,76739000.0,77749000.0,78957000.0,81856000.0,80908000.0,82306000.0,84393000.0,86382000.0,84331000.0,85710000.0,86742000.0,88563000.0,87852000.0,88611000.0,92949000.0,92032000.0,93425000.0,95356000.0,95975000.0,97090000.0,96199000.0,96010000.0,99792000.0,99439000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/new/tukey.json0
00066400000000000000000000001151426140671200313300ustar00rootroot00000000000000[17473.130693319836,18208.225265688256,20168.477458670714,20903.572031039137]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/000077500000000000000000000000001426140671200300215ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/MAD.svg000066400000000000000000000676561426140671200311670ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.001 0.002 0.003 0.004 0.005 0.006 0.007 0.008 0.009 0.01 250 300 350 400 Density (a.u.) Average time (ns) Fibonacci2/Recursive/20: MAD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/SD.svg000066400000000000000000000727231426140671200310630ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0005 0.001 0.0015 0.002 0.0025 0.003 0.0035 0.004 0.0045 400 450 500 550 600 650 700 750 800 Density (a.u.) Average time (ns) Fibonacci2/Recursive/20: SD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/both/000077500000000000000000000000001426140671200307555ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/both/pdf.svg000066400000000000000000001102111426140671200322430ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0002 0.0004 0.0006 0.0008 0.001 0.0012 17 18 19 20 21 22 23 24 Density (a.u.) Average time (us) Fibonacci2/Recursive/20 Base PDF Base PDF Base Mean Base Mean New PDF New PDF New Mean New Mean regression.svg000066400000000000000000000466261426140671200336150ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/both Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) Fibonacci2/Recursive/20 gnuplot_plot_1 gnuplot_plot_2 Base sample Base sample New sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/change/000077500000000000000000000000001426140671200312465ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/change/mean.svg000066400000000000000000000753171426140671200327240ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 10 20 30 40 50 60 70 80 90 100 -1 -0.8 -0.6 -0.4 -0.2 0 0.2 0.4 0.6 Density (a.u.) Relative change (%) Fibonacci2/Recursive/20: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/change/median.svg000066400000000000000000000725711426140671200332400ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 140 160 180 -0.6 -0.4 -0.2 0 0.2 0.4 Density (a.u.) 
Relative change (%) Fibonacci2/Recursive/20: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/change/t-test.svg000066400000000000000000000611631426140671200332160ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 -5 -4 -3 -2 -1 0 1 2 3 4 5 Density t score Fibonacci2/Recursive/20: Welch t test t distribution t distribution t statistic t statistic criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/index.html000066400000000000000000000244611426140671200320250ustar00rootroot00000000000000 Fibonacci2/Recursive/20 - Criterion.rs

Fibonacci2/Recursive/20

PDF of Slope Regression

Additional Statistics:

Lower bound Estimate Upper bound
Slope 19.213 us 19.293 us 19.379 us
R² 0.9493450 0.9515822 0.9490325
Mean 19.191 us 19.297 us 19.419 us
Std. Dev. 391.92 ns 588.14 ns 767.20 ns
Median 19.076 us 19.166 us 19.237 us
MAD 259.84 ns 347.50 ns 425.66 ns

Additional Plots:

Understanding this report:

The plot on the left displays the average time per iteration for this benchmark. The shaded region shows the estimated probability of an iteration taking a certain amount of time, while the line shows the mean. Click on the plot for a larger view showing the outliers.

The plot on the right shows the linear regression calculated from the measurements. Each point represents a sample, though here it shows the total time for the sample rather than time per iteration. The line is the line of best fit for these measurements.

See the documentation for more details on the additional statistics.

Change Since Previous Benchmark

Additional Statistics:

Lower bound Estimate Upper bound
Change in time -0.9653% -0.1798% +0.6168% (p = 0.66 > 0.05)
No change in performance detected.

Understanding this report:

The plot on the left shows the probability of the function taking a certain amount of time. The red curve represents the saved measurements from the last time this benchmark was run, while the blue curve shows the measurements from this run. The lines represent the mean time per iteration. Click on the plot for a larger view.

The plot on the right shows the two regressions. Again, the red line represents the previous measurement while the blue line shows the current measurement.

See the documentation for more details on the additional statistics.

criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/mean.svg000066400000000000000000000661001426140671200314650ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 1 2 3 4 5 6 7 19.2 19.25 19.3 19.35 19.4 Density (a.u.) Average time (us) Fibonacci2/Recursive/20: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/median.svg000066400000000000000000000741751426140671200320150ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 16 18 20 19.06 19.08 19.1 19.12 19.14 19.16 19.18 19.2 19.22 19.24 Density (a.u.) Average time (us) Fibonacci2/Recursive/20: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/pdf.svg000066400000000000000000001161071426140671200313210ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 1 2 3 4 5 18 19 20 21 22 23 0 0.0001 0.0002 0.0003 0.0004 0.0005 0.0006 0.0007 0.0008 0.0009 0.001 Iterations (x 103) Density (a.u.) Average time (us) Fibonacci2/Recursive/20 PDF PDF Mean Mean "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 gnuplot_plot_9 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/pdf_small.svg000066400000000000000000000512311426140671200325050ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0002 0.0004 0.0006 0.0008 0.001 18 19 20 21 22 23 Density (a.u.) Average time (us) PDF Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/regression.svg000066400000000000000000000732341426140671200327330ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) Fibonacci2/Recursive/20 Sample Sample Linear regression Linear regression Confidence interval Confidence interval regression_small.svg000066400000000000000000000702071426140671200340410ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) Sample Linear regression Confidence interval relative_pdf_small.svg000066400000000000000000001043711426140671200343250ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0002 0.0004 0.0006 0.0008 0.001 0.0012 17 18 19 20 21 22 23 24 Density (a.u.) 
Average time (us) Base PDF Base Mean New PDF New Mean relative_regression_small.svg000066400000000000000000000447331426140671200357410ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 1 2 3 4 5 6 Total sample time (ms) Iterations (x 103) gnuplot_plot_1 gnuplot_plot_2 Base sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/20/report/slope.svg000066400000000000000000000675261426140671200317040ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 1 2 3 4 5 6 7 8 9 10 19.2 19.25 19.3 19.35 Density (a.u.) Average time (us) Fibonacci2/Recursive/20: slope Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/000077500000000000000000000000001426140671200265075ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/base/000077500000000000000000000000001426140671200274215ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/base/benchmark.json000066400000000000000000000003071426140671200322460ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Recursive","value_str":"21","throughput":null,"full_id":"Fibonacci2/Recursive/21","directory_name":"Fibonacci2/Recursive/21","title":"Fibonacci2/Recursive/21"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/base/estimates.json000066400000000000000000000017161426140671200323170ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":31295.688652968274,"upper_bound":32421.385067277086},"point_estimate":31796.96962697949,"standard_error":290.2748124530989},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":30983.448945615983,"upper_bound":31189.33823529412},"point_estimate":31041.74558080808,"standard_error":45.93060023018637},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":328.0222464369467,"upper_bound":579.225667252921},"point_estimate":468.75290360979113,"standard_error":64.24474801184468},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":31137.4868152166,"upper_bound":31970.197817775057},"point_estimate":31461.968006502142,"standard_error":221.25723948396336},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":1110.5830017484122,"upper_bound":4175.797867472276},"point_estimate":2924.544159350184,"standard_error":751.654443593445}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/base/raw.csv000066400000000000000000000073451426140671200307400ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Recursive,21,1075000,32 Fibonacci2,Recursive,21,2092000,64 Fibonacci2,Recursive,21,2937000,96 Fibonacci2,Recursive,21,3937000,128 Fibonacci2,Recursive,21,4952000,160 Fibonacci2,Recursive,21,5989000,192 Fibonacci2,Recursive,21,6926000,224 Fibonacci2,Recursive,21,7942000,256 Fibonacci2,Recursive,21,8983000,288 Fibonacci2,Recursive,21,9755000,320 Fibonacci2,Recursive,21,10753000,352 Fibonacci2,Recursive,21,11799000,384 Fibonacci2,Recursive,21,13117000,416 Fibonacci2,Recursive,21,16448000,448 Fibonacci2,Recursive,21,16364000,480 Fibonacci2,Recursive,21,15817000,512 Fibonacci2,Recursive,21,17180000,544 
Fibonacci2,Recursive,21,17882000,576 Fibonacci2,Recursive,21,18624000,608 Fibonacci2,Recursive,21,20185000,640 Fibonacci2,Recursive,21,21367000,672 Fibonacci2,Recursive,21,22793000,704 Fibonacci2,Recursive,21,23246000,736 Fibonacci2,Recursive,21,24230000,768 Fibonacci2,Recursive,21,24627000,800 Fibonacci2,Recursive,21,25282000,832 Fibonacci2,Recursive,21,26487000,864 Fibonacci2,Recursive,21,27824000,896 Fibonacci2,Recursive,21,42683000,928 Fibonacci2,Recursive,21,47815000,960 Fibonacci2,Recursive,21,37814000,992 Fibonacci2,Recursive,21,31921000,1024 Fibonacci2,Recursive,21,33115000,1056 Fibonacci2,Recursive,21,34625000,1088 Fibonacci2,Recursive,21,34517000,1120 Fibonacci2,Recursive,21,35318000,1152 Fibonacci2,Recursive,21,36559000,1184 Fibonacci2,Recursive,21,36715000,1216 Fibonacci2,Recursive,21,37996000,1248 Fibonacci2,Recursive,21,38945000,1280 Fibonacci2,Recursive,21,41231000,1312 Fibonacci2,Recursive,21,41283000,1344 Fibonacci2,Recursive,21,42723000,1376 Fibonacci2,Recursive,21,43121000,1408 Fibonacci2,Recursive,21,44239000,1440 Fibonacci2,Recursive,21,47479000,1472 Fibonacci2,Recursive,21,46656000,1504 Fibonacci2,Recursive,21,47161000,1536 Fibonacci2,Recursive,21,50672000,1568 Fibonacci2,Recursive,21,50286000,1600 Fibonacci2,Recursive,21,50901000,1632 Fibonacci2,Recursive,21,51620000,1664 Fibonacci2,Recursive,21,52566000,1696 Fibonacci2,Recursive,21,57475000,1728 Fibonacci2,Recursive,21,55025000,1760 Fibonacci2,Recursive,21,55232000,1792 Fibonacci2,Recursive,21,55667000,1824 Fibonacci2,Recursive,21,57007000,1856 Fibonacci2,Recursive,21,58414000,1888 Fibonacci2,Recursive,21,58679000,1920 Fibonacci2,Recursive,21,60632000,1952 Fibonacci2,Recursive,21,61105000,1984 Fibonacci2,Recursive,21,62552000,2016 Fibonacci2,Recursive,21,69170000,2048 Fibonacci2,Recursive,21,64482000,2080 Fibonacci2,Recursive,21,65553000,2112 Fibonacci2,Recursive,21,67106000,2144 Fibonacci2,Recursive,21,67238000,2176 Fibonacci2,Recursive,21,68880000,2208 Fibonacci2,Recursive,21,101076000,2240 Fibonacci2,Recursive,21,71144000,2272 Fibonacci2,Recursive,21,73274000,2304 Fibonacci2,Recursive,21,72046000,2336 Fibonacci2,Recursive,21,73076000,2368 Fibonacci2,Recursive,21,75352000,2400 Fibonacci2,Recursive,21,76079000,2432 Fibonacci2,Recursive,21,76232000,2464 Fibonacci2,Recursive,21,79205000,2496 Fibonacci2,Recursive,21,78208000,2528 Fibonacci2,Recursive,21,80412000,2560 Fibonacci2,Recursive,21,80335000,2592 Fibonacci2,Recursive,21,81590000,2624 Fibonacci2,Recursive,21,83238000,2656 Fibonacci2,Recursive,21,83941000,2688 Fibonacci2,Recursive,21,84246000,2720 Fibonacci2,Recursive,21,84984000,2752 Fibonacci2,Recursive,21,86480000,2784 Fibonacci2,Recursive,21,87827000,2816 Fibonacci2,Recursive,21,87432000,2848 Fibonacci2,Recursive,21,92365000,2880 Fibonacci2,Recursive,21,90317000,2912 Fibonacci2,Recursive,21,91822000,2944 Fibonacci2,Recursive,21,92474000,2976 Fibonacci2,Recursive,21,92970000,3008 Fibonacci2,Recursive,21,95665000,3040 Fibonacci2,Recursive,21,94136000,3072 Fibonacci2,Recursive,21,95592000,3104 Fibonacci2,Recursive,21,95926000,3136 Fibonacci2,Recursive,21,98319000,3168 Fibonacci2,Recursive,21,101068000,3200 
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/base/sample.json000066400000000000000000000033431426140671200316000ustar00rootroot00000000000000[[32.0,64.0,96.0,128.0,160.0,192.0,224.0,256.0,288.0,320.0,352.0,384.0,416.0,448.0,480.0,512.0,544.0,576.0,608.0,640.0,672.0,704.0,736.0,768.0,800.0,832.0,864.0,896.0,928.0,960.0,992.0,1024.0,1056.0,1088.0,1120.0,1152.0,1184.0,1216.0,1248.0,1280.0,1312.0,1344.0,1376.0,1408.0,1440.0,1472.0,1504.0,1536.0,1568.0,1600.0,1632.0,1664.0,1696.0,1728.0,1760.0,1792.0,1824.0,1856.0,1888.0,1920.0,1952.0,1984.0,2016.0,2048.0,2080.0,2112.0,2144.0,2176.0,2208.0,2240.0,2272.0,2304.0,2336.0,2368.0,2400.0,2432.0,2464.0,2496.0,2528.0,2560.0,2592.0,2624.0,2656.0,2688.0,2720.0,2752.0,2784.0,2816.0,2848.0,2880.0,2912.0,2944.0,2976.0,3008.0,3040.0,3072.0,3104.0,3136.0,3168.0,3200.0],[1075000.0,2092000.0,2937000.0,3937000.0,4952000.0,5989000.0,6926000.0,7942000.0,8983000.0,9755000.0,10753000.0,11799000.0,13117000.0,16448000.0,16364000.0,15817000.0,17180000.0,17882000.0,18624000.0,20185000.0,21367000.0,22793000.0,23246000.0,24230000.0,24627000.0,25282000.0,26487000.0,27824000.0,42683000.0,47815000.0,37814000.0,31921000.0,33115000.0,34625000.0,34517000.0,35318000.0,36559000.0,36715000.0,37996000.0,38945000.0,41231000.0,41283000.0,42723000.0,43121000.0,44239000.0,47479000.0,46656000.0,47161000.0,50672000.0,50286000.0,50901000.0,51620000.0,52566000.0,57475000.0,55025000.0,55232000.0,55667000.0,57007000.0,58414000.0,58679000.0,60632000.0,61105000.0,62552000.0,69170000.0,64482000.0,65553000.0,67106000.0,67238000.0,68880000.0,101076000.0,71144000.0,73274000.0,72046000.0,73076000.0,75352000.0,76079000.0,76232000.0,79205000.0,78208000.0,80412000.0,80335000.0,81590000.0,83238000.0,83941000.0,84246000.0,84984000.0,86480000.0,87827000.0,87432000.0,92365000.0,90317000.0,91822000.0,92474000.0,92970000.0,95665000.0,94136000.0,95592000.0,95926000.0,98319000.0,101068000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/base/tukey.json000066400000000000000000000001101426140671200314450ustar00rootroot00000000000000[28938.891129032258,29876.33820564516,32376.1970766129,33313.6441532258]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/change/000077500000000000000000000000001426140671200277345ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/change/estimates.json000066400000000000000000000006351426140671200326310ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.005442772901443708,"upper_bound":0.03164478789809129},"point_estimate":0.012304783996208712,"standard_error":0.009672771001248706},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":-0.00741519970103639,"upper_bound":0.0035148967917493223},"point_estimate":-0.0025463516356105664,"standard_error":0.0028092342774936347}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/new/000077500000000000000000000000001426140671200273005ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/new/benchmark.json000066400000000000000000000003071426140671200321250ustar00rootroot00000000000000{"group_id":"Fibonacci2","function_id":"Recursive","value_str":"21","throughput":null,"full_id":"Fibonacci2/Recursive/21","directory_name":"Fibonacci2/Recursive/21","title":"Fibonacci2/Recursive/21"}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/new/estimates.json0000
66400000000000000000000017161426140671200321760ustar00rootroot00000000000000{"Mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":31295.688652968274,"upper_bound":32421.385067277086},"point_estimate":31796.96962697949,"standard_error":290.2748124530989},"Median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":30983.448945615983,"upper_bound":31189.33823529412},"point_estimate":31041.74558080808,"standard_error":45.93060023018637},"MedianAbsDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":328.0222464369467,"upper_bound":579.225667252921},"point_estimate":468.75290360979113,"standard_error":64.24474801184468},"Slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":31137.4868152166,"upper_bound":31970.197817775057},"point_estimate":31461.968006502142,"standard_error":221.25723948396336},"StdDev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":1110.5830017484122,"upper_bound":4175.797867472276},"point_estimate":2924.544159350184,"standard_error":751.654443593445}}criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/new/raw.csv000066400000000000000000000073451426140671200306170ustar00rootroot00000000000000group,function,value,sample_time_nanos,iteration_count Fibonacci2,Recursive,21,1075000,32 Fibonacci2,Recursive,21,2092000,64 Fibonacci2,Recursive,21,2937000,96 Fibonacci2,Recursive,21,3937000,128 Fibonacci2,Recursive,21,4952000,160 Fibonacci2,Recursive,21,5989000,192 Fibonacci2,Recursive,21,6926000,224 Fibonacci2,Recursive,21,7942000,256 Fibonacci2,Recursive,21,8983000,288 Fibonacci2,Recursive,21,9755000,320 Fibonacci2,Recursive,21,10753000,352 Fibonacci2,Recursive,21,11799000,384 Fibonacci2,Recursive,21,13117000,416 Fibonacci2,Recursive,21,16448000,448 Fibonacci2,Recursive,21,16364000,480 Fibonacci2,Recursive,21,15817000,512 Fibonacci2,Recursive,21,17180000,544 Fibonacci2,Recursive,21,17882000,576 Fibonacci2,Recursive,21,18624000,608 Fibonacci2,Recursive,21,20185000,640 Fibonacci2,Recursive,21,21367000,672 Fibonacci2,Recursive,21,22793000,704 Fibonacci2,Recursive,21,23246000,736 Fibonacci2,Recursive,21,24230000,768 Fibonacci2,Recursive,21,24627000,800 Fibonacci2,Recursive,21,25282000,832 Fibonacci2,Recursive,21,26487000,864 Fibonacci2,Recursive,21,27824000,896 Fibonacci2,Recursive,21,42683000,928 Fibonacci2,Recursive,21,47815000,960 Fibonacci2,Recursive,21,37814000,992 Fibonacci2,Recursive,21,31921000,1024 Fibonacci2,Recursive,21,33115000,1056 Fibonacci2,Recursive,21,34625000,1088 Fibonacci2,Recursive,21,34517000,1120 Fibonacci2,Recursive,21,35318000,1152 Fibonacci2,Recursive,21,36559000,1184 Fibonacci2,Recursive,21,36715000,1216 Fibonacci2,Recursive,21,37996000,1248 Fibonacci2,Recursive,21,38945000,1280 Fibonacci2,Recursive,21,41231000,1312 Fibonacci2,Recursive,21,41283000,1344 Fibonacci2,Recursive,21,42723000,1376 Fibonacci2,Recursive,21,43121000,1408 Fibonacci2,Recursive,21,44239000,1440 Fibonacci2,Recursive,21,47479000,1472 Fibonacci2,Recursive,21,46656000,1504 Fibonacci2,Recursive,21,47161000,1536 Fibonacci2,Recursive,21,50672000,1568 Fibonacci2,Recursive,21,50286000,1600 Fibonacci2,Recursive,21,50901000,1632 Fibonacci2,Recursive,21,51620000,1664 Fibonacci2,Recursive,21,52566000,1696 Fibonacci2,Recursive,21,57475000,1728 Fibonacci2,Recursive,21,55025000,1760 Fibonacci2,Recursive,21,55232000,1792 Fibonacci2,Recursive,21,55667000,1824 Fibonacci2,Recursive,21,57007000,1856 Fibonacci2,Recursive,21,58414000,1888 Fibonacci2,Recursive,21,58679000,1920 Fibonacci2,Recursive,21,60632000,1952 
Fibonacci2,Recursive,21,61105000,1984 Fibonacci2,Recursive,21,62552000,2016 Fibonacci2,Recursive,21,69170000,2048 Fibonacci2,Recursive,21,64482000,2080 Fibonacci2,Recursive,21,65553000,2112 Fibonacci2,Recursive,21,67106000,2144 Fibonacci2,Recursive,21,67238000,2176 Fibonacci2,Recursive,21,68880000,2208 Fibonacci2,Recursive,21,101076000,2240 Fibonacci2,Recursive,21,71144000,2272 Fibonacci2,Recursive,21,73274000,2304 Fibonacci2,Recursive,21,72046000,2336 Fibonacci2,Recursive,21,73076000,2368 Fibonacci2,Recursive,21,75352000,2400 Fibonacci2,Recursive,21,76079000,2432 Fibonacci2,Recursive,21,76232000,2464 Fibonacci2,Recursive,21,79205000,2496 Fibonacci2,Recursive,21,78208000,2528 Fibonacci2,Recursive,21,80412000,2560 Fibonacci2,Recursive,21,80335000,2592 Fibonacci2,Recursive,21,81590000,2624 Fibonacci2,Recursive,21,83238000,2656 Fibonacci2,Recursive,21,83941000,2688 Fibonacci2,Recursive,21,84246000,2720 Fibonacci2,Recursive,21,84984000,2752 Fibonacci2,Recursive,21,86480000,2784 Fibonacci2,Recursive,21,87827000,2816 Fibonacci2,Recursive,21,87432000,2848 Fibonacci2,Recursive,21,92365000,2880 Fibonacci2,Recursive,21,90317000,2912 Fibonacci2,Recursive,21,91822000,2944 Fibonacci2,Recursive,21,92474000,2976 Fibonacci2,Recursive,21,92970000,3008 Fibonacci2,Recursive,21,95665000,3040 Fibonacci2,Recursive,21,94136000,3072 Fibonacci2,Recursive,21,95592000,3104 Fibonacci2,Recursive,21,95926000,3136 Fibonacci2,Recursive,21,98319000,3168 Fibonacci2,Recursive,21,101068000,3200 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/new/sample.json000066400000000000000000000033431426140671200314570ustar00rootroot00000000000000[[32.0,64.0,96.0,128.0,160.0,192.0,224.0,256.0,288.0,320.0,352.0,384.0,416.0,448.0,480.0,512.0,544.0,576.0,608.0,640.0,672.0,704.0,736.0,768.0,800.0,832.0,864.0,896.0,928.0,960.0,992.0,1024.0,1056.0,1088.0,1120.0,1152.0,1184.0,1216.0,1248.0,1280.0,1312.0,1344.0,1376.0,1408.0,1440.0,1472.0,1504.0,1536.0,1568.0,1600.0,1632.0,1664.0,1696.0,1728.0,1760.0,1792.0,1824.0,1856.0,1888.0,1920.0,1952.0,1984.0,2016.0,2048.0,2080.0,2112.0,2144.0,2176.0,2208.0,2240.0,2272.0,2304.0,2336.0,2368.0,2400.0,2432.0,2464.0,2496.0,2528.0,2560.0,2592.0,2624.0,2656.0,2688.0,2720.0,2752.0,2784.0,2816.0,2848.0,2880.0,2912.0,2944.0,2976.0,3008.0,3040.0,3072.0,3104.0,3136.0,3168.0,3200.0],[1075000.0,2092000.0,2937000.0,3937000.0,4952000.0,5989000.0,6926000.0,7942000.0,8983000.0,9755000.0,10753000.0,11799000.0,13117000.0,16448000.0,16364000.0,15817000.0,17180000.0,17882000.0,18624000.0,20185000.0,21367000.0,22793000.0,23246000.0,24230000.0,24627000.0,25282000.0,26487000.0,27824000.0,42683000.0,47815000.0,37814000.0,31921000.0,33115000.0,34625000.0,34517000.0,35318000.0,36559000.0,36715000.0,37996000.0,38945000.0,41231000.0,41283000.0,42723000.0,43121000.0,44239000.0,47479000.0,46656000.0,47161000.0,50672000.0,50286000.0,50901000.0,51620000.0,52566000.0,57475000.0,55025000.0,55232000.0,55667000.0,57007000.0,58414000.0,58679000.0,60632000.0,61105000.0,62552000.0,69170000.0,64482000.0,65553000.0,67106000.0,67238000.0,68880000.0,101076000.0,71144000.0,73274000.0,72046000.0,73076000.0,75352000.0,76079000.0,76232000.0,79205000.0,78208000.0,80412000.0,80335000.0,81590000.0,83238000.0,83941000.0,84246000.0,84984000.0,86480000.0,87827000.0,87432000.0,92365000.0,90317000.0,91822000.0,92474000.0,92970000.0,95665000.0,94136000.0,95592000.0,95926000.0,98319000.0,101068000.0]]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/new/tukey.json0000664000000000000000000000011014261406712003132
40ustar00rootroot00000000000000[28938.891129032258,29876.33820564516,32376.1970766129,33313.6441532258]criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/000077500000000000000000000000001426140671200300225ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/MAD.svg000066400000000000000000000671071426140671200311570ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.001 0.002 0.003 0.004 0.005 0.006 0.007 350 400 450 500 550 600 Density (a.u.) Average time (ns) Fibonacci2/Recursive/21: MAD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/SD.svg000066400000000000000000000676711426140671200310720ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.1 0.2 0.3 0.4 0.5 0.6 1 1.5 2 2.5 3 3.5 4 4.5 Density (a.u.) Average time (us) Fibonacci2/Recursive/21: SD Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/both/000077500000000000000000000000001426140671200307565ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/both/pdf.svg000066400000000000000000001075251426140671200322620ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0001 0.0002 0.0003 0.0004 0.0005 0.0006 25 30 35 40 45 50 55 Density (a.u.) Average time (us) Fibonacci2/Recursive/21 Base PDF Base PDF Base Mean Base Mean New PDF New PDF New Mean New Mean regression.svg000066400000000000000000000505151426140671200336060ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/both Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 0.5 1 1.5 2 2.5 3 3.5 Total sample time (ms) Iterations (x 103) Fibonacci2/Recursive/21 gnuplot_plot_1 gnuplot_plot_2 Base sample Base sample New sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/change/000077500000000000000000000000001426140671200312475ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/change/mean.svg000066400000000000000000000747141426140671200327250ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 25 30 35 40 45 -0.5 0 0.5 1 1.5 2 2.5 3 3.5 Density (a.u.) Relative change (%) Fibonacci2/Recursive/21: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/change/median.svg000066400000000000000000000725771426140671200332470ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 140 160 -0.8 -0.6 -0.4 -0.2 0 0.2 0.4 Density (a.u.) 
Relative change (%) Fibonacci2/Recursive/21: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate Noise threshold Noise threshold criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/change/t-test.svg000066400000000000000000000605261426140671200332210ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 -5 -4 -3 -2 -1 0 1 2 3 4 Density t score Fibonacci2/Recursive/21: Welch t test t distribution t distribution t statistic t statistic criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/index.html000066400000000000000000000244611426140671200320260ustar00rootroot00000000000000 Fibonacci2/Recursive/21 - Criterion.rs

Fibonacci2/Recursive/21

PDF of Slope Regression

Additional Statistics:

Lower bound Estimate Upper bound
Slope 31.137 us 31.462 us 31.970 us
R² 0.6040485 0.6094217 0.5964066
Mean 31.296 us 31.797 us 32.421 us
Std. Dev. 1.1106 us 2.9245 us 4.1758 us
Median 30.983 us 31.042 us 31.189 us
MAD 328.02 ns 468.75 ns 579.23 ns

Additional Plots:

Understanding this report:

The plot on the left displays the average time per iteration for this benchmark. The shaded region shows the estimated probability of an iteration taking a certain amount of time, while the line shows the mean. Click on the plot for a larger view showing the outliers.

The plot on the right shows the linear regression calculated from the measurements. Each point represents a sample, though here it shows the total time for the sample rather than time per iteration. The line is the line of best fit for these measurements.

See the documentation for more details on the additional statistics.

Change Since Previous Benchmark

Additional Statistics:

Lower bound Estimate Upper bound
Change in time -0.5443% +1.2305% +3.1645% (p = 0.23 > 0.05)
No change in performance detected.

Understanding this report:

The plot on the left shows the probability of the function taking a certain amount of time. The red curve represents the saved measurements from the last time this benchmark was run, while the blue curve shows the measurements from this run. The lines represent the mean time per iteration. Click on the plot for a larger view.

The plot on the right shows the two regressions. Again, the red line represents the previous measurement while the blue line shows the current measurement.

See the documentation for more details on the additional statistics.

criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/mean.svg000066400000000000000000000674761426140671200315070ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.2 0.4 0.6 0.8 1 1.2 1.4 31.2 31.4 31.6 31.8 32 32.2 32.4 Density (a.u.) Average time (us) Fibonacci2/Recursive/21: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/median.svg000066400000000000000000000676661426140671200320250ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 2 4 6 8 10 12 14 16 18 31 31.05 31.1 31.15 31.2 Density (a.u.) Average time (us) Fibonacci2/Recursive/21: median Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/pdf.svg000066400000000000000000001134661426140671200313270ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.5 1 1.5 2 2.5 3 30 35 40 45 50 0 5x10-5 0.0001 0.00015 0.0002 0.00025 0.0003 Iterations (x 103) Density (a.u.) Average time (us) Fibonacci2/Recursive/21 PDF PDF Mean Mean "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 gnuplot_plot_9 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/pdf_small.svg000066400000000000000000000514631426140671200325150ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5x10-5 0.0001 0.00015 0.0002 0.00025 0.0003 30 35 40 45 50 Density (a.u.) Average time (us) PDF Mean criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/regression.svg000066400000000000000000000751301426140671200327310ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 0.5 1 1.5 2 2.5 3 3.5 Total sample time (ms) Iterations (x 103) Fibonacci2/Recursive/21 Sample Sample Linear regression Linear regression Confidence interval Confidence interval regression_small.svg000066400000000000000000000720751426140671200340470ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 0.5 1 1.5 2 2.5 3 3.5 Total sample time (ms) Iterations (x 103) Sample Linear regression Confidence interval relative_pdf_small.svg000066400000000000000000001036531426140671200343300ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.0001 0.0002 0.0003 0.0004 0.0005 0.0006 25 30 35 40 45 50 55 Density (a.u.) 
Average time (us) Base PDF Base Mean New PDF New Mean relative_regression_small.svg000066400000000000000000000466221426140671200357410ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 20 40 60 80 100 120 0 0.5 1 1.5 2 2.5 3 3.5 Total sample time (ms) Iterations (x 103) gnuplot_plot_1 gnuplot_plot_2 Base sample New sample criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/21/report/slope.svg000066400000000000000000000704331426140671200316740ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 0.2 0.4 0.6 0.8 1 1.2 1.4 1.6 1.8 2 31.2 31.4 31.6 31.8 32 Density (a.u.) Average time (us) Fibonacci2/Recursive/21: slope Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/report/000077500000000000000000000000001426140671200276005ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/report/index.html000066400000000000000000000077341426140671200316100ustar00rootroot00000000000000 Fibonacci2/Recursive Summary - Criterion.rs

Fibonacci2/Recursive

Violin Plot

Violin Plot

This chart shows the relationship between function/parameter and iteration time. The thickness of the shaded region indicates the probability that a measurement of the given function/parameter would take a particular length of time.

Line Chart

Line Chart

This chart shows the mean measured time for each function as the input (or the size of the input) increases.

Fibonacci2/Recursive/20

PDF of Slope Regression

Fibonacci2/Recursive/21

PDF of Slope Regression
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/report/lines.svg000066400000000000000000000360601426140671200314400ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 18 20 22 24 26 28 30 32 20 20.2 20.4 20.6 20.8 21 Average time (us) Input Fibonacci2/Recursive: Comparison Recursive Recursive gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/Recursive/report/violin.svg000066400000000000000000001124671426140671200316340ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 Fibonacci2/Recursive/21 Fibonacci2/Recursive/20 15 20 25 30 35 40 45 50 55 Input Average time (us) Fibonacci2/Recursive: Violin plot PDF PDF gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/report/000077500000000000000000000000001426140671200256315ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/report/index.html000066400000000000000000000137621426140671200276370ustar00rootroot00000000000000 Fibonacci2 Summary - Criterion.rs

Fibonacci2

Violin Plot

Violin Plot

This chart shows the relationship between function/parameter and iteration time. The thickness of the shaded region indicates the probability that a measurement of the given function/parameter would take a particular length of time.

Line Chart

Line Chart

This chart shows the mean measured time for each function as the input (or the size of the input) increases.

Fibonacci2/Recursive/20

PDF of Slope Regression

Fibonacci2/Recursive/21

PDF of Slope Regression

Fibonacci2/Iterative/20

PDF of Slope Regression

Fibonacci2/Iterative/21

PDF of Slope Regression
criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/report/lines.svg000066400000000000000000000404021426140671200274640ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 0 5 10 15 20 25 30 35 20 20.2 20.4 20.6 20.8 21 Average time (us) Input Fibonacci2: Comparison Recursive Recursive gnuplot_plot_2 Iterative Iterative gnuplot_plot_4 criterion.rs-0.3.6/book/src/user_guide/html_report/Fibonacci2/report/violin.svg000066400000000000000000001702741426140671200276650ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 Fibonacci2/Iterative/21 Fibonacci2/Iterative/20 Fibonacci2/Recursive/21 Fibonacci2/Recursive/20 0 10 20 30 40 50 60 Input Average time (us) Fibonacci2: Violin plot PDF PDF gnuplot_plot_2 gnuplot_plot_3 gnuplot_plot_4 criterion.rs-0.3.6/book/src/user_guide/html_report/report/000077500000000000000000000000001426140671200236725ustar00rootroot00000000000000criterion.rs-0.3.6/book/src/user_guide/html_report/report/index.html000066400000000000000000000061121426140671200256670ustar00rootroot00000000000000 Index - Criterion.rs criterion.rs-0.3.6/book/src/user_guide/iteration_times.svg000066400000000000000000000777401426140671200237570ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.2 patchlevel 2 10.7 10.75 10.8 10.85 10.9 10.95 11 11.05 0 10 20 30 40 50 60 70 80 90 100 Average Iteration Time (ms) Sample sampling_mode/Flat gnuplot_plot_1 criterion.rs-0.3.6/book/src/user_guide/known_limitations.md000066400000000000000000000031761426140671200241210ustar00rootroot00000000000000## Known Limitations There are currently a number of limitations to the use of Criterion.rs relative to the standard benchmark harness. First, it is necessary for Criterion.rs to provide its own `main` function using the `criterion_main` macro. This results in several limitations: * It is not possible to include benchmarks in code in the `src/` directory as one might with the regular benchmark harness. * It is not possible to benchmark non-`pub` functions. External benchmarks, including those using Criterion.rs, are compiled as a separate crate, and non-`pub` functions are not visible to the benchmarks. * It is not possible to benchmark functions in binary crates. Binary crates cannot be dependencies of other crates, and that includes external tests and benchmarks ([see here](https://github.com/rust-lang/cargo/issues/4316) for more details) * Is is not possible to benchmark functions in crates that do not provide an `rlib`. Criterion.rs cannot currently solve these issues. An [experimental RFC](https://github.com/rust-lang/rust/issues/50297) is being implemented to enable custom test and benchmarking frameworks. Second, Criterion.rs provides a stable-compatible replacement for the `black_box` function provided by the standard test crate. This replacement is not as reliable as the official one, and it may allow dead-code-elimination to affect the benchmarks in some circumstances. If you're using a Nightly build of Rust, you can add the `real_blackbox` feature to your dependency on Criterion.rs to use the standard `black_box` function instead. 
Example: ```toml criterion = { version = '...', features=['real_blackbox'] } ```criterion.rs-0.3.6/book/src/user_guide/line.svg000066400000000000000000000373251426140671200215020ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.0 patchlevel 3 50 100 150 200 250 300 350 0 2000 4000 6000 8000 10000 12000 14000 16000 18000 Average time (ns) Input from_elem: Comparison gnuplot_plot_1 gnuplot_plot_2 criterion.rs-0.3.6/book/src/user_guide/lines.svg000066400000000000000000001005671426140671200216640ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.0 patchlevel 3 1 10 100 1000 10000 100000 1x106 10 100 1000 10000 100000 1x106 Average time (us) Input Size (Bytes) counts: Comparison naive naive gnuplot_plot_2 naive_32 naive_32 gnuplot_plot_4 hyper hyper gnuplot_plot_6 criterion.rs-0.3.6/book/src/user_guide/mean.svg000066400000000000000000002106251426140671200214670ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.0 patchlevel 3 0 1 2 3 4 5 6 6.25 6.3 6.35 6.4 6.45 6.5 6.55 Density (a.u.) Average time (ms) alloc: mean Bootstrap distribution Bootstrap distribution Confidence interval Confidence interval Point estimate Point estimate criterion.rs-0.3.6/book/src/user_guide/migrating_from_libtest.md000066400000000000000000000035101426140671200250730ustar00rootroot00000000000000# Migrating from libtest This page shows an example of converting a libtest or bencher benchmark to use Criterion.rs. ## The Benchmark We'll start with this benchmark as an example: ```rust #![feature(test)] extern crate test; use test::Bencher; use test::black_box; fn fibonacci(n: u64) -> u64 { match n { 0 => 1, 1 => 1, n => fibonacci(n-1) + fibonacci(n-2), } } #[bench] fn bench_fib(b: &mut Bencher) { b.iter(|| fibonacci(black_box(20))); } ``` ## The Migration The first thing to do is update the `Cargo.toml` to disable the libtest benchmark harness: ```toml [[bench]] name = "example" harness = false ``` We also need to add Criterion.rs to the `dev-dependencies` section of `Cargo.toml`: ```toml [dev-dependencies] criterion = "0.3" ``` The next step is to update the imports: ```rust use criterion::{black_box, criterion_group, criterion_main, Criterion}; ``` Then, we can change the `bench_fib` function. Remove the `#[bench]` and change the argument to `&mut Criterion` instead. The contents of this function need to change as well: ```rust fn bench_fib(c: &mut Criterion) { c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20)))); } ``` Finally, we need to invoke some macros to generate a main function, since we no longer have libtest to provide one: ```rust criterion_group!(benches, bench_fib); criterion_main!(benches); ``` And that's it! The complete migrated benchmark code is below: ```rust use criterion::{black_box, criterion_group, criterion_main, Criterion}; fn fibonacci(n: u64) -> u64 { match n { 0 => 1, 1 => 1, n => fibonacci(n-1) + fibonacci(n-2), } } fn bench_fib(c: &mut Criterion) { c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20)))); } criterion_group!(benches, bench_fib); criterion_main!(benches); ``` criterion.rs-0.3.6/book/src/user_guide/pdf.svg000066400000000000000000002652031426140671200213220ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.0 patchlevel 3 0 10 20 30 40 50 60 70 80 90 6 7 8 9 10 11 0 1x10-7 2x10-7 3x10-7 4x10-7 5x10-7 6x10-7 7x10-7 8x10-7 9x10-7 1x10-6 Iterations Density (a.u.) 
Average time (ms) alloc PDF PDF "Clean" sample "Clean" sample Mild outliers Mild outliers Severe outliers Severe outliers gnuplot_plot_5 gnuplot_plot_6 gnuplot_plot_7 gnuplot_plot_8 criterion.rs-0.3.6/book/src/user_guide/plots_and_graphs.md000066400000000000000000000104301426140671200236670ustar00rootroot00000000000000# Plots & Graphs Criterion.rs can generate a number of useful charts and graphs which you can check to get a better understanding of the behavior of the benchmark. These charts will be generated with [gnuplot](http://www.gnuplot.info/) by default, but will fall back on using the `plotters` crate if it is not available. The examples below were generated using the gnuplot backend, but the plotters ones are similar. ## File Structure The plots and saved data are stored under `target/criterion/$BENCHMARK_NAME/`. Here's an example of the folder structure: ``` $BENCHMARK_NAME/ ├── base/ │ ├── raw.csv │ ├── estimates.json │ ├── sample.json │ └── tukey.json ├── change/ │ └── estimates.json ├── new/ │ ├── raw.csv │ ├── estimates.json │ ├── sample.json │ └── tukey.json └── report/ ├── both/ │ ├── pdf.svg │ ├── regression.svg │ └── iteration_times.svg ├── change/ │ ├── mean.svg │ ├── median.svg │ └── t-test.svg ├── index.html ├── MAD.svg ├── mean.svg ├── median.svg ├── pdf.svg ├── pdf_small.svg ├── regression.svg (optional) ├── regression_small.svg (optional) ├── iteration_times.svg (optional) ├── iteration_times_small.svg (optional) ├── relative_pdf_small.svg ├── relative_regression_small.svg (optional) ├── relative_iteration_times_small.svg (optional) ├── SD.svg └── slope.svg ``` The `new` folder contains the statistics for the last benchmarking run, while the `base` folder contains those for the last run on the `base` baseline (see [Command-Line Options](./command_line_options.md#baselines) for more information on baselines). The plots are in the `report` folder. Criterion.rs only keeps historical data for the last run. The `report/both` folder contains plots which show both runs on one plot, while the `report/change` folder contains plots showing the differences between the last two runs. This example shows the plots produced by the default `bench_function` benchmark method. Other methods may produce additional charts, which will be detailed in their respective pages. ## MAD/Mean/Median/SD/Slope ![Mean Chart](./mean.svg) These are the simplest of the plots generated by Criterion.rs. They display the bootstrapped distributions and confidence intervals for the given statistics. ## Regression ![Regression Chart](./regression.svg) The regression plot shows each data point plotted on an X-Y plane showing the number of iterations vs the time taken. It also shows the line representing Criterion.rs' best guess at the time per iteration. A good benchmark will show the data points all closely following the line. If the data points are scattered widely, this indicates that there is a lot of noise in the data and that the benchmark may not be reliable. If the data points follow a consistent trend but don't match the line (eg. if they follow a curved pattern or show several discrete line segments) this indicates that the benchmark is doing different amounts of work depending on the number of iterations, which prevents Criterion.rs from generating accurate statistics and means that the benchmark may need to be reworked. The combined regression plot in the `report/both` folder shows only the regression lines and is a useful visual indicator of the difference in performance between the two runs. 
The regression chart can only be displayed when Criterion.rs uses the linear sampling mode. In the flat sampling mode, the iteration times chart is displayed instead. ## Iteration Times ![Iteration Times Chart](./iteration_times.svg) The iteration times chart displays a collection of the average iteration times. It is less useful than the regression chart, but since the regression chart cannot be displayed in the flat sampling mode, this is shown instead. ## PDF ![PDF Chart](./pdf.svg) The PDF chart shows the probability distribution function for the samples. It also shows the ranges used to classify samples as outliers. In this example (as in the regression example above) we can see that the performance trend changes noticeably below ~35 iterations, which we may wish to investigate. criterion.rs-0.3.6/book/src/user_guide/profiling.md000066400000000000000000000057601426140671200223430ustar00rootroot00000000000000# Profiling When optimizing code, it's often helpful to profile it to help understand why it produces the measured performance characteristics. Criterion.rs has several features to assist with profiling benchmarks. ### Note on running benchmark executables directly Because of how Cargo passes certain command-line flags (see the FAQ for more details) when running benchmarks, Criterion.rs benchmark executables expect a `--bench` argument on their command line. Cargo adds this automatically, but when running the executables directly (eg. in a profiler) you will need to add the `--bench` argument. ### `--profile-time` Criterion.rs benchmark executables accept a `--profile-time ` argument. If this argument is provided to a run, the benchmark executable will attempt to iterate the benchmark executable for approximately the given number of seconds, but will not perform its usual analysis or save any results. This way, Criterion.rs' analysis code won't appear in the profiling measurements. For users of external profilers such as Linux perf, simply run the benchmark executable(s) under your favorite profiler, passing the profile-time argument. For users of in-process profilers such as Google's `cpuprofiler`, read on. ### Implementing In-Process Profiling Hooks For developers who wish to use profiling hooks provided by an existing crate, skip to ["Enabling In-Process Profiling"](#enabling-in-process-profiling) below. Since version 0.3.0, Criterion.rs has supported adding hooks to start and stop an in-process profiler such as [cpuprofiler](https://crates.io/crates/cpuprofiler). This hook takes the form of a trait, `criterion::profiler::Profiler`. ```rust pub trait Profiler { fn start_profiling(&mut self, benchmark_id: &str, benchmark_dir: &Path); fn stop_profiling(&mut self, benchmark_id: &str, benchmark_dir: &Path); } ``` These functions will be called before and after each benchmark when running in `--profile-time` mode, and will not be called otherwise. This makes it easy to integrate in-process profiling into benchmarks when wanted, without having the profiling instrumentation affect regular benchmark measurements. ### Enabling In-Process Profiling Once you (or an external crate) have defined a profiler hook, using it is relatively easy. You will need to override the `Criterion` struct (which defaults to `ExternalProfiler`) by providing your own measurement using the `with_profiler` function and overriding the default `Criterion` object configuration. 
```rust extern crate my_custom_profiler; use my_custom_profiler::MyCustomProfiler; fn fibonacci_profiled(criterion: &mut Criterion) { // Use the criterion struct as normal here. } fn profiled() -> Criterion { Criterion::default().with_profiler(MyCustomProfiler) } criterion_group! { name = benches; config = profiled(); targets = fibonacci_profiled } ``` The profiler hook will only take effect when running in `--profile-time` mode.criterion.rs-0.3.6/book/src/user_guide/regression.svg000066400000000000000000001021331426140671200227210ustar00rootroot00000000000000 Gnuplot Produced by GNUPLOT 5.0 patchlevel 3 0 100 200 300 400 500 600 0 10 20 30 40 50 60 70 80 90 100 Total time (ms) Iterations alloc Sample Sample Linear regression Linear regression Confidence interval Confidence interval criterion.rs-0.3.6/book/src/user_guide/timing_loops.md000066400000000000000000000236361426140671200230570ustar00rootroot00000000000000# Timing Loops The [`Bencher`](https://bheisler.github.io/criterion.rs/criterion/struct.Bencher.html) structure provides a number of functions which implement different timing loops for measuring the performance of a function. This page discusses how these timing loops work and which one is appropriate for different situations. ## `iter` The simplest timing loop is `iter`. This loop should be the default for most benchmarks. `iter` calls the benchmark N times in a tight loop and records the elapsed time for the entire loop. Because it takes only two measurements (the time before and after the loop) and does nothing else in the loop `iter` has effectively zero measurement overhead - meaning it can accurately measure the performance of functions as small as a single processor instruction. However, `iter` has limitations as well. If the benchmark returns a value which implements Drop, it will be dropped inside the loop and the drop function's time will be included in the measurement. Additionally, some benchmarks need per-iteration setup. A benchmark for a sorting algorithm might require some unsorted data to operate on, but we don't want the generation of the unsorted data to affect the measurement. `iter` provides no way to do this. ## `iter_with_large_drop` `iter_with_large_drop` is an answer to the first problem. In this case, the values returned by the benchmark are collected into a `Vec` to be dropped after the measurement is complete. This introduces a small amount of measurement overhead, meaning that the measured value will be slightly higher than the true runtime of the function. This overhead is almost always negligible, but it's important to be aware that it exists. Extremely fast benchmarks (such as those in the hundreds-of-picoseconds range or smaller) or benchmarks that return very large structures may incur more overhead. Aside from the measurement overhead, `iter_with_large_drop` has its own limitations. Collecting the returned values into a `Vec` uses heap memory, and the amount of memory used is not under the control of the user. Rather, it depends on the iteration count which in turn depends on the benchmark settings and the runtime of the benchmarked function. It is possible that a benchmark could run out of memory while collecting the values to drop. ## `iter_batched/iter_batched_ref` `iter_batched` and `iter_batched_ref` are the next step up in complexity for timing loops. These timing loops take two closures rather than one. The first closure takes no arguments and returns a value of type `T` - this is used to generate setup data. 
For example, the setup function might clone a vector of unsorted data for use in benchmarking a sorting function. The second closure is the function to benchmark, and it takes a `T` (for `iter_batched`) or `&mut T` (for `iter_batched_ref`). These two timing loops generate a batch of inputs and measure the time to execute the benchmark on all values in the batch. As with `iter_with_large_drop` they also collect the values returned from the benchmark into a `Vec` and drop it later without timing the drop. Then another batch of inputs is generated and the process is repeated until enough iterations of the benchmark have been measured. Keep in mind that this is only necessary if the benchmark modifies the input - if the input is constant then one input value can be reused and the benchmark should use `iter` instead. Both timing loops accept a third parameter which controls how large a batch is. If the batch size is too large, we might run out of memory generating the inputs and collecting the outputs. If it's too small, we could introduce more measurement overhead than is necessary. For ease of use, Criterion provides three pre-defined choices of batch size, defined by the [`BatchSize`](https://bheisler.github.io/criterion.rs/criterion/enum.BatchSize.html) enum - `SmallInput`, `LargeInput` and `PerIteration`. It is also possible (though not recommended) to set the batch size manually. `SmallInput` should be the default for most benchmarks. It is tuned for benchmarks where the setup values are small (small enough that millions of values can safely be held in memory) and the output is likewise small or nonexistent. `SmallInput` incurs the least measurement overhead (equivalent to that of `iter_with_large_drop` and therefore negligible for nearly all benchmarks), but also uses the most memory. `LargeInput` should be used if the input or output of the benchmark is large enough that `SmallInput` uses too much memory. `LargeInput` incurs slightly more measurement overhead than `SmallInput`, but the overhead is still small enough to be negligible for almost all benchmarks. `PerIteration` forces the batch size to one. That is, it generates a single setup input, times the execution of the function once, discards the setup and output, then repeats. This results in a great deal of measurement overhead - several orders of magnitude more than the other options. It can be enough to affect benchmarks into the hundreds-of-nanoseconds range. Using `PerIteration` should be avoided wherever possible. However, it is sometimes necessary if the input or output of the benchmark is extremely large or holds a limited resource like a file handle. Although sticking to the pre-defined settings is strongly recommended, Criterion.rs does allow users to choose their own batch size if necessary. This can be done with `BatchSize::NumBatches` or `BatchSize::NumIterations`, which specify the number of batches per sample or the number of iterations per batch respectively. These options should be used only when necessary, as they require the user to tune the settings manually to get accurate results. However, they are provided as an option in case the pre-defined options are all unsuitable. `NumBatches` should be preferred over `NumIterations` as it will typically have less measurement overhead, but `NumIterations` provides more control over the batch size which may be necessary in some situations. ## `iter_custom` This is a special "timing loop" that relies on you to do your own timing. 
## `iter_custom`

This is a special "timing loop" that relies on you to do your own timing. Where the other timing loops take a lambda to call N times in a loop, this takes a lambda of the form `FnMut(iters: u64) -> M::Value` - meaning that it accepts the number of iterations and returns the measured value. Typically, this will be a `Duration` for the default `WallTime` measurement, but it may be other types for other measurements (see the [Custom Measurements](./custom_measurements.md) page for more details). The lambda can do whatever is needed to measure the value.

Use `iter_custom` when you need to do something that doesn't fit into the usual approach of calling a function in a loop. For example, this might be used for:

* Benchmarking external processes by sending the iteration count and receiving the elapsed time
* Measuring how long a thread pool takes to execute N jobs, to see how lock contention or pool-size affects the wall-clock time

Try to keep the overhead in the measurement routine to a minimum; Criterion.rs will still use its normal warm-up/target-time logic, which is based on wall-clock time. If your measurement routine takes a long time to perform each measurement, it could throw off the calculations and cause Criterion.rs to run too few iterations (not to mention that the benchmarks would take a long time). Because of this, it's best to do heavy setup like starting processes or threads before running the benchmark.

## What do I do if my function's runtime is smaller than the measurement overhead?

Criterion.rs' timing loops are carefully designed to minimize the measurement overhead as much as possible. For most benchmarks the measurement overhead can safely be ignored, because the true runtime of most benchmarks will be very large relative to the overhead. However, benchmarks with a runtime that is not much larger than the overhead can be difficult to measure.

If you believe that your benchmark is small compared to the measurement overhead, the first option is to adjust the timing loop to reduce the overhead. Using `iter` or `iter_batched` with `SmallInput` should be the first choice, as these options incur a minimum of measurement overhead. In general, using `iter_batched` with larger batches produces less overhead, so replacing `PerIteration` with `NumIterations` and a suitable batch size will typically reduce the overhead. It is possible for the batch size to be too large, however, which will increase (rather than decrease) the overhead.

If this is not sufficient, the only recourse is to benchmark a larger function. It's tempting to do this by manually executing the routine a fixed number of times inside the benchmark, but this is equivalent to what `NumIterations` already does. The only difference is that Criterion.rs can account for `NumIterations` and show the correct runtime for one iteration of the function rather than many. Instead, consider benchmarking at a higher level.

It's important to stress that measurement overhead only matters for very fast functions which modify their input. Slower functions (roughly speaking, anything at the nanosecond level or larger, or the microsecond level for `PerIteration`, assuming a reasonably modern x86_64 processor and OS or equivalent) are not meaningfully affected by measurement overhead. For functions which only read their input and do not modify or consume it, one value can be shared by all iterations using the `iter` loop, which has effectively no overhead.
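To illustrate that last point with a sketch (the data and benchmark names are again invented for this page): a read-only routine can share a single input across every iteration with plain `iter`, while a mutating routine needs a fresh copy per iteration via `iter_batched_ref`.

```rust
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};

fn read_only_vs_mutating(c: &mut Criterion) {
    // Placeholder input data shared by both benchmarks.
    let data: Vec<u64> = (0..1_000).collect();

    // Read-only routine: every iteration can use the same input, so plain
    // `iter` (with effectively no measurement overhead) is the right loop.
    c.bench_function("sum (read-only)", |b| {
        b.iter(|| black_box(&data).iter().sum::<u64>())
    });

    // Mutating routine: each iteration needs its own copy, so use
    // `iter_batched_ref` with `SmallInput` instead.
    c.bench_function("reverse (mutating)", |b| {
        b.iter_batched_ref(|| data.clone(), |v| v.reverse(), BatchSize::SmallInput)
    });
}

criterion_group!(benches, read_only_vs_mutating);
criterion_main!(benches);
```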
## Deprecated Timing Loops

In older Criterion.rs benchmarks (pre 2.10), one might see two more timing loops, called `iter_with_setup` and `iter_with_large_setup`. `iter_with_setup` is equivalent to `iter_batched` with `PerIteration`. `iter_with_large_setup` is equivalent to `iter_batched` with `NumBatches(1)`. Both produce much more measurement overhead than `SmallInput`. Additionally, `large_setup` also uses much more memory. Both should be updated to use `iter_batched`, preferably with `SmallInput`. They are kept for backwards-compatibility reasons, but no longer appear in the API documentation.

criterion.rs-0.3.6/book/src/user_guide/user_guide.md000066400000000000000000000004201426140671200224710ustar00rootroot00000000000000
# User Guide

This chapter covers the output produced by Criterion.rs benchmarks, both the command-line reports and the charts. It also details more advanced usages of Criterion.rs such as benchmarking external programs and comparing the performance of multiple functions.

criterion.rs-0.3.6/book/src/user_guide/violin_plot.svg000066400000000000000000004352251426140671200231100ustar00rootroot00000000000000
[gnuplot-generated SVG figure: "Fibonacci: Violin plot" — axes "Input" and "Average time (us)", series "Recursive" and "Iterative"; legend: Median, PDF]

criterion.rs-0.3.6/ci/000077500000000000000000000000001426140671200145375ustar00rootroot00000000000000
criterion.rs-0.3.6/ci/install.sh000066400000000000000000000011311426140671200165350ustar00rootroot00000000000000set -ex if [ "$CLIPPY" = "yes" ]; then rustup component add clippy-preview fi if [ "$RUSTFMT" = "yes" ]; then rustup component add rustfmt fi if [ "$DOCS" = "yes" ]; then cargo install mdbook --no-default-features cargo install mdbook-linkcheck sudo apt-get update sudo apt-get install python-pip sudo pip install python-dateutil fi if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$GNUPLOT" = "yes" ]; then brew unlink python@2 # because we're installing python3 and they both want to install stuff under /usr/local/Frameworks/Python.framework/ brew install gnuplot fi
criterion.rs-0.3.6/ci/script.sh000066400000000000000000000017611426140671200164040ustar00rootroot00000000000000set -ex export CARGO_INCREMENTAL=0 FEATURES="async_smol async_tokio async_std async_futures" if [ "$CLIPPY" = "yes" ]; then cargo clippy --all -- -D warnings elif [ "$DOCS" = "yes" ]; then cargo clean cargo doc --features "$FEATURES" --all --no-deps cd book mdbook build cd .. cp -r book/book/html/ target/doc/book/ travis-cargo doc-upload || true elif [ "$RUSTFMT" = "yes" ]; then cargo fmt --all -- --check elif [ "$MINIMAL_VERSIONS" = "yes" ]; then rm Cargo.lock || true cargo build -Z minimal-versions else export RUSTFLAGS="-D warnings" cargo build --features "$FEATURES" $BUILD_ARGS cargo test --features "$FEATURES" --all cargo test --features "$FEATURES" --benches cd bencher_compat export CARGO_TARGET_DIR="../target" cargo test --benches cd .. if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then cd macro export CARGO_TARGET_DIR="../target" cargo test --benches cd ..
fi fi criterion.rs-0.3.6/macro/000077500000000000000000000000001426140671200152455ustar00rootroot00000000000000criterion.rs-0.3.6/macro/Cargo.toml000066400000000000000000000013011426140671200171700ustar00rootroot00000000000000[package] name = "criterion-macro" version = "0.3.4" authors = ["Brook Heisler "] edition = "2018" description = "Custom Test Framework macro for Criterion.rs" homepage = "https://bheisler.github.io/criterion.rs/book/index.html" repository = "https://github.com/bheisler/criterion.rs" readme = "README.md" keywords = ["criterion", "benchmark", "macro"] categories = ["development-tools::profiling"] license = "Apache-2.0/MIT" [lib] proc-macro = true [dependencies] proc-macro2 = { version = "1.0", features = ["nightly"] } quote = "1.0" [dev-dependencies] criterion = { version = "0.3.4", path = "..", default-features = false } [[bench]] name = "test_macro_bench" [workspace]criterion.rs-0.3.6/macro/LICENSE-APACHE000066400000000000000000000251371426140671200172010ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. criterion.rs-0.3.6/macro/LICENSE-MIT000066400000000000000000000020411426140671200166760ustar00rootroot00000000000000Copyright (c) 2018 Brook Heisler Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. criterion.rs-0.3.6/macro/README.md000066400000000000000000000020351426140671200165240ustar00rootroot00000000000000# `criterion-macro` This crate provides a procedural macro that allows the use of `#[criterion]` to mark [Criterion.rs] benchmark functions. ## License This project is licensed under either of * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) ([LICENSE-APACHE](LICENSE-APACHE)) * [MIT License](http://opensource.org/licenses/MIT) ([LICENSE-MIT](LICENSE-MIT)) at your option. ## Contributing We welcome all people who want to contribute. Please see the [contributing instructions] for more information. Contributions in any form (issues, pull requests, etc.) to this project must adhere to Rust's [Code of Conduct]. Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
[Code of Conduct]: https://www.rust-lang.org/en-US/conduct.html [Criterion.rs]: https://github.com/bheisler/criterion.rs [contributing instructions]: CONTRIBUTING.md criterion.rs-0.3.6/macro/benches/000077500000000000000000000000001426140671200166545ustar00rootroot00000000000000criterion.rs-0.3.6/macro/benches/test_macro_bench.rs000066400000000000000000000012411426140671200225170ustar00rootroot00000000000000#![feature(custom_test_frameworks)] #![test_runner(criterion::runner)] use criterion::{Criterion, black_box}; use criterion_macro::criterion; fn fibonacci(n: u64) -> u64 { match n { 0 | 1 => 1, n => fibonacci(n - 1) + fibonacci(n - 2), } } fn custom_criterion() -> Criterion { Criterion::default() .sample_size(50) } #[criterion] fn bench_simple(c: &mut Criterion) { c.bench_function("Fibonacci-Simple", |b| b.iter(|| fibonacci(black_box(10)))); } #[criterion(custom_criterion())] fn bench_custom(c: &mut Criterion) { c.bench_function("Fibonacci-Custom", |b| b.iter(|| fibonacci(black_box(20)))); }criterion.rs-0.3.6/macro/src/000077500000000000000000000000001426140671200160345ustar00rootroot00000000000000criterion.rs-0.3.6/macro/src/lib.rs000066400000000000000000000027071426140671200171560ustar00rootroot00000000000000extern crate proc_macro; use proc_macro::TokenStream; use proc_macro2::{Ident, TokenTree}; use quote::quote_spanned; #[proc_macro_attribute] pub fn criterion(attr: TokenStream, item: TokenStream) -> TokenStream { let attr = proc_macro2::TokenStream::from(attr); let item = proc_macro2::TokenStream::from(item); let span = proc_macro2::Span::call_site(); let init = if stream_length(attr.clone()) != 0 { attr } else { quote_spanned!(span=> criterion::Criterion::default()) }; let function_name = find_name(item.clone()); let wrapped_name = Ident::new(&format!("criterion_wrapped_{}", function_name.to_string()), span); let output = quote_spanned!(span=> #[test_case] pub fn #wrapped_name() { #item let mut c = #init.configure_from_args(); #function_name(&mut c); } ); output.into() } fn stream_length(stream: proc_macro2::TokenStream) -> usize { stream.into_iter().count() } fn find_name(stream: proc_macro2::TokenStream) -> Ident { let mut iter = stream.into_iter(); while let Some(tok) = iter.next() { if let TokenTree::Ident(ident) = tok { if ident == "fn" { break; } } } if let Some(TokenTree::Ident(name)) = iter.next() { name } else { panic!("Unable to find function name") } }criterion.rs-0.3.6/plot/000077500000000000000000000000001426140671200151225ustar00rootroot00000000000000criterion.rs-0.3.6/plot/.gitignore000066400000000000000000000000311426140671200171040ustar00rootroot00000000000000*.bk Cargo.lock target/* criterion.rs-0.3.6/plot/CONTRIBUTING.md000077700000000000000000000000001426140671200220122../CONTRIBUTING.mdustar00rootroot00000000000000criterion.rs-0.3.6/plot/Cargo.toml000066400000000000000000000014001426140671200170450ustar00rootroot00000000000000[package] authors = ["Jorge Aparicio ", "Brook Heisler "] name = "criterion-plot" version = "0.4.4" edition = "2018" description = "Criterion's plotting library" repository = "https://github.com/bheisler/criterion.rs" readme = "README.md" keywords = ["plotting", "gnuplot", "criterion"] categories = ["visualization"] license = "MIT/Apache-2.0" [dependencies] cast = "0.2" itertools = "0.10" [dev-dependencies] itertools-num = "0.1" num-complex = { version = "0.2", default-features = false, features = ["std"] } rand = "0.4" [badges] travis-ci = { repository = "bheisler/criterion.rs" } appveyor = { repository = "bheisler/criterion.rs", id = 
"4255ads9ctpupcl2" } maintenance = { status = "looking-for-maintainer" } criterion.rs-0.3.6/plot/LICENSE-APACHE000077700000000000000000000000001426140671200212002../LICENSE-APACHEustar00rootroot00000000000000criterion.rs-0.3.6/plot/LICENSE-MIT000077700000000000000000000000001426140671200204202../LICENSE-MITustar00rootroot00000000000000criterion.rs-0.3.6/plot/README.md000066400000000000000000000024471426140671200164100ustar00rootroot00000000000000# `criterion-plot` > Graphing sub-crate of [Criterion.rs]. This is an unstable implementation detail of [Criterion.rs]. Anything may change at any time with no warning, including the public API. For further information, see [Criterion.rs]. `criterion-plot` is currently looking for a new maintainer. See [this thread](https://users.rust-lang.org/t/call-for-maintainers-criterion-plot/24413) for details. ## License This project is licensed under either of * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) ([LICENSE-APACHE](LICENSE-APACHE)) * [MIT License](http://opensource.org/licenses/MIT) ([LICENSE-MIT](LICENSE-MIT)) at your option. ## Contributing We welcome all people who want to contribute. Please see the [contributing instructions] for more information. Contributions in any form (issues, pull requests, etc.) to this project must adhere to Rust's [Code of Conduct]. Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. [Code of Conduct]: https://www.rust-lang.org/en-US/conduct.html [Criterion.rs]: https://github.com/bheisler/criterion.rs [contributing instructions]: CONTRIBUTING.md criterion.rs-0.3.6/plot/src/000077500000000000000000000000001426140671200157115ustar00rootroot00000000000000criterion.rs-0.3.6/plot/src/axis.rs000066400000000000000000000121031426140671200172200ustar00rootroot00000000000000//! Coordinate axis use std::borrow::Cow; use std::iter::IntoIterator; use crate::map; use crate::traits::{Configure, Data, Set}; use crate::{ grid, Axis, Default, Display, Grid, Label, Range, Scale, ScaleFactor, Script, TicLabels, }; /// Properties of the coordinate axes #[derive(Clone)] pub struct Properties { grids: map::grid::Map, hidden: bool, label: Option>, logarithmic: bool, range: Option<(f64, f64)>, scale_factor: f64, tics: Option, } impl Default for Properties { fn default() -> Properties { Properties { grids: map::grid::Map::new(), hidden: false, label: None, logarithmic: false, range: None, scale_factor: 1., tics: None, } } } impl Properties { /// Hides the axis /// /// **Note** The `TopX` and `RightY` axes are hidden by default pub fn hide(&mut self) -> &mut Properties { self.hidden = true; self } /// Makes the axis visible /// /// **Note** The `BottomX` and `LeftY` axes are visible by default pub fn show(&mut self) -> &mut Properties { self.hidden = false; self } } impl Configure for Properties { type Properties = grid::Properties; /// Configures the gridlines fn configure(&mut self, grid: Grid, configure: F) -> &mut Properties where F: FnOnce(&mut grid::Properties) -> &mut grid::Properties, { if self.grids.contains_key(grid) { configure(self.grids.get_mut(grid).unwrap()); } else { let mut properties = Default::default(); configure(&mut properties); self.grids.insert(grid, properties); } self } } impl Set