wasm-bindgen-test-0.3.58/.cargo_vcs_info.json0000644000000001510000000000100144560ustar { "git": { "sha1": "a788f58f95cb6603ad2cbdf87571b27665d6cbd9" }, "path_in_vcs": "crates/test" }wasm-bindgen-test-0.3.58/Cargo.lock0000644000000253100000000000100124350ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "async-trait" version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bumpalo" version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "shlex", ] [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "find-msvc-tools" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" 
[[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-core", "futures-task", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "gg-alloc" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ee5134f1abeb59e9ba515150ddc26bf2b2207f8bce38e1f5cf8d1596efa22ba" [[package]] name = "itoa" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "js-sys" version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "libm" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "minicov" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4869b6a491569605d66d3952bcdf03df789e5b536e5f0cf7758a7f08a55ae24d" dependencies = [ "cc", "walkdir", ] [[package]] name = "nu-ansi-term" version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ "windows-sys", ] [[package]] name = "num-traits" version = "0.2.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", ] [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "pin-project-lite" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "proc-macro2" version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", "serde", "serde_core", "zmij", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "slab" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "syn" version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "unicode-ident" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasm-bindgen" version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", "futures-util", "js-sys", "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] [[package]] name = "wasm-bindgen-test" version = "0.3.58" dependencies = [ "async-trait", "cast", "gg-alloc", "js-sys", "libm", "minicov", "nu-ansi-term", "num-traits", "oorandom", "serde", "serde_json", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test-macro", "wasm-bindgen-test-shared", ] [[package]] name = "wasm-bindgen-test-macro" version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f579cdd0123ac74b94e1a4a72bd963cf30ebac343f2df347da0b8df24cdebed2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "wasm-bindgen-test-shared" version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a8145dd1593bf0fb137dbfa85b8be79ec560a447298955877804640e40c2d6ea" [[package]] name = "web-sys" version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "winapi-util" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys", ] [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-sys" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ "windows-link", ] [[package]] name = "zmij" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" wasm-bindgen-test-0.3.58/Cargo.toml0000644000000045500000000000100124630ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.71" name = "wasm-bindgen-test" version = "0.3.58" authors = ["The wasm-bindgen Developers"] build = false include = [ "/LICENSE-*", "/src", ] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "Internal testing crate for wasm-bindgen" readme = "README.md" license = "MIT OR Apache-2.0" repository = "https://github.com/wasm-bindgen/wasm-bindgen" [features] default = ["std"] std = [ "wasm-bindgen/std", "js-sys/std", "wasm-bindgen-futures/std", ] [lib] name = "wasm_bindgen_test" path = "src/lib.rs" test = false [dependencies.async-trait] version = "0.1.89" [dependencies.cast] version = "0.3" [dependencies.gg-alloc] version = "1.0" optional = true [dependencies.js-sys] version = "=0.3.85" default-features = false [dependencies.libm] version = "0.2.11" [dependencies.nu-ansi-term] version = "0.50" default-features = false [dependencies.num-traits] version = "0.2" features = ["libm"] default-features = false [dependencies.oorandom] version = "11.1.5" [dependencies.serde] version = "1.0" features = ["derive"] default-features = false [dependencies.serde_json] version = "1.0" features = ["alloc"] default-features = false [dependencies.wasm-bindgen] version = "=0.2.108" default-features = false [dependencies.wasm-bindgen-futures] version = "=0.4.58" default-features = false [dependencies.wasm-bindgen-test-macro] version = "=0.3.58" [dependencies.wasm-bindgen-test-shared] version = "=0.2.108" [target.'cfg(all(target_arch = "wasm32", wasm_bindgen_unstable_test_coverage))'.dependencies.minicov] version = "0.3.8" [lints.clippy] large_enum_variant = "allow" new_without_default = "allow" overly_complex_bool_expr = "allow" too_many_arguments = "allow" type_complexity = "allow" [lints.rust.unexpected_cfgs] level = "warn" priority = 0 check-cfg = ["cfg(wasm_bindgen_unstable_test_coverage)"] wasm-bindgen-test-0.3.58/Cargo.toml.orig000064400000000000000000000032651046102023000161460ustar 
00000000000000[package] authors = ["The wasm-bindgen Developers"] description = "Internal testing crate for wasm-bindgen" edition = "2021" include = ["/LICENSE-*", "/src"] license = "MIT OR Apache-2.0" name = "wasm-bindgen-test" repository = "https://github.com/wasm-bindgen/wasm-bindgen" rust-version = "1.71" version = "0.3.58" [features] default = ["std"] std = ["wasm-bindgen/std", "js-sys/std", "wasm-bindgen-futures/std"] [dependencies] gg-alloc = { version = "1.0", optional = true } js-sys = { path = '../js-sys', version = '=0.3.85', default-features = false } wasm-bindgen = { path = '../..', version = '=0.2.108', default-features = false } wasm-bindgen-futures = { path = '../futures', version = '=0.4.58', default-features = false } wasm-bindgen-test-macro = { path = '../test-macro', version = '=0.3.58' } wasm-bindgen-test-shared = { path = "../test-shared", version = "=0.2.108" } # benchmark required start async-trait = "0.1.89" cast = "0.3" libm = "0.2.11" nu-ansi-term = { version = "0.50", default-features = false } num-traits = { version = "0.2", default-features = false, features = ["libm"] } oorandom = "11.1.5" serde = { version = "1.0", default-features = false, features = ["derive"] } serde_json = { version = "1.0", default-features = false, features = ["alloc"] } # benchmark required end [target.'cfg(all(target_arch = "wasm32", wasm_bindgen_unstable_test_coverage))'.dependencies] minicov = "0.3.8" [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(wasm_bindgen_unstable_test_coverage)'] } [lints.clippy] large_enum_variant = "allow" new_without_default = "allow" overly_complex_bool_expr = "allow" too_many_arguments = "allow" type_complexity = "allow" [lib] test = false wasm-bindgen-test-0.3.58/LICENSE-APACHE000064400000000000000000000251371046102023000152050ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. wasm-bindgen-test-0.3.58/LICENSE-MIT000064400000000000000000000020411046102023000147020ustar 00000000000000Copyright (c) 2014 Alex Crichton Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. wasm-bindgen-test-0.3.58/README.md000064400000000000000000000056611046102023000145400ustar 00000000000000# `wasm-bindgen-test` [**Read the "Testing with `wasm-bindgen-test`" section of the guide!**](https://wasm-bindgen.github.io/wasm-bindgen/wasm-bindgen-test/index.html) ## Components The test harness is made of three separate components, but you typically don't have to worry about most of them. They're documented here for documentation purposes! ### `wasm-bindgen-test-macro` This crate, living at `crates/test-macro`, is a procedural macro that defines the `#[wasm_bindgen_test]` macro. **The normal `#[test]` cannot be used and will not work.** Eventually it's intended that the `#[wasm_bindgen_test]` attribute could gain arguments like "run in a browser" or something like a minimum Node version. For now though the macro is pretty simple and reexported from the next crate, `wasm-bindgen-test`. ### `wasm-bindgen-test` This is the runtime support needed to execute tests. This is basically the same thing as the `test` crate in the Rust repository, and one day it will likely use the `test` crate itself! For now though it's a minimal reimplementation that provides the support for: * Printing what test cases are running * Collecting `console.log` and `console.error` output of each test case for printing later * Rendering the failure output of each test case * Catching JS exceptions so tests can continue to run after a test fails * Driving execution of all tests This is the crate which you actually link to in your Wasm test and through which you import the `#[wasm_bindgen_test]` macro. Otherwise this crate provides a `console_log!` macro that's a utility like `println!` only using `console.log`. 
This crate may grow more functionality in the future, but for now it's somewhat bare bones! ### `wasm-bindgen-test-runner` This is where the secret sauce comes into play. We configured Cargo to execute this binary *instead* of directly executing the `*.wasm` file (which Cargo would otherwise try to do). This means that whenever a test is executed it executes this binary with the Wasm file as an argument, allowing it to take full control over the test process! The test runner is currently pretty simple, executing a few steps: * First, it runs the equivalent of `wasm-bindgen`. This'll generate wasm-bindgen output in a temporary directory. * Next, it generates a small shim JS file which imports these wasm-bindgen-generated files and executes the test harness. * Finally, it executes `node` over the generated JS file, executing all of your tests. In essence what happens is that this test runner automatically executes `wasm-bindgen` and then uses Node to actually execute the Wasm file, meaning that your Wasm code currently runs in a Node environment. ## Future Work Things that'd be awesome to support in the future: * Arguments to `wasm-bindgen-test-runner` which are the same as `wasm-bindgen`, for example `--debug` to affect the generated output. * Running each test in its own Wasm instance to avoid poisoning the environment on panic wasm-bindgen-test-0.3.58/src/coverage.rs000064400000000000000000000020331046102023000161770ustar 00000000000000use alloc::vec::Vec; use wasm_bindgen::prelude::wasm_bindgen; #[cfg(wasm_bindgen_unstable_test_coverage)] #[wasm_bindgen] pub fn __wbgtest_cov_dump() -> Option> { let mut coverage = Vec::new(); // SAFETY: this function is not thread-safe, but our whole test runner is running single-threaded. unsafe { minicov::capture_coverage(&mut coverage).unwrap(); } if coverage.is_empty() { console_error!( "Empty coverage data received. 
Make sure you compile the tests with RUSTFLAGS=\"-Cinstrument-coverage -Zno-profile-runtime --emit=llvm-ir\"", ); } Some(coverage) } #[cfg(not(wasm_bindgen_unstable_test_coverage))] #[wasm_bindgen] pub fn __wbgtest_cov_dump() -> Option> { None } #[cfg(wasm_bindgen_unstable_test_coverage)] #[wasm_bindgen] pub fn __wbgtest_module_signature() -> Option { Some(minicov::module_signature()) } #[cfg(not(wasm_bindgen_unstable_test_coverage))] #[wasm_bindgen] pub fn __wbgtest_module_signature() -> Option { None } wasm-bindgen-test-0.3.58/src/lib.rs000064400000000000000000000113011046102023000151500ustar 00000000000000//! Runtime support for the `#[wasm_bindgen_test]` attribute //! //! More documentation can be found in the README for this crate! #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(wasm_bindgen_unstable_test_coverage, feature(coverage_attribute))] #![cfg_attr(target_feature = "atomics", feature(thread_local))] #![deny(missing_docs)] extern crate alloc; pub use wasm_bindgen_test_macro::{wasm_bindgen_bench, wasm_bindgen_test}; // Custom allocator that only returns pointers in the 2GB-4GB range // To ensure we actually support more than 2GB of memory #[cfg(all(test, feature = "gg-alloc"))] #[global_allocator] static A: gg_alloc::GgAlloc = gg_alloc::GgAlloc::new(std::alloc::System); /// Helper macro which acts like `println!` only routes to `console.error` /// instead. #[macro_export] macro_rules! console_error { ($($arg:tt)*) => ( $crate::__rt::console_error(&format_args!($($arg)*)) ) } /// Helper macro which acts like `println!` only routes to `console.log` /// instead. #[macro_export] macro_rules! console_log { ($($arg:tt)*) => ( $crate::__rt::console_log(&format_args!($($arg)*)) ) } /// A macro used to configured how this test is executed by the /// `wasm-bindgen-test-runner` harness. 
/// /// This macro is invoked as: /// /// ```ignore /// wasm_bindgen_test_configure!(foo bar baz); /// ``` /// /// where all of `foo`, `bar`, and `baz`, would be recognized options to this /// macro. The currently known options to this macro are: /// /// * `run_in_browser` - requires that this test is run in a browser rather than /// node.js, which is the default for executing tests. /// * `run_in_dedicated_worker` - requires that this test is run in a web worker rather than /// node.js, which is the default for executing tests. /// * `run_in_shared_worker` - requires that this test is run in a shared worker rather than /// node.js, which is the default for executing tests. /// * `run_in_service_worker` - requires that this test is run in a service worker rather than /// node.js, which is the default for executing tests. /// /// This macro may be invoked at most one time per test suite (an entire binary /// like `tests/foo.rs`, not per module) #[macro_export] macro_rules! wasm_bindgen_test_configure { (run_in_browser $($others:tt)*) => ( const _: () = { #[link_section = "__wasm_bindgen_test_unstable"] #[cfg(target_arch = "wasm32")] pub static __WBG_TEST_RUN_IN_BROWSER: [u8; 1] = [0x01]; $crate::wasm_bindgen_test_configure!($($others)*); }; ); (run_in_worker $($others:tt)*) => ( const _: () = { #[link_section = "__wasm_bindgen_test_unstable"] #[cfg(target_arch = "wasm32")] pub static __WBG_TEST_RUN_IN_DEDICATED_WORKER: [u8; 1] = [0x02]; $crate::wasm_bindgen_test_configure!($($others)*); }; ); (run_in_dedicated_worker $($others:tt)*) => ( const _: () = { #[link_section = "__wasm_bindgen_test_unstable"] #[cfg(target_arch = "wasm32")] pub static __WBG_TEST_RUN_IN_DEDICATED_WORKER: [u8; 1] = [0x02]; $crate::wasm_bindgen_test_configure!($($others)*); }; ); (run_in_shared_worker $($others:tt)*) => ( const _: () = { #[link_section = "__wasm_bindgen_test_unstable"] #[cfg(target_arch = "wasm32")] pub static __WBG_TEST_RUN_IN_SHARED_WORKER: [u8; 1] = [0x03]; 
$crate::wasm_bindgen_test_configure!($($others)*); }; ); (run_in_service_worker $($others:tt)*) => ( const _: () = { #[link_section = "__wasm_bindgen_test_unstable"] #[cfg(target_arch = "wasm32")] pub static __WBG_TEST_RUN_IN_SERVICE_WORKER: [u8; 1] = [0x04]; $crate::wasm_bindgen_test_configure!($($others)*); }; ); (run_in_node_experimental $($others:tt)*) => ( const _: () = { #[link_section = "__wasm_bindgen_test_unstable"] #[cfg(target_arch = "wasm32")] pub static __WBG_TEST_run_in_node_experimental: [u8; 1] = [0x05]; $crate::wasm_bindgen_test_configure!($($others)*); }; ); () => () } #[path = "rt/mod.rs"] pub mod __rt; // Make this only available to wasm32 so that we don't // import minicov on other archs. // That way you can use normal cargo test without minicov #[cfg(target_arch = "wasm32")] #[cfg_attr(wasm_bindgen_unstable_test_coverage, coverage(off))] mod coverage; // // // A modified `criterion.rs`, retaining only the basic benchmark capabilities. pub use __rt::criterion::Criterion; // web_time Instant pub use __rt::web_time::Instant; wasm-bindgen-test-0.3.58/src/rt/browser.rs000064400000000000000000000050661046102023000165250ustar 00000000000000//! Support for printing status information of a test suite in a browser. //! //! Currently this is quite simple, rendering the same as the console tests in //! node.js. Output here is rendered in a `pre`, however. use alloc::format; use alloc::string::String; use js_sys::Error; use wasm_bindgen::prelude::*; /// Implementation of `Formatter` for browsers. /// /// Routes all output to a `pre` on the page currently. Eventually this probably /// wants to be a pretty table with colors and folding and whatnot. 
pub struct Browser { pre: Element, } #[wasm_bindgen] extern "C" { type HTMLDocument; #[wasm_bindgen(thread_local_v2, js_name = document)] static DOCUMENT: HTMLDocument; #[wasm_bindgen(method, structural)] fn getElementById(this: &HTMLDocument, id: &str) -> Element; type Element; #[wasm_bindgen(method, getter = textContent, structural)] fn text_content(this: &Element) -> String; #[wasm_bindgen(method, setter = textContent, structural)] fn set_text_content(this: &Element, text: &str); type BrowserError; #[wasm_bindgen(method, getter, structural)] fn stack(this: &BrowserError) -> JsValue; } impl Browser { /// Creates a new instance of `Browser`, assuming that its APIs will work /// (requires `Node::new()` to have return `None` first). pub fn new() -> Browser { let pre = DOCUMENT.with(|document| document.getElementById("output")); pre.set_text_content(""); Browser { pre } } } impl super::Formatter for Browser { fn writeln(&self, line: &str) { let mut html = self.pre.text_content(); html.extend(line.chars().chain(Some('\n'))); self.pre.set_text_content(&html); } fn stringify_error(&self, err: &JsValue) -> String { // TODO: this should be a checked cast to `Error` let err = Error::from(err.clone()); let name = String::from(err.name()); let message = String::from(err.message()); let err = BrowserError::from(JsValue::from(err)); let stack = err.stack(); let header = format!("{}: {}", name, message); let stack = match stack.as_string() { Some(stack) => stack, None => return header, }; // If the `stack` variable contains the name/message already, this is // probably a chome-like error which is already rendered well, so just // return this info if stack.contains(&header) { return stack; } // Fallback to make sure we don't lose any info format!("{}\n{}", header, stack) } } wasm-bindgen-test-0.3.58/src/rt/criterion/analysis.rs000064400000000000000000000113041046102023000206530ustar 00000000000000use super::stats::bivariate::regression::Slope; use super::stats::bivariate::Data; 
use super::stats::univariate::Sample; use super::stats::{Distribution, Tails}; use super::benchmark::BenchmarkConfig; use super::estimate::{ build_estimates, ConfidenceInterval, Distributions, Estimate, Estimates, PointEstimates, }; use super::measurement::Measurement; use super::report::{BenchmarkId, Report}; use super::routine::Routine; use super::{baseline, compare, Criterion, SavedSample}; use alloc::vec::Vec; // Common analysis procedure pub(crate) async fn common( id: &BenchmarkId, routine: &mut dyn Routine, config: &BenchmarkConfig, criterion: &Criterion, ) { criterion.report.benchmark_start(id); let (sampling_mode, iters, times); let sample = routine .sample(&criterion.measurement, id, config, criterion) .await; sampling_mode = sample.0; iters = sample.1; times = sample.2; criterion.report.analysis(id); if times.contains(&0.0) { return; } let avg_times = iters .iter() .zip(times.iter()) .map(|(&iters, &elapsed)| elapsed / iters) .collect::>(); let avg_times = Sample::new(&avg_times); let labeled_sample = super::stats::univariate::outliers::tukey::classify(avg_times); let data = Data::new(&iters, ×); let (mut distributions, mut estimates) = estimates(avg_times, config); if sampling_mode.is_linear() { let (distribution, slope) = regression(&data, config); estimates.slope = Some(slope); distributions.slope = Some(distribution); } let comparison = compare::common(id, avg_times, config).map( |(t_value, t_distribution, relative_estimates, ..)| { let p_value = t_distribution.p_value(t_value, &Tails::Two); super::report::ComparisonData { p_value, relative_estimates, significance_threshold: config.significance_level, noise_threshold: config.noise_threshold, } }, ); let measurement_data = super::report::MeasurementData { avg_times: labeled_sample, absolute_estimates: estimates.clone(), comparison, }; criterion .report .measurement_complete(id, &measurement_data, criterion.measurement.formatter()); baseline::write( id.desc(), baseline::BenchmarkBaseline { file: 
criterion.location.as_ref().map(|l| l.file.clone()), module_path: criterion.location.as_ref().map(|l| l.module_path.clone()), iters: data.x().as_ref().to_vec(), times: data.y().as_ref().to_vec(), sample: SavedSample { sampling_mode, iters: data.x().as_ref().to_vec(), times: data.y().as_ref().to_vec(), }, estimates, }, ); } // Performs a simple linear regression on the sample fn regression( data: &Data<'_, f64, f64>, config: &BenchmarkConfig, ) -> (Distribution, Estimate) { let cl = config.confidence_level; let distribution = data.bootstrap(config.nresamples, |d| (Slope::fit(&d).0,)).0; let point = Slope::fit(data); let (lb, ub) = distribution.confidence_interval(config.confidence_level); let se = distribution.std_dev(None); ( distribution, Estimate { confidence_interval: ConfidenceInterval { confidence_level: cl, lower_bound: lb, upper_bound: ub, }, point_estimate: point.0, standard_error: se, }, ) } // Estimates the statistics of the population from the sample fn estimates(avg_times: &Sample, config: &BenchmarkConfig) -> (Distributions, Estimates) { fn stats(sample: &Sample) -> (f64, f64, f64, f64) { let mean = sample.mean(); let std_dev = sample.std_dev(Some(mean)); let median = sample.percentiles().median(); let mad = sample.median_abs_dev(Some(median)); (mean, std_dev, median, mad) } let cl = config.confidence_level; let nresamples = config.nresamples; let (mean, std_dev, median, mad) = stats(avg_times); let points = PointEstimates { mean, median, std_dev, median_abs_dev: mad, }; let (dist_mean, dist_stddev, dist_median, dist_mad) = avg_times.bootstrap(nresamples, stats); let distributions = Distributions { mean: dist_mean, slope: None, median: dist_median, median_abs_dev: dist_mad, std_dev: dist_stddev, }; let estimates = build_estimates(&distributions, &points, cl); (distributions, estimates) } wasm-bindgen-test-0.3.58/src/rt/criterion/baseline.rs000064400000000000000000000034351046102023000206200ustar 00000000000000//! 
Record previous benchmark data use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::wasm_bindgen; use super::{estimate::Estimates, SavedSample}; use alloc::collections::BTreeMap; use alloc::string::String; use alloc::vec::Vec; use core::cell::RefCell; use wasm_bindgen::__rt::LazyCell; #[cfg_attr(target_feature = "atomics", thread_local)] static BASELINE: LazyCell>> = LazyCell::new(|| RefCell::new(BTreeMap::new())); #[derive(Debug, Serialize, Deserialize, Clone)] pub(crate) struct BenchmarkBaseline { pub(crate) file: Option, pub(crate) module_path: Option, pub(crate) iters: Vec, pub(crate) times: Vec, pub(crate) sample: SavedSample, pub(crate) estimates: Estimates, } /// Write the corresponding benchmark ID and corresponding data into the table. pub(crate) fn write(id: &str, baseline: BenchmarkBaseline) { BASELINE.borrow_mut().insert(id.into(), baseline); } /// Read the data corresponding to the benchmark ID from the table. pub(crate) fn read(id: &str) -> Option { BASELINE.borrow().get(id).cloned() } /// Used to write previous benchmark data before the benchmark, for later comparison. #[wasm_bindgen] pub fn __wbgbench_import(baseline: Vec) { match serde_json::from_slice(&baseline) { Ok(prev) => { *BASELINE.borrow_mut() = prev; } Err(e) => { console_log!("Failed to import previous benchmark {e:?}"); } } } /// Used to read benchmark data, and then the runner stores it on the local disk. 
#[wasm_bindgen] pub fn __wbgbench_dump() -> Option> { let baseline = BASELINE.borrow(); if baseline.is_empty() { return None; } serde_json::to_vec(&*baseline).ok() } wasm-bindgen-test-0.3.58/src/rt/criterion/bencher.rs000064400000000000000000000150071046102023000204420ustar 00000000000000use super::measurement::Measurement; use crate::__rt::web_time::Instant; use core::future::Future; use core::hint::black_box; use core::time::Duration; // ================================== MAINTENANCE NOTE ============================================= // Any changes made to either Bencher or AsyncBencher will have to be replicated to the other! // ================================== MAINTENANCE NOTE ============================================= /// Timer struct used to iterate a benchmarked function and measure the runtime. /// /// This struct provides different timing loops as methods. Each timing loop provides a different /// way to time a routine and each has advantages and disadvantages. /// /// * If you want to do the iteration and measurement yourself (eg. passing the iteration count /// to a separate process), use [`iter_custom`]. /// * If your routine requires no per-iteration setup and returns a value with an expensive `drop` /// method, use [`iter_with_large_drop`]. /// * If your routine requires some per-iteration setup that shouldn't be timed, use [`iter_batched`] /// or [`iter_batched_ref`]. See [`BatchSize`] for a discussion of batch sizes. /// If the setup value implements `Drop` and you don't want to include the `drop` time in the /// measurement, use [`iter_batched_ref`], otherwise use [`iter_batched`]. These methods are also /// suitable for benchmarking routines which return a value with an expensive `drop` method, /// but are more complex than [`iter_with_large_drop`]. /// * Otherwise, use [`iter`]. 
/// /// [`iter`]: Bencher::iter /// [`iter_custom`]: Bencher::iter_custom /// [`iter_future`]: Bencher::iter_future /// [`iter_custom_future`]: Bencher::iter_custom_future pub struct Bencher<'a, M: Measurement> { pub(crate) iterated: bool, // Have we iterated this benchmark? pub(crate) iters: u64, // Number of times to iterate this benchmark pub(crate) value: Duration, // The measured value pub(crate) measurement: &'a M, // Reference to the measurement object pub(crate) elapsed_time: Duration, // How much time did it take to perform the iteration? Used for the warmup period. } impl<'a, M: Measurement> Bencher<'a, M> { /// Times a `routine` by executing it many times and timing the total elapsed time. /// /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor. /// /// # Timing model /// /// Note that the `Bencher` also times the time required to destroy the output of `routine()`. /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared /// to the runtime of the `routine`. /// /// ```text /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next) /// ``` #[inline(never)] pub fn iter(&mut self, mut routine: R) where R: FnMut() -> O, { self.iterated = true; let start = self.measurement.start(); for _ in 0..self.iters { black_box(routine()); } let end = self.measurement.end(start); self.value = end; self.elapsed_time = end; } /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time. /// /// # Timing model /// Custom, the timing model is whatever is returned as the [`Duration`] from `routine`. /// /// # Example /// ```rust /// use wasm_bindgen_test::{Criterion, wasm_bindgen_bench, Instant}; /// /// fn foo() { /// // ... 
/// } /// /// #[wasm_bindgen_bench] /// fn bench(c: &mut Criterion) { /// c.bench_function("iter", move |b| { /// b.iter_custom(|iters| { /// let start = Instant::now(); /// for _i in 0..iters { /// std::hint::black_box(foo()); /// } /// start.elapsed() /// }) /// }); /// } /// ``` /// #[inline(never)] pub fn iter_custom(&mut self, mut routine: R) where R: FnMut(u64) -> Duration, { self.iterated = true; let time_start = Instant::now(); self.value = routine(self.iters); self.elapsed_time = time_start.elapsed(); } /// Times a `routine` by executing it many times and timing the total elapsed time. /// /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor. /// /// # Timing model /// /// Note that the `Bencher` also times the time required to destroy the output of `routine()`. /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared /// to the runtime of the `routine`. /// /// ```text /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next) /// ``` #[inline(never)] pub async fn iter_future(&mut self, mut routine: R) where R: FnMut() -> Fut, Fut: Future, { self.iterated = true; let start = self.measurement.start(); for _ in 0..self.iters { black_box(routine().await); } let end = self.measurement.end(start); self.value = end; self.elapsed_time = end; } /// Times a `routine` by executing it many times and relying on `routine` to measure its own execution time. /// /// # Timing model /// Custom, the timing model is whatever is returned as the [`Duration`] from `routine`. /// /// # Example /// ```rust /// use wasm_bindgen_test::{Criterion, wasm_bindgen_bench, Instant}; /// /// async fn foo() { /// // ... 
/// } /// /// #[wasm_bindgen_bench] /// async fn bench(c: &mut Criterion) { /// c.bench_async_function("iter", move |b| { /// Box::pin( /// b.iter_custom_future(async |iters| { /// let start = Instant::now(); /// for _i in 0..iters { /// std::hint::black_box(foo().await); /// } /// start.elapsed() /// }) /// ) /// }).await; /// } /// ``` /// #[inline(never)] pub async fn iter_custom_future(&mut self, mut routine: R) where R: FnMut(u64) -> Fut, Fut: Future, { self.iterated = true; let time_start = Instant::now(); self.value = routine(self.iters).await; self.elapsed_time = time_start.elapsed(); } } wasm-bindgen-test-0.3.58/src/rt/criterion/benchmark.rs000064400000000000000000000007501046102023000207650ustar 00000000000000use super::SamplingMode; use core::time::Duration; // TODO: Move the benchmark config stuff to a separate module for easier use. /// Struct containing all of the configuration options for a benchmark. pub struct BenchmarkConfig { pub confidence_level: f64, pub measurement_time: Duration, pub noise_threshold: f64, pub nresamples: usize, pub sample_size: usize, pub significance_level: f64, pub warm_up_time: Duration, pub sampling_mode: SamplingMode, } wasm-bindgen-test-0.3.58/src/rt/criterion/compare.rs000064400000000000000000000061571046102023000204700ustar 00000000000000use super::stats::univariate::Sample; use super::stats::univariate::{self, mixed}; use super::stats::Distribution; use super::benchmark::BenchmarkConfig; use super::estimate::{ build_change_estimates, ChangeDistributions, ChangeEstimates, ChangePointEstimates, Estimates, }; use super::report::BenchmarkId; use super::SavedSample; use alloc::vec::Vec; // Common comparison procedure #[allow(clippy::type_complexity)] pub(crate) fn common( id: &BenchmarkId, avg_times: &Sample, config: &BenchmarkConfig, ) -> Option<( f64, Distribution, ChangeEstimates, ChangeDistributions, Vec, Vec, Vec, Estimates, )> { let prev = super::baseline::read(id.desc())?; let SavedSample { iters, times, .. 
} = prev.sample; let base_estimates: Estimates = prev.estimates; let base_avg_times: Vec = iters .iter() .zip(times.iter()) .map(|(iters, elapsed)| elapsed / iters) .collect(); let base_avg_time_sample = Sample::new(&base_avg_times); let (t_statistic, t_distribution) = t_test(avg_times, base_avg_time_sample, config); let (estimates, relative_distributions) = estimates(avg_times, base_avg_time_sample, config); Some(( t_statistic, t_distribution, estimates, relative_distributions, iters, times, base_avg_times.clone(), base_estimates, )) } // Performs a two sample t-test fn t_test( avg_times: &Sample, base_avg_times: &Sample, config: &BenchmarkConfig, ) -> (f64, Distribution) { let nresamples = config.nresamples; let t_statistic = avg_times.t(base_avg_times); let t_distribution = mixed::bootstrap(avg_times, base_avg_times, nresamples, |a, b| (a.t(b),)).0; // HACK: Filter out non-finite numbers, which can happen sometimes when sample size is very small. // Downstream code doesn't like non-finite values here. 
let t_distribution = Distribution::from( t_distribution .iter() .filter(|a| a.is_finite()) .cloned() .collect::>() .into_boxed_slice(), ); (t_statistic, t_distribution) } // Estimates the relative change in the statistics of the population fn estimates( avg_times: &Sample, base_avg_times: &Sample, config: &BenchmarkConfig, ) -> (ChangeEstimates, ChangeDistributions) { fn stats(a: &Sample, b: &Sample) -> (f64, f64) { ( a.mean() / b.mean() - 1., a.percentiles().median() / b.percentiles().median() - 1., ) } let cl = config.confidence_level; let nresamples = config.nresamples; let (dist_mean, dist_median) = univariate::bootstrap(avg_times, base_avg_times, nresamples, stats); let distributions = ChangeDistributions { mean: dist_mean, median: dist_median, }; let (mean, median) = stats(avg_times, base_avg_times); let points = ChangePointEstimates { mean, median }; let estimates = build_change_estimates(&distributions, &points, cl); (estimates, distributions) } wasm-bindgen-test-0.3.58/src/rt/criterion/estimate.rs000064400000000000000000000061411046102023000206460ustar 00000000000000use super::stats::Distribution; use serde::{Deserialize, Serialize}; #[derive(Clone, PartialEq, Deserialize, Serialize, Debug)] pub struct ConfidenceInterval { pub confidence_level: f64, pub lower_bound: f64, pub upper_bound: f64, } #[derive(Clone, PartialEq, Deserialize, Serialize, Debug)] pub struct Estimate { /// The confidence interval for this estimate pub confidence_interval: ConfidenceInterval, /// The value of this estimate pub point_estimate: f64, /// The standard error of this estimate pub standard_error: f64, } pub fn build_estimates( distributions: &Distributions, points: &PointEstimates, cl: f64, ) -> Estimates { let to_estimate = |point_estimate, distribution: &Distribution| { let (lb, ub) = distribution.confidence_interval(cl); Estimate { confidence_interval: ConfidenceInterval { confidence_level: cl, lower_bound: lb, upper_bound: ub, }, point_estimate, standard_error: 
distribution.std_dev(None), } }; Estimates { mean: to_estimate(points.mean, &distributions.mean), median: to_estimate(points.median, &distributions.median), median_abs_dev: to_estimate(points.median_abs_dev, &distributions.median_abs_dev), slope: None, std_dev: to_estimate(points.std_dev, &distributions.std_dev), } } pub fn build_change_estimates( distributions: &ChangeDistributions, points: &ChangePointEstimates, cl: f64, ) -> ChangeEstimates { let to_estimate = |point_estimate, distribution: &Distribution| { let (lb, ub) = distribution.confidence_interval(cl); Estimate { confidence_interval: ConfidenceInterval { confidence_level: cl, lower_bound: lb, upper_bound: ub, }, point_estimate, standard_error: distribution.std_dev(None), } }; ChangeEstimates { mean: to_estimate(points.mean, &distributions.mean), median: to_estimate(points.median, &distributions.median), } } pub struct PointEstimates { pub mean: f64, pub median: f64, pub median_abs_dev: f64, pub std_dev: f64, } #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Estimates { pub mean: Estimate, pub median: Estimate, pub median_abs_dev: Estimate, pub slope: Option, pub std_dev: Estimate, } impl Estimates { pub fn typical(&self) -> &Estimate { self.slope.as_ref().unwrap_or(&self.mean) } } pub struct Distributions { pub mean: Distribution, pub median: Distribution, pub median_abs_dev: Distribution, pub slope: Option>, pub std_dev: Distribution, } pub struct ChangePointEstimates { pub mean: f64, pub median: f64, } #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ChangeEstimates { pub mean: Estimate, pub median: Estimate, } pub struct ChangeDistributions { pub mean: Distribution, pub median: Distribution, } wasm-bindgen-test-0.3.58/src/rt/criterion/format.rs000064400000000000000000000042061046102023000203230ustar 00000000000000use alloc::format; use alloc::string::String; use libm::{fabs, pow}; pub fn change(pct: f64, signed: bool) -> String { if signed { format!("{:>+6}%", signed_short(pct * 
1e2)) } else { format!("{:>6}%", short(pct * 1e2)) } } pub fn time(ns: f64) -> String { if ns < 1.0 { format!("{:>6} ps", short(ns * 1e3)) } else if ns < pow(10f64, 3f64) { format!("{:>6} ns", short(ns)) } else if ns < pow(10f64, 6f64) { format!("{:>6} µs", short(ns / 1e3)) } else if ns < pow(10f64, 9f64) { format!("{:>6} ms", short(ns / 1e6)) } else { format!("{:>6} s", short(ns / 1e9)) } } pub fn short(n: f64) -> String { if n < 10.0 { format!("{:.4}", n) } else if n < 100.0 { format!("{:.3}", n) } else if n < 1000.0 { format!("{:.2}", n) } else if n < 10000.0 { format!("{:.1}", n) } else { format!("{:.0}", n) } } fn signed_short(n: f64) -> String { let n_abs = fabs(n); let sign = if n >= 0.0 { '+' } else { '\u{2212}' }; if n_abs < 10.0 { format!("{}{:.4}", sign, n_abs) } else if n_abs < 100.0 { format!("{}{:.3}", sign, n_abs) } else if n_abs < 1000.0 { format!("{}{:.2}", sign, n_abs) } else if n_abs < 10000.0 { format!("{}{:.1}", sign, n_abs) } else { format!("{}{:.0}", sign, n_abs) } } pub fn iter_count(iterations: u64) -> String { if iterations < 10_000 { format!("{} iterations", iterations) } else if iterations < 1_000_000 { format!("{:.0}k iterations", (iterations as f64) / 1000.0) } else if iterations < 10_000_000 { format!("{:.1}M iterations", (iterations as f64) / (1000.0 * 1000.0)) } else if iterations < 1_000_000_000 { format!("{:.0}M iterations", (iterations as f64) / (1000.0 * 1000.0)) } else if iterations < 10_000_000_000 { format!( "{:.1}B iterations", (iterations as f64) / (1000.0 * 1000.0 * 1000.0) ) } else { format!( "{:.0}B iterations", (iterations as f64) / (1000.0 * 1000.0 * 1000.0) ) } } wasm-bindgen-test-0.3.58/src/rt/criterion/measurement.rs000064400000000000000000000237511046102023000213660ustar 00000000000000//! This module defines a set of traits that can be used to plug different measurements (eg. //! Unix's Processor Time, CPU or GPU performance counters, etc.) into Criterion.rs. It also //! 
includes the [`WallTime`] struct which defines the default wall-clock time measurement. use super::format::short; use super::Throughput; use crate::__rt::web_time::Instant; use alloc::format; use alloc::string::String; use core::time::Duration; use libm::pow; /// Trait providing functions to format measured values to string so that they can be displayed on /// the command line or in the reports. The functions of this trait take measured values in f64 /// form; implementors can assume that the values are of the same scale as those produced by the /// associated [`Measurement`] (eg. if your measurement produces values in nanoseconds, the /// values passed to the formatter will be in nanoseconds). /// /// Implementors are encouraged to format the values in a way that is intuitive for humans and /// uses the SI prefix system. For example, the format used by [`WallTime`] can display the value /// in units ranging from picoseconds to seconds depending on the magnitude of the elapsed time /// in nanoseconds. pub trait ValueFormatter { /// Format the value (with appropriate unit) and return it as a string. fn format_value(&self, value: f64) -> String { let mut values = [value]; let unit = self.scale_values(value, &mut values); format!("{:>6} {}", short(values[0]), unit) } /// Format the value as a throughput measurement. The value represents the measurement value; /// the implementor will have to calculate bytes per second, iterations per cycle, etc. fn format_throughput(&self, throughput: &Throughput, value: f64) -> String { let mut values = [value]; let unit = self.scale_throughputs(value, throughput, &mut values); format!("{:>6} {}", short(values[0]), unit) } /// Scale the given values to some appropriate unit and return the unit string. /// /// The given typical value should be used to choose the unit. 
This function may be called /// multiple times with different datasets; the typical value will remain the same to ensure /// that the units remain consistent within a graph. The typical value will not be NaN. /// Values will not contain NaN as input, and the transformed values must not contain NaN. fn scale_values(&self, typical_value: f64, values: &mut [f64]) -> &'static str; /// Convert the given measured values into throughput numbers based on the given throughput /// value, scale them to some appropriate unit, and return the unit string. /// /// The given typical value should be used to choose the unit. This function may be called /// multiple times with different datasets; the typical value will remain the same to ensure /// that the units remain consistent within a graph. The typical value will not be NaN. /// Values will not contain NaN as input, and the transformed values must not contain NaN. fn scale_throughputs( &self, typical_value: f64, throughput: &Throughput, values: &mut [f64], ) -> &'static str; /// Scale the values and return a unit string designed for machines. /// /// For example, this is used for the CSV file output. Implementations should modify the given /// values slice to apply the desired scaling (if any) and return a string representing the unit /// the modified values are in. fn scale_for_machines(&self, values: &mut [f64]) -> &'static str; } /// Trait for all types which define something Criterion.rs can measure. The only measurement /// currently provided is [`WallTime`], but third party crates or benchmarks may define more. /// /// This trait defines two core methods, `start` and `end`. `start` is called at the beginning of /// a measurement to produce some intermediate value (for example, the wall-clock time at the start /// of that set of iterations) and `end` is called at the end of the measurement with the value /// returned by `start`. 
/// pub trait Measurement { /// This type represents an intermediate value for the measurements. It will be produced by the /// start function and passed to the end function. An example might be the wall-clock time as /// of the `start` call. type Intermediate; /// Criterion.rs will call this before iterating the benchmark. fn start(&self) -> Self::Intermediate; /// Criterion.rs will call this after iterating the benchmark to get the measured value. fn end(&self, i: Self::Intermediate) -> Duration; /// Combine two values. Criterion.rs sometimes needs to perform measurements in multiple batches /// of iterations, so the value from one batch must be added to the sum of the previous batches. fn add(&self, v1: &Duration, v2: &Duration) -> Duration; /// Return a "zero" value for the Value type which can be added to another value. fn zero(&self) -> Duration; /// Converts the measured value to f64 so that it can be used in statistical analysis. fn to_f64(&self, value: &Duration) -> f64; /// Return a trait-object reference to the value formatter for this measurement. 
fn formatter(&self) -> &dyn ValueFormatter; } /// Default Formatter pub(crate) struct DurationFormatter; impl DurationFormatter { fn bytes_per_second(&self, bytes: f64, typical: f64, values: &mut [f64]) -> &'static str { let bytes_per_second = bytes * (1e9 / typical); let (denominator, unit) = if bytes_per_second < 1024.0 { (1.0, " B/s") } else if bytes_per_second < 1024.0 * 1024.0 { (1024.0, "KiB/s") } else if bytes_per_second < 1024.0 * 1024.0 * 1024.0 { (1024.0 * 1024.0, "MiB/s") } else { (1024.0 * 1024.0 * 1024.0, "GiB/s") }; for val in values { let bytes_per_second = bytes * (1e9 / *val); *val = bytes_per_second / denominator; } unit } fn bytes_per_second_decimal( &self, bytes: f64, typical: f64, values: &mut [f64], ) -> &'static str { let bytes_per_second = bytes * (1e9 / typical); let (denominator, unit) = if bytes_per_second < 1000.0 { (1.0, " B/s") } else if bytes_per_second < 1000.0 * 1000.0 { (1000.0, "KB/s") } else if bytes_per_second < 1000.0 * 1000.0 * 1000.0 { (1000.0 * 1000.0, "MB/s") } else { (1000.0 * 1000.0 * 1000.0, "GB/s") }; for val in values { let bytes_per_second = bytes * (1e9 / *val); *val = bytes_per_second / denominator; } unit } fn elements_per_second(&self, elems: f64, typical: f64, values: &mut [f64]) -> &'static str { let elems_per_second = elems * (1e9 / typical); let (denominator, unit) = if elems_per_second < 1000.0 { (1.0, " elem/s") } else if elems_per_second < 1000.0 * 1000.0 { (1000.0, "Kelem/s") } else if elems_per_second < 1000.0 * 1000.0 * 1000.0 { (1000.0 * 1000.0, "Melem/s") } else { (1000.0 * 1000.0 * 1000.0, "Gelem/s") }; for val in values { let elems_per_second = elems * (1e9 / *val); *val = elems_per_second / denominator; } unit } fn bits_per_second(&self, bits: f64, typical: f64, values: &mut [f64]) -> &'static str { let bits_per_second = bits * (1e9 / typical); let (denominator, unit) = if bits_per_second < 1000.0 { (1.0, " b/s") } else if bits_per_second < 1000.0 * 1000.0 { (1000.0, "Kb/s") } else if 
bits_per_second < 1000.0 * 1000.0 * 1000.0 { (1000.0 * 1000.0, "Mb/s") } else { (1000.0 * 1000.0 * 1000.0, "Gb/s") }; for val in values { let bits_per_second = bits * (1e9 / *val); *val = bits_per_second / denominator; } unit } } impl ValueFormatter for DurationFormatter { fn scale_throughputs( &self, typical: f64, throughput: &Throughput, values: &mut [f64], ) -> &'static str { match *throughput { Throughput::Bytes(bytes) => self.bytes_per_second(bytes as f64, typical, values), Throughput::BytesDecimal(bytes) => { self.bytes_per_second_decimal(bytes as f64, typical, values) } Throughput::Elements(elems) => self.elements_per_second(elems as f64, typical, values), Throughput::Bits(bits) => self.bits_per_second(bits as f64, typical, values), } } fn scale_values(&self, ns: f64, values: &mut [f64]) -> &'static str { let (factor, unit) = if ns < pow(10f64, 0f64) { (pow(10f64, 3f64), "ps") } else if ns < pow(10f64, 3f64) { (pow(10f64, 0f64), "ns") } else if ns < pow(10f64, 6f64) { (pow(10f64, -3f64), "µs") } else if ns < pow(10f64, 9f64) { (pow(10f64, -6f64), "ms") } else { (pow(10f64, -9f64), "s") }; for val in values { *val *= factor; } unit } fn scale_for_machines(&self, _values: &mut [f64]) -> &'static str { // no scaling is needed "ns" } } /// `WallTime` is the default measurement in Criterion.rs. It measures the elapsed time from the /// beginning of a series of iterations to the end. pub struct WallTime; impl Measurement for WallTime { type Intermediate = Instant; fn start(&self) -> Self::Intermediate { Instant::now() } fn end(&self, i: Self::Intermediate) -> Duration { i.elapsed() } fn add(&self, v1: &Duration, v2: &Duration) -> Duration { *v1 + *v2 } fn zero(&self) -> Duration { Duration::from_secs(0) } fn to_f64(&self, val: &Duration) -> f64 { val.as_nanos() as f64 } fn formatter(&self) -> &dyn ValueFormatter { &DurationFormatter } } wasm-bindgen-test-0.3.58/src/rt/criterion/mod.rs000064400000000000000000000510651046102023000176170ustar 00000000000000//! 
A statistics-driven micro-benchmarking library written in Rust. //! //! This crate is a microbenchmarking library which aims to provide strong //! statistical confidence in detecting and estimating the size of performance //! improvements and regressions, while also being easy to use. //! //! See //! [the user guide](https://bheisler.github.io/criterion.rs/book/index.html) //! for examples as well as details on the measurement and analysis process, //! and the output. //! //! ## Features: //! * Collects detailed statistics, providing strong confidence that changes //! to performance are real, not measurement noise. //! * Produces detailed charts, providing thorough understanding of your code's //! performance behavior. #![warn(clippy::doc_markdown, missing_docs)] #![warn(bare_trait_objects)] #![allow( clippy::just_underscores_and_digits, // Used in the stats code clippy::transmute_ptr_to_ptr, // Used in the stats code )] // Needs to be declared before other modules // in order to be usable there. 
mod analysis; mod baseline; mod bencher; mod benchmark; mod compare; mod estimate; mod format; mod measurement; mod report; mod routine; mod stats; use core::future::Future; use core::pin::Pin; use core::ptr; use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; use core::time::Duration; use libm::{ceil, sqrt}; use serde::{Deserialize, Serialize}; use alloc::boxed::Box; use alloc::string::String; use alloc::vec; use alloc::vec::Vec; use benchmark::BenchmarkConfig; use measurement::WallTime; use report::WasmReport; pub use bencher::Bencher; pub use measurement::Measurement; /// The benchmark manager /// /// `Criterion` lets you configure and execute benchmarks /// /// Each benchmark consists of four phases: /// /// - **Warm-up**: The routine is repeatedly executed, to let the CPU/OS/JIT/interpreter adapt to /// the new load /// - **Measurement**: The routine is repeatedly executed, and timing information is collected into /// a sample /// - **Analysis**: The sample is analyzed and distilled into meaningful statistics that get /// reported to stdout, stored in files, and plotted /// - **Comparison**: The current sample is compared with the sample obtained in the previous /// benchmark. 
pub struct Criterion<M: Measurement = WallTime> {
    config: BenchmarkConfig,
    report: WasmReport,
    measurement: M,
    // File/module location for codspeed-style reporting, set via `with_location`.
    location: Option<Location>,
}

pub(crate) struct Location {
    file: String,
    module_path: String,
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 0.95
    /// - Significance level: 0.05
    fn default() -> Criterion {
        Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::from_secs(5),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::from_secs(3),
                sampling_mode: SamplingMode::Auto,
            },
            report: WasmReport,
            measurement: WallTime,
            location: None,
        }
    }
}

impl<M: Measurement> Criterion<M> {
    /// Changes the measurement for the benchmarks run with this runner. See the
    /// [`Measurement`] trait for more details
    pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
        // Can't use struct update syntax here because they're technically different types.
        Criterion {
            config: self.config,
            report: self.report,
            measurement: m,
            location: self.location,
        }
    }

    /// Configure file and module paths for use with codspeed.
    #[must_use]
    pub fn with_location(self, file: &str, module_path: &str) -> Criterion<M> {
        Criterion {
            location: Some(Location {
                file: file.into(),
                module_path: module_path.into(),
            }),
            ..self
        }
    }

    /// Changes the default size of the sample for benchmarks run with this runner.
    ///
    /// A bigger sample should yield more accurate results if paired with a sufficiently large
    /// measurement time.
    ///
    /// Sample size must be at least 10.
    ///
    /// # Panics
    ///
    /// Panics if n < 10
    #[must_use]
    pub fn sample_size(mut self, n: usize) -> Criterion<M> {
        assert!(n >= 10);

        self.config.sample_size = n;
        self
    }

    /// Changes the default warm up time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero
    #[must_use]
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    /// Changes the default measurement time for benchmarks run with this runner.
    ///
    /// With a longer time, the measurement will become more resilient to transitory peak loads
    /// caused by external programs
    ///
    /// **Note**: If the measurement time is too "low", Criterion will automatically increase it
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero
    #[must_use]
    pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    /// Changes the default number of resamples for benchmarks run with this runner.
    ///
    /// Number of resamples to use for the
    /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
    ///
    /// A larger number of resamples reduces the random sampling errors, which are inherent to the
    /// bootstrap method, but also increases the analysis time
    ///
    /// # Panics
    ///
    /// Panics if the number of resamples is set to zero
    #[must_use]
    pub fn nresamples(mut self, n: usize) -> Criterion<M> {
        assert!(n > 0);
        if n <= 1000 {
            console_error!("\nWarning: It is not recommended to reduce nresamples below 1000.");
        }

        self.config.nresamples = n;
        self
    }

    /// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
    /// is used to filter out small changes in performance, even if they are statistically
    /// significant. Sometimes benchmarking the same code twice will result in small but
    /// statistically significant differences solely because of noise. This provides a way to filter
    /// out some of these false positives at the cost of making it harder to detect small changes
    /// to the true performance of the benchmark.
    ///
    /// The default is 0.01, meaning that changes smaller than 1% will be ignored.
    ///
    /// # Panics
    ///
    /// Panics if the threshold is set to a negative value
    #[must_use]
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    /// Changes the default confidence level for benchmarks run with this runner. The confidence
    /// level is the desired probability that the true runtime lies within the estimated
    /// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
    /// 0.95, meaning that the confidence interval should capture the true value 95% of the time.
    ///
    /// # Panics
    ///
    /// Panics if the confidence level is set to a value outside the `(0, 1)` range
    #[must_use]
    pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
        assert!(cl > 0.0 && cl < 1.0);
        if cl < 0.5 {
            console_error!(
                "\nWarning: It is not recommended to reduce confidence level below 0.5."
            );
        }

        self.config.confidence_level = cl;
        self
    }

    /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
    /// for benchmarks run with this runner. This is used to perform a
    /// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
    /// the measurements from this run are different from the measured performance of the last run.
    /// The significance level is the desired probability that two measurements of identical code
    /// will be considered 'different' due to noise in the measurements. The default value is 0.05,
    /// meaning that approximately 5% of identical benchmarks will register as different due to
    /// noise.
    ///
    /// This presents a trade-off. By setting the significance level closer to 0.0, you can increase
    /// the statistical robustness against noise, but it also weakens Criterion.rs' ability to
    /// detect small but real changes in the performance. By setting the significance level
    /// closer to 1.0, Criterion.rs will be more able to detect small true changes, but will also
    /// report more spurious differences.
    ///
    /// See also the noise threshold setting.
    ///
    /// # Panics
    ///
    /// Panics if the significance level is set to a value outside the `(0, 1)` range
    #[must_use]
    pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }
}

impl<M> Criterion<M>
where
    M: Measurement + 'static,
{
    /// Benchmarks a function.
    ///
    /// # Example
    ///
    /// ```rust
    /// use wasm_bindgen_test::{Criterion, wasm_bindgen_bench};
    ///
    /// #[wasm_bindgen_bench]
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc)
    ///     c.bench_function(
    ///         "bench desc",
    ///         |b| b.iter(|| {
    ///             // Code to benchmark goes here
    ///         }),
    ///     );
    /// }
    /// ```
    pub fn bench_function<F>(&mut self, desc: &str, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>),
    {
        // A waker that does nothing: the synchronous benchmark future never
        // suspends, so no wake-up is ever required.
        const NOOP: RawWaker = {
            const VTABLE: RawWakerVTable = RawWakerVTable::new(
                // Cloning just returns a new no-op raw waker
                |_| NOOP,
                // `wake` does nothing
                |_| {},
                // `wake_by_ref` does nothing
                |_| {},
                // Dropping does nothing as we don't allocate anything
                |_| {},
            );
            RawWaker::new(ptr::null(), &VTABLE)
        };

        // A `bench_function` future completes synchronously, so a single poll suffices.
        fn block_on(f: impl Future<Output = ()>) {
            let waker = unsafe { Waker::from_raw(NOOP) };
            let mut ctx = Context::from_waker(&waker);

            match core::pin::pin!(f).poll(&mut ctx) {
                Poll::Ready(_) => (),
                // sync functions can never be pending
                Poll::Pending => unreachable!(),
            }
        }

        let id = report::BenchmarkId::new(desc.into());
        block_on(analysis::common(
            &id,
            &mut routine::Function::new(f),
            &self.config,
            self,
        ));
        self
    }

    /// Benchmarks a future.
/// /// # Example /// /// ```rust /// use wasm_bindgen_test::{Criterion, wasm_bindgen_bench}; /// /// #[wasm_bindgen_bench] /// async fn bench(c: &mut Criterion) { /// // Setup (construct data, allocate memory, etc) /// c.bench_async_function( /// "bench desc", /// |b| { /// Box::pin( /// b.iter_future(|| async { /// // Code to benchmark goes here /// }) /// ) /// } /// ).await; /// } /// ``` pub async fn bench_async_function(&mut self, desc: &str, f: F) -> &mut Criterion where for<'b> F: FnMut(&'b mut Bencher<'_, M>) -> Pin + 'b>>, { let id = report::BenchmarkId::new(desc.into()); analysis::common(&id, &mut routine::AsyncFunction::new(f), &self.config, self).await; self } } /// Enum representing different ways of measuring the throughput of benchmarked code. /// If the throughput setting is configured for a benchmark then the estimated throughput will /// be reported as well as the time per iteration. // TODO: Remove serialize/deserialize from the public API. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum Throughput { /// Measure throughput in terms of bytes/second. The value should be the number of bytes /// processed by one iteration of the benchmarked code. Typically, this would be the length of /// an input string or `&[u8]`. Bytes(u64), /// Equivalent to Bytes, but the value will be reported in terms of /// kilobytes (1000 bytes) per second instead of kibibytes (1024 bytes) per /// second, megabytes instead of mibibytes, and gigabytes instead of gibibytes. BytesDecimal(u64), /// Measure throughput in terms of elements/second. The value should be the number of elements /// processed by one iteration of the benchmarked code. Typically, this would be the size of a /// collection, but could also be the number of lines of input text or the number of values to /// parse. Elements(u64), /// Measure throughput in terms of bits/second. The value should be the number of bits /// processed by one iteration of the benchmarked code. 
Typically, this would be the number of /// bits transferred by a networking function. Bits(u64), } /// This enum allows the user to control how Criterion.rs chooses the iteration count when sampling. /// The default is `Auto`, which will choose a method automatically based on the iteration time during /// the warm-up phase. #[derive(Debug, Default, Clone, Copy)] pub enum SamplingMode { /// Criterion.rs should choose a sampling method automatically. This is the default, and is /// recommended for most users and most benchmarks. #[default] Auto, /// Scale the iteration count in each sample linearly. This is suitable for most benchmarks, /// but it tends to require many iterations which can make it very slow for very long benchmarks. Linear, /// Keep the iteration count the same for all samples. This is not recommended, as it affects /// the statistics that Criterion.rs can compute. However, it requires fewer iterations than /// the `Linear` method and therefore is more suitable for very long-running benchmarks where /// benchmark execution time is more of a problem and statistical precision is less important. Flat, } impl SamplingMode { pub(crate) fn choose_sampling_mode( &self, warmup_mean_execution_time: f64, sample_count: u64, target_time: f64, ) -> ActualSamplingMode { match self { SamplingMode::Linear => ActualSamplingMode::Linear, SamplingMode::Flat => ActualSamplingMode::Flat, SamplingMode::Auto => { // Estimate execution time with linear sampling let total_runs = sample_count * (sample_count + 1) / 2; let d = ceil(target_time / warmup_mean_execution_time / total_runs as f64) as u64; let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time; if expected_ns > (2.0 * target_time) { ActualSamplingMode::Flat } else { ActualSamplingMode::Linear } } } } } /// Enum to represent the sampling mode without Auto. 
#[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub(crate) enum ActualSamplingMode { Linear, Flat, } impl ActualSamplingMode { pub(crate) fn iteration_counts( &self, warmup_mean_execution_time: f64, sample_count: u64, target_time: &Duration, ) -> Vec { match self { ActualSamplingMode::Linear => { let n = sample_count; let met = warmup_mean_execution_time; let m_ns = target_time.as_nanos(); // Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns let total_runs = n * (n + 1) / 2; let d = (ceil(m_ns as f64 / met / total_runs as f64) as u64).max(1); let expected_ns = total_runs as f64 * d as f64 * met; if d == 1 { let recommended_sample_size = ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met); let actual_time = Duration::from_nanos(expected_ns as u64); console_error!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}", n, target_time, actual_time); if recommended_sample_size != n { console_error!( ", enable flat sampling, or reduce sample count to {}.", recommended_sample_size ); } else { console_error!(" or enable flat sampling."); } } (1..(n + 1)).map(|a| a * d).collect::>() } ActualSamplingMode::Flat => { let n = sample_count; let met = warmup_mean_execution_time; let m_ns = target_time.as_nanos() as f64; let time_per_sample = m_ns / (n as f64); // This is pretty simplistic; we could do something smarter to fit into the allotted time. let iterations_per_sample = (ceil(time_per_sample / met) as u64).max(1); let expected_ns = met * (iterations_per_sample * n) as f64; if iterations_per_sample == 1 { let recommended_sample_size = ActualSamplingMode::recommend_flat_sample_size(m_ns, met); let actual_time = Duration::from_nanos(expected_ns as u64); console_error!("\nWarning: Unable to complete {} samples in {:.1?}. 
You may wish to increase target time to {:.1?}", n, target_time, actual_time); if recommended_sample_size != n { console_error!(", or reduce sample count to {}.", recommended_sample_size); } else { console_error!("."); } } vec![iterations_per_sample; n as usize] } } } fn is_linear(&self) -> bool { matches!(self, ActualSamplingMode::Linear) } fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 { // Some math shows that n(n+1)/2 * d * met = target_time. d = 1, so it can be ignored. // This leaves n(n+1) = (2*target_time)/met, or n^2 + n - (2*target_time)/met = 0 // Which can be solved with the quadratic formula. Since A and B are constant 1, // this simplifies to sample_size = (-1 +- sqrt(1 - 4C))/2, where C = (2*target_time)/met. // We don't care about the negative solution. Experimentation shows that this actually tends to // result in twice the desired execution time (probably because of the ceil used to calculate // d) so instead I use c = target_time/met. let c = target_time / met; let sample_size = (-1.0 + sqrt(4.0 * c)) / 2.0; let sample_size = sample_size as u64; // Round down to the nearest 10 to give a margin and avoid excessive precision let sample_size = (sample_size / 10) * 10; // Clamp it to be at least 10, since criterion.rs doesn't allow sample sizes smaller than 10. if sample_size < 10 { 10 } else { sample_size } } fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 { let sample_size = (target_time / met) as u64; // Round down to the nearest 10 to give a margin and avoid excessive precision let sample_size = (sample_size / 10) * 10; // Clamp it to be at least 10, since criterion.rs doesn't allow sample sizes smaller than 10. 
if sample_size < 10 { 10 } else { sample_size } } } #[derive(Debug, Serialize, Deserialize, Clone)] pub(crate) struct SavedSample { pub(crate) sampling_mode: ActualSamplingMode, pub(crate) iters: Vec, pub(crate) times: Vec, } wasm-bindgen-test-0.3.58/src/rt/criterion/report.rs000064400000000000000000000164431046102023000203540ustar 00000000000000use super::estimate::ChangeEstimates; use super::estimate::Estimate; use super::estimate::Estimates; use super::format; use super::measurement::ValueFormatter; use super::stats::univariate::outliers::tukey::LabeledSample; use alloc::format; use alloc::string::String; use alloc::string::ToString; use core::fmt; use nu_ansi_term::{Color, Style}; use serde::{Deserialize, Serialize}; pub struct ComparisonData { pub p_value: f64, pub relative_estimates: ChangeEstimates, pub significance_threshold: f64, pub noise_threshold: f64, } pub struct MeasurementData<'a> { pub avg_times: LabeledSample<'a, f64>, pub absolute_estimates: Estimates, pub comparison: Option, } #[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct BenchmarkId { desc: String, } impl BenchmarkId { pub fn new(desc: String) -> BenchmarkId { BenchmarkId { desc } } pub fn desc(&self) -> &str { &self.desc } } impl fmt::Display for BenchmarkId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.desc()) } } impl fmt::Debug for BenchmarkId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "BenchmarkId {{ desc: \"{}\" }}", self.desc,) } } pub trait Report { fn benchmark_start(&self, _id: &BenchmarkId) {} fn warmup(&self, _id: &BenchmarkId, _warmup_ns: f64) {} fn analysis(&self, _id: &BenchmarkId) {} fn measurement_start( &self, _id: &BenchmarkId, _sample_count: u64, _estimate_ns: f64, _iter_count: u64, ) { } fn measurement_complete( &self, _id: &BenchmarkId, _measurements: &MeasurementData, _formatter: &dyn ValueFormatter, ) { } } pub(crate) struct WasmReport; impl WasmReport { fn print(&self, s: String) { 
        console_log!("{}", s);
    }

    // Wrap `s` in the given ANSI color for console output.
    fn with_color(&self, color: Color, s: &str) -> String {
        color.paint(s).to_string()
    }

    fn green(&self, s: &str) -> String {
        self.with_color(Color::Green, s)
    }

    fn yellow(&self, s: &str) -> String {
        self.with_color(Color::Yellow, s)
    }

    fn red(&self, s: &str) -> String {
        self.with_color(Color::Red, s)
    }

    fn bold(&self, s: String) -> String {
        Style::new().bold().paint(s).to_string()
    }

    fn faint(&self, s: String) -> String {
        Style::new().dimmed().paint(s).to_string()
    }

    // Log a summary of Tukey-classified outliers (low/high, mild/severe) in the sample.
    pub fn outliers(&self, sample: &LabeledSample<'_, f64>) {
        let (los, lom, _, him, his) = sample.count();
        let noutliers = los + lom + him + his;
        let sample_size = sample.len();

        if noutliers == 0 {
            return;
        }

        let percent = |n: usize| 100. * n as f64 / sample_size as f64;

        console_log!(
            "{}",
            self.yellow(&format!(
                "Found {} outliers among {} measurements ({:.2}%)",
                noutliers,
                sample_size,
                percent(noutliers)
            ))
        );

        let print = |n, label| {
            if n != 0 {
                console_log!(" {} ({:.2}%) {}", n, percent(n), label);
            }
        };
        print(los, "low severe");
        print(lom, "low mild");
        print(him, "high mild");
        print(his, "high severe");
    }
}

impl Report for WasmReport {
    fn warmup(&self, _id: &BenchmarkId, warmup_ns: f64) {
        self.print(format!("Warming up for {}", format::time(warmup_ns)));
    }

    fn measurement_start(
        &self,
        _id: &BenchmarkId,
        sample_count: u64,
        estimate_ns: f64,
        iter_count: u64,
    ) {
        let iter_string = format::iter_count(iter_count);
        self.print(format!(
            "Collecting {} samples in estimated {} ({})",
            sample_count,
            format::time(estimate_ns),
            iter_string
        ));
    }

    // Print the final per-benchmark summary: the timing confidence interval, the
    // change relative to a saved baseline (when available), and the outlier counts.
    fn measurement_complete(
        &self,
        id: &BenchmarkId,
        meas: &MeasurementData,
        formatter: &dyn ValueFormatter,
    ) {
        let typical_estimate = &meas.absolute_estimates.typical();

        // Long ids get their own line; the timing line then starts with an empty
        // 24-column label so the columns still line up.
        let mut id = id.desc().to_string();

        if id.len() > 23 {
            console_log!("{}", self.green(&id));
            id.clear();
        }
        let id_len = id.len();

        console_log!(
            "{}{}time: [{} {} {}]",
            self.green(&id),
            " ".repeat(24 - id_len),
            self.faint(formatter.format_value(typical_estimate.confidence_interval.lower_bound)),
            self.bold(formatter.format_value(typical_estimate.point_estimate)),
            self.faint(formatter.format_value(typical_estimate.confidence_interval.upper_bound))
        );

        if let Some(ref comp) = meas.comparison {
            let different_mean = comp.p_value < comp.significance_threshold;
            let mean_est = &comp.relative_estimates.mean;
            let point_estimate = mean_est.point_estimate;
            let mut point_estimate_str = format::change(point_estimate, true);
            // The change in throughput is related to the change in timing. Reducing the timing by
            // 50% increases the throughput by 100%.
            let explanation_str: String;

            if !different_mean {
                explanation_str = "No change in performance detected.".to_string();
            } else {
                let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
                match comparison {
                    ComparisonResult::Improved => {
                        point_estimate_str = self.green(&self.bold(point_estimate_str));
                        explanation_str = format!("Performance has {}.", self.green("improved"));
                    }
                    ComparisonResult::Regressed => {
                        point_estimate_str = self.red(&self.bold(point_estimate_str));
                        explanation_str = format!("Performance has {}.", self.red("regressed"));
                    }
                    ComparisonResult::NonSignificant => {
                        explanation_str = "Change within noise threshold.".to_string();
                    }
                }
            }

            console_log!(
                "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
                " ".repeat(24),
                self.faint(format::change(
                    mean_est.confidence_interval.lower_bound,
                    true
                )),
                point_estimate_str,
                self.faint(format::change(
                    mean_est.confidence_interval.upper_bound,
                    true
                )),
                comp.p_value,
                if different_mean { "<" } else { ">" },
                comp.significance_threshold
            );
            console_log!("{}{}", " ".repeat(24), explanation_str);
        }

        self.outliers(&meas.avg_times);
    }
}

enum ComparisonResult {
    Improved,
    Regressed,
    NonSignificant,
}

// Classify a relative change estimate: only an interval entirely outside the noise
// threshold counts as a real improvement/regression.
fn compare_to_threshold(estimate: &Estimate, noise: f64) -> ComparisonResult {
    let ci = &estimate.confidence_interval;
    let lb = ci.lower_bound;
    let ub = ci.upper_bound;

    if lb < -noise && ub < -noise {
        ComparisonResult::Improved
    } else if lb > noise && ub > noise {
ComparisonResult::Regressed } else { ComparisonResult::NonSignificant } } wasm-bindgen-test-0.3.58/src/rt/criterion/routine.rs000064400000000000000000000125761046102023000205310ustar 00000000000000use super::benchmark::BenchmarkConfig; use super::measurement::Measurement; use super::report::{BenchmarkId, Report}; use super::{ActualSamplingMode, Bencher, Criterion}; use alloc::boxed::Box; use alloc::vec::Vec; use async_trait::async_trait; use core::future::Future; use core::marker::PhantomData; use core::pin::Pin; use core::time::Duration; /// PRIVATE #[async_trait(?Send)] pub(crate) trait Routine { /// PRIVATE async fn bench(&mut self, m: &M, iters: &[u64]) -> Vec; /// PRIVATE async fn warm_up(&mut self, m: &M, how_long: Duration) -> (u64, u64); async fn sample( &mut self, measurement: &M, id: &BenchmarkId, config: &BenchmarkConfig, criterion: &Criterion, ) -> (ActualSamplingMode, Box<[f64]>, Box<[f64]>) { let wu = config.warm_up_time; let m_ns = config.measurement_time.as_nanos(); criterion.report.warmup(id, wu.as_nanos() as f64); let (wu_elapsed, wu_iters) = self.warm_up(measurement, wu).await; // Initial guess for the mean execution time let met = wu_elapsed as f64 / wu_iters as f64; let n = config.sample_size as u64; let actual_sampling_mode = config .sampling_mode .choose_sampling_mode(met, n, m_ns as f64); let m_iters = actual_sampling_mode.iteration_counts(met, n, &config.measurement_time); let expected_ns = m_iters .iter() .copied() .map(|count| count as f64 * met) .sum(); // Use saturating_add to handle overflow. 
let mut total_iters = 0u64; for count in m_iters.iter().copied() { total_iters = total_iters.saturating_add(count); } criterion .report .measurement_start(id, n, expected_ns, total_iters); let m_elapsed = self.bench(measurement, &m_iters).await; let m_iters_f: Vec = m_iters.iter().map(|&x| x as f64).collect(); ( actual_sampling_mode, m_iters_f.into_boxed_slice(), m_elapsed.into_boxed_slice(), ) } } pub struct AsyncFunction { f: F, _phantom: PhantomData, } impl AsyncFunction { pub fn new(f: F) -> Self { Self { f, _phantom: PhantomData, } } } #[async_trait(?Send)] impl Routine for AsyncFunction where M: Measurement, for<'b> F: FnMut(&'b mut Bencher<'_, M>) -> Pin + 'b>>, { async fn bench(&mut self, m: &M, iters: &[u64]) -> Vec { let f = &mut self.f; let mut b = Bencher { iterated: false, iters: 0, value: m.zero(), measurement: m, elapsed_time: Duration::from_millis(0), }; let mut result = Vec::with_capacity(iters.len()); for iters in iters { b.iters = *iters; (*f)(&mut b).await; result.push(m.to_f64(&b.value)); } result } async fn warm_up(&mut self, m: &M, how_long: Duration) -> (u64, u64) { let f = &mut self.f; let mut b = Bencher { iterated: false, iters: 1, value: m.zero(), measurement: m, elapsed_time: Duration::from_millis(0), }; let mut total_iters = 0; let mut elapsed_time = Duration::from_millis(0); loop { (*f)(&mut b).await; total_iters += b.iters; elapsed_time += b.elapsed_time; if elapsed_time > how_long { return (elapsed_time.as_nanos() as u64, total_iters); } b.iters = b.iters.wrapping_mul(2); } } } pub struct Function where F: FnMut(&mut Bencher<'_, M>), { f: F, _phamtom: PhantomData, } impl Function where F: FnMut(&mut Bencher<'_, M>), { pub fn new(f: F) -> Function { Function { f, _phamtom: PhantomData, } } } #[async_trait(?Send)] impl Routine for Function where F: FnMut(&mut Bencher<'_, M>), { async fn bench(&mut self, m: &M, iters: &[u64]) -> Vec { let f = &mut self.f; let mut b = Bencher { iterated: false, iters: 0, value: m.zero(), measurement: m, 
elapsed_time: Duration::from_millis(0), }; iters .iter() .map(|iters| { b.iters = *iters; (*f)(&mut b); m.to_f64(&b.value) }) .collect() } async fn warm_up(&mut self, m: &M, how_long: Duration) -> (u64, u64) { let f = &mut self.f; let mut b = Bencher { iterated: false, iters: 1, value: m.zero(), measurement: m, elapsed_time: Duration::from_millis(0), }; let mut total_iters = 0; let mut elapsed_time = Duration::from_millis(0); loop { (*f)(&mut b); total_iters += b.iters; elapsed_time += b.elapsed_time; if elapsed_time > how_long { return (elapsed_time.as_nanos() as u64, total_iters); } b.iters = b.iters.wrapping_mul(2); } } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/bivariate/mod.rs000064400000000000000000000037331046102023000227220ustar 00000000000000//! Bivariate analysis pub mod regression; mod resamples; use super::bivariate::resamples::Resamples; use super::float::Float; use super::tuple::{Tuple, TupledDistributionsBuilder}; use super::univariate::Sample; /// Bivariate `(X, Y)` data /// /// Invariants: /// /// - No `NaN`s in the data /// - At least two data points in the set pub struct Data<'a, X, Y>(&'a [X], &'a [Y]); impl<'a, X, Y> Copy for Data<'a, X, Y> {} #[allow(clippy::expl_impl_clone_on_copy)] impl<'a, X, Y> Clone for Data<'a, X, Y> { fn clone(&self) -> Data<'a, X, Y> { *self } } impl<'a, X, Y> Data<'a, X, Y> where X: Float, Y: Float, { /// Creates a new data set from two existing slices pub fn new(xs: &'a [X], ys: &'a [Y]) -> Data<'a, X, Y> { assert!( xs.len() == ys.len() && xs.len() > 1 && xs.iter().all(|x| !x.is_nan()) && ys.iter().all(|y| !y.is_nan()) ); Data(xs, ys) } // TODO Remove the `T` parameter in favor of `S::Output` /// Returns the bootstrap distributions of the parameters estimated by the `statistic` /// /// - Multi-threaded /// - Time: `O(nresamples)` /// - Memory: `O(nresamples)` pub fn bootstrap(&self, nresamples: usize, statistic: S) -> T::Distributions where S: Fn(Data) -> T + Sync, T: Tuple + Send, T::Distributions: Send, 
T::Builder: Send, { let mut resamples = Resamples::new(*self); (0..nresamples) .map(|_| statistic(resamples.next())) .fold(T::Builder::new(0), |mut sub_distributions, sample| { sub_distributions.push(sample); sub_distributions }) .complete() } /// Returns a view into the `X` data pub fn x(&self) -> &'a Sample { Sample::new(self.0) } /// Returns a view into the `Y` data pub fn y(&self) -> &'a Sample { Sample::new(self.1) } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/bivariate/regression.rs000064400000000000000000000011651046102023000243200ustar 00000000000000//! Regression analysis use super::super::bivariate::Data; use super::super::dot; use super::super::float::Float; /// A straight line that passes through the origin `y = m * x` #[derive(Clone, Copy)] pub struct Slope(pub A) where A: Float; impl Slope where A: Float, { /// Fits the data to a straight line that passes through the origin using ordinary least /// squares /// /// - Time: `O(length)` pub fn fit(data: &Data<'_, A, A>) -> Slope { let xs = data.0; let ys = data.1; let xy = dot(xs, ys); let x2 = dot(xs, xs); Slope(xy / x2) } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/bivariate/resamples.rs000064400000000000000000000030331046102023000241270ustar 00000000000000use super::super::bivariate::Data; use super::super::float::Float; use super::super::rand_util::{new_rng, Rng}; use alloc::vec::Vec; pub struct Resamples<'a, X, Y> where X: 'a + Float, Y: 'a + Float, { rng: Rng, data: (&'a [X], &'a [Y]), stage: Option<(Vec, Vec)>, } #[allow(clippy::should_implement_trait)] impl<'a, X, Y> Resamples<'a, X, Y> where X: 'a + Float, Y: 'a + Float, { pub fn new(data: Data<'a, X, Y>) -> Resamples<'a, X, Y> { Resamples { rng: new_rng(), data: (data.x(), data.y()), stage: None, } } pub fn next(&mut self) -> Data<'_, X, Y> { let n = self.data.0.len(); match self.stage { None => { let mut stage = (Vec::with_capacity(n), Vec::with_capacity(n)); for _ in 0..n { let i = self.rng.rand_range(0u64..(self.data.0.len() as 
u64)) as usize; stage.0.push(self.data.0[i]); stage.1.push(self.data.1[i]); } self.stage = Some(stage); } Some(ref mut stage) => { for i in 0..n { let j = self.rng.rand_range(0u64..(self.data.0.len() as u64)) as usize; stage.0[i] = self.data.0[j]; stage.1[i] = self.data.1[j]; } } } if let Some((ref x, ref y)) = self.stage { Data(x, y) } else { unreachable!(); } } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/float.rs000064400000000000000000000005631046102023000213000ustar 00000000000000//! Float trait use cast::From; use num_traits::float; /// This is an extension of `num_traits::float::Float` that adds safe /// casting and Sync + Send. Once `num_traits` has these features this /// can be removed. pub trait Float: float::Float + From + From + Sync + Send { } impl Float for f32 {} impl Float for f64 {} wasm-bindgen-test-0.3.58/src/rt/criterion/stats/mod.rs000064400000000000000000000050141046102023000207460ustar 00000000000000//! [Criterion]'s statistics library. //! //! [Criterion]: https://github.com/bheisler/criterion.rs //! //! **WARNING** This library is criterion's implementation detail and there no plans to stabilize //! it. In other words, the API may break at any time without notice. pub mod bivariate; pub mod tuple; pub mod univariate; mod float; mod rand_util; use core::mem; use core::ops::Deref; use alloc::boxed::Box; use float::Float; use univariate::Sample; /// The bootstrap distribution of some parameter #[derive(Clone)] pub struct Distribution(Box<[A]>); impl Distribution where A: Float, { /// Create a distribution from the given values pub fn from(values: Box<[A]>) -> Distribution { Distribution(values) } /// Computes the confidence interval of the population parameter using percentiles /// /// # Panics /// /// Panics if the `confidence_level` is not in the `(0, 1)` range. 
pub fn confidence_interval(&self, confidence_level: A) -> (A, A) where usize: cast::From>, { let _0 = A::cast(0); let _1 = A::cast(1); let _50 = A::cast(50); assert!(confidence_level > _0 && confidence_level < _1); let percentiles = self.percentiles(); // FIXME(privacy) this should use the `at_unchecked()` method ( percentiles.at(_50 * (_1 - confidence_level)), percentiles.at(_50 * (_1 + confidence_level)), ) } /// Computes the "likelihood" of seeing the value `t` or "more extreme" values in the /// distribution. pub fn p_value(&self, t: A, tails: &Tails) -> A { use core::cmp; let n = self.0.len(); let hits = self.0.iter().filter(|&&x| x < t).count(); let tails = A::cast(match *tails { Tails::One => 1, Tails::Two => 2, }); A::cast(cmp::min(hits, n - hits)) / A::cast(n) * tails } } impl Deref for Distribution { type Target = Sample; fn deref(&self) -> &Sample { let slice: &[_] = &self.0; unsafe { mem::transmute(slice) } } } /// Number of tails for significance testing pub enum Tails { /// One tailed test One, /// Two tailed test Two, } fn dot(xs: &[A], ys: &[A]) -> A where A: Float, { xs.iter() .zip(ys) .fold(A::cast(0), |acc, (&x, &y)| acc + x * y) } fn sum(xs: &[A]) -> A where A: Float, { use core::ops::Add; xs.iter().cloned().fold(A::cast(0), Add::add) } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/rand_util.rs000064400000000000000000000011511046102023000221460ustar 00000000000000use crate::__rt::web_time::{SystemTime, UNIX_EPOCH}; use core::cell::RefCell; use oorandom::Rand64; use wasm_bindgen::__rt::LazyCell; pub type Rng = Rand64; #[cfg_attr(target_feature = "atomics", thread_local)] static SEED_RAND: LazyCell> = LazyCell::new(|| { RefCell::new(Rand64::new( SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards") .as_millis(), )) }); pub fn new_rng() -> Rng { let mut r = SEED_RAND.borrow_mut(); let seed = ((r.rand_u64() as u128) << 64) | (r.rand_u64() as u128); Rand64::new(seed) } 
wasm-bindgen-test-0.3.58/src/rt/criterion/stats/tuple.rs000064400000000000000000000135071046102023000213260ustar 00000000000000//! Helper traits for tupling/untupling use super::Distribution; use alloc::vec::Vec; /// Any tuple: `(A, B, ..)` pub trait Tuple: Sized { /// A tuple of distributions associated with this tuple type Distributions: TupledDistributions; /// A tuple of vectors associated with this tuple type Builder: TupledDistributionsBuilder; } /// A tuple of distributions: `(Distribution, Distribution, ..)` pub trait TupledDistributions: Sized { /// A tuple that can be pushed/inserted into the tupled distributions type Item: Tuple; } /// A tuple of vecs used to build distributions. pub trait TupledDistributionsBuilder: Sized { /// A tuple that can be pushed/inserted into the tupled distributions type Item: Tuple; /// Creates a new tuple of vecs fn new(size: usize) -> Self; /// Push one element into each of the vecs fn push(&mut self, tuple: Self::Item); /// Append one tuple of vecs to this one, leaving the vecs in the other tuple empty fn extend(&mut self, other: &mut Self); /// Convert the tuple of vectors into a tuple of distributions fn complete(self) -> ::Distributions; } impl Tuple for (A,) where A: Copy, { type Distributions = (Distribution,); type Builder = (Vec,); } impl TupledDistributions for (Distribution,) where A: Copy, { type Item = (A,); } impl TupledDistributionsBuilder for (Vec,) where A: Copy, { type Item = (A,); fn new(size: usize) -> (Vec,) { (Vec::with_capacity(size),) } fn push(&mut self, tuple: (A,)) { (self.0).push(tuple.0); } fn extend(&mut self, other: &mut (Vec,)) { (self.0).append(&mut other.0); } fn complete(self) -> (Distribution,) { (Distribution(self.0.into_boxed_slice()),) } } impl Tuple for (A, B) where A: Copy, B: Copy, { type Distributions = (Distribution, Distribution); type Builder = (Vec, Vec); } impl TupledDistributions for (Distribution, Distribution) where A: Copy, B: Copy, { type Item = (A, B); } impl 
TupledDistributionsBuilder for (Vec, Vec) where A: Copy, B: Copy, { type Item = (A, B); fn new(size: usize) -> (Vec, Vec) { (Vec::with_capacity(size), Vec::with_capacity(size)) } fn push(&mut self, tuple: (A, B)) { (self.0).push(tuple.0); (self.1).push(tuple.1); } fn extend(&mut self, other: &mut (Vec, Vec)) { (self.0).append(&mut other.0); (self.1).append(&mut other.1); } fn complete(self) -> (Distribution, Distribution) { ( Distribution(self.0.into_boxed_slice()), Distribution(self.1.into_boxed_slice()), ) } } impl Tuple for (A, B, C) where A: Copy, B: Copy, C: Copy, { type Distributions = (Distribution, Distribution, Distribution); type Builder = (Vec, Vec, Vec); } impl TupledDistributions for (Distribution, Distribution, Distribution) where A: Copy, B: Copy, C: Copy, { type Item = (A, B, C); } impl TupledDistributionsBuilder for (Vec, Vec, Vec) where A: Copy, B: Copy, C: Copy, { type Item = (A, B, C); fn new(size: usize) -> (Vec, Vec, Vec) { ( Vec::with_capacity(size), Vec::with_capacity(size), Vec::with_capacity(size), ) } fn push(&mut self, tuple: (A, B, C)) { (self.0).push(tuple.0); (self.1).push(tuple.1); (self.2).push(tuple.2); } fn extend(&mut self, other: &mut (Vec, Vec, Vec)) { (self.0).append(&mut other.0); (self.1).append(&mut other.1); (self.2).append(&mut other.2); } fn complete(self) -> (Distribution, Distribution, Distribution) { ( Distribution(self.0.into_boxed_slice()), Distribution(self.1.into_boxed_slice()), Distribution(self.2.into_boxed_slice()), ) } } impl Tuple for (A, B, C, D) where A: Copy, B: Copy, C: Copy, D: Copy, { type Distributions = ( Distribution, Distribution, Distribution, Distribution, ); type Builder = (Vec, Vec, Vec, Vec); } impl TupledDistributions for ( Distribution, Distribution, Distribution, Distribution, ) where A: Copy, B: Copy, C: Copy, D: Copy, { type Item = (A, B, C, D); } impl TupledDistributionsBuilder for (Vec, Vec, Vec, Vec) where A: Copy, B: Copy, C: Copy, D: Copy, { type Item = (A, B, C, D); fn new(size: 
usize) -> (Vec, Vec, Vec, Vec) { ( Vec::with_capacity(size), Vec::with_capacity(size), Vec::with_capacity(size), Vec::with_capacity(size), ) } fn push(&mut self, tuple: (A, B, C, D)) { (self.0).push(tuple.0); (self.1).push(tuple.1); (self.2).push(tuple.2); (self.3).push(tuple.3); } fn extend(&mut self, other: &mut (Vec, Vec, Vec, Vec)) { (self.0).append(&mut other.0); (self.1).append(&mut other.1); (self.2).append(&mut other.2); (self.3).append(&mut other.3); } fn complete( self, ) -> ( Distribution, Distribution, Distribution, Distribution, ) { ( Distribution(self.0.into_boxed_slice()), Distribution(self.1.into_boxed_slice()), Distribution(self.2.into_boxed_slice()), Distribution(self.3.into_boxed_slice()), ) } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/mixed.rs000064400000000000000000000021621046102023000234450ustar 00000000000000//! Mixed bootstrap use super::super::float::Float; use super::super::tuple::{Tuple, TupledDistributionsBuilder}; use super::Resamples; use super::Sample; use alloc::vec::Vec; /// Performs a *mixed* two-sample bootstrap pub fn bootstrap( a: &Sample, b: &Sample, nresamples: usize, statistic: S, ) -> T::Distributions where A: Float, S: Fn(&Sample, &Sample) -> T + Sync, T: Tuple + Send, T::Distributions: Send, T::Builder: Send, { let n_a = a.len(); let n_b = b.len(); let mut c = Vec::with_capacity(n_a + n_b); c.extend_from_slice(a); c.extend_from_slice(b); let c = Sample::new(&c); let mut resamples = Resamples::new(c); (0..nresamples) .map(|_| { let resample = resamples.next(); let a: &Sample = Sample::new(&resample[..n_a]); let b: &Sample = Sample::new(&resample[n_a..]); statistic(a, b) }) .fold(T::Builder::new(0), |mut sub_distributions, sample| { sub_distributions.push(sample); sub_distributions }) .complete() } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/mod.rs000064400000000000000000000031211046102023000231120ustar 00000000000000//! 
Univariate analysis pub mod mixed; pub mod outliers; mod percentiles; mod resamples; mod sample; use core::cmp; use super::float::Float; use super::tuple::{Tuple, TupledDistributionsBuilder}; use libm::{ceil, sqrt}; use resamples::Resamples; pub use percentiles::Percentiles; pub use sample::Sample; /// Performs a two-sample bootstrap /// /// - Multithreaded /// - Time: `O(nresamples)` /// - Memory: `O(nresamples)` #[allow(clippy::cast_lossless)] pub fn bootstrap( a: &Sample, b: &Sample, nresamples: usize, statistic: S, ) -> T::Distributions where A: Float, B: Float, S: Fn(&Sample, &Sample) -> T + Sync, T: Tuple + Send, T::Distributions: Send, T::Builder: Send, { let nresamples_sqrt = ceil(sqrt(nresamples as f64)) as usize; let per_chunk = (nresamples + nresamples_sqrt - 1) / nresamples_sqrt; let mut a_resamples = Resamples::new(a); let mut b_resamples = Resamples::new(b); (0..nresamples_sqrt) .map(|i| { let start = i * per_chunk; let end = cmp::min((i + 1) * per_chunk, nresamples); let a_resample = a_resamples.next(); let mut sub_distributions: T::Builder = TupledDistributionsBuilder::new(end - start); for _ in start..end { let b_resample = b_resamples.next(); sub_distributions.push(statistic(a_resample, b_resample)); } sub_distributions }) .fold(T::Builder::new(0), |mut a, mut b| { a.extend(&mut b); a }) .complete() } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/outliers/mod.rs000064400000000000000000000004301046102023000247600ustar 00000000000000//! Classification of outliers //! //! WARNING: There's no formal/mathematical definition of what an outlier actually is. Therefore, //! all outlier classifiers are *subjective*, however some classifiers that have become *de facto* //! standard are provided here. pub mod tukey; wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/outliers/tukey.rs000064400000000000000000000154611046102023000253540ustar 00000000000000//! Tukey's method //! //! The original method uses two "fences" to classify the data. 
All the observations "inside" the //! fences are considered "normal", and the rest are considered outliers. //! //! The fences are computed from the quartiles of the sample, according to the following formula: //! //! ``` ignore //! // q1, q3 are the first and third quartiles //! let iqr = q3 - q1; // The interquartile range //! let (f1, f2) = (q1 - 1.5 * iqr, q3 + 1.5 * iqr); // the "fences" //! //! let is_outlier = |x| if x > f1 && x < f2 { true } else { false }; //! ``` //! //! The classifier provided here adds two extra outer fences: //! //! ``` ignore //! let (f3, f4) = (q1 - 3 * iqr, q3 + 3 * iqr); // the outer "fences" //! ``` //! //! The extra fences add a sense of "severity" to the classification. Data points outside of the //! outer fences are considered "severe" outliers, whereas points outside the inner fences are just //! "mild" outliers, and, as the original method, everything inside the inner fences is considered //! "normal" data. //! //! Some ASCII art for the visually oriented people: //! //! ``` ignore //! LOW-ish NORMAL-ish HIGH-ish //! x | + | o o o o o o o | + | x //! f3 f1 f2 f4 //! //! Legend: //! o: "normal" data (not an outlier) //! +: "mild" outlier //! x: "severe" outlier //! ``` use core::ops::{Deref, Index}; use core::slice; use super::super::super::float::Float; use super::super::Sample; use self::Label::*; /// A classified/labeled sample. /// /// The labeled data can be accessed using the indexing operator. The order of the data points is /// retained. /// /// NOTE: Due to limitations in the indexing traits, only the label is returned. Once the /// `IndexGet` trait lands in stdlib, the indexing operation will return a `(data_point, label)` /// pair. 
#[derive(Clone, Copy)] pub struct LabeledSample<'a, A> where A: Float, { fences: (A, A, A, A), sample: &'a Sample, } impl<'a, A> LabeledSample<'a, A> where A: Float, { /// Returns the number of data points per label /// /// - Time: `O(length)` #[allow(clippy::similar_names)] pub fn count(&self) -> (usize, usize, usize, usize, usize) { let (mut los, mut lom, mut noa, mut him, mut his) = (0, 0, 0, 0, 0); for (_, label) in self { match label { LowSevere => { los += 1; } LowMild => { lom += 1; } NotAnOutlier => { noa += 1; } HighMild => { him += 1; } HighSevere => { his += 1; } } } (los, lom, noa, him, his) } /// Returns the fences used to classify the outliers pub fn fences(&self) -> (A, A, A, A) { self.fences } /// Returns an iterator over the labeled data pub fn iter(&self) -> Iter<'a, A> { Iter { fences: self.fences, iter: self.sample.iter(), } } } impl<'a, A> Deref for LabeledSample<'a, A> where A: Float, { type Target = Sample; fn deref(&self) -> &Sample { self.sample } } // FIXME Use the `IndexGet` trait impl<'a, A> Index for LabeledSample<'a, A> where A: Float, { type Output = Label; #[allow(clippy::similar_names)] fn index(&self, i: usize) -> &Label { static LOW_SEVERE: Label = LowSevere; static LOW_MILD: Label = LowMild; static HIGH_MILD: Label = HighMild; static HIGH_SEVERE: Label = HighSevere; static NOT_AN_OUTLIER: Label = NotAnOutlier; let x = self.sample[i]; let (lost, lomt, himt, hist) = self.fences; if x < lost { &LOW_SEVERE } else if x > hist { &HIGH_SEVERE } else if x < lomt { &LOW_MILD } else if x > himt { &HIGH_MILD } else { &NOT_AN_OUTLIER } } } impl<'a, A> IntoIterator for &LabeledSample<'a, A> where A: Float, { type Item = (A, Label); type IntoIter = Iter<'a, A>; fn into_iter(self) -> Iter<'a, A> { self.iter() } } /// Iterator over the labeled data pub struct Iter<'a, A> where A: Float, { fences: (A, A, A, A), iter: slice::Iter<'a, A>, } impl<'a, A> Iterator for Iter<'a, A> where A: Float, { type Item = (A, Label); 
#[allow(clippy::similar_names)] fn next(&mut self) -> Option<(A, Label)> { self.iter.next().map(|&x| { let (lost, lomt, himt, hist) = self.fences; let label = if x < lost { LowSevere } else if x > hist { HighSevere } else if x < lomt { LowMild } else if x > himt { HighMild } else { NotAnOutlier }; (x, label) }) } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } /// Labels used to classify outliers pub enum Label { /// A "mild" outlier in the "high" spectrum HighMild, /// A "severe" outlier in the "high" spectrum HighSevere, /// A "mild" outlier in the "low" spectrum LowMild, /// A "severe" outlier in the "low" spectrum LowSevere, /// A normal data point NotAnOutlier, } impl Label { /// Checks if the data point has an "unusually" high value pub fn is_high(&self) -> bool { matches!(*self, HighMild | HighSevere) } /// Checks if the data point is labeled as a "mild" outlier pub fn is_mild(&self) -> bool { matches!(*self, HighMild | LowMild) } /// Checks if the data point has an "unusually" low value pub fn is_low(&self) -> bool { matches!(*self, LowMild | LowSevere) } /// Checks if the data point is labeled as an outlier pub fn is_outlier(&self) -> bool { !matches!(*self, NotAnOutlier) } /// Checks if the data point is labeled as a "severe" outlier pub fn is_severe(&self) -> bool { matches!(*self, HighSevere | LowSevere) } } /// Classifies the sample, and returns a labeled sample. 
/// /// - Time: `O(N log N) where N = length` pub fn classify(sample: &Sample) -> LabeledSample<'_, A> where A: Float, usize: cast::From>, { let (q1, _, q3) = sample.percentiles().quartiles(); let iqr = q3 - q1; // Mild let k_m = A::cast(1.5_f32); // Severe let k_s = A::cast(3); LabeledSample { fences: ( q1 - k_s * iqr, q1 - k_m * iqr, q3 + k_m * iqr, q3 + k_s * iqr, ), sample, } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/percentiles.rs000064400000000000000000000034111046102023000246520ustar 00000000000000use super::super::float::Float; use alloc::boxed::Box; use cast::usize; /// A "view" into the percentiles of a sample pub struct Percentiles(Box<[A]>) where A: Float; // TODO(rust-lang/rfcs#735) move this `impl` into a private percentiles module impl Percentiles where A: Float, usize: cast::From>, { /// Returns the percentile at `p`% /// /// Safety: /// /// - Make sure that `p` is in the range `[0, 100]` unsafe fn at_unchecked(&self, p: A) -> A { let _100 = A::cast(100); debug_assert!(p >= A::cast(0) && p <= _100); debug_assert!(!self.0.is_empty()); let len = self.0.len() - 1; if p == _100 { self.0[len] } else { let rank = (p / _100) * A::cast(len); let integer = rank.floor(); let fraction = rank - integer; let n = usize(integer).unwrap(); let &floor = self.0.get_unchecked(n); let &ceiling = self.0.get_unchecked(n + 1); floor + (ceiling - floor) * fraction } } /// Returns the percentile at `p`% /// /// # Panics /// /// Panics if `p` is outside the closed `[0, 100]` range pub fn at(&self, p: A) -> A { let _0 = A::cast(0); let _100 = A::cast(100); assert!(p >= _0 && p <= _100); assert!(!self.0.is_empty()); unsafe { self.at_unchecked(p) } } /// Returns the 50th percentile pub fn median(&self) -> A { self.at(A::cast(50)) } /// Returns the 25th, 50th and 75th percentiles pub fn quartiles(&self) -> (A, A, A) { ( self.at(A::cast(25)), self.at(A::cast(50)), self.at(A::cast(75)), ) } } 
wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/resamples.rs000064400000000000000000000026671046102023000243440ustar 00000000000000use core::mem; use super::super::float::Float; use super::super::rand_util::{new_rng, Rng}; use super::super::univariate::Sample; use alloc::vec::Vec; pub struct Resamples<'a, A> where A: Float, { rng: Rng, sample: &'a [A], stage: Option>, } #[allow(clippy::should_implement_trait)] impl<'a, A> Resamples<'a, A> where A: 'a + Float, { pub fn new(sample: &'a Sample) -> Resamples<'a, A> { let slice = sample; Resamples { rng: new_rng(), sample: slice, stage: None, } } pub fn next(&mut self) -> &Sample { let n = self.sample.len(); let rng = &mut self.rng; match self.stage { None => { let mut stage = Vec::with_capacity(n); for _ in 0..n { let idx = rng.rand_range(0u64..(self.sample.len() as u64)); stage.push(self.sample[idx as usize]); } self.stage = Some(stage); } Some(ref mut stage) => { for elem in stage.iter_mut() { let idx = rng.rand_range(0u64..(self.sample.len() as u64)); *elem = self.sample[idx as usize]; } } } if let Some(ref v) = self.stage { unsafe { mem::transmute::<&[A], &Sample>(v) } } else { unreachable!(); } } } wasm-bindgen-test-0.3.58/src/rt/criterion/stats/univariate/sample.rs000064400000000000000000000121411046102023000236160ustar 00000000000000use alloc::vec::Vec; use core::{mem, ops}; use super::super::float::Float; use super::super::sum; use super::super::tuple::{Tuple, TupledDistributionsBuilder}; use super::super::univariate::Percentiles; use super::super::univariate::Resamples; /// A collection of data points drawn from a population /// /// Invariants: /// /// - The sample contains at least 2 data points /// - The sample contains no `NaN`s #[repr(transparent)] pub struct Sample([A]); // TODO(rust-lang/rfcs#735) move this `impl` into a private percentiles module impl Sample where A: Float, { /// Creates a new sample from an existing slice /// /// # Panics /// /// Panics if `slice` contains any `NaN` or if 
`slice` has less than two elements #[allow(clippy::new_ret_no_self)] pub fn new(slice: &[A]) -> &Sample { assert!(slice.len() > 1 && slice.iter().all(|x| !x.is_nan())); unsafe { mem::transmute(slice) } } /// Returns the arithmetic average of the sample /// /// - Time: `O(length)` pub fn mean(&self) -> A { let n = self.len(); self.sum() / A::cast(n) } /// Returns the median absolute deviation /// /// The `median` can be optionally passed along to speed up (2X) the computation /// /// - Time: `O(length)` /// - Memory: `O(length)` pub fn median_abs_dev(&self, median: Option) -> A where usize: cast::From>, { let median = median.unwrap_or_else(|| self.percentiles().median()); // NB Although this operation can be SIMD accelerated, the gain is negligible because the // bottle neck is the sorting operation which is part of the computation of the median let abs_devs = self.iter().map(|&x| (x - median).abs()).collect::>(); let abs_devs: &Self = Self::new(&abs_devs); abs_devs.percentiles().median() * A::cast(1.4826) } /// Returns a "view" into the percentiles of the sample /// /// This "view" makes consecutive computations of percentiles much faster (`O(1)`) /// /// - Time: `O(N log N) where N = length` /// - Memory: `O(length)` pub fn percentiles(&self) -> Percentiles where usize: cast::From>, { use core::cmp::Ordering; // NB This function assumes that there are no `NaN`s in the sample fn cmp(a: &T, b: &T) -> Ordering where T: PartialOrd, { match a.partial_cmp(b) { Some(o) => o, // Arbitrary way to handle NaNs that should never happen None => Ordering::Equal, } } let mut v = self.to_vec().into_boxed_slice(); v.sort_unstable_by(cmp); // NB :-1: to intra-crate privacy rules unsafe { mem::transmute(v) } } /// Returns the standard deviation of the sample /// /// The `mean` can be optionally passed along to speed up (2X) the computation /// /// - Time: `O(length)` pub fn std_dev(&self, mean: Option) -> A { self.var(mean).sqrt() } /// Returns the sum of all the elements of the 
sample /// /// - Time: `O(length)` pub fn sum(&self) -> A { sum(self) } /// Returns the t score between these two samples /// /// - Time: `O(length)` pub fn t(&self, other: &Sample) -> A { let (x_bar, y_bar) = (self.mean(), other.mean()); let (s2_x, s2_y) = (self.var(Some(x_bar)), other.var(Some(y_bar))); let n_x = A::cast(self.len()); let n_y = A::cast(other.len()); let num = x_bar - y_bar; let den = (s2_x / n_x + s2_y / n_y).sqrt(); num / den } /// Returns the variance of the sample /// /// The `mean` can be optionally passed along to speed up (2X) the computation /// /// - Time: `O(length)` pub fn var(&self, mean: Option) -> A { use core::ops::Add; let mean = mean.unwrap_or_else(|| self.mean()); let slice = self; let sum = slice .iter() .map(|&x| (x - mean).powi(2)) .fold(A::cast(0), Add::add); sum / A::cast(slice.len() - 1) } // TODO Remove the `T` parameter in favor of `S::Output` /// Returns the bootstrap distributions of the parameters estimated by the 1-sample statistic /// /// - Multi-threaded /// - Time: `O(nresamples)` /// - Memory: `O(nresamples)` pub fn bootstrap(&self, nresamples: usize, statistic: S) -> T::Distributions where S: Fn(&Sample) -> T + Sync, T: Tuple + Send, T::Distributions: Send, T::Builder: Send, { let mut resamples = Resamples::new(self); (0..nresamples) .map(|_| statistic(resamples.next())) .fold(T::Builder::new(0), |mut sub_distributions, sample| { sub_distributions.push(sample); sub_distributions }) .complete() } } impl ops::Deref for Sample { type Target = [A]; fn deref(&self) -> &[A] { &self.0 } } wasm-bindgen-test-0.3.58/src/rt/detect.rs000064400000000000000000000030271046102023000163050ustar 00000000000000//! Runtime detection of whether we're in node.js or a browser. 
use alloc::string::String; use wasm_bindgen::prelude::*; #[wasm_bindgen] extern "C" { type This; #[wasm_bindgen(method, getter, structural, js_name = self)] fn self_(me: &This) -> Option; type Scope; #[wasm_bindgen(method, getter, structural)] fn constructor(me: &Scope) -> Constructor; #[wasm_bindgen(method, getter, structural, js_name = Deno)] fn deno(me: &Scope) -> Option; type Deno; type Constructor; #[wasm_bindgen(method, getter, structural)] fn name(me: &Constructor) -> String; } /// Detecting the current JS scope pub fn detect() -> Runtime { // Test whether we're in a browser/worker by seeing if the `self` property is // defined on the global object and it is not equal to a WorkerScope, which should in turn // only be true in browsers. match js_sys::global().unchecked_into::().self_() { Some(scope) => match scope.constructor().name().as_str() { "DedicatedWorkerGlobalScope" | "SharedWorkerGlobalScope" | "ServiceWorkerGlobalScope" => Runtime::Worker, _ => match scope.deno() { Some(_) => Runtime::Node, _ => Runtime::Browser, }, }, None => Runtime::Node, } } /// Current runtime environment pub enum Runtime { /// Current scope is a browser scope Browser, /// Current scope is a node scope Node, /// Current scope is a worker scope Worker, } wasm-bindgen-test-0.3.58/src/rt/mod.rs000064400000000000000000000733621046102023000156250ustar 00000000000000//! Internal-only runtime module used for the `wasm_bindgen_test` crate. //! //! No API contained in this module will respect semver, these should all be //! considered private APIs. // # Architecture of `wasm_bindgen_test` // // This module can seem a bit funky, but it's intended to be the runtime support // of the `#[wasm_bindgen_test]` macro and be amenable to executing Wasm test // suites. The general idea is that for a Wasm test binary there will be a set // of functions tagged `#[wasm_bindgen_test]`. It's the job of the runtime // support to execute all of these functions, collecting and collating the // results. 
// // This runtime support works in tandem with the `wasm-bindgen-test-runner` // binary as part of the `wasm-bindgen-cli` package. // // ## High Level Overview // // Here's a rough and (semi) high level overview of what happens when this crate // runs. // // * First, the user runs `cargo test --target wasm32-unknown-unknown` // // * Cargo then compiles all the test suites (aka `tests/*.rs`) as Wasm binaries // (the `bin` crate type). These binaries all have entry points that are // `main` functions, but it's actually not used. The binaries are also // compiled with `--test`, which means they're linked to the standard `test` // crate, but this crate doesn't work on Wasm and so we bypass it entirely. // // * Instead of using `#[test]`, which doesn't work, users wrote tests with // `#[wasm_bindgen_test]`. This macro expands to a bunch of `#[no_mangle]` // functions with known names (currently named `__wbg_test_*`). // // * Next up, Cargo was configured via its test runner support to execute the // `wasm-bindgen-test-runner` binary. Instead of what Cargo normally does, // executing `target/wasm32-unknown-unknown/debug/deps/foo-xxxxx.wasm` (which // will fail as we can't actually execute was binaries), Cargo will execute // `wasm-bindgen-test-runner target/.../foo-xxxxx.wasm`. // // * The `wasm-bindgen-test-runner` binary takes over. It runs `wasm-bindgen` // over the binary, generating JS bindings and such. It also figures out if // we're running in node.js or a browser. // // * The `wasm-bindgen-test-runner` binary generates a JS entry point. This // entry point creates a `Context` below. The runner binary also parses the // Wasm file and finds all functions that are named `__wbg_test_*`. The // generate file gathers up all these functions into an array and then passes // them to `Context` below. Note that these functions are passed as *JS // values*. // // * Somehow, the runner then executes the JS file. 
This may be with node.js, it // may serve up files in a server and wait for the user, or it serves up files // in a server and starts headless testing. // // * Testing starts, it loads all the modules using either ES imports or Node // `require` statements. Everything is loaded in JS now. // // * A `Context` is created. The `Context` is forwarded the CLI arguments of the // original `wasm-bindgen-test-runner` in an environment specific fashion. // This is used for test filters today. // // * The `Context::run` function is called. Again, the generated JS has gathered // all Wasm tests to be executed into a list, and it's passed in here. // // * Next, `Context::run` returns a `Promise` representing the eventual // execution of all the tests. The Rust `Future` that's returned will work // with the tests to ensure that everything's executed by the time the // `Promise` resolves. // // * When a test executes, it's executing an entry point generated by // `#[wasm_bindgen_test]`. The test informs the `Context` of its name and // other metadata, and then `Context::execute_*` function creates a future // representing the execution of the test. This feeds back into the future // returned by `Context::run` to finish the test suite. // // * Finally, after all tests are run, the `Context`'s future resolves, prints // out all the result, and finishes in JS. // // ## Other various notes // // Phew, that was a lot! Some other various bits and pieces you may want to be // aware of are throughout the code. These include things like how printing // results is different in node vs a browser, or how we even detect if we're in // node or a browser. // // Overall this is all somewhat in flux as it's pretty new, and feedback is // always of course welcome! 
use alloc::borrow::ToOwned; use alloc::boxed::Box; use alloc::format; use alloc::rc::Rc; use alloc::string::{String, ToString}; use alloc::vec::Vec; use core::cell::{Cell, RefCell}; use core::fmt::{self, Display}; use core::future::Future; use core::panic::AssertUnwindSafe; use core::pin::Pin; use core::task::{self, Poll}; use js_sys::{Array, Function, Promise}; pub use wasm_bindgen; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::future_to_promise; // Maximum number of tests to execute concurrently. Eventually this should be a // configuration option specified at runtime or at compile time rather than // baked in here. // // Currently the default is 1 because the DOM has a lot of shared state, and // conccurrently doing things by default would likely end up in a bad situation. const CONCURRENCY: usize = 1; pub mod browser; /// A modified `criterion.rs`, retaining only the basic benchmark capabilities. #[cfg_attr(wasm_bindgen_unstable_test_coverage, coverage(off))] pub mod criterion; pub mod detect; pub mod node; mod scoped_tls; /// Directly depending on wasm-bindgen-test-based libraries should be avoided, /// as it creates a circular dependency that breaks their usage within `wasm-bindgen-test`. /// /// Let's copy web-time. #[cfg_attr(wasm_bindgen_unstable_test_coverage, coverage(off))] pub(crate) mod web_time; pub mod worker; /// Runtime test harness support instantiated in JS. /// /// The node.js entry script instantiates a `Context` here which is used to /// drive test execution. #[wasm_bindgen(js_name = WasmBindgenTestContext)] pub struct Context { state: Rc, } struct State { /// In Benchmark is_bench: bool, /// Include ignored tests. include_ignored: Cell, /// Counter of the number of tests that have succeeded. succeeded_count: Cell, /// Number of tests that have been filtered. filtered_count: Cell, /// Number of tests that have been ignored. ignored_count: Cell, /// A list of all tests which have failed. 
/// /// Each test listed here is paired with a `JsValue` that represents the /// exception thrown which caused the test to fail. failures: RefCell>, /// Remaining tests to execute, when empty we're just waiting on the /// `Running` tests to finish. remaining: RefCell>, /// List of currently executing tests. These tests all involve some level /// of asynchronous work, so they're sitting on the running list. running: RefCell>, /// How to actually format output, either node.js or browser-specific /// implementation. formatter: Box, /// Timing the total duration. timer: Option, } /// Failure reasons. enum Failure { /// Normal failing test. Error(JsValue), /// A test that `should_panic` but didn't. ShouldPanic, /// A test that `should_panic` with a specific message, /// but panicked with a different message. ShouldPanicExpected, } /// Representation of one test that needs to be executed. /// /// Tests are all represented as futures, and tests perform no work until their /// future is polled. struct Test { name: String, future: Pin>>>, output: Rc>, should_panic: Option>, } /// Captured output of each test. #[derive(Default)] struct Output { debug: String, log: String, info: String, warn: String, error: String, panic: String, should_panic: bool, } enum TestResult { Ok, Err(JsValue), Ignored(Option), } impl From> for TestResult { fn from(value: Result<(), JsValue>) -> Self { match value { Ok(()) => Self::Ok, Err(err) => Self::Err(err), } } } impl Display for TestResult { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { TestResult::Ok => write!(f, "ok"), TestResult::Err(_) => write!(f, "FAIL"), TestResult::Ignored(None) => write!(f, "ignored"), TestResult::Ignored(Some(reason)) => write!(f, "ignored, {}", reason), } } } trait Formatter { /// Writes a line of output, typically status information. fn writeln(&self, line: &str); /// Log the result of a test, either passing or failing. 
fn log_test(&self, is_bench: bool, name: &str, result: &TestResult) { if !is_bench { self.writeln(&format!("test {} ... {}", name, result)); } } /// Convert a thrown value into a string, using platform-specific apis /// perhaps to turn the error into a string. fn stringify_error(&self, val: &JsValue) -> String; } #[wasm_bindgen] extern "C" { #[wasm_bindgen(js_namespace = console, js_name = log)] #[doc(hidden)] pub fn js_console_log(s: &str); #[wasm_bindgen(js_namespace = console, js_name = error)] #[doc(hidden)] pub fn js_console_error(s: &str); // General-purpose conversion into a `String`. #[wasm_bindgen(js_name = String)] fn stringify(val: &JsValue) -> String; type Global; #[wasm_bindgen(method, getter)] fn performance(this: &Global) -> JsValue; /// Type for the [`Performance` object](https://developer.mozilla.org/en-US/docs/Web/API/Performance). type Performance; /// Binding to [`Performance.now()`](https://developer.mozilla.org/en-US/docs/Web/API/Performance/now). #[wasm_bindgen(method)] fn now(this: &Performance) -> f64; } /// Internal implementation detail of the `console_log!` macro. pub fn console_log(args: &fmt::Arguments) { js_console_log(&args.to_string()); } /// Internal implementation detail of the `console_error!` macro. pub fn console_error(args: &fmt::Arguments) { js_console_error(&args.to_string()); } #[wasm_bindgen(js_class = WasmBindgenTestContext)] impl Context { /// Creates a new context ready to run tests. /// /// A `Context` is the main structure through which test execution is /// coordinated, and this will collect output and results for all executed /// tests. 
#[wasm_bindgen(constructor)] pub fn new(is_bench: bool) -> Context { fn panic_handling(mut message: String) { let should_panic = if !CURRENT_OUTPUT.is_set() { false } else { CURRENT_OUTPUT.with(|output| { let mut output = output.borrow_mut(); output.panic.push_str(&message); output.should_panic }) }; // See https://github.com/rustwasm/console_error_panic_hook/blob/4dc30a5448ed3ffcfb961b1ad54d000cca881b84/src/lib.rs#L83-L123. if !should_panic { #[wasm_bindgen] extern "C" { type Error; #[wasm_bindgen(constructor)] fn new() -> Error; #[wasm_bindgen(method, getter)] fn stack(error: &Error) -> String; } message.push_str("\n\nStack:\n\n"); let e = Error::new(); message.push_str(&e.stack()); message.push_str("\n\n"); js_console_error(&message); } } #[cfg(feature = "std")] static SET_HOOK: std::sync::Once = std::sync::Once::new(); #[cfg(feature = "std")] SET_HOOK.call_once(|| { std::panic::set_hook(Box::new(|panic_info| { panic_handling(panic_info.to_string()); })); }); #[cfg(not(feature = "std"))] #[panic_handler] fn panic_handler(panic_info: &core::panic::PanicInfo<'_>) -> ! { panic_handling(panic_info.to_string()); unreachable!(); } let formatter = match detect::detect() { detect::Runtime::Browser => Box::new(browser::Browser::new()) as Box, detect::Runtime::Node => Box::new(node::Node::new()) as Box, detect::Runtime::Worker => Box::new(worker::Worker::new()) as Box, }; let timer = Timer::new(); Context { state: Rc::new(State { is_bench, include_ignored: Default::default(), failures: Default::default(), succeeded_count: Default::default(), filtered_count: Default::default(), ignored_count: Default::default(), remaining: Default::default(), running: Default::default(), formatter, timer, }), } } /// Handle `--include-ignored` flag. pub fn include_ignored(&mut self, include_ignored: bool) { self.state.include_ignored.set(include_ignored); } /// Handle filter argument. 
pub fn filtered_count(&mut self, filtered: usize) { self.state.filtered_count.set(filtered); } /// Executes a list of tests, returning a promise representing their /// eventual completion. /// /// This is the main entry point for executing tests. All the tests passed /// in are the JS `Function` object that was plucked off the /// `WebAssembly.Instance` exports list. /// /// The promise returned resolves to either `true` if all tests passed or /// `false` if at least one test failed. pub fn run(&self, tests: Vec) -> Promise { if !self.state.is_bench { let noun = if tests.len() == 1 { "test" } else { "tests" }; self.state .formatter .writeln(&format!("running {} {}", tests.len(), noun)); } // Execute all our test functions through their Wasm shims (unclear how // to pass native function pointers around here). Each test will // execute one of the `execute_*` tests below which will push a // future onto our `remaining` list, which we'll process later. let cx_arg = (self as *const Context as u32).into(); for test in tests { match Function::from(test).call1(&JsValue::null(), &cx_arg) { Ok(_) => {} Err(e) => { panic!( "exception thrown while creating a test: {}", self.state.formatter.stringify_error(&e) ); } } } // Now that we've collected all our tests we wrap everything up in a // future to actually do all the processing, and pass it out to JS as a // `Promise`. let state = AssertUnwindSafe(self.state.clone()); future_to_promise(async { let passed = ExecuteTests(state).await; Ok(JsValue::from(passed)) }) } } crate::scoped_thread_local!(static CURRENT_OUTPUT: RefCell); /// Handler for `console.log` invocations. /// /// If a test is currently running it takes the `args` array and stringifies /// it and appends it to the current output of the test. Otherwise it passes /// the arguments to the original `console.log` function, psased as /// `original`. // // TODO: how worth is it to actually capture the output here? 
Due to the nature // of futures/js we can't guarantee that all output is captured because JS code // could just be executing in the void and we wouldn't know which test to // attach it to. The main `test` crate in the rust repo also has issues about // how not all output is captured, causing some inconsistencies sometimes. #[wasm_bindgen] pub fn __wbgtest_console_log(args: &Array) { record(args, |output| &mut output.log) } /// Handler for `console.debug` invocations. See above. #[wasm_bindgen] pub fn __wbgtest_console_debug(args: &Array) { record(args, |output| &mut output.debug) } /// Handler for `console.info` invocations. See above. #[wasm_bindgen] pub fn __wbgtest_console_info(args: &Array) { record(args, |output| &mut output.info) } /// Handler for `console.warn` invocations. See above. #[wasm_bindgen] pub fn __wbgtest_console_warn(args: &Array) { record(args, |output| &mut output.warn) } /// Handler for `console.error` invocations. See above. #[wasm_bindgen] pub fn __wbgtest_console_error(args: &Array) { record(args, |output| &mut output.error) } fn record(args: &Array, dst: impl FnOnce(&mut Output) -> &mut String) { if !CURRENT_OUTPUT.is_set() { return; } CURRENT_OUTPUT.with(|output| { let mut out = output.borrow_mut(); let dst = dst(&mut out); args.for_each(&mut |val, idx, _array| { if idx != 0 { dst.push(' '); } dst.push_str(&stringify(&val)); }); dst.push('\n'); }); } /// Similar to [`std::process::Termination`], but for wasm-bindgen tests. pub trait Termination { /// Convert this into a JS result. fn into_js_result(self) -> Result<(), JsValue>; } impl Termination for () { fn into_js_result(self) -> Result<(), JsValue> { Ok(()) } } impl Termination for Result<(), E> { fn into_js_result(self) -> Result<(), JsValue> { self.map_err(|e| JsError::new(&format!("{:?}", e)).into()) } } impl Context { /// Entry point for a synchronous test in wasm. The `#[wasm_bindgen_test]` /// macro generates invocations of this method. 
pub fn execute_sync( &self, name: &str, f: impl 'static + FnOnce() -> T, should_panic: Option>, ignore: Option>, ) { self.execute(name, async { f().into_js_result() }, should_panic, ignore); } /// Entry point for an asynchronous in wasm. The /// `#[wasm_bindgen_test(async)]` macro generates invocations of this /// method. pub fn execute_async( &self, name: &str, f: impl FnOnce() -> F + 'static, should_panic: Option>, ignore: Option>, ) where F: Future + 'static, F::Output: Termination, { self.execute( name, async { f().await.into_js_result() }, should_panic, ignore, ) } fn execute( &self, name: &str, test: impl Future> + 'static, should_panic: Option>, ignore: Option>, ) { // Remove the crate name to mimic libtest more closely. // This also removes our `__wbgt_` or `__wbgb_` prefix and the `ignored` and `should_panic` modifiers. let name = name.split_once("::").unwrap().1; if let Some(ignore) = ignore { if !self.state.include_ignored.get() { self.state.formatter.log_test( self.state.is_bench, name, &TestResult::Ignored(ignore.map(str::to_owned)), ); let ignored = self.state.ignored_count.get(); self.state.ignored_count.set(ignored + 1); return; } } // Looks like we've got a test that needs to be executed! Push it onto // the list of remaining tests. let output = Output { should_panic: should_panic.is_some(), ..Default::default() }; let output = Rc::new(RefCell::new(output)); let future = TestFuture { output: output.clone(), test, }; self.state.remaining.borrow_mut().push(Test { name: name.to_string(), future: Pin::from(Box::new(future)), output, should_panic, }); } } struct ExecuteTests(AssertUnwindSafe>); impl Future for ExecuteTests { type Output = bool; fn poll(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll { let mut running = self.0.running.borrow_mut(); let mut remaining = self.0.remaining.borrow_mut(); // First up, try to make progress on all active tests. Remove any // finished tests. 
for i in (0..running.len()).rev() { let result = match running[i].future.as_mut().poll(cx) { Poll::Ready(result) => result, Poll::Pending => continue, }; let test = running.remove(i); self.0.log_test_result(test, result.into()); } // Next up, try to schedule as many tests as we can. Once we get a test // we `poll` it once to ensure we'll receive notifications. We only // want to schedule up to a maximum amount of work though, so this may // not schedule all tests. while running.len() < CONCURRENCY { let mut test = match remaining.pop() { Some(test) => test, None => break, }; let result = match test.future.as_mut().poll(cx) { Poll::Ready(result) => result, Poll::Pending => { running.push(test); continue; } }; self.0.log_test_result(test, result.into()); } // Tests are still executing, we're registered to get a notification, // keep going. if !running.is_empty() { return Poll::Pending; } // If there are no tests running then we must have finished everything, // so we shouldn't have any more remaining tests either. assert_eq!(remaining.len(), 0); self.0.print_results(); let all_passed = self.0.failures.borrow().is_empty(); Poll::Ready(all_passed) } } impl State { fn log_test_result(&self, test: Test, result: TestResult) { // Save off the test for later processing when we print the final // results. 
if let Some(should_panic) = test.should_panic { if let TestResult::Err(_e) = result { if let Some(expected) = should_panic { if !test.output.borrow().panic.contains(expected) { self.formatter.log_test( self.is_bench, &test.name, &TestResult::Err(JsValue::NULL), ); self.failures .borrow_mut() .push((test, Failure::ShouldPanicExpected)); return; } } self.formatter .log_test(self.is_bench, &test.name, &TestResult::Ok); self.succeeded_count.set(self.succeeded_count.get() + 1); } else { self.formatter .log_test(self.is_bench, &test.name, &TestResult::Err(JsValue::NULL)); self.failures .borrow_mut() .push((test, Failure::ShouldPanic)); } } else { self.formatter.log_test(self.is_bench, &test.name, &result); match result { TestResult::Ok => self.succeeded_count.set(self.succeeded_count.get() + 1), TestResult::Err(e) => self.failures.borrow_mut().push((test, Failure::Error(e))), _ => (), } } } fn print_results(&self) { let failures = self.failures.borrow(); if !failures.is_empty() { self.formatter.writeln("\nfailures:\n"); for (test, failure) in failures.iter() { self.print_failure(test, failure); } self.formatter.writeln("failures:\n"); for (test, _) in failures.iter() { self.formatter.writeln(&format!(" {}", test.name)); } } let finished_in = if let Some(timer) = &self.timer { format!("; finished in {:.2?}s", timer.elapsed()) } else { String::new() }; self.formatter.writeln(""); self.formatter.writeln(&format!( "test result: {}. 
\ {} passed; \ {} failed; \ {} ignored; \ {} filtered out\ {}\n", if failures.is_empty() { "ok" } else { "FAILED" }, self.succeeded_count.get(), failures.len(), self.ignored_count.get(), self.filtered_count.get(), finished_in, )); } fn accumulate_console_output(&self, logs: &mut String, which: &str, output: &str) { if output.is_empty() { return; } logs.push_str(which); logs.push_str(" output:\n"); logs.push_str(&tab(output)); logs.push('\n'); } fn print_failure(&self, test: &Test, failure: &Failure) { let mut logs = String::new(); let output = test.output.borrow(); match failure { Failure::ShouldPanic => { logs.push_str(&format!( "note: {} did not panic as expected\n\n", test.name )); } Failure::ShouldPanicExpected => { logs.push_str("note: panic did not contain expected string\n"); logs.push_str(&format!(" panic message: `\"{}\"`,\n", output.panic)); logs.push_str(&format!( " expected substring: `\"{}\"`\n\n", test.should_panic.unwrap().unwrap() )); } _ => (), } self.accumulate_console_output(&mut logs, "debug", &output.debug); self.accumulate_console_output(&mut logs, "log", &output.log); self.accumulate_console_output(&mut logs, "info", &output.info); self.accumulate_console_output(&mut logs, "warn", &output.warn); self.accumulate_console_output(&mut logs, "error", &output.error); if let Failure::Error(error) = failure { logs.push_str("JS exception that was thrown:\n"); let error_string = self.formatter.stringify_error(error); logs.push_str(&tab(&error_string)); } let msg = format!("---- {} output ----\n{}", test.name, tab(&logs)); self.formatter.writeln(&msg); } } /// A wrapper future around each test /// /// This future is what's actually executed for each test and is what's stored /// inside of a `Test`. This wrapper future performs two critical functions: /// /// * First, every time when polled, it configures the `CURRENT_OUTPUT` tls /// variable to capture output for the current test. 
That way at least when /// we've got Rust code running we'll be able to capture output. /// /// * Next, this "catches panics". Right now all Wasm code is configured as /// panic=abort, but it's more like an exception in JS. It's pretty sketchy /// to actually continue executing Rust code after an "abort", but we don't /// have much of a choice for now. /// /// Panics are caught here by using a shim function that is annotated with /// `catch` so we can capture JS exceptions (which Rust panics become). This /// way if any Rust code along the execution of a test panics we'll hopefully /// capture it. /// /// Note that both of the above aspects of this future are really just best /// effort. This is all a bit of a hack right now when it comes down to it and /// it definitely won't work in some situations. Hopefully as those situations /// arise though we can handle them! /// /// The good news is that everything should work flawlessly in the case where /// tests have no output and execute successfully. And everyone always writes /// perfect code on the first try, right? 
*sobs* struct TestFuture { output: Rc>, test: F, } #[wasm_bindgen] extern "C" { #[wasm_bindgen(catch)] fn __wbg_test_invoke(f: &mut dyn FnMut()) -> Result<(), JsValue>; } impl>> Future for TestFuture { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll { let output = self.output.clone(); // Use `new_unchecked` here to project our own pin, and we never // move `test` so this should be safe let test = unsafe { Pin::map_unchecked_mut(self, |me| &mut me.test) }; let mut future_output = None; let result = CURRENT_OUTPUT.set(&output, || { let mut test = Some(test); __wbg_test_invoke(&mut || { let test = test.take().unwrap_throw(); future_output = Some(test.poll(cx)) }) }); match (result, future_output) { (_, Some(Poll::Ready(result))) => Poll::Ready(result), (_, Some(Poll::Pending)) => Poll::Pending, (Err(e), _) => Poll::Ready(Err(e)), (Ok(_), None) => wasm_bindgen::throw_str("invalid poll state"), } } } fn tab(s: &str) -> String { let mut result = String::new(); for line in s.lines() { result.push_str(" "); result.push_str(line); result.push('\n'); } result } struct Timer { performance: Performance, started: f64, } impl Timer { fn new() -> Option { let global: Global = js_sys::global().unchecked_into(); let performance = global.performance(); (!performance.is_undefined()).then(|| { let performance: Performance = performance.unchecked_into(); let started = performance.now(); Self { performance, started, } }) } fn elapsed(&self) -> f64 { (self.performance.now() - self.started) / 1000. } } wasm-bindgen-test-0.3.58/src/rt/node.rs000064400000000000000000000030301046102023000157540ustar 00000000000000//! Support for printing status information of a test suite in node.js //! //! This currently uses the same output as `libtest`, only reimplemented here //! for node itself. 
use alloc::string::String; use wasm_bindgen::prelude::*; /// Implementation of the `Formatter` trait for node.js pub struct Node {} #[wasm_bindgen] extern "C" { // Not using `js_sys::Error` because node's errors specifically have a // `stack` attribute. type NodeError; #[wasm_bindgen(method, getter, js_class = "Error", structural)] fn stack(this: &NodeError) -> Option; #[wasm_bindgen(method, js_class = "Error", js_name = toString, structural, catch)] fn to_string(this: &NodeError) -> Result; #[wasm_bindgen(js_name = __wbgtest_og_console_log)] fn og_console_log(s: &str); } impl Node { /// Attempts to create a new formatter for node.js pub fn new() -> Node { Node {} } } impl super::Formatter for Node { fn writeln(&self, line: &str) { og_console_log(line); } fn stringify_error(&self, err: &JsValue) -> String { // TODO: should do a checked cast to `NodeError` let err = NodeError::from(err.clone()); err.stack().unwrap_or(err.to_string().unwrap_or("".into())) } } /// Path to use for coverage data. #[wasm_bindgen] pub fn __wbgtest_coverage_path( env: Option, pid: u32, temp_dir: &str, module_signature: u64, ) -> String { wasm_bindgen_test_shared::coverage_path(env.as_deref(), pid, temp_dir, module_signature) } wasm-bindgen-test-0.3.58/src/rt/scoped_tls.rs000064400000000000000000000044541046102023000172010ustar 00000000000000//! See . use core::cell::Cell; use core::marker::PhantomData; /// `no_std` polyfill for [`scoped_tls`](https://crates.io/crates/scoped-tls). #[macro_export] macro_rules! 
scoped_thread_local { (static $name:ident: $ty:ty) => { static $name: scoped_tls::ScopedKey<$ty> = unsafe { static FOO: scoped_tls::Wrapper<::core::cell::Cell<*const ()>> = scoped_tls::Wrapper::new(::core::cell::Cell::new(::core::ptr::null())); // Safety: nothing else can access FOO since it's hidden in its own scope scoped_tls::ScopedKey::new(&FOO) }; }; } pub(super) struct Wrapper(T); impl Wrapper { pub(super) const fn new(value: T) -> Self { Self(value) } } unsafe impl Sync for Wrapper {} pub struct ScopedKey { inner: &'static Wrapper>, _marker: PhantomData, } unsafe impl Sync for ScopedKey {} impl ScopedKey { #[doc(hidden)] /// # Safety /// `inner` must only be accessed through `ScopedKey`'s API pub(super) const unsafe fn new(inner: &'static Wrapper>) -> Self { Self { inner, _marker: PhantomData, } } pub fn set(&'static self, t: &T, f: F) -> R where F: FnOnce() -> R, { struct Reset { key: &'static Wrapper>, val: *const (), } impl Drop for Reset { fn drop(&mut self) { self.key.0.set(self.val); } } let prev = self.inner.0.get(); self.inner.0.set(t as *const T as *const ()); let _reset = Reset { key: self.inner, val: prev, }; f() } pub fn with(&'static self, f: F) -> R where F: FnOnce(&T) -> R, { let val = self.inner.0.get(); assert!( !val.is_null(), "cannot access a scoped thread local variable without calling `set` first" ); unsafe { f(&*(val as *const T)) } } /// Test whether this TLS key has been `set` for the current thread. pub fn is_set(&'static self) -> bool { !self.inner.0.get().is_null() } } wasm-bindgen-test-0.3.58/src/rt/web_time/instant.rs000064400000000000000000000110311046102023000203020ustar 00000000000000//! Re-implementation of [`std::time::Instant`]. //! //! See . use core::ops::Sub; use core::time::Duration; use super::js::PERFORMANCE; #[cfg(target_feature = "atomics")] use super::js::TIME_ORIGIN; /// See [`std::time::Instant`]. 
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Instant(Duration); impl Instant { /// See [`std::time::Instant::now()`]. /// /// # Panics /// /// This call will panic if the [`Performance` object] was not found, e.g. /// calling from a [worklet]. /// /// [`Performance` object]: https://developer.mozilla.org/en-US/docs/Web/API/performance_property /// [worklet]: https://developer.mozilla.org/en-US/docs/Web/API/Worklet #[must_use] pub fn now() -> Self { let now = PERFORMANCE.with(|performance| { let performance = performance .as_ref() .expect("`Performance` object not found"); #[cfg(not(target_feature = "atomics"))] return performance.now(); #[cfg(target_feature = "atomics")] TIME_ORIGIN.with(|origin| performance.now() + origin) }); assert!( now.is_sign_positive(), "negative `DOMHighResTimeStamp`s are not supported" ); Self(time_stamp_to_duration(now)) } /// See [`std::time::Instant::duration_since()`]. #[must_use] pub fn duration_since(&self, earlier: Self) -> Duration { self.checked_duration_since(earlier).unwrap_or_default() } /// See [`std::time::Instant::checked_duration_since()`]. #[must_use] pub fn checked_duration_since(&self, earlier: Self) -> Option { self.0.checked_sub(earlier.0) } /// See [`std::time::Instant::elapsed()`]. #[must_use] pub fn elapsed(&self) -> Duration { Self::now() - *self } } impl Sub for Instant { type Output = Duration; /// Returns the amount of time elapsed from another instant to this one, /// or zero duration if that instant is later than this one. fn sub(self, rhs: Self) -> Duration { self.duration_since(rhs) } } /// Converts a `DOMHighResTimeStamp` to a [`Duration`]. /// /// # Note /// /// Keep in mind that like [`Duration::from_secs_f64()`] this doesn't do perfect /// rounding. 
fn time_stamp_to_duration(time_stamp: f64) -> Duration { let time_stamp = F64(time_stamp); Duration::from_millis(time_stamp.trunc() as u64) + Duration::from_nanos(F64(time_stamp.fract() * 1.0e6).internal_round_ties_even() as u64) } /// [`f64`] `no_std` compatibility wrapper. #[derive(Clone, Copy)] struct F64(f64); impl F64 { /// See [`f64::trunc()`]. fn trunc(self) -> f64 { libm::trunc(self.0) } /// See [`f64::fract()`]. fn fract(self) -> f64 { self.0 - self.trunc() } /// A specialized version of [`f64::round_ties_even()`]. [`f64`] must be /// positive and have an exponent smaller than `52`. /// /// - We expect `DOMHighResTimeStamp` to always be positive. We check that /// in [`Instant::now()`]. /// - We only round the fractional part after multiplying it by `1e6`. A /// fraction always has a negative exponent. `1e6` has an exponent of /// `19`. Therefor the resulting exponent can at most be `19`. /// /// [`f64::round_ties_even()`]: https://doc.rust-lang.org/1.83.0/std/primitive.f64.html#method.round_ties_even fn internal_round_ties_even(self) -> f64 { /// Put `debug_assert!` in a function to clap `coverage(off)` on it. /// /// See . fn check(this: f64) { debug_assert!(this.is_sign_positive(), "found negative input"); debug_assert!( { let exponent: u64 = this.to_bits() >> 52 & 0x7ff; exponent < 0x3ff + 52 }, "found number with exponent bigger than 51" ); } check(self.0); // See . let one_over_e = 1.0 / f64::EPSILON; // REMOVED: We don't support numbers with exponents bigger than 51. // REMOVED: We don't support negative numbers. // REMOVED: We don't support numbers with exponents bigger than 51. let xplusoneovere = self.0 + one_over_e; xplusoneovere - one_over_e // REMOVED: We don't support negative numbers. } } wasm-bindgen-test-0.3.58/src/rt/web_time/js.rs000064400000000000000000000024711046102023000172460ustar 00000000000000//! Bindings to the JS API. 
use wasm_bindgen::prelude::wasm_bindgen;

#[wasm_bindgen]
extern "C" {
    /// Type for the [`Performance` object](https://developer.mozilla.org/en-US/docs/Web/API/Performance).
    pub(super) type Performance;

    /// Holds the [`Performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance) object.
    ///
    /// `Option` because some contexts (e.g. worklets) don't expose a
    /// `performance` property on `globalThis`.
    // NOTE(review): the generic parameter was stripped during extraction;
    // restored `Option<Performance>` so this compiles again.
    #[wasm_bindgen(thread_local_v2, js_namespace = globalThis, js_name = performance)]
    pub(super) static PERFORMANCE: Option<Performance>;

    /// Binding to [`Performance.now()`](https://developer.mozilla.org/en-US/docs/Web/API/Performance/now).
    #[wasm_bindgen(method)]
    pub(super) fn now(this: &Performance) -> f64;

    /// Holds the [`Performance.timeOrigin`](https://developer.mozilla.org/en-US/docs/Web/API/Performance/timeOrigin).
    #[cfg(target_feature = "atomics")]
    #[wasm_bindgen(thread_local_v2, js_namespace = ["globalThis", "performance"], js_name = timeOrigin)]
    pub(super) static TIME_ORIGIN: f64;

    /// Type for the [`Date` object](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date).
    pub(super) type Date;

    /// Binding to [`Date.now()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/now).
    #[wasm_bindgen(static_method_of = Date)]
    pub(super) fn now() -> f64;
}
wasm-bindgen-test-0.3.58/src/rt/web_time/mod.rs000064400000000000000000000002351046102023000174050ustar 00000000000000mod instant;
mod js;
mod system_time;

pub use instant::Instant;
pub use system_time::SystemTime;

pub const UNIX_EPOCH: SystemTime = SystemTime::UNIX_EPOCH;
wasm-bindgen-test-0.3.58/src/rt/web_time/system_time.rs000064400000000000000000000022701046102023000211710ustar 00000000000000//! Re-implementation of [`std::time::SystemTime`].
//!
//! See .

use core::time::Duration;

use super::js::Date;

/// See [`std::time::SystemTime`].
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct SystemTime(pub(crate) Duration);

impl SystemTime {
    /// See [`std::time::SystemTime::UNIX_EPOCH`].
pub const UNIX_EPOCH: Self = Self(Duration::ZERO); /// See [`std::time::SystemTime::now()`]. #[must_use] pub fn now() -> Self { #[allow(clippy::as_conversions, clippy::cast_possible_truncation)] let ms = Date::now() as i64; let ms = ms.try_into().expect("found negative timestamp"); Self(Duration::from_millis(ms)) } /// See [`std::time::SystemTime::duration_since()`]. pub fn duration_since(&self, earlier: Self) -> Result { // See . self.0.checked_sub(earlier.0).ok_or(SystemTimeError) } } /// See [`std::time::SystemTimeError`]. #[derive(Clone, Debug)] pub struct SystemTimeError; wasm-bindgen-test-0.3.58/src/rt/worker.rs000064400000000000000000000036261046102023000163530ustar 00000000000000//! Support for printing status information of a test suite in a browser. //! //! Currently this is quite simple, rendering the same as the console tests in //! node.js. Output here is rendered in a `pre`, however. use alloc::format; use alloc::string::String; use js_sys::Error; use wasm_bindgen::prelude::*; /// Implementation of `Formatter` for browsers. /// /// Routes all output to a `pre` on the page currently. Eventually this probably /// wants to be a pretty table with colors and folding and whatnot. 
pub struct Worker {} #[wasm_bindgen] extern "C" { type WorkerError; #[wasm_bindgen(method, getter, structural)] fn stack(this: &WorkerError) -> JsValue; #[wasm_bindgen(js_name = "__wbg_test_output_writeln")] fn write_output_line(data: JsValue); } impl Worker { /// Attempts to create a new formatter for web worker pub fn new() -> Worker { Worker {} } } impl super::Formatter for Worker { fn writeln(&self, line: &str) { write_output_line(JsValue::from(String::from(line))); } fn stringify_error(&self, err: &JsValue) -> String { // TODO: this should be a checked cast to `Error` let error = Error::from(err.clone()); let name = String::from(error.name()); let message = String::from(error.message()); let err = WorkerError::from(err.clone()); let stack = err.stack(); let header = format!("{}: {}", name, message); let stack = match stack.as_string() { Some(stack) => stack, None => return header, }; // If the `stack` variable contains the name/message already, this is // probably a chome-like error which is already rendered well, so just // return this info if stack.contains(&header) { return stack; } // Fallback to make sure we don't lose any info format!("{}\n{}", header, stack) } }