bitvec-1.0.1/.cargo_vcs_info.json

{
  "git": {
    "sha1": "c922de6998be994cb5be1a349c7a0a0d16a19f07"
  },
  "path_in_vcs": ""
}

bitvec-1.0.1/Cargo.lock

# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = ["hermit-abi", "libc", "winapi"]

[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

[[package]]
name = "bincode"
version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
dependencies = ["serde"]

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "bitvec"
version = "1.0.1"
dependencies = ["bincode", "criterion", "funty", "radium", "rand", "serde", "serde_json", "serde_test", "static_assertions", "tap", "wyz"]

[[package]]
name = "bstr"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
dependencies = ["lazy_static", "memchr", "regex-automata", "serde"]

[[package]]
name = "bumpalo"
version = "3.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"

[[package]]
name = "cast"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a"
dependencies = ["rustc_version"]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = ["bitflags", "textwrap", "unicode-width"]

[[package]]
name = "criterion"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10"
dependencies = ["atty", "cast", "clap", "criterion-plot", "csv", "itertools", "lazy_static", "num-traits", "oorandom", "plotters", "rayon", "regex", "serde", "serde_cbor", "serde_derive", "serde_json", "tinytemplate", "walkdir"]

[[package]]
name = "criterion-plot"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57"
dependencies = ["cast", "itertools"]

[[package]]
name = "crossbeam-channel"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
dependencies = ["cfg-if", "crossbeam-utils"]

[[package]]
name = "crossbeam-deque"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
dependencies = ["cfg-if", "crossbeam-epoch", "crossbeam-utils"]

[[package]]
name = "crossbeam-epoch"
version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c00d6d2ea26e8b151d99093005cb442fb9a37aeaca582a03ec70946f49ab5ed9"
dependencies = ["cfg-if", "crossbeam-utils", "lazy_static", "memoffset", "scopeguard"]

[[package]]
name = "crossbeam-utils"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6"
dependencies = ["cfg-if", "lazy_static"]

[[package]]
name = "csv"
version = "1.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
dependencies = ["bstr", "csv-core", "itoa 0.4.8", "ryu", "serde"]

[[package]]
name = "csv-core"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
dependencies = ["memchr"]

[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"

[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"

[[package]]
name = "getrandom"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c"
dependencies = ["cfg-if", "libc", "wasi"]

[[package]]
name = "half"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"

[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = ["libc"]

[[package]]
name = "itertools"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
dependencies = ["either"]

[[package]]
name = "itoa"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"

[[package]]
name = "itoa"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35"

[[package]]
name = "js-sys"
version = "0.3.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04"
dependencies = ["wasm-bindgen"]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c"

[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = ["cfg-if"]

[[package]]
name = "memchr"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"

[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = ["autocfg"]

[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = ["autocfg"]

[[package]]
name = "num_cpus"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
dependencies = ["hermit-abi", "libc"]

[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"

[[package]]
name = "plotters"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a"
dependencies = ["num-traits", "plotters-backend", "plotters-svg", "wasm-bindgen", "web-sys"]

[[package]]
name = "plotters-backend"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c"

[[package]]
name = "plotters-svg"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9"
dependencies = ["plotters-backend"]

[[package]]
name = "ppv-lite86"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"

[[package]]
name = "proc-macro2"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
dependencies = ["unicode-xid"]

[[package]]
name = "quote"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145"
dependencies = ["proc-macro2"]

[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"

[[package]]
name = "rand"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = ["libc", "rand_chacha", "rand_core", "rand_hc"]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = ["ppv-lite86", "rand_core"]

[[package]]
name = "rand_core"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = ["getrandom"]

[[package]]
name = "rand_hc"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = ["rand_core"]

[[package]]
name = "rayon"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
dependencies = ["autocfg", "crossbeam-deque", "either", "rayon-core"]

[[package]]
name = "rayon-core"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
dependencies = ["crossbeam-channel", "crossbeam-deque", "crossbeam-utils", "lazy_static", "num_cpus"]

[[package]]
name = "regex"
version = "1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
dependencies = ["regex-syntax"]

[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"

[[package]]
name = "regex-syntax"
version = "0.6.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"

[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = ["semver"]

[[package]]
name = "ryu"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f"

[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = ["winapi-util"]

[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "semver"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0486718e92ec9a68fbed73bb5ef687d71103b142595b406835649bebd33f72c7"

[[package]]
name = "serde"
version = "1.0.136"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789"

[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = ["half", "serde"]

[[package]]
name = "serde_derive"
version = "1.0.136"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9"
dependencies = ["proc-macro2", "quote", "syn"]

[[package]]
name = "serde_json"
version = "1.0.78"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085"
dependencies = ["itoa 1.0.1", "ryu", "serde"]

[[package]]
name = "serde_test"
version = "1.0.136"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21675ba6f9d97711cc00eee79d8dd7d0a31e571c350fb4d8a7c78f70c0e7b0e9"
dependencies = ["serde"]

[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"

[[package]]
name = "syn"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b"
dependencies = ["proc-macro2", "quote", "unicode-xid"]

[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = ["unicode-width"]

[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = ["serde", "serde_json"]

[[package]]
name = "unicode-width"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"

[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

[[package]]
name = "walkdir"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
dependencies = ["same-file", "winapi", "winapi-util"]

[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"

[[package]]
name = "wasm-bindgen"
version = "0.2.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06"
dependencies = ["cfg-if", "wasm-bindgen-macro"]

[[package]]
name = "wasm-bindgen-backend"
version = "0.2.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca"
dependencies = ["bumpalo", "lazy_static", "log", "proc-macro2", "quote", "syn", "wasm-bindgen-shared"]

[[package]]
name = "wasm-bindgen-macro"
version = "0.2.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01"
dependencies = ["quote", "wasm-bindgen-macro-support"]

[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc"
dependencies = ["proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared"]

[[package]]
name = "wasm-bindgen-shared"
version = "0.2.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2"

[[package]]
name = "web-sys"
version = "0.3.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb"
dependencies = ["js-sys", "wasm-bindgen"]

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = ["winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu"]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = ["winapi"]

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "wyz"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e"
dependencies = ["tap"]

bitvec-1.0.1/Cargo.toml

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2021"
rust-version = "1.56"
name = "bitvec"
version = "1.0.1"
include = [
    "Cargo.toml",
    "LICENSE.txt",
    "README.md",
    "doc/**/*.md",
    "src/**/*.rs",
    "benches/*.rs",
]
description = "Addresses memory by bits, for packed collections and bitfields"
homepage = "https://bitvecto-rs.github.io/bitvec"
documentation = "https://docs.rs/bitvec/latest/bitvec"
readme = "README.md"
keywords = [
    "bitfields",
    "bitmap",
    "bitstream",
    "bitvec",
    "bitvector",
]
categories = [
    "data-structures",
    "embedded",
    "no-std",
    "rust-patterns",
]
license = "MIT"
repository = "https://github.com/bitvecto-rs/bitvec"
resolver = "2"

[package.metadata.docs.rs]
features = [
    "atomic",
    "serde",
    "std",
]

[dependencies.funty]
version = "^2.0"
default-features = false

[dependencies.radium]
version = "0.7"

[dependencies.serde]
version = "1"
optional = true
default-features = false

[dependencies.tap]
version = "1"

[dependencies.wyz]
version = "0.5"
default-features = false

[dev-dependencies.bincode]
version = "1.3"

[dev-dependencies.criterion]
version = "0.3"

[dev-dependencies.rand]
version = "0.8"

[dev-dependencies.serde]
version = "1"

[dev-dependencies.serde_json]
version = "1"

[dev-dependencies.serde_test]
version = "1"

[dev-dependencies.static_assertions]
version = "1"

[features]
alloc = []
atomic = []
default = [
    "atomic",
    "std",
]
std = ["alloc"]
testing = []

[badges.codecov]
branch = "main"
repository = "bitvecto-rs/bitvec"
service = "github"

[badges.is-it-maintained-issue-resolution]
repository = "bitvecto-rs/bitvec"

[badges.is-it-maintained-open-issues]
repository = "bitvecto-rs/bitvec"

[badges.maintenance]
status = "passively-maintained"

bitvec-1.0.1/Cargo.toml.orig

########################################################################
#                           Project Manifest                           #
#                                                                      #
#          This file describes the `bitvec` project to Cargo.         #
########################################################################

[package]
name = "bitvec"
version = "1.0.1"
edition = "2021"

categories = [
    "data-structures",
    "embedded",
    "no-std",
    "rust-patterns",
]
description = "Addresses memory by bits, for packed collections and bitfields"
documentation = "https://docs.rs/bitvec/latest/bitvec"
homepage = "https://bitvecto-rs.github.io/bitvec"
include = [
    "Cargo.toml",
    "LICENSE.txt",
    "README.md",
    "doc/**/*.md",
    "src/**/*.rs",
    "benches/*.rs",
]
keywords = [
    "bitfields",
    "bitmap",
    "bitstream",
    "bitvec",
    "bitvector",
]
license = "MIT"
readme = "README.md"
repository = "https://github.com/bitvecto-rs/bitvec"
rust-version = "1.56"

[features]
alloc = [ ]
atomic = [ ]
# Enable use of atomics and the standard library by default. no-std
# users will need to opt out with `default-features = false`.
default = [
    "atomic",
    "std",
]
# The standard library includes the allocator.
std = [
    "alloc",
]
testing = [ ]

[dependencies]
radium = "0.7"
tap = "1"

[dependencies.funty]
version = "^2.0"
default-features = false

[dependencies.serde]
default-features = false
optional = true
version = "1"

[dependencies.wyz]
version = "0.5"
default-features = false

# Crates required when running the test suite.
[dev-dependencies]
bincode = "1.3"
criterion = "0.3"
rand = "0.8"
serde = "1"
serde_json = "1"
serde_test = "1"
static_assertions = "1"

# [[bench]]
# name = "memcpy"
# harness = false

# Indicates the features that docs.rs should enable when building documentation.
[package.metadata.docs.rs]
features = [
    "atomic",
    "serde",
    "std",
]

[badges.codecov]
repository = "bitvecto-rs/bitvec"
branch = "main"
service = "github"

[badges.is-it-maintained-issue-resolution]
repository = "bitvecto-rs/bitvec"

[badges.is-it-maintained-open-issues]
repository = "bitvecto-rs/bitvec"

[badges.maintenance]
status = "passively-maintained"
bitvec-1.0.1/LICENSE.txt

MIT License

Copyright (c) 2018 myrrlyn (Alexander Payne)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

bitvec-1.0.1/README.md
# `bitvec`

## A Magnifying Glass for Memory

[![Crate][crate_img]][crate]
[![Documentation][docs_img]][docs]
[![License][license_img]][license_file]
[![Crate Downloads][downloads_img]][crate]
[![Project Size][loc_img]][loc]
1. [Summary](#summary)
1. [Introduction](#introduction)
1. [Highlights](#highlights)
1. [Usage](#usage)
1. [Examples](#examples)
1. [User Stories](#user-stories)
   1. [Bit Collections](#bit-collections)
   1. [Bit-Field Memory Access](#bit-field-memory-access)
   1. [Transport Protocols](#transport-protocols)
1. [Feature Flags](#feature-flags)
1. [Deeper Reading](#deeper-reading)

## Summary

`bitvec` provides a foundational API for bitfields in Rust. It specializes
standard-library data structures (slices, arrays, and vectors of `bool`) to
use one-bit-per-`bool` storage, similar to [`std::bitset<N>`] and
[`std::vector<bool>`] in C++. Additionally, it allows a memory region to be
divided into arbitrary regions of integer storage, like [binaries][erl_bit]
in Erlang.

If you need to view memory as bit-addressed instead of byte-addressed, then
`bitvec` is the fastest, most complete, and most Rust-idiomatic crate
available.

## Introduction

Computers do not operate on bits. The memory bus is byte-addressed, and
processors operate on register words, which are typically four to eight
bytes, or even wider. This means that when programmers wish to operate on
individual bits within a byte of memory or a word of register, they have to
do so manually, using shift and mask operations that are likely familiar to
anyone who has done this before.

`bitvec` brings the capabilities of C++’s compact `bool` storage and
Erlang’s decomposable bit-streams to Rust, in a package that fits in with
your existing Rust idioms, and in the most capable and performant
implementation possible. The bit-stream behavior provides the logic
necessary for C-style structural bitfields, and syntax sugar for it can be
found in [`deku`].

`bitvec` enables you to write code for bit-addressed memory that is simple,
easy, and fast. It compiles to the same, or even better, object code than
you would get from writing shift/mask instructions manually. It leverages
Rust’s powerful reference and type systems to create a system that
seamlessly bridges single-bit addressing, precise control of in-memory
layout, and Rust-native ownership and borrowing mechanisms.

## Highlights

`bitvec` has a number of unique capabilities related to its place as a Rust
library and as a bit-addressing system.

- It supports arbitrary bit-addressing, and its bit slices can be munched
  from the front.
- `BitSlice` is a region type equivalent to `[bool]`, and can be described
  by Rust references and thus fit into reference-based APIs.
- Type parameters enable users to select the precise memory representation
  they desire.
- A memory model accounts for element-level aliasing and is safe for
  concurrent use. In particular, the “Beware Bitfields” bug described in
  [this Mozilla report][moz] is simply impossible to produce.
- Native support for atomic integers as bit-field storage.
- Users can supply their own translation layer for memory representation if
  the built-in translations are insufficient.

However, it does also have some small costs associated with its
capabilities:

- `BitSlice` cannot be used as a referent type in pointers, such as `Box`,
  `Rc`, or `Arc`.
- `BitSlice` cannot implement `IndexMut`, so `bitslice[index] = true;` does
  not work.

## Usage

**Minimum Supported Rust Version**: 1.56.0

`bitvec` strives to follow the sequence APIs in the standard library.
However, as most of its functionality is a reïmplementation that does not
require the standard library to actually have the symbols present, tracking
new standard-library APIs may not require an MSRV raise.
Now that `bitvec` is at 1.0, it will only raise MSRV in minor-edition
releases. If you have a pinned Rust toolchain, you should depend on `bitvec`
with a limiting minor-version constraint like `"~1.0"`.

First, depend on it in your Cargo manifest:

```toml
[dependencies]
bitvec = "1"
```

> Note: `bitvec` supports `#![no_std]` targets. If you do not have `std`,
> disable the default features, and explicitly restore any features that you
> do have:
>
> ```toml
> [dependencies.bitvec]
> version = "1"
> default-features = false
> features = ["atomic", "alloc"]
> ```

Once Cargo knows about it, bring its prelude into scope:

```rust
use bitvec::prelude::*;
```

You can read the [prelude reëxports][prelude] to see exactly which symbols
are being imported. The prelude brings in many symbols, and while name
collisions are not likely, you may wish to instead import the prelude
*module* rather than its contents:

```rust
use bitvec::prelude as bv;
```

You should almost certainly use type aliases to make names for specific
instantiations of `bitvec` type parameters, and use those rather than
attempting to remain generic over a `<T, O>` pair throughout your project.

## Examples

```rust
use bitvec::prelude::*;

// All data-types have macro
// constructors.
let arr = bitarr![u32, Lsb0; 0; 80];
let bits = bits![u16, Msb0; 0; 40];

// Unsigned integers (scalar, array,
// and slice) can be borrowed.
let data = 0x2021u16;
let bits = data.view_bits::<Lsb0>();
let data = [0xA5u8, 0x3C];
let bits = data.view_bits::<Msb0>();

// Bit-slices can split anywhere.
let (head, rest) = bits.split_at(4);
assert_eq!(head, bits[.. 4]);
assert_eq!(rest, bits[4 ..]);

// And they are writable!
let mut data = [0u8; 2];
let bits = data.view_bits_mut::<Lsb0>();
// l and r each own one byte.
let (l, r) = bits.split_at_mut(8);
// but now a, b, c, and d own a nibble!
let ((a, b), (c, d)) = (
    l.split_at_mut(4),
    r.split_at_mut(4),
);
// and all four of them are writable.
a.set(0, true);
b.set(1, true);
c.set(2, true);
d.set(3, true);

assert!(bits[0]);  // a[0]
assert!(bits[5]);  // b[1]
assert!(bits[10]); // c[2]
assert!(bits[15]); // d[3]

// `BitSlice` is accessed by reference,
// which means it respects NLL styles.
assert_eq!(data, [0x21u8, 0x84]);

// Furthermore, bit-slices can store
// ordinary integers:
let eight = [0u8, 4, 8, 12, 16, 20, 24, 28];
//           a  b  c  d   e   f   g   h
let mut five = [0u8; 5];
for (slot, byte) in five
    .view_bits_mut::<Msb0>()
    .chunks_mut(5)
    .zip(eight.iter().copied())
{
    slot.store_be(byte);
    assert_eq!(slot.load_be::<u8>(), byte);
}

assert_eq!(five, [
    0b00000_001,  // aaaaa bbb
    0b00_01000_0, // bb ccccc d
    0b1100_1000,  // dddd eeee
    0b0_10100_11, // e fffff gg
    0b000_11100,  // ggg hhhhh
]);
```

The `BitSlice` type is a view that alters the behavior of a borrowed memory
region. It is never held directly, but only by references (created by
borrowing integer memory) or the `BitArray` value type. In addition, the
presence of a dynamic allocator enables the `BitBox` and `BitVec` buffer
types, which can be used for more advanced buffer manipulation:

```rust
#[cfg(feature = "alloc")]
fn main() {
    use bitvec::prelude::*;

    let mut bv = bitvec![u8, Msb0;];
    bv.push(false);
    bv.push(true);
    bv.extend([false; 4].iter());
    bv.extend(&15u8.view_bits::<Lsb0>()[.. 4]);
    assert_eq!(bv.as_raw_slice(), &[
        0b01_0000_11,
        0b11_000000,
        // ^ dead
    ]);
}
```

While place expressions like `bits[index] = value;` are not available,
`bitvec` instead provides a proxy structure that can be used as *nearly* an
`&mut bit` reference:

```rust
use bitvec::prelude::*;

let bits = bits![mut 0];

// `bit` is not a reference, so
// it must be bound with `mut`.
let mut bit = bits.get_mut(0).unwrap();
assert!(!*bit);
*bit = true;
assert!(*bit);
// `bit` is not a reference,
// so NLL rules do not apply.
drop(bit);
assert!(bits[0]);
```

The `bitvec` data types implement a complete replacement for their
standard-library counterparts, including all of the inherent methods,
traits, and operator behaviors.

## User Stories

Uses of `bitvec` generally fall into three major genres:

- compact, fast, `usize => bit` collections
- truncated integer storage
- precise control of memory layout

### Bit Collections

At its most basic, `bitvec` provides sequence types analogous to the
standard library’s `bool` collections. The default behavior is optimized for
fast memory access and simple codegen, and can compact `[bool]` or
`Vec<bool>` with minimal overhead.

While `bitvec` does not attempt to take advantage of SIMD or other
vectorized instructions in its default work, its codegen should be a good
candidate for autovectorization in LLVM. If explicit vectorization is
important to you, please [file an issue][issue].

Example uses might be implementing a Sieve of Eratosthenes to store primes,
or other collections that test a yes/no property of a number; or replacing
`Vec<Option<T>>` with `(BitVec, Vec<MaybeUninit<T>>)`.

To get started, you can perform basic text replacement on your project.
Translate any existing types as follows:

- `[bool; N]` becomes `BitArray`
- `[bool]` becomes `BitSlice`
- `Vec<bool>` becomes `BitVec`
- `Box<[bool]>` becomes `BitBox`

and then follow any compiler errors that arise.

### Bit-Field Memory Access

A single bit of information has very few uses. `bitvec` also enables you to
store integers wider than a single bit, by selecting a bit-slice and using
the [`BitField`] trait on it. You can store and retrieve both unsigned and
signed integers, as long as the ordering type parameter is [`Lsb0`] or
[`Msb0`].

If your bit-field storage buffers are never serialized for exchange between
machines, then you can get away with using the default type parameters and
unadorned load/store methods. While the in-memory layout of stored integers
may be surprising if directly inspected, the overall behavior should be
optimal for your target.

Remember: `bitvec` only provides array place expressions, using integer
start and end points. You can use [`deku`] if you want C-style named
structural fields with bit-field memory storage.

However, if you are de/serializing buffers for transport, then you fall into
the third category.

### Transport Protocols

Many protocols use sub-element fields in order to save space in transport;
for example, TCP headers have single-bit and 4-bit fields in order to pack
all the needed information into a desirable amount of space. In C or Erlang,
these TCP protocol fields could be mapped by record fields in the language.
In Rust, they can be mapped by indexing into a bit-slice.

When using `bitvec` to manage protocol buffers, you will need to select the
exact type parameters that match your memory layout. For instance, TCP uses
`<u8, Msb0>`, while IPv6 on a little-endian machine uses `<u32, Lsb0>`.
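As a small sketch of that selection (the two header bytes here are invented
for the illustration; real code would validate buffer lengths first),
reading TCP’s Data Offset field and flag bits with `<u8, Msb0>` parameters
and range indexing:

```rust
use bitvec::prelude::*;

// Two hypothetical bytes from offset 12 of a TCP header:
// Data Offset = 5 words, flags = ACK | PSH.
let raw: [u8; 2] = [0x50, 0x18];
let bits = raw.view_bits::<Msb0>();

// Field positions come straight from the protocol's bit numbering.
let data_offset: u8 = bits[0 .. 4].load_be();
let ack = bits[11];
let psh = bits[12];

assert_eq!(data_offset, 5);
assert!(ack && psh);
```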
Once you have done this, you can replace all of your
`(memory & mask) >> shift` or `memory |= (value & mask) << shift`
expressions with `memory[start .. end]` range indexing.

As a direct example, the Itanium instruction set IA-64 uses very-long
instruction words containing three 41-bit fields in a `[u8; 16]`. One IA-64
disassembler replaced its manual shift/mask implementation with `bitvec`
range indexing, taking the bit numbers directly from the datasheet, and
observed that the code became both easier to maintain and faster as a
result!

## Feature Flags

`bitvec` has a few Cargo features that govern its API surface. The default
feature set is:

```toml
[dependencies.bitvec]
version = "1"
features = [
    "alloc",
    "atomic",
    # "serde",
    "std",
]
```

Use `default-features = false` to disable all of them, then `features = []`
to restore the ones you need.

- `alloc`: This links against the `alloc` distribution crate, and provides
  the `BitVec` and `BitBox` types. It can be used on `#![no_std]` targets
  that possess a dynamic allocator but not an operating system.
- `atomic`: This controls whether atomic instructions can be used for
  aliased memory. `bitvec` uses the [`radium`] crate to perform automatic
  detection of atomic capability, and targets that do not possess atomic
  instructions can still function with this feature *enabled*. Its only
  effect is that targets which do have atomic instructions may choose to
  disable it, enforcing single-threaded behavior that never incurs atomic
  synchronization.
- `serde`: This enables the de/serialization of `bitvec` buffers through the
  `serde` system. This can be useful if you need to transmit
  `usize => bool` collections.
- `std`: This provides some `std::io::{Read, Write}` implementations, as
  well as `std::error::Error` for the various error types. It is otherwise
  unnecessary.

## Deeper Reading

The [API Documentation][docsrs] explores `bitvec`’s usage and implementation
in great detail. In particular, you should read the documentation for the
[`order`], [`store`], and [`field`] modules, as well as the [`BitSlice`] and
[`BitArray`] types. In addition, the [user guide][guide] explores the
philosophical and academic concepts behind `bitvec`’s construction, its
goals, and the more intricate parts of its behavior.

While you should be able to get started with `bitvec` by only dropping it
into your code and using the same habits you have with the standard library,
both of these resources contain all of the information needed to understand
what it does, how it works, and how it can be useful to you.
[crate]: https://crates.io/crates/bitvec "Crate listing"
[crate_img]: https://img.shields.io/crates/v/bitvec.svg?logo=rust&style=for-the-badge "Crate badge"
[docs]: https://docs.rs/bitvec/latest/bitvec "Crate documentation"
[docs_img]: https://img.shields.io/docsrs/bitvec/latest.svg?style=for-the-badge "Documentation badge"
[downloads_img]: https://img.shields.io/crates/dv/bitvec.svg?logo=rust&style=for-the-badge "Crate downloads"
[license_file]: https://github.com/bitvecto-rs/bitvec/blob/main/LICENSE.txt "Project license"
[license_img]: https://img.shields.io/crates/l/bitvec.svg?style=for-the-badge "License badge"
[loc]: https://github.com/bitvecto-rs/bitvec "Project repository"
[loc_img]: https://img.shields.io/tokei/lines/github/bitvecto-rs/bitvec?category=code&style=for-the-badge "Project size"
[`BitArray`]: https://docs.rs/bitvec/latest/bitvec/array/struct.BitArray.html
[`BitField`]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html
[`BitSlice`]: https://docs.rs/bitvec/latest/bitvec/slice/struct.BitSlice.html
[`Lsb0`]: https://docs.rs/bitvec/latest/bitvec/order/struct.Lsb0.html
[`Msb0`]: https://docs.rs/bitvec/latest/bitvec/order/struct.Msb0.html
[`field`]: https://docs.rs/bitvec/latest/bitvec/field/index.html
[`order`]: https://docs.rs/bitvec/latest/bitvec/order/index.html
[`store`]: https://docs.rs/bitvec/latest/bitvec/store/index.html
[layout]: https://bitvecto-rs.github.io/bitvec/memory-representation
[prelude]: https://docs.rs/bitvec/latest/bitvec/prelude
[`deku`]: https://crates.io/crates/deku
[docsrs]: https://docs.rs/bitvec/latest/bitvec
[erl_bit]: https://www.erlang.org/doc/programming_examples/bit_syntax.html
[guide]: https://bitvecto-rs.github.io/bitvec/
[issue]: https://github.com/bitvecto-rs/bitvec/issues/new
[moz]: https://hacks.mozilla.org/2021/04/eliminating-data-races-in-firefox-a-technical-report/ "Mozilla Hacks article describing various concurrency bugs in FireFox"
[`radium`]: https://crates.io/crates/radium
[`std::bitset<N>`]: https://en.cppreference.com/w/cpp/utility/bitset
[`std::vector<bool>`]: https://en.cppreference.com/w/cpp/container/vector_bool

bitvec-1.0.1/doc/README.md

# `bitvec` API Documentation

Rust release `1.54` stabilized the use of `#[doc = include_str!()]`, which
allows documentation to be sourced from external files. This directory
contains the Rustdoc API documentation for items whose text is larger than a
comment block warrants.

The files here use Rustdoc’s ability to resolve symbol paths as link
references, and so will not render correctly in other Markdown viewers. The
target renderer is Rustdoc, not CommonMark.

Module and type documentation should generally be moved to this directory;
function, struct-field, and enum-variant documentation should generally stay
in source.

bitvec-1.0.1/doc/access/BitAccess.md

# Bit-Level Access Instructions

This trait extends [`Radium`] in order to manipulate specific bits in an
element according to the crate’s logic. It drives all memory access
instructions and is responsible for translating the bit-selection logic of
the [`index`] module into real effects.

This is blanket-implemented on all types that permit shared-mutable memory
access via the [`radium`] crate. Its use is constrained in the [`store`]
module. It is required to be a publicly accessible symbol, as it is exported
in other traits, but it is a crate-internal item and is not part of the
public API.
Its blanket implementation for all `Radium` implementors prevents any other
implementations from being written.

## Implementation and Safety Notes

This trait is automatically implemented for all types that implement
`Radium`, and relies exclusively on `Radium`’s API and implementations for
its work. In particular, `Radium` has no functions which operate on
**pointers**: it exclusively operates on memory through **references**.
Since references must always refer to initialized memory, `BitAccess` and,
by extension, all APIs in `bitvec` that touch memory, cannot be used to
operate on uninitialized memory in any way.

While you may *create* a `bitvec` pointer object that targets uninitialized
memory, you may not *dereference* it until the targeted memory has been
wholly initialized with integer values.

This restriction cannot be loosened without stable access to pointer-based
atomic intrinsics in the Rust standard library and corresponding updates to
the `Radium` trait.

Do not attempt to access uninitialized memory through `bitvec`. Doing so
will cause `bitvec` to produce references to uninitialized memory, which is
undefined behavior.

[`Radium`]: radium::Radium
[`index`]: crate::index
[`radium`]: radium
[`store`]: crate::store

bitvec-1.0.1/doc/access/BitSafe.md

# Read-Only Semivolatile Handle

This trait describes views of memory that are not permitted to modify the
value they reference, but must tolerate external modification to that value.
Implementors must tolerate shared-mutability behaviors, but are not allowed
to expose shared-mutation APIs. They are permitted to modify the referent
only under `&mut` exclusive references.

This behavior enables an important aspect of the `bitvec` memory model when
working with memory elements that multiple [`&mut BitSlice`][0] references
touch: each `BitSlice` needs to be able to give the caller a view of the
memory element, but they also need to prevent modification of bits outside
of their span.

This trait enables callers to view raw underlying memory without improperly
modifying memory that *other* `&mut BitSlice`s expect to be stable.

[0]: crate::slice::BitSlice

bitvec-1.0.1/doc/access/impl_BitSafe.md

# Read-Only Shared-Mutable Handle

This type marks a handle to a shared-mutable type that may be modified
through *other* handles, but cannot be modified through *this* one. It is
used when a [`BitSlice`] region has partial ownership of an element and
wishes to expose the entire underlying raw element to the user without
granting them write permissions.

Under the `feature = "atomic"` build setting, this uses `radium`’s
best-effort atomic alias; when this feature is disabled, it reverts to
`Cell`.

[`BitSlice`]: crate::slice::BitSlice

bitvec-1.0.1/doc/access.md

# Memory Bus Access Management

`bitvec` allows a program to produce handles over memory that do not
*logically* alias their bits, but *may* alias their hardware locations. This
module provides a unified interface for memory accesses that can be
specialized to handle such aliased and unaliased events.

The [`BitAccess`] trait provides capabilities to access individual or
clustered bits in memory elements through shared, maybe-aliased, references.
Its implementations are responsible for coördinating synchronization and
contention as needed.
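As a user-level sketch of the behavior these traits enable (the guard types
themselves are internal, so this only shows the observable effect):
splitting one `&mut BitSlice` in the middle of an element leaves both halves
able to write to that shared element safely, because their writes route
through the alias-aware access path.

```rust
use bitvec::prelude::*;

let mut data = 0u8;
let bits = data.view_bits_mut::<Lsb0>();
// Both halves may touch element 0, so both are retyped with the
// alias-aware storage parameter by `split_at_mut`.
let (low, high) = bits.split_at_mut(4);
low.set(0, true);  // bit 0 of the element
high.set(3, true); // bit 7 of the element
assert_eq!(data, 0b1000_0001);
```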
The [`BitSafe`] trait guards [`Radium`] types in order to forbid writing
through shared-only references, and requires access to an `&mut` exclusive
reference for modification. This permits other components in the crate that
do *not* have `BitSafe` reference guards to safely mutate a referent element
that a `BitSafe`d reference can observe, while preventing that reference
from emitting mutations of its own.

[`BitAccess`]: self::BitAccess
[`BitSafe`]: self::BitSafe
[`Radium`]: radium::Radium

bitvec-1.0.1/doc/array/BitArray.md

# Bit-Precision Array Immediate

This type is a wrapper over the [array fundamental][0] `[T; N]` that views
its contents as a [`BitSlice`] region. As an array, it can be held directly
by value and does not require an indirection such as the `&BitSlice`
reference.

## Original

[`[T; N]`](https://doc.rust-lang.org/std/primitive.array.html)

## Usage

`BitArray` is a Rust analogue of the C++ [`std::bitset`] container. However,
restrictions in the Rust type system do not allow specifying exact bit
lengths in the array type. Instead, it must specify a storage array that can
contain all the bits you want.

Because `BitArray` is a plain-old-data object, its fields are public and it
has no restrictions on its interior value. You can freely access the
interior storage and move data in or out of the `BitArray` type with no
cost.

As a convenience, the [`BitArr!`] type-constructor macro can produce correct
type definitions from an exact bit count and your memory-layout type
parameters. Values of that type can then be built from the [`bitarr!`]
*value*-constructor macro:

```rust
use bitvec::prelude::*;

type Example = BitArr!(for 43, in u32, Msb0);

let example: Example = bitarr!(u32, Msb0; 1; 33);

struct HasBitfield {
    inner: Example,
}

let ex2 = HasBitfield {
    inner: BitArray::new([1, 2]),
};
```

Note that the actual type of the `Example` alias is
`BitArray<[u32; 2], Msb0>`, as `[u32; 2]` is the smallest `u32` array that
can hold 43 bits, so the `bitarr!` macro can accept any number of bits in
`33 .. 65` and will produce a value of the correct type.

## Type Parameters

`BitArray` differs from the other data structures in the crate in that it
does not take a `T: BitStore` parameter, but rather takes
`A: BitViewSized`. That trait is implemented by all `T: BitStore` scalars
and all `[T; N]` arrays of them, and provides the logic to translate the
aggregate storage into the memory sequence that the crate expects.

As with all `BitSlice` regions, the `O: BitOrder` parameter specifies the
ordering of bits within a single `A::Store` element.

## Future API Changes

Exact bit lengths cannot be encoded into the `BitArray` type until the
const-generics system in the compiler can allow type-level computation on
type integers. When this stabilizes, `bitvec` will issue a major upgrade
that replaces the current definition with one parameterized over a storage
type and an exact bit count, and match the C++ `std::bitset` definition.

## Large Bit-Arrays

As with ordinary arrays, large arrays can be expensive to move by value, and
should generally be preferred to have static locations such as actual
`static` bindings, a long lifetime in a low stack frame, or a heap
allocation. While you certainly can `Box<[BitArray]>` directly, you may
instead prefer the [`BitBox`] or [`BitVec`] heap-allocated regions. These
offer the same storage behavior and are better optimized than `Box` for
working with the contained `BitSlice` region.
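As a sketch of that recommendation (`BigArray` is an alias invented for this
illustration; `BitBox::from_bitslice` copies the bits into a single heap
allocation):

```rust
use bitvec::prelude::*;

type BigArray = BitArr!(for 4096, in usize, Lsb0);

let big: BigArray = BitArray::ZERO;
// Copy the bits into the heap once, then work through the `BitBox`.
let boxed: BitBox<usize, Lsb0> = BitBox::from_bitslice(big.as_bitslice());
assert_eq!(boxed.len(), 4096);
```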
## Examples

```rust
use bitvec::prelude::*;

const WELL_KNOWN: BitArr!(for 16, in u8, Lsb0) = BitArray::<[u8; 2], Lsb0> {
    data: *b"bv",
    ..BitArray::ZERO
};

struct HasBitfields {
    inner: BitArr!(for 50, in u8, Lsb0),
}

impl HasBitfields {
    fn new() -> Self {
        Self {
            inner: bitarr!(u8, Lsb0; 0; 50),
        }
    }

    fn some_field(&self) -> &BitSlice<u8, Lsb0> {
        &self.inner[2 .. 52]
    }
}
```

[0]: https://doc.rust-lang.org/std/primitive.array.html
[`BitArr!`]: macro@crate::BitArr
[`BitBox`]: crate::boxed::BitBox
[`BitSlice`]: crate::slice::BitSlice
[`BitVec`]: crate::vec::BitVec
[`bitarr!`]: macro@crate::bitarr
[`std::bitset`]: https://en.cppreference.com/w/cpp/utility/bitset

bitvec-1.0.1/doc/array/IntoIter.md

# Bit-Array Iteration

This structure wraps a bit-array and provides by-value iteration of the bits
it contains.

## Original

[`array::IntoIter`](core::array::IntoIter)

bitvec-1.0.1/doc/array/TryFromBitSliceError.md

# Bit-Slice to Bit-Array Conversion Error

This error is produced when an `&BitSlice` view is unable to be recast as a
`&BitArray` view with the same parameters.

Unlike ordinary scalars and arrays, where arrays are never aligned more
stringently than their components, `BitSlice` is aligned to an individual
bit while `BitArray` is aligned to its `A` storage type. This error is
produced whenever a `&BitSlice` view is not exactly as long as the
destination `&BitArray` view, or does not also begin at the zeroth bit in an
`A::Store` element.

## Original

[`array::TryFromSliceError`](core::array::TryFromSliceError)

bitvec-1.0.1/doc/array/api.md

# Port of Array Inherent Methods

This module ports the inherent methods available on the [array] fundamental
type. As of 1.56, only `.map()` is stable. The `.as_slice()` and
`.as_mut_slice()` methods are ported, as the *behavior* has always been
stable, and only the name is new.

The remaining methods (as of 1.56: `.each_mut()`, `.each_ref()`, and
`.zip()`) are not ported. While `BitArray` is capable of implementing their
behavior with the existing crate APIs, the `const`-generic system is not yet
able to allow construction of an array whose length is dependent on an
associated `const` in a type parameter. These methods will not be available
until the `const`-generic system improves enough for `bitvec 2` to use the
proper `BitArray` API.

[array]: https://doc.rust-lang.org/std/primitive.array.html

bitvec-1.0.1/doc/array/iter.md

# Bit-Array Iteration

This module defines the core iteration logic for `BitArray`. It includes the
`IntoIterator` implementations on bit-arrays and their references, as well
as the `IntoIter` struct that walks bit-arrays by value.

bitvec-1.0.1/doc/array.md

# Statically-Allocated, Fixed-Size, Bit Buffer

This module defines a port of the [array fundamental][0] and its APIs. The
primary export is the [`BitArray`] structure. This is a thin wrapper over
`[T; N]` that provides a [`BitSlice`] view of its contents and is *roughly*
analogous to the C++ type [`std::bitset`]. See the `BitArray` documentation
for more details on its usage.

## Submodules

- `api` contains ports of the standard library’s array type and
  `core::array` module.
- `iter` contains ports of array iteration.
- `ops` defines operator-sigil traits.
- `traits` defines all the other traits.

[0]: https://doc.rust-lang.org/std/primitive.array.html
[`BitArray`]: self::BitArray
[`BitSlice`]: crate::slice::BitSlice
[`std::bitset`]: https://en.cppreference.com/w/cpp/utility/bitset

bitvec-1.0.1/doc/boxed/BitBox.md

# Fixed-Size, Heap-Allocated, Bit Slice

`BitBox` is a heap-allocated [`BitSlice`] region. It is a distinct type
because the implementation of bit-slice pointers means that `Box<BitSlice>`
cannot exist. It can be created by cloning a bit-slice into the heap, or by
freezing the allocation of a [`BitVec`].

## Original

[`Box<[T]>`](alloc::boxed::Box)

## API Differences

As with `BitSlice`, this takes a pair of [`BitOrder`] and [`BitStore`] type
parameters to govern the buffer’s memory representation. Because `BitSlice`
is unsized, `BitBox` has almost none of the `Box` API, and is difficult to
use directly.

## Behavior

`BitBox`, like `&BitSlice`, is an opaque pointer to a bit-addressed slice
region. Unlike `&BitSlice`, it uses the allocator to guarantee that it is
the sole accessor to the referent buffer, and is able to use that uniqueness
guarantee to specialize some `BitSlice` behavior to be faster or more
efficient.

## Safety

`BitBox` is, essentially, a `NonNull<BitSlice>` pointer. The internal value
is opaque and cannot be inspected or modified by user code. If you attempt
to do so, your program becomes inconsistent. You will likely break the
allocator’s internal state and cause a crash. No guarantees of crash *or*
recovery are provided. Do not inspect or modify the `BitBox` handle value.

## Construction

The simplest way to construct a `BitBox` is by using the [`bitbox!`] macro.
You can also explicitly clone a `BitSlice` with [`BitBox::from_bitslice`],
or freeze a `BitVec` with [`BitVec::into_boxed_bitslice`].

## Examples

```rust
use bitvec::prelude::*;

let a = BitBox::from_bitslice(bits![1, 0, 1, 1, 0]);
let b = bitbox![0, 1, 0, 0, 1];

let b_raw: *mut BitSlice = BitBox::into_raw(b);
let b_reformed = unsafe { BitBox::from_raw(b_raw) };
```

[`BitBox::from_bitslice`]: self::BitBox::from_bitslice
[`BitOrder`]: crate::order::BitOrder
[`BitSlice`]: crate::slice::BitSlice
[`BitStore`]: crate::store::BitStore
[`BitVec`]: crate::vec::BitVec
[`BitVec::into_boxed_bitslice`]: crate::vec::BitVec::into_boxed_bitslice
[`bitbox!`]: macro@crate::bitbox

bitvec-1.0.1/doc/boxed/iter.md

# Boxed Bit-Slice Iteration

This module contains the by-value iterator used by both `BitBox` and
`BitVec`. In the standard library, this iterator is defined under
`alloc::vec`, not `alloc::boxed`, as `Box` already has an iteration
implementation that forwards to its boxed value. It is moved here for
simplicity: both `BitBox` and `BitVec` iterate over a dynamic bit-slice by
value, and must deällocate the region when dropped. As `BitBox` has a
smaller value than `BitVec`, it is used as the owning handle for the
bit-slice being iterated.

bitvec-1.0.1/doc/boxed.md

# Heap-Allocated, Fixed-Size, Bit Buffer

This module defines an analogue to `Box<[bool]>`, as `Box<BitSlice>` cannot
be constructed or used in any way. Like `Box<[T]>`, this is a heap
allocation that can modify its contents, but cannot resize the collection.

The `BitBox` value is an owning [`*BitSlice`] pointer, and can be used to
access its contents without any decoding.
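A minimal sketch of the two construction paths (all bindings here are
invented for the example):

```rust
use bitvec::prelude::*;

// Build a boxed bit-slice directly from a literal...
let direct = bitbox![0, 1, 1, 0];

// ...or grow a `BitVec` and freeze it once its length is final.
let mut bv = bitvec![0, 1];
bv.push(true);
bv.push(false);
let frozen: BitBox = bv.into_boxed_bitslice();

assert_eq!(frozen.as_bitslice(), direct.as_bitslice());
```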
You should generally prefer [`BitVec`] or [`BitArray`]; however, very large
`BitArray`s are likely better served being copied into a `BitBox` rather
than being boxed themselves when moved into the heap.

[`BitArray`]: crate::array::BitArray
[`BitVec`]: crate::vec::BitVec
[`*BitSlice`]: crate::slice::BitSlice

bitvec-1.0.1/doc/domain/BitDomain.md

# Bit-Slice Partitioning

This enum partitions a bit-slice into its head- and tail-edge bit-slices,
and its interior body bit-slice, according to the definitions laid out in
the module documentation.

It fragments a [`BitSlice`] into smaller `BitSlice`s, and allows the
interior bit-slice to become `T::Unalias`ed. This is useful when you need to
retain a bit-slice view of memory, but wish to remove synchronization costs
imposed by a prior call to [`.split_at_mut()`] for as much of the bit-slice
as possible.

## Why Not `Option`?

The `Enclave` variant always contains as its single field the exact
bit-slice that created the `Enclave`. As such, this type is easily
replaceäble with an `Option` of the `Region` variant, where `None` is
understood to be the original bit-slice.

This exists as a dedicated enum, even with a technically useless variant, in
order to mirror the shape of the element-domain enum. This type should be
understood as a shortcut to the end result of splitting by element-domain,
then mapping each `PartialElement` and slice back into `BitSlice`s, rather
than testing whether a bit-slice can be split on alias boundaries.

You can get the alternate behavior, of testing whether a bit-slice can be
split into a `Region` or is unsplittable, by calling
`.bit_domain().region()` to produce exactly such an `Option`.

[`BitSlice`]: crate::slice::BitSlice
[`.split_at_mut()`]: crate::slice::BitSlice::split_at_mut

bitvec-1.0.1/doc/domain/Domain.md

# Bit-Slice Element Partitioning

This structure provides the bridge between bit-precision memory modeling and
element-precision memory manipulation. It allows a bit-slice to provide a
safe and correct view of the underlying memory elements, without exposing
the values, or permitting mutation, of bits outside a bit-slice’s control
but within the elements the bit-slice uses.

Nearly all memory access that is not related to single-bit access goes
through this structure, and it is highly likely to be in your hot path. Its
code is a perpetual topic of optimization, and improvements are always
welcome.

This is essentially a fully-decoded `BitSpan` handle, in that it addresses
memory elements directly and contains the bit-masks needed to selectively
interact with them. It is therefore by necessity a large structure, and is
usually only alive for a short time. It has a minimal API, as most of its
logical operations are attached to `BitSlice`, and merely route through it.

If your application cannot afford the cost of repeated `Domain`
construction, please [file an issue][0].

## Memory Model and Variants

A given `BitSlice` has essentially two possibilities for where it resides in
real memory:

- it can reside entirely in the interior of exactly one memory element,
  touching neither edge bit, or
- it can touch at least one edge bit of zero or more elements.

These states correspond to the `Enclave` and `Region` variants,
respectively.
When a `BitSlice` has only partial control of a given memory element, that element can only be accessed through the bit-slice’s provenance by a [`PartialElement`] handle. This handle is an appropriately-guarded reference to the underlying element, as well as mask information needed to interact with the raw bits and to manipulate the numerical contents. Each `PartialElement` guard carries permissions for *its own bits* within the guarded element, independently of any other handle that may access the element, and all handles are appropriately synchronized with each other to prevent race conditions. The `Enclave` variant is a single `PartialElement`. The `Region` variant is more complex. It has: 1. an optional `PartialElement` for the case where the bit-slice only partially occupies the lowest-addressed memory element it governs, starting after bit-index 0 and extending up to the maximal bit-index, 1. a slice of zero or more fully-occupied memory elements, 1. an optional `PartialElement` for the case where it only partially occupies the highest-addressed memory element it governs, starting at bit-index 0 and ending before the maximal. ## Usage Once created, match upon a `Domain` to access its fields. Each `PartialElement` has a [`.load_value()`][`PartialElement::load_value`] method that produces its stored value (with all ungoverned bits cleared to 0), and a `.store_value()` that writes into its governed bits. If present, the fully-occupied slice can be used as normal. [0]: https://github.com/bitvecto-rs/bitvec/issues/new [`PartialElement`]: crate::domain::PartialElement [`PartialElement::load_value`]: crate::domain::PartialElement::load_value

bitvec-1.0.1/doc/domain/PartialElement.md

# Partially-Owned Memory Element This type is a guarded reference to memory that permits interacting with it as an integer, but only allows views to the section of the integer that the producing handle has permission to observe. Unlike the `BitSafe` type family in the [`access`] module, it is not a transparent wrapper that can be used for reference conversion; it is a “heavy reference” that carries the mask and reference together, applying the mask on every transfer. ## Type Parameters - `T`: The type, including register width and alias information, of the bit-slice handle that created it. - `O`: This propagates the bit-ordering type used by the [`BitSlice`] handle that created it. ## Lifetime This carries the lifetime of the bit-slice handle that created it. ## Usage This structure is only created as part of the [`Domain`] region descriptions, and refers to partially-occupied edge elements. The underlying referent memory can be read with `.load_value()` or written with `.store_value()`, and the appropriate masking will be applied in order to restrict access to only the permitted bits. [`access`]: crate::access [`BitSlice`]: crate::slice::BitSlice [`Domain`]: Domain

bitvec-1.0.1/doc/domain.md

# Memory Region Description This module bridges the abstract [`BitSlice`] region to real memory by segmenting any bit-slice along its maybe-aliased and known-unaliased boundaries. This segmentation applies to both bit-slice and ordinary-element views of memory, and can be used to selectively remove alias restrictions or to enable access to the underlying memory with ordinary types. The four enums in this module all intentionally have the same variants by name and shape, in order to maintain textual consistency.
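For example, a sketch of the bit-slice flavor of that segmentation, assuming the `Region { head, body, tail }` field names shared across the module’s enums:

```rust
use bitvec::prelude::*;
use bitvec::domain::BitDomain;

let raw = [0u8; 3];
let bits = &raw.view_bits::<Msb0>()[4 .. 20];

if let BitDomain::Region { head, body, tail } = bits.bit_domain() {
	assert_eq!(head.len(), 4); // partial back half of element 0
	assert_eq!(body.len(), 8); // all of element 1, eligible for un-aliasing
	assert_eq!(tail.len(), 4); // partial front half of element 2
}
```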
## Memory Layout Model Any bit-slice resident in memory has one of two major kinds, which the enums in this module refer to as `Enclave` and `Region`. ### Enclave An `Enclave` layout occurs when a bit-slice is contained entirely within a single memory element, and does not include either the initial or final semantic index in its span. ```text [ 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 ] [ ^^^^^^^^^^^^^^^^^^^^^ ] ``` In an 8-bit element, a bit-slice is considered to be an `Enclave` if it is contained entirely in the marked interior bits, and touches *neither* bit 7 nor bit 0. Wider elements may touch interior byte boundaries, and only restrict bits 0 and `width - 1`. ### Region A `Region` layout occurs when a bit-slice consists of: - zero or one half-spanned head element (excludes bit 0, includes `width - 1`) - zero or more fully-spanned elements body (includes both 0 and `width - 1`) - zero or one half-spanned tail element (includes bit 0, excludes `width - 1`) Each of these three sections is optionally present independently of the other two. That is, in the following three bytes, all of the following bit-slices have the `Region` layout: ```text [ 7 6 5 4 3 2 1 0 ] [ 7 6 5 4 3 2 1 0 ] [ 7 6 5 4 3 2 1 0 ] [ ] [ h h h h ] [ b b b b b b b b ] [ t t t t ] [ h h h h t t t t ] [ h h h h b b b b b b b b ] [ b b b b b b b b t t t t ] [ h h h h b b b b b b b b t t t t ] ``` 1. The empty bit-slice is a region with all of its segments blank. 1. A bit-slice with one element that touches `width - 1` but not 0 has a head, but no body or tail. 1. A bit-slice that touches both `0` and `width - 1` of any number of elements has a body, but no head or tail. 1. A bit-slice with one element that touches 0 but not `width - 1` has a tail, but no head or body. 1. A bit-slice with two elements, that touches neither 0 of the first nor `width - 1` of the second (but by definition `width - 1` of the first and 0 of the second; bit-slices are contiguous) has a head and tail, but no body. The final three rows show how the individual segments can be composed to describe all possible bit-slices. ## Aliasing Awareness The contiguity property of `BitSlice` combines with the `&`/`&mut` exclusion rules of the Rust language to provide additional information about the state of the program that allows a given bit-slice to exist. Specifically, any well-formed Rust program knows that *if* a bit-slice is able to produce a `Region.body` segment, *then* that body is not aliased by `bitvec`, and can safely transition to the `T::Unalias` state. Alias-permitting types like `Cell` and the atomics will never change their types (because `bitvec` cannot know that there are no views to a region other than what it has been given), but a tainted bit-slice can revert its interior body back to `u8` and no longer require the alias tainting. The head and tail segments do not retain their history, and cannot tell whether they have been created by splitting or by shrinking, so they do not change their types at all. ## Raw Memory Access The [`BitDomain`] enum only splits a bit-slice along these boundaries, and allows a bit-slice view to safely shed aliasing protection added to it by [`.split_at_mut()`]. The [`Domain`] enum completely sheds its bit-precision views, and reverts to ordinary element accesses. The body segment is an ordinary Rust slice with no additional information or restriction; it can be freely used without regard for any of `bitvec`’s constraints.
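A short sketch of that element-level escape hatch: the fully-spanned interior comes back as a plain `&[u8]`, usable with any ordinary slice API.

```rust
use bitvec::prelude::*;
use bitvec::domain::Domain;

let raw = [0x0Fu8, 0xA5, 0xF0];
// Elements 0 and 2 are partially spanned; element 1 is fully spanned.
if let Domain::Region { body, .. } = raw.view_bits::<Lsb0>()[4 .. 20].domain() {
	// `body` is an ordinary slice, free of bit-precision bookkeeping.
	assert_eq!(body, &[0xA5]);
	assert_eq!(body.iter().map(|b| b.count_ones()).sum::<u32>(), 4);
}
```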
In order to preserve the rules that any given bit-slice can never be used to affect bits outside of its own view of memory, the underlying memory of the head and tail segments is only made accessible through a [`PartialElement`] reference guard. This guard is an opaque proxy to the memory location, and holds both a reference and the bit-mask required to prevent reading from or writing to the bits outside the scope of the originating bit-slice. ## Generics This module, and the contents of [`ptr`], make extensive use of a trait-level mutability and reference tracking system in order to reduce code duplication and provide a more powerful development environment than would be achieved with macros. As such, the trait bounds on types in this module are more intense than the standard `<T, O>` fare in the crate’s main data structures. However, they are only ever instantiated with shared or exclusive references, and all of the bounds are a much more verbose way of saying “a reference, that is maybe-mut and maybe-slice, of `T`”. User code does not need to be aware of any of this: the `BitSlice` APIs that call into this module always result in structures where the complex bounds are reduced to ordinary slice references. [`BitDomain`]: BitDomain [`BitSlice`]: crate::slice::BitSlice [`Domain`]: Domain [`PartialElement`]: PartialElement [`ptr`]: crate::ptr [`.split_at_mut()`]: crate::slice::BitSlice::split_at_mut

bitvec-1.0.1/doc/field/BitField.md

# C-Style Bit-Field Access This trait describes data transfer between a [`BitSlice`] region and an ordinary integer. It is not intended for use by any other types than the data structures in this crate. The methods in this trait always operate on the `bitslice.len()` least significant bits of an integer, and ignore any remaining high bits. When loading, any excess high bits not copied out of a bit-slice are cleared to zero. ## Usage The trait methods all panic if called on a bit-slice that is wider than the integer type being transferred. As such, the first step is generally to subslice a larger data structure into exactly the region used for storage, with `bits[start .. end]`. Then, call the desired method on the narrowed bit-slice. ## Target-Specific Behavior If you do not care about the details of the memory layout of stored values, you can use the [`.load()`] and [`.store()`] unadorned methods. These each forward to their `_le` variant on little-endian targets, and their `_be` variant on big-endian. These will provide a reasonable default behavior, but do not guarantee a stable memory layout, and their buffers are not suitable for de/serialization. If you require a stable memory layout, you will need to choose a `BitSlice` with a fixed `O: BitOrder` type parameter (not `LocalBits`), and use a fixed method suffix (`_le` or `_be`). You should *probably* also use `u8` as your `T: BitStore` parameter, in order to avoid any byte-ordering issues. `bitvec` never interferes with processor concepts of wide-integer layout, and always relies on the target machine’s behavior for this work. ## Element- and Bit- Ordering Combinations Remember: the `_le` and `_be` method suffixes are completely independent of the `Lsb0` and `Msb0` types! `_le` and `_be` refer to the order in which successive memory elements are considered to gain numerical significance, while `BitOrder` refers only to the order of successive bits in *one* memory element. The `BitField` and `BitOrder` traits are ***not*** related.
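A minimal sketch of the subslice-then-transfer pattern described under “Usage” above; because this field lies within a single element, the suffixed and unadorned methods all agree:

```rust
use bitvec::prelude::*;

let mut raw = 0u16;
let bits = raw.view_bits_mut::<Lsb0>();

// Narrow to exactly the field’s region, then transfer through it.
bits[3 .. 10].store::<u16>(0b101_1010);
assert_eq!(bits[3 .. 10].load::<u16>(), 0b101_1010);
// Within one element, `_le` and `_be` read back the same value.
assert_eq!(bits[3 .. 10].load_le::<u16>(), bits[3 .. 10].load_be::<u16>());
```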
When a load or store operation is contained in only one memory element, then the `_le` and `_be` methods have the same behavior: they exchange an integer value with the segment of the element that its governing `BitSlice` considers live. Only when a `BitSlice` covers multiple elements does the distinction come into play. The `_le` methods consider numerical significance to start low and increase with increasing memory address, while the `_be` methods consider numerical significance to start high and *decrease* with increasing memory address. This distinction affects the order in which memory elements are used to load or store segments of the exchanged integer value. Each trait method has detailed visual diagrams in its documentation. Additionally, each *implementation*’s documentation has diagrams that show what the governed bit-sections of elements are! Be sure to check each, or to run the demonstration with `cargo run --example bitfield`. ## Bitfield Value Types When interacting with a bit-slice as a C-style bitfield, it can *only* store the signed or unsigned integer types. No other type is permitted, as the implementation relies on the 2’s-complement significance behavior of processor integers. Record types and floating-point numbers do not have this property, and thus have no sensible default protocol for truncation and un/marshalling that `bitvec` can use. If you have such a protocol, you may implement it yourself by providing a de/serialization transform between your type and the integers. For instance, a numerically-correct protocol to store floating-point numbers in bitfields might look like this: ```rust use bitvec::mem::bits_of; use funty::Floating; fn to_storage<F>(num: F, width: usize) -> F::Raw where F: Floating { num.to_bits() >> (bits_of::<F::Raw>() - width) } fn from_storage<F>(val: F::Raw, width: usize) -> F where F: Floating { F::from_bits(val << (bits_of::<F::Raw>() - width)) } ``` This implements truncation in the least-significant bits, where floating-point numbers store disposable bits in the mantissa, rather than in the most-significant bits which contain the sign, exponent, and most significant portion of the mantissa. [`BitSlice`]: crate::slice::BitSlice [`.load()`]: Self::load [`.store()`]: Self::store

bitvec-1.0.1/doc/field/BitField_Lsb0.md

# `Lsb0` Bit-Field Behavior `BitField` has no requirements about the in-memory representation or layout of stored integers within a bit-slice, only that round-tripping an integer through a store and a load of the same element suffix on the same bit-slice is idempotent (with respect to sign truncation). `Lsb0` provides a contiguous translation from bit-index to real memory: for any given bit index `n` and its position `P(n)`, `P(n + 1)` is `P(n) + 1`. This allows it to provide batched behavior: since the section of contiguous indices used within an element translates to a section of contiguous bits in real memory, the transaction is always a single shift/mask operation. Each implemented method contains documentation and examples showing exactly how the abstract integer space is mapped to real memory.

bitvec-1.0.1/doc/field/BitField_Lsb0_load_be.md

# `Lsb0` Big-Endian Integer Loading This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a partially-occupied memory element contain the contents of an integer to be loaded, using big-endian element ordering.
See the [trait method definition][orig] for an overview of what element ordering means. ## Signed-Integer Loading As described in the trait definition, when loading as a signed integer, the most significant bit *loaded* from memory is sign-extended to the full width of the returned type. In this method, that means the most-significant loaded bit of the first element. ## Examples In each memory element, the `Lsb0` ordering counts indices leftward from the right edge: ```rust use bitvec::prelude::*; let raw = 0b00_10110_0u8; // 76 54321 0 // ^ sign bit assert_eq!( raw.view_bits::<Lsb0>() [1 .. 6] .load_be::<u8>(), 0b000_10110, ); assert_eq!( raw.view_bits::<Lsb0>() [1 .. 6] .load_be::<i8>(), 0b111_10110u8 as i8, ); ``` In bit-slices that span multiple elements, the big-endian element ordering means that the slice index increases while numeric significance decreases: ```rust use bitvec::prelude::*; let raw = [ 0b0010_1111u8, // ^ sign bit // 7 0 0x0_1u8, // 15 8 0xF_8u8, // 23 16 ]; assert_eq!( raw.view_bits::<Lsb0>() [4 .. 20] .load_be::<u16>(), 0x2018u16, ); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and load functions. [orig]: crate::field::BitField::load_be [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Lsb0_load_le.md

# `Lsb0` Little-Endian Integer Loading This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a partially-occupied memory element contain the contents of an integer to be loaded, using little-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Signed-Integer Loading As described in the trait definition, when loading as a signed integer, the most significant bit *loaded* from memory is sign-extended to the full width of the returned type. In this method, that means the most-significant loaded bit of the final element. ## Examples In each memory element, the `Lsb0` ordering counts indices leftward from the right edge: ```rust use bitvec::prelude::*; let raw = 0b00_10110_0u8; // 76 54321 0 // ^ sign bit assert_eq!( raw.view_bits::<Lsb0>() [1 .. 6] .load_le::<u8>(), 0b000_10110, ); assert_eq!( raw.view_bits::<Lsb0>() [1 .. 6] .load_le::<i8>(), 0b111_10110u8 as i8, ); ``` In bit-slices that span multiple elements, the little-endian element ordering means that the slice index increases with numerical significance: ```rust use bitvec::prelude::*; let raw = [ 0x8_Fu8, // 7 0 0x0_1u8, // 15 8 0b1111_0010u8, // ^ sign bit // 23 16 ]; assert_eq!( raw.view_bits::<Lsb0>() [4 .. 20] .load_le::<u16>(), 0x2018u16, ); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type.
As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and load functions. [orig]: crate::field::BitField::load_le [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Lsb0_store_be.md

# `Lsb0` Big-Endian Integer Storing This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a partially-occupied memory element are used for storage, using big-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Narrowing Behavior Integers are truncated from the high end. When storing into a bit-slice of length `n`, the `n` least numerically significant bits are stored, and any remaining high bits are ignored. Be aware of this behavior if you are storing signed integers! The signed integer `-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back from a 4-bit slice, become the value `2i8`. ## Examples ```rust use bitvec::prelude::*; let mut raw = 0u8; raw.view_bits_mut::<Lsb0>() [1 .. 6] .store_be(22u8); assert_eq!(raw, 0b00_10110_0); // 76 54321 0 raw.view_bits_mut::<Lsb0>() [1 .. 6] .store_be(-10i8); assert_eq!(raw, 0b00_10110_0); ``` In bit-slices that span multiple elements, the big-endian element ordering means that the slice index increases while numerical significance decreases: ```rust use bitvec::prelude::*; let mut raw = [!0u8; 3]; raw.view_bits_mut::<Lsb0>() [4 .. 20] .store_be(0x2018u16); assert_eq!(raw, [ 0x2_F, // 7 0 0x0_1, // 15 8 0xF_8, // 23 16 ]); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and store functions. [orig]: crate::field::BitField::store_be [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Lsb0_store_le.md

# `Lsb0` Little-Endian Integer Storing This implementation uses the `Lsb0` bit-ordering to determine *which* bits in a partially-occupied memory element are used for storage, using little-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Narrowing Behavior Integers are truncated from the high end. When storing into a bit-slice of length `n`, the `n` least numerically significant bits are stored, and any remaining high bits are ignored. Be aware of this behavior if you are storing signed integers! The signed integer `-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back from a 4-bit slice, become the value `2i8`.
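A short sketch of that signed-truncation pitfall, before the full examples below:

```rust
use bitvec::prelude::*;

let mut raw = 0u8;
let bits = raw.view_bits_mut::<Lsb0>();

// Only the low four bits of `0b1111_0010` survive the store.
bits[0 .. 4].store_le(-14i8);
assert_eq!(bits[0 .. 4].load_le::<i8>(), 2);
```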
## Examples ```rust use bitvec::prelude::*; let mut raw = 0u8; raw.view_bits_mut::<Lsb0>() [1 .. 6] .store_le(22u8); assert_eq!(raw, 0b00_10110_0); // 76 54321 0 raw.view_bits_mut::<Lsb0>() [1 .. 6] .store_le(-10i8); assert_eq!(raw, 0b00_10110_0); ``` In bit-slices that span multiple elements, the little-endian element ordering means that the slice index increases with numerical significance: ```rust use bitvec::prelude::*; let mut raw = [!0u8; 3]; raw.view_bits_mut::<Lsb0>() [4 .. 20] .store_le(0x2018u16); assert_eq!(raw, [ 0x8_F, // 7 0 0x0_1, // 15 8 0xF_2, // 23 16 ]); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and store functions. [orig]: crate::field::BitField::store_le [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Msb0.md

# `Msb0` Bit-Field Behavior `BitField` has no requirements about the in-memory representation or layout of stored integers within a bit-slice, only that round-tripping an integer through a store and a load of the same element suffix on the same bit-slice is idempotent (with respect to sign truncation). `Msb0` provides a contiguous translation from bit-index to real memory: for any given bit index `n` and its position `P(n)`, `P(n + 1)` is `P(n) - 1`. This allows it to provide batched behavior: since the section of contiguous indices used within an element translates to a section of contiguous bits in real memory, the transaction is always a single shift-mask operation. Each implemented method contains documentation and examples showing exactly how the abstract integer space is mapped to real memory. ## Notes In particular, note that while `Msb0` indexes bits from the most significant down to the least, and integers index from the least up to the most, this **does not** reörder any bits of the integer value! This ordering only finds a region in real memory; it does *not* affect the partial-integer contents stored in that region.

bitvec-1.0.1/doc/field/BitField_Msb0_load_be.md

# `Msb0` Big-Endian Integer Loading This implementation uses the `Msb0` bit-ordering to determine *which* bits in a partially-occupied memory element contain the contents of an integer to be loaded, using big-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Signed-Integer Loading As described in the trait definition, when loading as a signed integer, the most significant bit *loaded* from memory is sign-extended to the full width of the returned type. In this method, that means the most-significant loaded bit of the first element. ## Examples In each memory element, the `Msb0` ordering counts indices rightward from the left edge: ```rust use bitvec::prelude::*; let raw = 0b00_10110_0u8; // 01 23456 7 // ^ sign bit assert_eq!( raw.view_bits::<Msb0>() [2 ..
7] .load_be::<u8>(), 0b000_10110, ); assert_eq!( raw.view_bits::<Msb0>() [2 .. 7] .load_be::<i8>(), 0b111_10110u8 as i8, ); ``` In bit-slices that span multiple elements, the big-endian element ordering means that the slice index increases while numerical significance decreases: ```rust use bitvec::prelude::*; let raw = [ 0b1111_0010u8, // ^ sign bit // 0 7 0x0_1u8, // 8 15 0x8_Fu8, // 16 23 ]; assert_eq!( raw.view_bits::<Msb0>() [4 .. 20] .load_be::<u16>(), 0x2018u16, ); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and load functions. [orig]: crate::field::BitField::load_be [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Msb0_load_le.md

# `Msb0` Little-Endian Integer Loading This implementation uses the `Msb0` bit-ordering to determine *which* bits in a partially-occupied memory element contain the contents of an integer to be loaded, using little-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Signed-Integer Loading As described in the trait definition, when loading as a signed integer, the most significant bit *loaded* from memory is sign-extended to the full width of the returned type. In this method, that means the most-significant loaded bit of the final element. ## Examples In each memory element, the `Msb0` ordering counts indices rightward from the left edge: ```rust use bitvec::prelude::*; let raw = 0b00_10110_0u8; // 01 23456 7 // ^ sign bit assert_eq!( raw.view_bits::<Msb0>() [2 .. 7] .load_le::<u8>(), 0b000_10110, ); assert_eq!( raw.view_bits::<Msb0>() [2 .. 7] .load_le::<i8>(), 0b111_10110u8 as i8, ); ``` In bit-slices that span multiple elements, the little-endian element ordering means that the slice index increases with numerical significance: ```rust use bitvec::prelude::*; let raw = [ 0xF_8u8, // 0 7 0x0_1u8, // 8 15 0b0010_1111u8, // ^ sign bit // 16 23 ]; assert_eq!( raw.view_bits::<Msb0>() [4 .. 20] .load_le::<u16>(), 0x2018u16, ); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and load functions.
[orig]: crate::field::BitField::load_le [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Msb0_store_be.md

# `Msb0` Big-Endian Integer Storing This implementation uses the `Msb0` bit-ordering to determine *which* bits in a partially-occupied memory element are used for storage, using big-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Narrowing Behavior Integers are truncated from the high end. When storing into a bit-slice of length `n`, the `n` least numerically significant bits are stored, and any remaining high bits are ignored. Be aware of this behavior if you are storing signed integers! The signed integer `-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back from a 4-bit slice, become the value `2i8`. ## Examples ```rust use bitvec::prelude::*; let mut raw = 0u8; raw.view_bits_mut::<Msb0>() [2 .. 7] .store_be(22u8); assert_eq!(raw, 0b00_10110_0); // 01 23456 7 raw.view_bits_mut::<Msb0>() [2 .. 7] .store_be(-10i8); assert_eq!(raw, 0b00_10110_0); ``` In bit-slices that span multiple elements, the big-endian element ordering means that the slice index increases while numerical significance decreases: ```rust use bitvec::prelude::*; let mut raw = [!0u8; 3]; raw.view_bits_mut::<Msb0>() [4 .. 20] .store_be(0x2018u16); assert_eq!(raw, [ 0xF_2, // 0 7 0x0_1, // 8 15 0x8_F, // 16 23 ]); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and store functions. [orig]: crate::field::BitField::store_be [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_Msb0_store_le.md

# `Msb0` Little-Endian Integer Storing This implementation uses the `Msb0` bit-ordering to determine *which* bits in a partially-occupied memory element are used for storage, using little-endian element ordering. See the [trait method definition][orig] for an overview of what element ordering means. ## Narrowing Behavior Integers are truncated from the high end. When storing into a bit-slice of length `n`, the `n` least numerically significant bits are stored, and any remaining high bits are ignored. Be aware of this behavior if you are storing signed integers! The signed integer `-14i8` (bit pattern `0b1111_0010u8`) will, when stored into and loaded back from a 4-bit slice, become the value `2i8`. ## Examples ```rust use bitvec::prelude::*; let mut raw = 0u8; raw.view_bits_mut::<Msb0>() [2 .. 7] .store_le(22u8); assert_eq!(raw, 0b00_10110_0); // 01 23456 7 raw.view_bits_mut::<Msb0>() [2 .. 7] .store_le(-10i8); assert_eq!(raw, 0b00_10110_0); ``` In bit-slices that span multiple elements, the little-endian element ordering means that the slice index increases with numerical significance: ```rust use bitvec::prelude::*; let mut raw = [!0u8; 3]; raw.view_bits_mut::<Msb0>() [4 ..
20] .store_le(0x2018u16); assert_eq!(raw, [ 0xF_8, // 0 7 0x0_1, // 8 15 0x2_F, // 16 23 ]); ``` Note that while these examples use `u8` storage for convenience in displaying the literals, `BitField` operates identically with *any* storage type. As most machines use little-endian *byte ordering* within wider element types, and `bitvec` exclusively operates on *elements*, the actual bytes of memory may rapidly start to behave oddly when translating between numeric literals and in-memory representation. The [user guide] has a chapter that translates bit indices into memory positions for each combination of `<T, O>`, and may be of additional use when choosing a combination of type parameters and store functions. [orig]: crate::field::BitField::store_le [user guide]: https://bitvecto-rs.github.io/bitvec/memory-layout

bitvec-1.0.1/doc/field/BitField_load.md

# Integer Loading This method reads the contents of a bit-slice region as an integer. The region may be shorter than the destination integer type, in which case the loaded value will be zero-extended (when `I: Unsigned`) or sign-extended from the most significant loaded bit (when `I: Signed`). The region may not be zero bits, nor wider than the destination type. Attempting to load a `u32` from a bit-slice of length 33 will panic the program. ## Operation and Endianness Handling Each element in the bit-slice contains a segment of the value to be loaded. If the bit-slice contains more than one element, then the numerical significance of each loaded segment is interpreted according to the target’s endianness: - little-endian targets consider each *`T` element* to have increasing numerical significance, starting with the least-significant segment at the low address and ending with the most-significant segment at the high address. - big-endian targets consider each *`T` element* to have decreasing numerical significance, starting with the most-significant segment at the low address and ending with the least-significant segment at the high address. See the documentation for [`.load_le()`] and [`.load_be()`] for more detail on what this means for how the in-memory representation of bit-slices translates to loaded values. You must always use the loading method that exactly corresponds to the storing method previously used to insert data into the bit-slice: same suffix on the method name (none, `_le`, `_be`) and same integer type. `bitvec` is not required to, and will not, guarantee round-trip consistency if you change any of these parameters. ## Type Parameters - `I`: The integer type being loaded. This can be any of the signed or unsigned integers. ## Parameters - `&self`: A bit-slice region whose length is in the range `1 ..= I::BITS`. ## Returns The contents of the bit-slice, interpreted as an integer. ## Panics This panics if `self.len()` is 0, or greater than `I::BITS`. ## Examples This method is inherently non-portable, and changes behavior depending on the target characteristics. If your target is little-endian, see [`.load_le()`]; if your target is big-endian, see [`.load_be()`]. [`.load_be()`]: Self::load_be [`.load_le()`]: Self::load_le

bitvec-1.0.1/doc/field/BitField_load_be.md

# Big-Endian Integer Loading This method loads an integer value from a bit-slice, using big-endian significance ordering when the bit-slice spans more than one `T` element in memory.
Big-endian significance ordering means that if a bit-slice occupies an array `[A, B, C]`, then the bits stored in `A` are considered to be the most significant segment of the loaded integer, then `B` contains the middle segment, then `C` contains the least significant segment. The segments are combined in order, that is, as the raw bit-pattern `0b<A><B><C>`. If the destination type is signed, the loaded value is sign-extended according to the most-significant bit in the `A` segment. It is important to note that the `O: BitOrder` parameter of the bit-slice from which the value is loaded **does not** affect the bit-pattern of the stored segments. They are always stored exactly as they exist in an ordinary integer. The ordering parameter only affects *which* bits in an element are available for storage. ## Type Parameters - `I`: The integer type being loaded. This can be any of the signed or unsigned integers. ## Parameters - `&self`: A bit-slice region whose length is in the range `1 ..= I::BITS`. ## Returns The contents of the bit-slice, interpreted as an integer. ## Panics This panics if `self.len()` is 0, or greater than `I::BITS`. ## Examples Let us consider an `i32` value stored in 24 bits of a `BitSlice`: ```rust use bitvec::prelude::*; let mut raw = [0u8; 4]; let bits = raw.view_bits_mut::<Lsb0>(); let integer = 0x00__B4_96_3Cu32 as i32; bits[4 .. 28].store_be::<i32>(integer); let loaded = bits[4 .. 28].load_be::<i32>(); assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32); ``` Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the value was considered to be negative when interpreted as an `i24` and was sign-extended through the highest byte. Let us now look at the memory representation of this value: ```rust # use bitvec::prelude::*; # let mut raw = [0u8; 4]; # let bits = raw.view_bits_mut::<Lsb0>(); # bits[4 .. 28].store_be::<u32>(0x00B4963Cu32); assert_eq!(raw, [ 0b1011_0000, // 0xB dead 0b0100_1001, // 0x4 0x9 0b0110_0011, // 0x6 0x3 0b0000_1100, // dead 0xC ]); ``` Notice how while the `Lsb0` bit-ordering means that indexing within the bit-slice proceeds right-to-left in each element, the actual bit-patterns stored in memory are not affected. Element `[0]` is more numerically significant than element `[1]`, but bit `[4]` is not more numerically significant than bit `[5]`. In the sequence `B496`, `B` is the most significant, and so it gets placed lowest in memory. `49` fits in one byte, and is stored directly as written. Lastly, `6` is the least significant nibble of the four, and is placed highest in memory. Now let’s look at the way different `BitOrder` parameters interpret the placement of bit indices within memory: ```rust use bitvec::prelude::*; let raw = [ // Bit index 14 ← // Lsb0: ─┤ 0b0100_0000_0000_0011u16, // Msb0: ├─ // → 14 // Bit index ← 19 16 // Lsb0: ├──┤ 0b0001_0000_0000_1110u16, // Msb0: ├──┤ // 16 19 → ]; assert_eq!( raw.view_bits::<Lsb0>() [14 .. 20] .load_be::<u8>(), 0b00_01_1110, ); assert_eq!( raw.view_bits::<Msb0>() [14 .. 20] .load_be::<u8>(), 0b00_11_0001, ); ``` Notice how the bit-orderings change which *parts* of the memory are loaded, but in both cases the segment in `raw[0]` is more significant than the segment in `raw[1]`, and the ordering of bits *within* each segment are unaffected by the bit-ordering. ## Notes Be sure to see the documentation for [`<BitSlice<_, Lsb0> as BitField>::load_be`][llb] and [`<BitSlice<_, Msb0> as BitField>::load_be`][mlb] for more detailed information on the memory views!
You can view the mask of all *storage regions* of a bit-slice by using its [`.domain()`] method to view the breakdown of its memory region, then print the [`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are always used in their entirety. You should use the `domain` module’s types whenever you are uncertain of the exact locations in memory that a particular bit-slice governs. [llb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_be-3 [mlb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_be-4 [`PartialElement`]: crate::domain::PartialElement [`.domain()`]: crate::slice::BitSlice::domain [`.mask()`]: crate::domain::PartialElement::mask

bitvec-1.0.1/doc/field/BitField_load_le.md

# Little-Endian Integer Loading This method loads an integer value from a bit-slice, using little-endian significance ordering when the bit-slice spans more than one `T` element in memory. Little-endian significance ordering means that if a bit-slice occupies an array `[A, B, C]`, then the bits stored in `A` are considered to contain the least significant segment of the loaded integer, then `B` contains the middle segment, and then `C` contains the most significant segment. The segments are combined in order, that is, as the raw bit-pattern `0b<C><B><A>`. If the destination type is signed, the loaded value is sign-extended according to the most-significant bit in the `C` segment. It is important to note that the `O: BitOrder` parameter of the bit-slice from which the value is loaded **does not** affect the bit-pattern of the stored segments. They are always stored exactly as they exist in an ordinary integer. The ordering parameter only affects *which* bits in an element are available for storage. ## Type Parameters - `I`: The integer type being loaded. This can be any of the signed or unsigned integers. ## Parameters - `&self`: A bit-slice region whose length is in the range `1 ..= I::BITS`. ## Returns The contents of the bit-slice, interpreted as an integer. ## Panics This panics if `self.len()` is 0, or greater than `I::BITS`. ## Examples Let us consider an `i32` value stored in 24 bits of a `BitSlice`: ```rust use bitvec::prelude::*; let mut raw = [0u8; 4]; let bits = raw.view_bits_mut::<Msb0>(); let integer = 0x00__B4_96_3Cu32 as i32; bits[4 .. 28].store_le::<i32>(integer); let loaded = bits[4 .. 28].load_le::<i32>(); assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32); ``` Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the value was considered to be negative when interpreted as an `i24` and was sign-extended through the highest byte. Let us now look at the memory representation of this value: ```rust # use bitvec::prelude::*; # let mut raw = [0u8; 4]; # let bits = raw.view_bits_mut::<Msb0>(); # bits[4 .. 28].store_le::<u32>(0x00B4963Cu32); assert_eq!(raw, [ 0b0000_1100, // dead 0xC 0b0110_0011, // 0x6 0x3 0b0100_1001, // 0x4 0x9 0b1011_0000, // 0xB dead ]); ``` Notice how while the `Msb0` bit-ordering means that indexing within the bit-slice proceeds left-to-right in each element, and the bit-patterns in each element proceed left-to-right in the aggregate and the decomposed literals, the ordering of the elements is reversed from how the literal was written. In the sequence `B496`, `B` is the most significant, and so it gets placed highest in memory. `49` fits in one byte, and is stored directly as written.
Lastly, `6` is the least significant nibble of the four, and is placed lowest in memory. Now let’s look at the way different `BitOrder` parameters interpret the placement of bit indices within memory: ```rust use bitvec::prelude::*; let raw = [ // Bit index 14 ← // Lsb0: ─┤ 0b0100_0000_0000_0011u16, // Msb0: ├─ // → 14 // Bit index ← 19 16 // Lsb0: ├──┤ 0b0001_0000_0000_1110u16, // Msb0: ├──┤ // 16 19 → ]; assert_eq!( raw.view_bits::<Lsb0>() [14 .. 20] .load_le::<u8>(), 0b00_1110_01, ); assert_eq!( raw.view_bits::<Msb0>() [14 .. 20] .load_le::<u8>(), 0b00_0001_11, ); ``` Notice how the bit-orderings change which *parts* of the memory are loaded, but in both cases the segment in `raw[0]` is less significant than the segment in `raw[1]`, and the ordering of bits *within* each segment are unaffected by the bit-ordering. ## Notes Be sure to see the documentation for [`<BitSlice<_, Lsb0> as BitField>::load_le`][lll] and [`<BitSlice<_, Msb0> as BitField>::load_le`][mll] for more detailed information on the memory views! You can view the mask of all *storage regions* of a bit-slice by using its [`.domain()`] method to view the breakdown of its memory region, then print the [`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are always used in their entirety. You should use the `domain` module’s types whenever you are uncertain of the exact locations in memory that a particular bit-slice governs. [lll]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_le-3 [mll]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.load_le-4 [`PartialElement`]: crate::domain::PartialElement [`.domain()`]: crate::slice::BitSlice::domain [`.mask()`]: crate::domain::PartialElement::mask

bitvec-1.0.1/doc/field/BitField_store.md

# Integer Storing This method writes an integer into the contents of a bit-slice region. The region may be shorter than the source integer type, in which case the stored value will be truncated. On load, it may be zero-extended (unsigned destination) or sign-extended from the most significant **stored** bit (signed destination). The region may not be zero bits, nor wider than the source type. Attempting to store a `u32` into a bit-slice of length 33 will panic the program. ## Operation and Endianness Handling The value to be stored is broken into segments according to the elements of the bit-slice receiving it. If the bit-slice contains more than one element, then the numerical significance of each segment routes to a storage element according to the target’s endianness: - little-endian targets consider each *`T` element* to have increasing numerical significance, starting with the least-significant segment at the low address and ending with the most-significant segment at the high address. - big-endian targets consider each *`T` element* to have decreasing numerical significance, starting with the most-significant segment at the low address and ending with the least-significant segment at the high address. See the documentation for [`.store_le()`] and [`.store_be()`] for more detail on what this means for how the in-memory representation of bit-slices translates to stored values. You must always use the loading method that exactly corresponds to the storing method previously used to insert data into the bit-slice: same suffix on the method name (none, `_le`, `_be`) and same integer type. `bitvec` is not required to, and will not, guarantee round-trip consistency if you change any of these parameters.
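A sketch of why the parameters must match: the same stored bits reassemble into different values under the other suffix.

```rust
use bitvec::prelude::*;

let mut raw = [0u8; 2];
let bits = raw.view_bits_mut::<Lsb0>();
bits[4 .. 12].store_be(0xA5u8);

// The matching suffix round-trips the value,
assert_eq!(bits[4 .. 12].load_be::<u8>(), 0xA5);
// while the opposite suffix reads the two segments in reverse order.
assert_eq!(bits[4 .. 12].load_le::<u8>(), 0x5A);
```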
## Type Parameters - `I`: The integer type being stored. This can be any of the signed or unsigned integers. ## Parameters - `&mut self`: A bit-slice region whose length is in the range `1 ..= I::BITS`. - `value`: An integer value whose `self.len()` least numerically significant bits will be written into `self`. ## Panics This panics if `self.len()` is 0, or greater than `I::BITS`. ## Examples This method is inherently non-portable, and changes behavior depending on the target characteristics. If your target is little-endian, see [`.store_le()`]; if your target is big-endian, see [`.store_be()`]. [`.store_be()`]: Self::store_be [`.store_le()`]: Self::store_le

bitvec-1.0.1/doc/field/BitField_store_be.md

# Big-Endian Integer Storing This method stores an integer value into a bit-slice, using big-endian significance ordering when the bit-slice spans more than one `T` element in memory. Big-endian significance ordering means that if a bit-slice occupies an array `[A, B, C]`, then the bits stored in `A` are considered to contain the most significant segment of the stored integer, then `B` contains the middle segment, and then `C` contains the least significant segment. An integer is broken into segments in order, that is, the raw bit-pattern is fractured into `0b<A><B><C>`. High bits beyond the length of the bit-slice into which the integer is stored are truncated. It is important to note that the `O: BitOrder` parameter of the bit-slice into which the value is stored **does not** affect the bit-pattern of the stored segments. They are always stored exactly as they exist in an ordinary integer. The ordering parameter only affects *which* bits in an element are available for storage. ## Type Parameters - `I`: The integer type being stored. This can be any of the signed or unsigned integers. ## Parameters - `&mut self`: A bit-slice region whose length is in the range `1 ..= I::BITS`. - `value`: An integer value whose `self.len()` least numerically significant bits will be written into `self`. ## Panics This panics if `self.len()` is 0, or greater than `I::BITS`. ## Examples Let us consider an `i32` value stored in 24 bits of a `BitSlice`: ```rust use bitvec::prelude::*; let mut raw = [0u8; 4]; let bits = raw.view_bits_mut::<Lsb0>(); let integer = 0x00__B4_96_3Cu32 as i32; bits[4 .. 28].store_be::<i32>(integer); let loaded = bits[4 .. 28].load_be::<i32>(); assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32); ``` Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the value was considered to be negative when interpreted as an `i24` and was sign-extended through the highest byte. Let us now look at the memory representation of this value: ```rust # use bitvec::prelude::*; # let mut raw = [0u8; 4]; # let bits = raw.view_bits_mut::<Lsb0>(); # bits[4 .. 28].store_be::<u32>(0x00B4963Cu32); assert_eq!(raw, [ 0b1011_0000, // 0xB dead 0b0100_1001, // 0x4 0x9 0b0110_0011, // 0x6 0x3 0b0000_1100, // dead 0xC ]); ``` Notice how while the `Lsb0` bit-ordering means that indexing within the bit-slice proceeds right-to-left in each element, the actual bit-patterns stored in memory are not affected. Element `[0]` is more numerically significant than element `[1]`, but bit `[4]` is not more numerically significant than bit `[5]`. In the sequence `B496`, `B` is the most significant, and so it gets placed lowest in memory. `49` fits in one byte, and is stored directly as written. Lastly, `6` is the least significant nibble of the four, and is placed highest in memory.
Now let’s look at the way different `BitOrder` parameters interpret the placement of bit indices within memory: ```rust use bitvec::prelude::*; let raw = [ // Bit index 14 ← // Lsb0: ─┤ 0b0100_0000_0000_0011u16, // Msb0: ├─ // → 14 // Bit index ← 19 16 // Lsb0: ├──┤ 0b0001_0000_0000_1110u16, // Msb0: ├──┤ // 16 19 → ]; assert_eq!( raw.view_bits::<Lsb0>() [14 .. 20] .load_be::<u8>(), 0b00_01_1110, ); assert_eq!( raw.view_bits::<Msb0>() [14 .. 20] .load_be::<u8>(), 0b00_11_0001, ); ``` Notice how the bit-orderings change which *parts* of the memory are loaded, but in both cases the segment in `raw[0]` is more significant than the segment in `raw[1]`, and the ordering of bits *within* each segment are unaffected by the bit-ordering. ## Notes Be sure to see the documentation for [`<BitSlice<_, Lsb0> as BitField>::store_be`][lsb] and [`<BitSlice<_, Msb0> as BitField>::store_be`][msb] for more detailed information on the memory views! You can view the mask of all *storage regions* of a bit-slice by using its [`.domain()`] method to view the breakdown of its memory region, then print the [`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are always used in their entirety. You should use the `domain` module’s types whenever you are uncertain of the exact locations in memory that a particular bit-slice governs. [lsb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_be-3 [msb]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_be-4 [`PartialElement`]: crate::domain::PartialElement [`.domain()`]: crate::slice::BitSlice::domain [`.mask()`]: crate::domain::PartialElement::mask

bitvec-1.0.1/doc/field/BitField_store_le.md

# Little-Endian Integer Storing This method stores an integer value into a bit-slice, using little-endian significance ordering when the bit-slice spans more than one `T` element in memory. Little-endian significance ordering means that if a bit-slice occupies an array `[A, B, C]`, then the bits stored in `A` are considered to contain the least significant segment of the stored integer, then `B` contains the middle segment, and then `C` contains the most significant segment. An integer is broken into segments in order, that is, the raw bit-pattern is fractured into `0b<C><B><A>`. High bits beyond the length of the bit-slice into which the integer is stored are truncated. It is important to note that the `O: BitOrder` parameter of the bit-slice into which the value is stored **does not** affect the bit-pattern of the stored segments. They are always stored exactly as they exist in an ordinary integer. The ordering parameter only affects *which* bits in an element are available for storage. ## Type Parameters - `I`: The integer type being stored. This can be any of the signed or unsigned integers. ## Parameters - `&mut self`: A bit-slice region whose length is in the range `1 ..= I::BITS`. - `value`: An integer value whose `self.len()` least numerically significant bits will be written into `self`. ## Panics This panics if `self.len()` is 0, or greater than `I::BITS`. ## Examples Let us consider an `i32` value stored in 24 bits of a `BitSlice`: ```rust use bitvec::prelude::*; let mut raw = [0u8; 4]; let bits = raw.view_bits_mut::<Msb0>(); let integer = 0x00__B4_96_3Cu32 as i32; bits[4 .. 28].store_le::<i32>(integer); let loaded = bits[4 ..
28].load_le::<i32>(); assert_eq!(loaded, 0xFF__B4_96_3Cu32 as i32); ``` Observe that, because the lowest 24 bits began with the pattern `0b1011…`, the value was considered to be negative when interpreted as an `i24` and was sign-extended through the highest byte. Let us now look at the memory representation of this value: ```rust # use bitvec::prelude::*; # let mut raw = [0u8; 4]; # let bits = raw.view_bits_mut::<Msb0>(); # bits[4 .. 28].store_le::<u32>(0x00B4963Cu32); assert_eq!(raw, [ 0b0000_1100, // dead 0xC 0b0110_0011, // 0x6 0x3 0b0100_1001, // 0x4 0x9 0b1011_0000, // 0xB dead ]); ``` Notice how while the `Msb0` bit-ordering means that indexing within the bit-slice proceeds left-to-right in each element, and the bit-patterns in each element proceed left-to-right in the aggregate and the decomposed literals, the ordering of the elements is reversed from how the literal was written. In the sequence `B496`, `B` is the most significant, and so it gets placed highest in memory. `49` fits in one byte, and is stored directly as written. Lastly, `6` is the least significant nibble of the four, and is placed lowest in memory. Now let’s look at the way different `BitOrder` parameters interpret the placement of bit indices within memory: ```rust use bitvec::prelude::*; let raw = [ // Bit index 14 ← // Lsb0: ─┤ 0b0100_0000_0000_0011u16, // Msb0: ├─ // → 14 // Bit index ← 19 16 // Lsb0: ├──┤ 0b0001_0000_0000_1110u16, // Msb0: ├──┤ // 16 19 → ]; assert_eq!( raw.view_bits::<Lsb0>() [14 .. 20] .load_le::<u8>(), 0b00_1110_01, ); assert_eq!( raw.view_bits::<Msb0>() [14 .. 20] .load_le::<u8>(), 0b00_0001_11, ); ``` Notice how the bit-orderings change which *parts* of the memory are loaded, but in both cases the segment in `raw[0]` is less significant than the segment in `raw[1]`, and the ordering of bits *within* each segment are unaffected by the bit-ordering. ## Notes Be sure to see the documentation for [`<BitSlice<_, Lsb0> as BitField>::store_le`][lsl] and [`<BitSlice<_, Msb0> as BitField>::store_le`][msl] for more detailed information on the memory views! You can view the mask of all *storage regions* of a bit-slice by using its [`.domain()`] method to view the breakdown of its memory region, then print the [`.mask()`] of any [`PartialElement`] the domain contains. Whole elements are always used in their entirety. You should use the `domain` module’s types whenever you are uncertain of the exact locations in memory that a particular bit-slice governs. [lsl]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_le-3 [msl]: https://docs.rs/bitvec/latest/bitvec/field/trait.BitField.html#method.store_le-4 [`PartialElement`]: crate::domain::PartialElement [`.domain()`]: crate::slice::BitSlice::domain [`.mask()`]: crate::domain::PartialElement::mask

bitvec-1.0.1/doc/field/get.md

# Partial-Element Getter This function extracts a portion of an integer value from a [`PartialElement`]. The `BitField` implementations call it as they assemble a complete integer. It performs the following steps: 1. the `PartialElement` is loaded (and masked to discard unused bits), 1. the loaded value is then shifted to abut the LSedge of the stack local, 1. and then `resize`d into a `U` value. ## Type Parameters - `O` and `T` are the type parameters of the `PartialElement` argument. - `U` is the destination integer type. ## Parameters - `elem`: A `PartialElement` containing a value segment. - `shamt`: The distance by which to right-shift the value loaded from `elem` so that it abuts the LSedge.
## Returns The segment of an integer stored in `elem`. [`PartialElement`]: crate::domain::PartialElement

bitvec-1.0.1/doc/field/impl_BitArray.md

# Bit-Array Implementation of `BitField` The `BitArray` implementation is only ever called when the entire bit-array is available for use, which means it can skip the bit-slice memory detection and instead use the underlying storage elements directly. The implementation still performs the segmentation for each element contained in the array, in order to maintain value consistency so that viewing the array as a bit-slice is still able to correctly interact with data contained in it.

bitvec-1.0.1/doc/field/io/Read_BitSlice.md

# Reading From a Bit-Slice The implementation loads bytes out of the referenced bit-slice until either the destination buffer is filled or the source has no more bytes to provide. When `.read()` returns, the provided bit-slice handle will have been updated to no longer include the leading segment copied out as bytes into `buf`. Note that the return value of `.read()` is always the number of *bytes* of `buf` filled! The implementation uses [`BitField::load_be`] to collect bytes. Note that unlike the standard library, it is implemented on bit-slices of *any* underlying element type. However, using a `BitSlice<_, u8>` is still likely to be fastest. ## Original [`impl Read for [u8]`][orig] [orig]: https://doc.rust-lang.org/std/primitive.slice.html#impl-Read [`BitField::load_be`]: crate::field::BitField::load_be

bitvec-1.0.1/doc/field/io/Read_BitVec.md

# Reading From a Bit-Vector The implementation loads bytes out of the referenced bit-vector until either the destination buffer is filled or the source has no more bytes to provide. When `.read()` returns, the provided bit-vector will have its contents shifted down so that it begins at the first bit *after* the last byte copied out into `buf`. Note that the return value of `.read()` is always the number of *bytes* of `buf` filled! ## API Differences The standard library does not `impl Read for Vec<u8>`. It is provided here as a courtesy.

bitvec-1.0.1/doc/field/io/Write_BitSlice.md

# Writing Into a Bit-Slice The implementation stores bytes into the referenced bit-slice until either the source buffer is exhausted or the destination has no more slots to fill. When `.write()` returns, the provided bit-slice handle will have been updated to no longer include the leading segment filled with bytes from `buf`. Note that the return value of `.write()` is always the number of *bytes* of `buf` consumed! The implementation uses [`BitField::store_be`] to fill bytes. Note that unlike the standard library, it is implemented on bit-slices of *any* underlying element type. However, using a `BitSlice<_, u8>` is still likely to be fastest. ## Original [`impl Write for [u8]`][orig] [orig]: https://doc.rust-lang.org/std/primitive.slice.html#impl-Write [`BitField::store_be`]: crate::field::BitField::store_be

bitvec-1.0.1/doc/field/io/Write_BitVec.md

# Writing Into a Bit-Vector The implementation appends bytes to the referenced bit-vector until the source buffer is exhausted. Note that the return value of `.write()` is always the number of *bytes* of `buf` consumed!
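For instance, a minimal sketch of appending bytes through `io::Write` (assuming the crate is built with its `std` feature, which enables these implementations):

```rust
use std::io::Write;
use bitvec::prelude::*;

let mut bv = BitVec::<u8, Msb0>::new();
// Each byte written appends eight bits to the vector.
let written = bv.write(&[0xC0, 0xFF, 0xEE]).unwrap();
assert_eq!(written, 3);
assert_eq!(bv.len(), 24);
```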
The implementation uses [`BitField::store_be`] to fill bytes. Note that unlike the standard library, it is implemented on bit-vectors of *any* underlying element type. However, using a `BitVec<u8, _>` is still likely to be fastest.

## Original

[`impl Write for Vec<u8>`][orig]

[orig]: https://doc.rust-lang.org/std/vec/struct.Vec.html#impl-Write
[`BitField::store_be`]: crate::field::BitField::store_be

bitvec-1.0.1/doc/field/io.md000064400000000000000000000007751046102023000136710ustar 00000000000000
# Bit-Field I/O Protocols

This module defines the standard-library `io::{Read, Write}` byte-oriented protocols on `bitvec` structures that are capable of operating on bytes through the `BitField` trait.

Note that calling [`BitField`] methods in a loop imposes a non-trivial, and irremovable, performance penalty on each invocation. The `.read()` and `.write()` methods implemented in this module are going to suffer this cost, and you should prefer to operate directly on the underlying buffer if possible.

bitvec-1.0.1/doc/field/resize.md000064400000000000000000000007141046102023000145540ustar 00000000000000
# Value Resizing

This zero-extends or truncates a source value to fit into a target type.

## Type Parameters

- `T`: The initial integer type of the value being resized.
- `U`: The destination type of the value after resizing.

## Parameters

- `value`: Any (unsigned) integer.

## Returns

`value`, either zero-extended in the most-significant bits (if `U` is wider than `T`) or truncated retaining the least-significant bits (if `U` is narrower than `T`).

bitvec-1.0.1/doc/field/set.md000064400000000000000000000015631046102023000140510ustar 00000000000000
# Partial-Element Setter

This function inserts a portion of an integer value into a [`PartialElement`]. The `BitField` implementations call it as they disassemble a complete integer. It performs the following steps:

1. the value is `resize`d into a `T::Mem`,
1. shifted up from LSedge as needed to fit in the governed region of the partial element,
1. and then stored (after masking away excess bits) through the `PartialElement` into memory.

## Type Parameters

- `O` and `T` are the type parameters of the `PartialElement` argument.
- `U` is the source integer type.

## Parameters

- `elem`: A `PartialElement` into which a value segment will be written.
- `value`: A value, whose least-significant bits will be written into `elem`.
- `shamt`: The shift distance from the storage location’s LSedge to its live bits.

[`PartialElement`]: crate::domain::PartialElement

bitvec-1.0.1/doc/field/sign.md000064400000000000000000000024021046102023000142070ustar 00000000000000
# Sign Extension

When a bit-slice loads a value whose destination type is wider than the bit-slice itself, and the destination type is a signed integer, the loaded value must be sign-extended. The load accumulator always begins as the zero pattern, and the loaders do not attempt to detect a sign bit before they begin.

As such, this function takes a value loaded out of a bit-slice, which has been zero-extended from the storage length to the destination type, and the length of the bit-slice that contained it. If the destination type is unsigned, then the value is returned as-is; if the destination type is signed, then the value is sign-extended according to the bit at `1 << (width - 1)`.

## Type Parameters

- `I`: The integer type of the loaded element. When this is one of `u{8,16,32,64,size}`, no sign extension takes place.

## Parameters

- `elem`: The value loaded out of a bit-slice.
- `width`: The width in bits of the source bit-slice. This is always known to be in the domain `1 ..= I::BITS`.

## Returns

A correctly-signed copy of `elem`. Unsigned integers, and signed integers whose most significant loaded bit was `0`, are untouched. Signed integers whose most significant loaded bit was `1` have their remaining high bits set to `1` for sign extension.

bitvec-1.0.1/doc/field.md000064400000000000000000000151401046102023000132520ustar 00000000000000
# Bit-Field Memory Slots

This module implements a load/store protocol for [`BitSlice`] regions that enables them to act as if they were a storage slot for integers. Implementations of the [`BitField`] trait provide behavior similar to C and C++ language bit-fields. While any `BitSlice` instantiation is able to provide this behavior, the lack of specialization in the language means that it is instead only implemented for `BitSlice<_, Lsb0>` and `BitSlice<_, Msb0>` in order to gain a performance advantage.

## Batched Behavior

Bit-field behavior can be simulated using `BitSlice`’s existing APIs; however, the inherent methods are all required to operate on each bit individually in sequence. In addition to the semantic load/store behavior this module describes, it also implements it in a way that takes advantage of the contiguity properties of the `Lsb0` and `Msb0` orderings in order to maximize how many bits are transferred in each cycle of the overall operation. This is most efficient when using `BitSlice<usize, Lsb0>` as the storage bit-slice, or using `.load::<usize>()` or `.store::<usize>()` as the transfer type.

## Bit-Slice Storage and Integer Value Relationships

`BitField` permits any type of integer, *including signed integers*, to be stored into or loaded out of a `BitSlice` with any storage type `T`. While the examples in this module will largely use `u8`, just to keep the text concise, `BitField` is tested, and will work correctly, for any combination of types.

`BitField` implementations use the processor’s own concept of integer registers to operate. As such, the byte-wise memory access patterns for types wider than `u8` depend on your processor’s byte endianness, as well as which `BitField` method, and which [`BitOrder`] type parameter, you are using.

`BitField` only operates within processor registers; traffic of `T` elements between the memory bank and the processor register is controlled entirely by the processor. If you do not want to introduce the processor’s byte endianness as a variable that affects the in-memory representation of stored integers, use `BitSlice<u8, O>` as the bit-field storage type. In particular, `BitSlice<u8, Msb0>` will fill memory in a way that intuitively matches what most debuggers show when inspecting memory.

On the other hand, if you do not care about memory representation and just need fast storage of less than an entire integer, `BitSlice<usize, Lsb0>` is likely your best bet. As always, the choice of type parameters is a trade-off with different advantages for each combination, which is why `bitvec` refuses to make the choice for you.

### Signed Behavior

The length of the `BitSlice` that stores a value is considered to be the width of that value when it is loaded back out. As such, storing an `i16` into a bit-slice of length `12` means that the stored value has type `i12`. When calling `.load::<i16>()` on a 12-bit slice, the load will detect the sign bit of the `i12` value and sign-extend it to `i16`.
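A sketch of this round-trip (using `u8`/`Msb0` storage; any type combination behaves equivalently):

```rust
use bitvec::prelude::*;

let mut raw = [0u8; 2];
let bits = raw.view_bits_mut::<Msb0>();
// 2048i16 is 0b0000_1000_0000_0000; only the low twelve bits are stored.
bits[.. 12].store_be::<i16>(2048);
// Bit 11 is the `i12` sign bit, so the load sign-extends through `i16`.
assert_eq!(bits[.. 12].load_be::<i16>(), -2048);
```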
This means that storing `2048i16` into a 12-bit slice and then loading it back out into an `i16` will produce `-2048i16` (negative), not `2048i16` (positive), because `1 << 11` is the sign bit. `BitField` **does not** record the true sign bit of an integer being stored, and will not attempt to set the sign bit of the narrowed value in storage. Storing `-127i8` (`0b1000_0001`) into a 7-bit slice will load `1i8`.

## Register Bit Order Preservation

The implementations in this module assume that the bits within a *value* being transferred into or out of a bit-slice should not be re-ordered. While the implementations will segment a value in order to make it fit into bit-slice storage, and will order those *segments* in memory according to their type parameter and specific trait method called, each segment will remain individually unmodified.

If we consider the value `0b100_1011`, segmented at the underscore, then the segments `0b100` and `0b1011` will be present somewhere in the bit-slice that stores them. They may be shifted within an element or re-ordered across elements, but each segment will not be changed.

## Endianness

`bitvec` uses the `BitOrder` trait to describe the order of bits within a single memory element. This ordering is independent of, and does not consider, the ordering of memory elements in a sequence; `bitvec` is always “little-endian” in this regard: lower indices are in lower memory addresses, higher indices are in higher memory addresses.

However, `BitField` is *explicitly* aware of multiple storage elements in sequence. It is by design able to allow combinations such as `<BitSlice<u8, Msb0> as BitField>::store_be::<u32>`. Even where the storage and value types are the same, or the value is narrower, the bit-slice may be spread across multiple elements and must segment the value across them.

The `_be` and `_le` orderings on `BitField` method names refer to the numeric significance of *bit-slice storage elements*. In `_be` methods, lower-address storage elements will hold more-significant segments of the value, and higher-address storage will hold less-significant. In `_le` methods, lower-address storage elements will hold *less*-significant segments of the value, and higher-address storage will hold *more*-significant.

Consider again the value `0b100_1011`, segmented at the underscore. When used with `.store_be()`, it will be placed into memory as `[0b…100…, 0b…1011…]`; when used with `.store_le()`, it will be placed into memory as `[0b…1011…, 0b…100…]`.

## Bit-Ordering Behaviors

The `_be` and `_le` suffixes select the ordering of storage elements in memory. The other critical aspect of the `BitField` memory behavior is selecting *which bits* in a storage element are used when a bit-slice has partial elements.

When `BitSlice<_, Lsb0>` produces a [`Domain::Region`], its `head` is in the most-significant bits of its element and its `tail` is in the least-significant bits. When `BitSlice<_, Msb0>` produces a `Region`, its `head` is in the *least*-significant bits, and its `tail` is in the *most*-significant bits.

You can therefore use these combinations of `BitOrder` type parameter and `BitField` method suffix to select exactly the memory behavior you want for a storage region. Each implementation of `BitField` has documentation showing exactly what its memory layout looks like, with code examples and visual inspections of memory. This documentation is likely collapsed by default when viewing the trait docs; be sure to use the `[+]` button to expand it!
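As a closing sketch of the `_be`/`_le` element ordering described above (using `u8`/`Msb0` storage; note that the widths of the two segments follow the span’s geometry, here three bits in `raw[0]` and four in `raw[1]`):

```rust
use bitvec::prelude::*;

let mut raw = [0u8; 2];
// `_be`: raw[0], the lower address, receives the more-significant 0b100.
raw.view_bits_mut::<Msb0>()[5 .. 12].store_be::<u8>(0b100_1011);
assert_eq!(raw, [0b0000_0100, 0b1011_0000]);

let mut raw = [0u8; 2];
// `_le`: raw[0] receives the less-significant 0b011 instead.
raw.view_bits_mut::<Msb0>()[5 .. 12].store_le::<u8>(0b100_1011);
assert_eq!(raw, [0b0000_0011, 0b1001_0000]);
```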
[`BitField`]: self::BitField
[`BitOrder`]: crate::order::BitOrder
[`BitSlice`]: crate::slice::BitSlice
[`Domain::Region`]: crate::domain::Domain::Region

bitvec-1.0.1/doc/index/BitEnd.md000064400000000000000000000026311046102023000144440ustar 00000000000000
# One-Bit-After Tail Index

This is a semantic bit-index within *or one bit after* an `R` register. It is the index of the first “dead” bit after a “live” region, and corresponds to the similar half-open range concept in the Rust `Range` type or the LLVM memory model, where pointer values include the address one object past the end of a region. It is a counter in the ring `0 ..= R::BITS` (note the inclusive high end).

Like [`BitIdx`], this is a virtual semantic index with no bearing on real memory effects; unlike `BitIdx`, it can never be translated to real memory because it does not describe real memory.

This type is necessary in order to preserve the distinction between a dead memory address that is *not* part of a region and a live memory address that *is* within a region. Additionally, it makes computation of region extension or offsets easy. `BitIdx` is insufficient to this task, and produces off-by-one errors when used in its stead.

## Type Parameters

- `R`: The register element that this dead-bit index governs.

## Validity

Values of this type are **required** to be in the range `0 ..= R::BITS`. Any value greater than [`R::BITS`] makes the program invalid and will likely cause either a crash or incorrect memory access.

## Construction

This type cannot be publicly constructed except by using the iterators provided for testing.

[`BitIdx`]: crate::index::BitIdx
[`R::BITS`]: funty::Integral::BITS

bitvec-1.0.1/doc/index/BitIdx.md000064400000000000000000000023711046102023000144630ustar 00000000000000
# Semantic Bit Index

This type is a counter in the ring `0 .. R::BITS` and serves to mark a semantic index within some register element. It is a virtual index, and is the stored value used in pointer encodings to track region start information. It is translated to a real index through the [`BitOrder`] trait. This virtual index is the only counter that can be used for address computation, and once lowered to an electrical index through [`BitOrder::at`], the electrical address can only be used for setting up machine instructions.

## Type Parameters

- `R`: The register element that this index governs.

## Validity

Values of this type are **required** to be in the range `0 .. R::BITS`. Any value not less than [`R::BITS`] makes the program invalid, and will likely cause either a crash or incorrect memory access.

## Construction

This type can never be constructed outside of the `bitvec` crate. It is passed in to [`BitOrder`] implementations, which may use it to construct electrical position values from it. All values of this type constructed by `bitvec` are known to be correct in their region; no other construction site can be trusted.

[`BitOrder`]: crate::order::BitOrder
[`BitOrder::at`]: crate::order::BitOrder::at
[`R::BITS`]: funty::Integral::BITS

bitvec-1.0.1/doc/index/BitIdxError.md000064400000000000000000000004731046102023000154760ustar 00000000000000
# Bit Index Error

This type marks that a value is out of range to be used as an index within an `R` element. It is likely never produced, as `bitvec` does not construct invalid indices, but is provided for completeness and to ensure that in the event of this error occurring, the diagnostic information is useful.
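For instance (a sketch, assuming the fallible `BitIdx::new` constructor used elsewhere in these documents):

```rust
use bitvec::index::BitIdx;

// `u8` has valid bit indices 0 ..= 7; 8 is out of range and produces
// the error value instead.
assert!(BitIdx::<u8>::new(7).is_ok());
assert!(BitIdx::<u8>::new(8).is_err());
```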
bitvec-1.0.1/doc/index/BitMask.md000064400000000000000000000012701046102023000146270ustar 00000000000000
# Multi-Bit Selection Mask

Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this type permits any number of bits to be set or cleared. This is used to accumulate selections for batched operations on a register in real memory.

## Type Parameters

- `R`: The register element that this mask governs.

## Construction

This must only be constructed by combining `BitSel` selection masks produced through the accepted chains of custody beginning with [`BitIdx`] values. Bit-masks not constructed in this manner are not guaranteed to be correct in the caller’s context and may lead to incorrect memory behaviors.

[`BitIdx`]: crate::index::BitIdx
[`BitSel`]: crate::index::BitSel

bitvec-1.0.1/doc/index/BitPos.md000064400000000000000000000017451046102023000145040ustar 00000000000000
# Bit Position

This is a position counter of a real bit in an `R` memory element. Like [`BitIdx`], it is a counter in the ring `0 .. R::BITS`. It marks a real bit in memory, and is the shift distance in the expression `1 << n`. It can only be produced by applying [`BitOrder::at`] to an existing `BitIdx` produced by `bitvec`.

## Type Parameters

- `R`: The register element that this position governs.

## Validity

Values of this type are **required** to be in the range `0 .. R::BITS`. Any value not less than [`R::BITS`] makes the program invalid, and will likely cause either a crash or incorrect memory access.

## Construction

This type is publicly constructible, but is only correct to do so within an implementation of `BitOrder::at`. `bitvec` will only request its creation through that trait implementation, and has no sites that can publicly accept untrusted values.

[`BitIdx`]: crate::index::BitIdx
[`BitOrder::at`]: crate::order::BitOrder::at
[`R::BITS`]: funty::Integral::BITS

bitvec-1.0.1/doc/index/BitSel.md000064400000000000000000000015561046102023000144660ustar 00000000000000
# One-Hot Bit Selection Mask

This type selects exactly one bit in a register. It is a [`BitPos`] shifted from a counter to a selector, and is used to apply test and write operations to real memory.

## Type Parameters

- `R`: The register element this selector governs.

## Validity

Values of this type are **required** to have exactly one bit set and all others cleared. Any other value makes the program incorrect, and will cause memory corruption.

## Construction

This type is only constructed from `BitPos`, and is always equivalent to `1 << BitPos`. The chain of custody from known-good [`BitIdx`] values, through proven-good [`BitOrder`] implementations, into `BitPos` and then `BitSel` proves that values of this type are always correct to apply to real memory.

[`BitIdx`]: crate::index::BitIdx
[`BitOrder`]: crate::order::BitOrder
[`BitPos`]: crate::index::BitPos

bitvec-1.0.1/doc/index.md000064400000000000000000000037151046102023000133030ustar 00000000000000
# Bit Indices

This module provides well-typed counters for working with bit-storage registers. The session types encode a strict chain of custody for translating semantic indices within [`BitSlice`] regions into real effects in memory.

The main advantage of types within this module is that they provide register-dependent range requirements for counter values, making it impossible to have an index out of bounds for a register. They also create a sequence of type transformations that assure the library about the continued validity of each value in its surrounding context.
By eliminating public constructors from arbitrary integers, `bitvec` can guarantee that only it can produce initial values, and only trusted functions can transform their numeric values or types, until the program reaches the property that it requires. This chain of assurance means that memory operations can be confident in the correctness of their actions and effects.

## Type Sequence

The library produces [`BitIdx`] values from region computation. These types cannot be publicly constructed, and are only ever the result of pointer analysis. As such, they rely on the correctness of the memory regions provided to library entry points, and those entry points can leverage the Rust type system to ensure safety there.

`BitIdx` is transformed to [`BitPos`] through the [`BitOrder`] trait. The [`order`] module provides verification functions that implementors can use to demonstrate correctness.

`BitPos` is the basis type that describes memory operations, and is used to create the selection masks [`BitSel`] and [`BitMask`].

## Usage

The types in this module should only be used by client crates in their test suites. They have no other purpose, and conjuring values for them is potentially memory-unsafe.

[`BitIdx`]: self::BitIdx
[`BitMask`]: self::BitMask
[`BitOrder`]: crate::order::BitOrder
[`BitPos`]: self::BitPos
[`BitSel`]: self::BitSel
[`BitSlice`]: crate::slice::BitSlice
[`order`]: crate::order

bitvec-1.0.1/doc/macros/BitArr_type.md000064400000000000000000000026571046102023000157060ustar 00000000000000
# Bit-Array Type Definition

Because a `BitArray` parameterized directly by a bit-count is not expressible in stable Rust, this macro serves the purpose of creating a type definition that expands to a suitable `BitArray`. It creates the correct, rounded-up, `BitArray` to hold a requested number of bits in a requested set of ordering/storage parameters.

The macro takes a minimum number of bits to store, and an optional set of bit-order and bit-store type names, and creates a `BitArray` that satisfies the request. As this macro is only usable in type position, it is named with `PascalCase` rather than `snake_case`.

## Examples

You must provide a bit-count; you may optionally provide a storage type, or a storage type *and* a bit-ordering, as subsequent arguments. When elided, the type parameters are set to the crate default type parameters of `Lsb0` and `usize`.

```rust
use bitvec::prelude::*;
use core::cell::Cell;

let a: BitArr!(for 100) = BitArray::ZERO;
let b: BitArr!(for 100, in u32) = BitArray::<_>::ZERO;
let c: BitArr!(for 100, in Cell<u16>, Msb0) = BitArray::<_, _>::ZERO;
```

The length expression must be `const`. It may be a literal, a named `const` item, or a `const` expression, as long as it evaluates to a `usize`. The type arguments have no restrictions, as long as they are in-scope at the invocation site and are implementors of [`BitOrder`] and [`BitStore`].

[`BitOrder`]: crate::order::BitOrder
[`BitStore`]: crate::store::BitStore

bitvec-1.0.1/doc/macros/bitarr_value.md000064400000000000000000000046521046102023000161400ustar 00000000000000
# Bit-Array Value Constructor

This macro provides a bit-initializer syntax for [`BitArray`] values. It takes a superset of the [`vec!`] arguments, and is capable of producing bit-arrays in `const` contexts (for known type parameters).

Like `vec!`, it can accept a sequence of comma-separated bit values, or a semicolon-separated pair of a bit value and a repetition counter. Bit values may be any integer or name of a `const` integer, but *should* only be `0` or `1`.
## Argument Syntax

It accepts zero, one, or three prefix arguments:

- `const`: If the first argument to the macro is the keyword `const`, separated from remaining arguments by a space, then the macro expands to a `const`-expression that can be used in any appropriate context (initializing a `static`, a `const`, or passed to a `const fn`). This only works when the bit-ordering argument is either implicit, or one of the three tokens that `bitvec` can recognize.
- `$store ,`: This must be one of `uTYPE`, `Cell<uTYPE>`, `AtomicUTYPE`, or `RadiumUTYPE`, where `TYPE` is one of `8`, `16`, `32`, `64`, or `size`. The macro recognizes this token textually, and does not have access to the type system resolver, so it will not accept aliases or qualified paths. When not provided, this defaults to `usize`.
- `$order ;`: When this is one of the three literal tokens `LocalBits`, `Lsb0`, or `Msb0`, then the macro is able to compute the encoded bit-array contents at compile time, including in `const` contexts. When it is anything else, the encoding must take place at runtime. The name or path chosen must be in scope at the macro invocation site. When not provided, this defaults to `Lsb0`.

The `const` argument can be present or absent independently of the type-parameter pair. The pair must be either both absent or both present together.

> Previous versions of `bitvec` supported `$order`-only arguments. This has been
> removed for clarity of use and ease of implementation.

## Examples

```rust
use bitvec::prelude::*;
use core::{cell::Cell, mem};
use radium::types::*;

let a: BitArray = bitarr![0, 1, 0, 0, 1];
let b: BitArray = bitarr![1; 5];
assert_eq!(b.len(), mem::size_of::<usize>() * 8);

let c = bitarr![u16, Lsb0; 0, 1, 0, 0, 1];
let d = bitarr![Cell<u16>, Msb0; 1; 10];
const E: BitArray<[u32; 1], LocalBits> = bitarr![u32, LocalBits; 1; 15];
let f = bitarr![RadiumU32, Msb0; 1; 20];
```

[`BitArray`]: crate::array::BitArray
[`vec!`]: macro@alloc::vec

bitvec-1.0.1/doc/macros/bitbox.md000064400000000000000000000006001046102023000147360ustar 00000000000000
# Boxed Bit-Slice Constructor

This macro creates encoded `BitSlice` buffers at compile-time, and at run-time copies them directly into a new heap allocation. It forwards all of its arguments to [`bitvec!`], and calls [`BitVec::into_boxed_bitslice`] on the produced `BitVec`.

[`BitVec::into_boxed_bitslice`]: crate::vec::BitVec::into_boxed_bitslice
[`bitvec!`]: macro@crate::bitvec

bitvec-1.0.1/doc/macros/bits.md000064400000000000000000000100101046102023000144060ustar 00000000000000
# Bit-Slice Region Constructor

This macro provides a bit-initializer syntax for [`BitSlice`] reference values. It takes a superset of the [`vec!`] arguments, and is capable of producing bit-slices in `const` contexts (for known type parameters).

Like `vec!`, it can accept a sequence of comma-separated bit values, or a semicolon-separated pair of a bit value and a repetition counter. Bit values may be any integer or name of a `const` integer, but *should* only be `0` or `1`.

## Argument Syntax

It accepts two modifier prefixes, zero or two type parameters, and the bit expressions described above. The modifier prefixes are separated from the remaining arguments by clearspace.

- `static`: If the first argument is the keyword `static`, then this produces a `&'static BitSlice` reference bound into a (hidden, unnameable) `static BitArray` item. If not, then it produces a stack temporary that the Rust compiler automatically extends to have the lifetime of the returned reference.
  Note that non-`static` invocations rely on the compiler’s escape analysis, and you should typically not try to move them up the call stack.
- `mut`: If the first argument is the keyword `mut`, then this produces a `&mut` writable `BitSlice`.
- `static mut`: These can be combined to create a `&'static mut BitSlice`. It is always safe to use this reference, because the `static mut BitArray` it creates is concealed and unreachable by any other codepath, and so the produced reference is always the sole handle that can reach it.

The next possible arguments are a pair of `BitStore`/`BitOrder` type parameters.

- `$store ,`: This must be one of `uTYPE`, `Cell<uTYPE>`, `AtomicUTYPE`, or `RadiumUTYPE`, where `TYPE` is one of `8`, `16`, `32`, `64`, or `size`. The macro recognizes this token textually, and does not have access to the type system resolver, so it will not accept aliases or qualified paths. When not provided, this defaults to `usize`.
- `$order ;`: When this is one of the three literal tokens `LocalBits`, `Lsb0`, or `Msb0`, then the macro is able to compute the encoded bit-array contents at compile time, including in `const` contexts. When it is anything else, the encoding must take place at runtime. The name or path chosen must be in scope at the macro invocation site. When not provided, this defaults to `Lsb0`.

The `static`/`mut` modifiers may be individually present or absent independently of the type-parameter pair. The pair must be either both absent or both present together.

> Previous versions of `bitvec` supported `$order`-only arguments. This has been
> removed for clarity of use and ease of implementation.

## Safety

Rust considers all `static mut` bindings to be `unsafe` to use. While `bits!` can prevent *some* of this unsafety by preventing direct access to the created `static mut` buffer, there are still ways to create multiple names referring to the same underlying buffer.

```rust,ignore
use bitvec::prelude::*;

fn unsound() -> &'static mut BitSlice<usize, Lsb0> {
  unsafe { bits![static mut 0; 64] }
}

let a = unsound();
let b = unsound();
```

The two names `a` and `b` can be used to produce aliasing `&mut [usize]` references.

**You must not invoke `bits![static mut …]` in a context where it can be used to create multiple escaping names.** This, and only this, argument combination of the macro produces a value that requires a call-site `unsafe` block to use. If you do not use this behavior to create multiple names over the same underlying buffer, then the macro’s expansion is safe to use, as `bitvec`’s existing alias-protection behavior suffices.

## Examples

```rust
use bitvec::prelude::*;
use core::cell::Cell;
use radium::types::*;

let a: &BitSlice = bits![0, 1, 0, 0, 1];
let b: &BitSlice = bits![1; 5];
assert_eq!(b.len(), 5);

let c = bits![u16, Lsb0; 0, 1, 0, 0, 1];
let d = bits![static Cell<u16>, Msb0; 1; 10];
let e = unsafe { bits![static mut u32, LocalBits; 0; 15] };
let f = bits![RadiumU32, Msb0; 1; 20];
```

[`BitSlice`]: crate::slice::BitSlice
[`vec!`]: macro@alloc::vec

bitvec-1.0.1/doc/macros/bitvec.md000064400000000000000000000007471046102023000147360ustar 00000000000000
# Bit-Vector Constructor

This macro creates encoded `BitSlice` buffers at compile-time, and at run-time copies them directly into a new heap allocation. It forwards all of its arguments to [`bits!`], and calls [`BitVec::from_bitslice`] on the produced `&BitSlice` expression. While you can use the `bits!` modifiers, there is no point, as the produced bit-slice is lost before the macro exits.
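For example (a sketch of typical invocations):

```rust
use bitvec::prelude::*;

let v: BitVec = bitvec![0, 1, 1, 0];
assert_eq!(v.len(), 4);

let w = bitvec![u8, Msb0; 1; 10];
assert_eq!(w.count_ones(), 10);
```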
[`BitVec::from_bitslice`]: crate::vec::BitVec::from_bitslice
[`bits!`]: macro@crate::bits

bitvec-1.0.1/doc/macros/encode_bits.md000064400000000000000000000054121046102023000157320ustar 00000000000000
# Bit-Sequence Buffer Encoding

This macro accepts a sequence of bit expressions from the public macros and creates encoded `[T; N]` arrays from them. The public macros can then use these encoded arrays as the basis of the requested data structure.

This is a complex macro that uses recursion to modify and inspect its input tokens. It is divided into three major sections.

## Entry Points

The first section provides a series of entry points that the public macros invoke. Each arm matches the syntax provided by public macros, and detects a specific `BitStore` implementor name: `uN`, `Cell<uN>`, `AtomicUN`, or `RadiumUN`, for each `N` in `8`, `16`, `32`, `64`, and `size`.

These arms then recurse, adding a token for the raw unsigned integer used as the basis of the encoding. The `usize` arms take an additional recursion that routes to the 32-bit or 64-bit encoding, depending on the target.

## Zero Extension

The next two arms handle extending the list of bit-expressions with 64 `0,`s. The first arm captures initial reëntry and appends the zero-comma tokens, then recurses to enter the chunking group. The second arm traps when recursion has chunked all user-provided tokens, and only the literal `0,` tokens appended by the first arm remain. The second arm dispatches the chunked bit-expressions into the element encoder, and is the exit point of the macro. Its output is an array of encoded memory elements, typed as the initially-requested `BitStore` name.

The `0,` tokens remain matchable as text literals because they never depart this macro: recursion within the same macro does not change the types in the AST, while invoking a new macro causes already-known tokens to become opacified into `:tt` whose contents cannot be matched. This is the reason that the macro is recursive rather than dispatching.

## Chunking

The stream of user-provided bit-expressions, followed by the appended zero-comma tokens, is divided into chunks by the width of the storage type. Each width (8, 16, 32, 64) has an arm that munches from the token stream and grows an opaque token-list containing munched groups. In syntax, this is represented by the `[$([$($bit:tt,)+],)*];` cluster:

- it is an array
  - of zero or more arrays
    - of one or more bit expressions
      - each followed by a comma
  - each followed by a comma
- followed by a semicolon

By placing this array ahead of the bit-expression stream, we can use the array as an append-only list (matched as `[$($elem:tt)*]`, emitted as `[$($elem)* [new]]`) grown by munching from the token stream of unknown length at the end of the argument set. On each recursion, the second arm in zero-extension attempts to trap the input. If it fails, then user-provided tokens remain; if it succeeds, then it discards any remaining macro-appended zeros and terminates.

bitvec-1.0.1/doc/macros/internal.md000064400000000000000000000005061046102023000152670ustar 00000000000000
# Internal Macro Implementations

The contents of this module are required to be publicly reachable from external crates, because that is the context in which the public macros expand; however, the contents of this module are **not** public API and `bitvec` does not support any use of it other than within the public macros.
bitvec-1.0.1/doc/macros/make_elem.md000064400000000000000000000021541046102023000153730ustar 00000000000000
# Element Encoder Macro

This macro is invoked by `__encode_bits!` with a set of bits that exactly fills some `BitStore` element type. It is responsible for encoding those bits into the raw memory bytes and assembling them into a whole integer.

It works by inspecting the `$order` argument. If it is one of `LocalBits`, `Lsb0`, or `Msb0`, then it can do the construction in-place, and get solved during `const` evaluation. If it is any other ordering, then it emits runtime code to do the translation and defers to the optimizer for evaluation.

It divides the input into clusters of eight bit expressions, then uses the `$order` argument to choose whether the bits are accumulated into a `u8` using `Lsb0`, `Msb0`, or `LocalBits` ordering. The accumulated byte array is then converted into an integer using the corresponding `uN::from_{b,l,n}e_bytes` function in `__ty_from_bytes!`.

Once assembled, the raw integer is changed into the requested final type. This currently routes through a helper type that unifies `const fn` constructors for each of the raw integer fundamentals, cells, and atomics in order to avoid transmutes.

bitvec-1.0.1/doc/macros.md000064400000000000000000000025611046102023000134560ustar 00000000000000
# Constructor Macros

This module provides macros that can be used to create `bitvec` data buffers at compile time. Each data structure has a corresponding macro:

- `BitSlice` has [`bits!`]
- `BitArray` has [`bitarr!`] (and [`BitArr!`] to produce type expressions)
- `BitBox` has [`bitbox!`]
- `BitVec` has [`bitvec!`]

These macros take a sequence of bit literals, as well as some optional control prefixes, and expand to code that is generally solvable at compile-time. The provided bit-orderings `Lsb0` and `Msb0` have implementations that can be used in `const` contexts, while third-party user-provided orderings cannot be used in `const` contexts but almost certainly *can* be const-folded by LLVM.

The sequences are encoded into element literals during compilation, and will be correctly encoded into the target binary. This is even true for targets with differing byte-endianness than the host compiler.

See each macro for documentation on its invocation syntax. The general pattern is `[modifier] [T, O;] bits…`. The modifiers influence the nature of the produced binding, the `[T, O;]` pair provides type parameters when the default is undesirable, and the `bits…` provides the actual contents of the data buffer.

[`BitArr!`]: macro@crate::BitArr
[`bitarr!`]: macro@crate::bitarr
[`bitbox!`]: macro@crate::bitbox
[`bits!`]: macro@crate::bits
[`bitvec!`]: macro@crate::bitvec

bitvec-1.0.1/doc/mem/BitElement.md000064400000000000000000000015361046102023000150010ustar 00000000000000
# Unified Element Constructor

This type is a hack around the fact that `Cell<uN>` and the `AtomicUN` types all have `const fn new(val: Inner) -> Self;` constructors, but the numeric fundamentals do not. As such, the standard library does not provide a unified construction syntax to turn an integer fundamental into the final type.

This provides a `const fn BitElement::<_>::new(R) -> Self;` function, implemented only for the `BitStore` implementors that the crate provides, that the constructor macros can use to turn integers into final values without using [`mem::transmute`][0]. While `transmute` is acceptable in this case (the types are all `#[repr(transparent)]`), it is still better avoided where possible.
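The pattern is roughly this (a hypothetical standalone sketch, not the crate’s actual definition):

```rust
use core::cell::Cell;

/// Hypothetical wrapper granting every storage family a `const fn new`.
pub struct BitElement<R>(pub R);

impl BitElement<u8> {
  pub const fn new(value: u8) -> Self {
    Self(value)
  }
}

impl BitElement<Cell<u8>> {
  pub const fn new(value: u8) -> Self {
    Self(Cell::new(value))
  }
}

// Both families are constructible in `const` position.
const RAW: BitElement<u8> = BitElement::<u8>::new(3);
const CELL: BitElement<Cell<u8>> = BitElement::<Cell<u8>>::new(3);
```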
As this is a macro assistant, it is publicly exposed, but is not public API. It has no purpose outside of the crate’s macros.

[0]: core::mem::transmute

bitvec-1.0.1/doc/mem/BitRegister.md000064400000000000000000000004741046102023000151740ustar 00000000000000
# Register Descriptions

This trait describes the unsigned integer types that can be manipulated in a target processor’s general-purpose registers. It has no bearing on the processor instructions or registers used to interact with the memory bus, and solely exists to describe integers that can exist on a system.

bitvec-1.0.1/doc/mem/elts.md000064400000000000000000000013151046102023000137130ustar 00000000000000
# Bit Storage Calculator

Computes the number of `T` elements required to store some number of bits. `T` must be an unsigned integer type and cannot have padding bits, but this restriction cannot be placed on `const fn`s yet.

## Parameters

- `bits`: The number of bits being stored in a `[T]` array.

## Returns

A minimal `N` such that `[T; N]` holds at least `bits` bits.

As this is a `const` function, when `bits` is also a `const` expression, it can be used to compute the size of an array type, such as `[u32; elts::<u32>(BITS)]`.

## Examples

```rust
use bitvec::mem as bv_mem;

assert_eq!(bv_mem::elts::<u8>(10), 2);
assert_eq!(bv_mem::elts::<u8>(16), 2);

let arr: [u16; bv_mem::elts::<u16>(20)] = [0; 2];
```

bitvec-1.0.1/doc/mem.md000064400000000000000000000007121046102023000127440ustar 00000000000000
# Memory Element Descriptions

This module describes the memory integers and processor registers used to hold and manipulate `bitvec` data buffers.

The [`BitRegister`] trait marks the unsigned integers that correspond to processor registers, and can therefore be used for buffer control. The integers that are not `BitRegister` can be composed from register values, but are not able to be used in buffer type parameters.

[`BitRegister`]: self::BitRegister

bitvec-1.0.1/doc/order/BitOrder.md000064400000000000000000000066301046102023000150200ustar 00000000000000
# In-Element Bit Ordering

This trait manages the translation of semantic bit indices into electrical positions within storage elements of a memory region.

## Usage

`bitvec` APIs operate on semantic index counters that exist in an abstract memory space independently of the real memory that underlies them. In order to affect real memory, `bitvec` must translate these indices into real values. The [`at`] function maps abstract index values into their corresponding real positions that can then be used to access memory.

You will likely never call any of the trait functions yourself. They are used by `bitvec` internals to operate on memory regions; all you need to do is provide an implementation of this trait as a type parameter to `bitvec` data structures.

## Safety

`BitOrder` is unsafe to implement because its translation of index to position cannot be forcibly checked by `bitvec` itself, and an improper implementation will lead to memory unsafety errors and unexpected collisions. The trait has strict requirements for each function. If these are not upheld, then the implementation is considered undefined at the library level and its use may produce incorrect or undefined behavior during compilation.

You are responsible for running [`verify_for_type`] or [`verify`] in your test suite if you implement `BitOrder`.

## Implementation Rules

Values of this type are never constructed or passed to `bitvec` functions.
Your implementation does not need to be zero-sized, but it will never have access to an instance to view its state. It *may* refer to other global state, but per the rules of `at`, that state may not change while any `bitvec` data structures are alive.

The only function you *need* to provide is `at`. Its requirements are listed in its trait documentation. You *may* also choose to provide implementations of `select` and `mask`. These have a default implementation that is correct, but may be unoptimized for your implementation. As such, you may replace them with a better version, but your implementation of these functions must be exactly equal to the default implementation for all possible inputs. This requirement is checked by the `verify_for_type` function.

## Verification

The `verify_for_type` function verifies that a `BitOrder` implementation is correct for a single `BitStore` implementor, and the `verify` function runs `verify_for_type` on all unsigned integers that implement `BitStore` on a target. If you run these functions in your test suite, they will provide detailed information if your implementation is incorrect.

## Examples

Implementations are not required to remain contiguous over a register, and may have any mapping they wish as long as it is total and bijective. This example swizzles the high and low halves of each byte.

```rust
use bitvec::{
  order::BitOrder,
  index::{BitIdx, BitPos},
  mem::BitRegister,
};

pub struct HiLo;

unsafe impl BitOrder for HiLo {
  fn at<R>(index: BitIdx<R>) -> BitPos<R>
  where R: BitRegister {
    unsafe { BitPos::new_unchecked(index.into_inner() ^ 4) }
  }
}

#[test]
#[cfg(test)]
fn prove_hilo() {
  bitvec::order::verify::<HiLo>(false);
}
```

Once a `BitOrder` implementation passes the test suite, it can be freely used as a type parameter in `bitvec` data structures. The translation takes place automatically, and you never need to look at this trait again.

[`at`]: Self::at
[`verify`]: crate::order::verify
[`verify_for_type`]: crate::order::verify_for_type

bitvec-1.0.1/doc/order/LocalBits.md000064400000000000000000000021701046102023000151550ustar 00000000000000
# C-Compatible Bit Ordering

This type alias attempts to match the bitfield ordering used by GCC on your target. The C standard permits ordering of single-bit bitfields in a structure to be implementation-defined, and GCC has been observed to use Lsb0-ordering on little-endian processors and Msb0-ordering on big-endian processors.

This has two important caveats:

- ordering of bits in an element is **completely** independent of the ordering of constituent bytes in memory. These have nothing to do with each other in any way. See [the user guide][0] for more information on memory representation.
- GCC wide bitfields on big-endian targets behave as bit-slices using the `_be` variants of `BitField` accessors. They do not match `Msb0` bit-wise ordering.

This type is provided solely as a convenience for narrow use cases that *may* match GCC’s `std::bitset`. It makes no guarantee about what C compilers for your target actually do, and you will need to do your own investigation if you are exchanging a single buffer across FFI in this manner.

[0]: https://bitvecto-rs.github.io/bitvec/memory-representation

bitvec-1.0.1/doc/order/Lsb0.md000064400000000000000000000010631046102023000141010ustar 00000000000000
# Least-Significant-First Bit Traversal

This type orders the bits in an element with the least significant bit first and the most significant bit last, in contiguous order across the element.
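For instance (a sketch):

```rust
use bitvec::prelude::*;

let byte = 0b0000_0101u8;
let bits = byte.view_bits::<Lsb0>();
// Index 0 selects the least significant bit.
assert!(bits[0]);
assert!(!bits[1]);
assert!(bits[2]);
```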
The guide has [a chapter][0] with more detailed information on the memory representation this produces.

This is the default type parameter used throughout the crate. If you do not have a desired memory representation, you should continue to use it, as it provides the best codegen for bit manipulation.

[0]: https://bitvecto-rs.github.io/bitvec/memory-representation

bitvec-1.0.1/doc/order/Msb0.md000064400000000000000000000010501046102023000140760ustar 00000000000000
# Most-Significant-First Bit Traversal

This type orders the bits in an element with the most significant bit first and the least significant bit last, in contiguous order across the element.

The guide has [a chapter][0] with more detailed information on the memory representation this produces.

This type likely matches the ordering of bits you would expect to see in a debugger, but has worse codegen than `Lsb0`, and is not encouraged if you are not doing direct memory inspection.

[0]: https://bitvecto-rs.github.io/bitvec/memory-representation

bitvec-1.0.1/doc/order/verify.md000064400000000000000000000013561046102023000146120ustar 00000000000000
# Complete `BitOrder` Verification

This function checks some [`BitOrder`] implementation’s behavior on each of the [`BitRegister`] types present on the target, and reports any violation of the rules that it detects.

## Type Parameters

- `O`: The `BitOrder` implementation being tested.

## Parameters

- `verbose`: Controls whether the test should print diagnostic information to standard output. If this is false, then the test only prints a message on failure; if it is true, it emits a message for every test it executes.

## Panics

This panics when it detects a violation of the `BitOrder` rules. If it returns normally, then the implementation is correct.

[`BitOrder`]: crate::order::BitOrder
[`BitRegister`]: crate::mem::BitRegister

bitvec-1.0.1/doc/order/verify_for_type.md000064400000000000000000000021161046102023000165140ustar 00000000000000
# Single-Type `BitOrder` Verification

This function checks some [`BitOrder`] implementation’s behavior on only one [`BitRegister`] type. It can be used when a program knows that it will only use a limited set of storage types and does not need to check against all of them.

You should prefer to use [`verify`], as `bitvec` has no means of preventing the use of a `BitRegister` storage type that your `BitOrder` implementation does not satisfy.

## Type Parameters

- `O`: The `BitOrder` implementation being tested.
- `R`: The `BitRegister` type for which `O` is being tested.

## Parameters

- `verbose`: Controls whether the test should print diagnostic information to standard output. If this is false, then the test only prints a message on failure; if it is true, then it emits a message for every test it executes.

## Panics

This panics when it detects a violation of the `BitOrder` rules. If it returns normally, then the implementation is correct for the given `R` type.

[`BitOrder`]: crate::order::BitOrder
[`BitRegister`]: crate::mem::BitRegister
[`verify`]: crate::order::verify

bitvec-1.0.1/doc/order.md000064400000000000000000000020351046102023000133010ustar 00000000000000
# In-Element Bit Ordering

The `bitvec` memory model is designed to separate the semantic ordering of bits in an abstract memory space from the electrical ordering of latches in real memory. This module provides the bridge between the two domains with the [`BitOrder`] trait and implementations of it.
The `BitOrder` trait bridges semantic indices (marked by the [`BitIdx`] type) to electrical position counters (marked by the [`BitPos`] type) or selection masks (marked by the [`BitSel`] and [`BitMask`] types).

Because `BitOrder` is open for client crates to implement, this module also provides verification functions for the test suite that ensure a given `BitOrder` implementation is correct for all the register types that it will govern. See the [`verify_for_type`] or [`verify`] functions for more information.

[`BitIdx`]: crate::index::BitIdx
[`BitMask`]: crate::index::BitMask
[`BitOrder`]: self::BitOrder
[`BitPos`]: crate::index::BitPos
[`BitSel`]: crate::index::BitSel
[`verify`]: self::verify
[`verify_for_type`]: self::verify_for_type

bitvec-1.0.1/doc/prelude.md000064400000000000000000000006621046102023000136320ustar 00000000000000
# Symbol Export

This module collects the general public API into a single place for bulk import, as `use bitvec::prelude::*;`, without polluting the root namespace of the crate.

This provides all the data structure types and macros, as well as the two traits needed to operate them as type parameters, by name. It also imports extension traits without naming them, so that their methods are available but their trait names are not.

bitvec-1.0.1/doc/ptr/BitPtr.md000064400000000000000000000050241046102023000142000ustar 00000000000000
# Single-Bit Pointer

This structure defines a pointer to exactly one bit in a memory element. It is a structure, rather than an encoding of a `*Bit` raw pointer, because it contains more information than can be packed into such a pointer. Furthermore, it can uphold the same requirements and guarantees that the rest of the crate demands, whereäs a raw pointer cannot.

## Original

[`*bool`](https://doc.rust-lang.org/std/primitive.pointer.html) and [`NonNull<bool>`](core::ptr::NonNull)

## API Differences

Since raw pointers are not sufficient in space or guarantees, and are limited by not being marked `#[fundamental]`, this is an ordinary `struct`. Because it cannot use the `*const`/`*mut` distinction that raw pointers and references can, this encodes mutability in a type parameter instead.

In order to be consistent with the rest of the crate, particularly the `*BitSlice` encoding, this enforces that all `T` element addresses are well-aligned to `T` and non-null. While this type is used in the API as an analogue of raw pointers, it is restricted in value to only contain the values of valid *references* to memory, not arbitrary pointers.

## ABI Differences

This is aligned to `1`, rather than the processor word, in order to enable some crate-internal space optimizations.

## Type Parameters

- `M`: Marks whether the pointer has mutability permissions to the referent memory. Only `Mut` pointers can be used to create `&mut` references.
- `T`: A memory type used to select both the register width and the bus behavior when performing memory accesses.
- `O`: The ordering of bits within a memory element.

## Usage

This structure is used as the `bitvec` equivalent to `*bool`. It is used in all raw-pointer APIs and provides behavior to emulate raw pointers. It cannot be directly dereferenced, as it is not a pointer; it can only be transformed back into higher referential types, or used in functions that accept it.

These pointers can never be null or misaligned.

## Safety

Rust and LLVM **do not** have a concept of bit-level initialization yet.
Furthermore, the underlying foundational code that this type uses to manipulate individual bits in memory relies on construction of **shared references** to memory, which means that unlike standard pointers, the `T` element to which `BitPtr` values point must always be **already initialized** in your program context. `bitvec` is not able to detect or enforce this requirement, and is currently not able to avoid it. See [`BitAccess`] for more information.

[`BitAccess`]: crate::access::BitAccess

bitvec-1.0.1/doc/ptr/BitPtrRange.md000064400000000000000000000025741046102023000151640ustar 00000000000000
# Bit-Pointer Range

This type is equivalent in purpose, but superior in functionality, to `Range<BitPtr<M, T, O>>`. If the standard library stabilizes [`Step`], the trait used to drive `Range` operations, then this type will likely be destroyed in favor of an `impl Step for BitPtr` block and use of standard ranges.

Like [`Range`], this is a half-open set where the low bit-pointer selects the first live bit in a span and the high bit-pointer selects the first dead bit *after* the span.

This type is not capable of inspecting provenance, and has no requirement of its own that both bit-pointers be derived from the same provenance region. It is safe to construct and use with any pair of bit-pointers; however, the bit-pointers it *produces* are, necessarily, `unsafe` to use.

## Original

[`Range<*bool>`][`Range`]

## Memory Representation

[`BitPtr`] is required to be `repr(packed)` in order to satisfy the [`BitRef`] size optimizations. In order to stay minimally sized itself, this type has no alignment requirement, and reading either bit-pointer *may* incur a misalignment penalty. Reads are always safe and valid; they may merely be slow.

## Type Parameters

This takes the same type parameters as `BitPtr`, as it is simply a pair of bit-pointers with range semantics.

[`BitPtr`]: crate::ptr::BitPtr
[`BitRef`]: crate::ptr::BitRef
[`Range`]: core::ops::Range
[`Step`]: core::iter::Step

bitvec-1.0.1/doc/ptr/BitRef.md000064400000000000000000000033441046102023000141520ustar 00000000000000
# Proxy Bit-Reference

This structure simulates `&/mut bool` within `BitSlice` regions. It is analogous to the C++ type [`std::bitset<N>::reference`][0].

This type wraps a [`BitPtr`] and caches a `bool` in one of the remaining padding bytes. It is then able to freely give out references to its cached `bool`, and commits the cached value back to the proxied location when dropped.

## Original

This is semantically equivalent to `&'a bool` or `&'a mut bool`.

## Quirks

Because this type has both a lifetime and a destructor, it can introduce an uncommon syntax error condition in Rust. When an expression that produces this type is in the final expression of a block, including if that expression is used as a condition in a `match`, `if let`, or `if`, then the compiler will attempt to extend the drop scope of this type to the outside of the block. This causes a lifetime mismatch error if the source region from which this proxy is produced begins its lifetime inside the block.

If you get a compiler error that this type causes something to be dropped while borrowed, you can end the borrow by putting any expression-ending syntax element after the offending expression that produces this type, including a semicolon or an item definition.
## Examples

```rust
use bitvec::prelude::*;

let bits = bits![mut 0; 2];

let (left, right) = bits.split_at_mut(1);
let mut first = left.get_mut(0).unwrap();
let second = right.get_mut(0).unwrap();

// Writing through a dereference requires a `mut` binding.
*first = true;
// Writing through the explicit method call does not.
second.commit(true);

drop(first); // It’s not a reference, so NLL does not apply!
assert_eq!(bits, bits![1; 2]);
```

[0]: https://en.cppreference.com/w/cpp/utility/bitset/reference

bitvec-1.0.1/doc/ptr/BitSpan.md000064400000000000000000000131511046102023000143340ustar 00000000000000
# Encoded Bit-Span Descriptor

This structure is used as the actual in-memory value of `BitSlice` pointers (including both `*{const,mut} BitSlice` and `&/mut BitSlice`). It is **not** public API, and the encoding scheme does not support external modification.

Rust slices encode a base element address and an element count into a single `&[T]` two-word value. `BitSpan` encodes a third value, the index of the base bit within the base element, into unused bits of the address and length counter. The slice reference has the ABI `(*T, usize)`, which is exactly two processor words in size. `BitSpan` matches this ABI so that it can be cast into `&/mut BitSlice` and used in reference-demanding APIs.

## Layout

This structure is a more complex version of the `(*const T, usize)` tuple that Rust uses to represent slices throughout the language. It breaks the pointer and counter fundamentals into sub-field components. Rust does not have bitfield syntax, so the below description of the structure layout is in C++.

```cpp
template <typename T>
struct BitSpan {
  uintptr_t ptr_head : __builtin_ctzll(alignof(T));
  uintptr_t ptr_addr : sizeof(uintptr_t) * 8 - __builtin_ctzll(alignof(T));

  size_t len_head : 3;
  size_t len_bits : sizeof(size_t) * 8 - 3;
};
```

This means that the `BitSpan` has three *logical* fields, stored in four segments, across the two *structural* fields of the type. The widths and placements of each segment are functions of the size of `*const T`, `usize`, and of the alignment of the `T` referent buffer element type.

## Fields

### Base Address

The address of the base element in a memory region is stored in all but the lowest bits of the `ptr` field. An aligned pointer to `T` will always have its lowest log2(byte width) bits zeroed, so those bits can be used to store other information, as long as they are erased before dereferencing the address as a pointer to `T`.

### Head Bit Index

For any referent element type `T`, the selection of a single bit within the element requires log2(byte width) bits to select a byte within the element `T`, and another three bits to select a bit within the selected byte.

|Type |Alignment|Trailing Zeros|Count Bits|
|:----|--------:|-------------:|---------:|
|`u8` |        1|             0|         3|
|`u16`|        2|             1|         4|
|`u32`|        4|             2|         5|
|`u64`|        8|             3|         6|

The index of the first live bit in the base element is split to have its three least significant bits stored in the least significant edge of the `len` field, and its remaining bits stored in the least significant edge of the `ptr` field.

### Length Counter

All but the lowest three bits of the `len` field are used to store a counter of live bits in the referent region. When this is zero, the region is empty. Because it is missing three bits, a `BitSpan` has only ⅛ of the index space of a `usize` value.

## Significant Values

The following values represent significant instances of the `BitSpan` type.
### Null Slice

The fully-zeroed slot is not a valid member of the `BitSpan` type; it is reserved instead as the sentinel value for `Option::<BitSpan>::None`.

### Canonical Empty Slice

All pointers with a `bits: 0` logical field are empty. Pointers that are used to maintain ownership of heap buffers are not permitted to erase their `addr` field. The canonical form of the empty slice has an `addr` value of [`NonNull::<T>::dangling()`], but all pointers to an empty region are equivalent regardless of address.

#### Uninhabited Slices

Any empty pointer with a non-[`dangling()`] base address is considered to be an uninhabited region. `BitSpan` never discards its address information, even as operations may alter or erase its head-index or length values.

## Type Parameters

- `T`: The memory type of the referent region. `BitSpan` is a specialized `*[T]` slice pointer, and operates on memory in terms of the `T` type for access instructions and pointer calculation.
- `O`: The ordering within the register type. The bit-ordering used within a region colors all pointers to the region, and orderings can never mix.

## Safety

`BitSpan` values may only be constructed from pointers provided by the surrounding program.

## Undefined Behavior

Values of this type are binary-incompatible with slice pointers. Transmutation of these values into any other type will result in an incorrect program, and permit the program to begin illegal or undefined behaviors. This type may never be manipulated in any way by user code outside of the APIs it offers to this `bitvec`; it certainly may not be seen or observed by other crates.

## Design Notes

Accessing the `.head` logical field would be faster if it inhabited the least significant byte of `.len`, and was not partitioned into `.ptr` as well. This implementation was chosen against in order to minimize the loss of bits in the length counter; if user studies indicate that bit-slices do not **ever** require more than 2²⁴ bits on 32-bit systems, this may be revisited.

The `ptr_metadata` feature, tracked in [Issue #81513], defines a trait `Pointee` that regions such as `BitSlice` can implement and define a `Metadata` type that carries all information other than a dereferenceable memory address. For regular slices, this would be `impl Pointee for [T] { type Metadata = usize; }`. For `BitSlice`, it would be `(usize, BitIdx)` and obviate this module entirely. But until it stabilizes, this remains.

[Issue #81513]: https://github.com/rust-lang/rust/issues/81513
[`NonNull::<T>::dangling()`]: core::ptr::NonNull::dangling
[`dangling()`]: core::ptr::NonNull::dangling

bitvec-1.0.1/doc/ptr/addr.md000064400000000000000000000003201046102023000137000ustar 00000000000000
# Address Value Management

This module provides utilities for working with `T: BitStore` addresses so that the other `ptr` submodules can rely on the correctness of their values when doing pointer encoding.

bitvec-1.0.1/doc/ptr/bitslice_from_raw_parts.md000064400000000000000000000013541046102023000177010ustar 00000000000000
# Bit-Slice Pointer Construction

This forms a raw [`BitSlice`] pointer from a bit-pointer and a length.

## Original

[`ptr::slice_from_raw_parts`](core::ptr::slice_from_raw_parts)

## Examples

You will need to construct a `BitPtr` first; these are typically produced by existing `BitSlice` views, or you can do so manually.
```rust use bitvec::{ prelude::*, index::BitIdx, ptr as bv_ptr, }; let data = 6u16; let head = BitIdx::new(1).unwrap(); let ptr = BitPtr::<_, _, Lsb0>::new((&data).into(), head).unwrap(); let slice = bv_ptr::bitslice_from_raw_parts(ptr, 10); let slice_ref = unsafe { &*slice }; assert_eq!(slice_ref.len(), 10); assert_eq!(slice_ref, bits![1, 1, 0, 0, 0, 0, 0, 0, 0, 0]); ``` [`BitSlice`]: crate::slice::BitSlice bitvec-1.0.1/doc/ptr/bitslice_from_raw_parts_mut.md000064400000000000000000000014241046102023000205640ustar 00000000000000# Bit-Slice Pointer Construction This forms a raw [`BitSlice`] pointer from a bit-pointer and a length. ## Original [`ptr::slice_from_raw_parts`](core::ptr::slice_from_raw_parts) ## Examples You will need to construct a `BitPtr` first; these are typically produced by existing `BitSlice` views, or you can do so manually. ```rust use bitvec::{ prelude::*, index::BitIdx, ptr as bv_ptr, }; let mut data = 6u16; let head = BitIdx::new(1).unwrap(); let ptr = BitPtr::<_, _, Lsb0>::new((&mut data).into(), head).unwrap(); let slice = bv_ptr::bitslice_from_raw_parts_mut(ptr, 10); let slice_ref = unsafe { &mut *slice }; assert_eq!(slice_ref.len(), 10); slice_ref.set(2, true); assert_eq!(slice_ref, bits![1, 1, 1, 0, 0, 0, 0, 0, 0, 0]); ``` [`BitSlice`]: crate::slice::BitSlice bitvec-1.0.1/doc/ptr/copy.md000064400000000000000000000047311046102023000137520ustar 00000000000000# Bit-wise `memcpy` This copies bits from a region beginning at `src` into a region beginning at `dst`, each extending upwards in the address space for `count` bits. The two regions may overlap. If the two regions are known to *never* overlap, then [`copy_nonoverlapping`][0] can be used instead. ## Original [`ptr::copy`](core::ptr::copy) ## Overlap Definition `bitvec` defines region overlap only when the bit-pointers used to access them have the same `O: BitOrder` type parameter. When this parameter differs, the regions are always assumed to not overlap in real memory, because `bitvec` does not define the effects of different orderings mapping to the same locations. ## Safety In addition to the bit-ordering constraints, this inherits the restrictions of the original `ptr::copy`: - `src` must be valid to read the next `count` bits out of memory. - `dst` must be valid to write into the next `count` bits. - Both `src` and `dst` must satisfy [`BitPtr`]’s non-null, well-aligned, requirements. ## Behavior This reads and writes each bit individually. It is incapable of optimizing its behavior to perform batched memory accesses that have better awareness of the underlying memory. The [`BitSlice::copy_from_bitslice`][1] method *is* able to perform this optimization. You should always prefer to use `BitSlice` if you are sensitive to performance. ## Examples This example performs a simple copy across independent regions. You can see that it follows the ordering parameter for the source and destination regions as it walks each bit individually. ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let start = 0b1011u8; let mut end = 0u16; let src = BitPtr::<_, _, Lsb0>::from_ref(&start); let dst = BitPtr::<_, _, Msb0>::from_mut(&mut end); unsafe { bv_ptr::copy(src, dst, 4); } assert_eq!(end, 0b1101_0000_0000_0000); ``` This can detect overlapping regions. Note again that overlap only exists when the ordering parameter is the same! Using bit-pointers that overlap in real memory with different ordering is not defined, and `bitvec` does not specify any result. 
```rust
use bitvec::prelude::*;
use bitvec::ptr as bv_ptr;

let mut x = 0b1111_0010u8;
let src = BitPtr::<_, _, Lsb0>::from_mut(&mut x);
let dst = unsafe { src.add(2) };

unsafe {
    bv_ptr::copy(src.to_const(), dst, 4);
}
assert_eq!(x, 0b1100_1010);
// bottom nibble ^^ ^^ moved here
```

[`BitPtr`]: crate::ptr::BitPtr
[0]: crate::ptr::copy_nonoverlapping
[1]: crate::slice::BitSlice::copy_from_bitslice

bitvec-1.0.1/doc/ptr/copy_nonoverlapping.md

# Bit-wise `memcpy`

This copies bits from a region beginning at `src` into a region beginning at `dst`, each extending upwards in the address space for `count` bits. The two regions *may not* overlap.

## Original

[`ptr::copy_nonoverlapping`](core::ptr::copy_nonoverlapping)

## Overlap Definition

The two regions may be in the same provenance as long as they have no common bits. `bitvec` only defines the possibility of overlap when the `O1` and `O2` bit-ordering parameters are the same; if they are different, then it considers the regions to not overlap, and does not attempt to detect real-memory collisions.

## Safety

In addition to the bit-ordering constraints, this inherits the restrictions of the original `ptr::copy_nonoverlapping`:

- `src` must be valid to read the next `count` bits out of memory.
- `dst` must be valid to write into the next `count` bits.
- Both `src` and `dst` must satisfy [`BitPtr`]’s non-null, well-aligned, requirements.

## Behavior

This reads and writes each bit individually. It is incapable of optimizing its behavior to perform batched memory accesses that have better awareness of the underlying memory.

The [`BitSlice::copy_from_bitslice`][1] method *is* able to perform this optimization, and tolerates overlap. You should always prefer to use `BitSlice` if you are sensitive to performance.

## Examples

```rust
use bitvec::prelude::*;
use bitvec::ptr as bv_ptr;

let start = 0b1011u8;
let mut end = 0u16;

let src = BitPtr::<_, _, Lsb0>::from_ref(&start);
let dst = BitPtr::<_, _, Msb0>::from_mut(&mut end);

unsafe {
    bv_ptr::copy_nonoverlapping(src, dst, 4);
}
assert_eq!(end, 0b1101_0000_0000_0000);
```

[1]: crate::slice::BitSlice::copy_from_bitslice
[`BitPtr`]: crate::ptr::BitPtr

bitvec-1.0.1/doc/ptr/drop_in_place.md

# Remote Destructor

`BitPtr` only points to indestructible types. This has no effect, and is only present for symbol compatibility. You should not have been calling it on your integers or `bool`s anyway!

## Original

[`ptr::drop_in_place`](core::ptr::drop_in_place)

bitvec-1.0.1/doc/ptr/eq.md

# Bit-Pointer Equality

This compares two bit-pointers for equality by their address value, not by the value of their referent bit. This does not dereference either.

## Original

[`ptr::eq`](core::ptr::eq)

## API Differences

The two bit-pointers can differ in their storage type parameters. `bitvec` defines pointer equality only between pointers with the same underlying [`BitStore::Mem`][0] element type. Numerically-equal bit-pointers with different integer types *will not* compare equal, though this function will compile and accept them.

This cannot compare encoded span pointers. `*const BitSlice` can be used in the standard-library `ptr::eq`, and does not need an override.
## Examples

```rust
use bitvec::prelude::*;
use bitvec::ptr as bv_ptr;
use core::cell::Cell;

let data = 0u16;
let bare_ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
let cell_ptr = bare_ptr.cast::<Cell<u16>>();

assert!(bv_ptr::eq(bare_ptr, cell_ptr));

let byte_ptr = bare_ptr.cast::<u8>();
assert!(!bv_ptr::eq(bare_ptr, byte_ptr));
```

[0]: crate::store::BitStore::Mem

bitvec-1.0.1/doc/ptr/hash.md

# Bit-Pointer Hashing

This hashes a bit-pointer by the value of its components, rather than its referent bit. It does not dereference the pointer.

This can be used to ensure that you are hashing the bit-pointer’s address value, though, as always, hashing an address rather than a data value is likely unwise.

## Original

[`ptr::hash`](core::ptr::hash)

bitvec-1.0.1/doc/ptr/null.md

# Bit-Pointer Sentinel Value

`BitPtr` does not permit actual null pointers. Instead, it uses the canonical dangling address as a sentinel for uninitialized, useless, locations. You should use `Option<BitPtr>` if you need to track nullability.

## Original

[`ptr::null`](core::ptr::null)

bitvec-1.0.1/doc/ptr/null_mut.md

# Bit-Pointer Sentinel Value

`BitPtr` does not permit actual null pointers. Instead, it uses the canonical dangling address as a sentinel for uninitialized, useless, locations. You should use `Option<BitPtr>` if you need to track nullability.

## Original

[`ptr::null_mut`](core::ptr::null_mut)

bitvec-1.0.1/doc/ptr/proxy.md

# Proxy Bit-References

Rust does not permit the use of custom proxy structures in place of true reference primitives, so APIs that specify references (like `IndexMut` or `DerefMut`) cannot be implemented by types that cannot manifest `&mut` references directly. Since `bitvec` cannot produce an `&mut bool` reference within a `BitSlice`, it instead uses the `BitRef` proxy type defined in this module to provide reference-like behavior generally, and simply does not define `IndexMut`.

bitvec-1.0.1/doc/ptr/range.md

# Bit-Pointer Ranges

This module defines ports of the `Range` type family to work with `BitPtr`s.

Rust’s own ranges have unstable internal details that make them awkward to use within the standard library, and essentially impossible outside it, with anything other than the numeric fundamentals. In particular, `bitvec` uses a half-open range of `BitPtr`s to represent C++-style dual-pointer memory regions (such as `BitSlice` iterators). Rust’s own slice iterators also do this, but because `*T` does not implement the [`Step`] trait, the standard library duplicates some work done by `Range` types in the slice iterators just to be able to alter the views.

As such, `Range<BitPtr<M, T, O>>` has the same functionality as `Range<*const _>`: almost none. As this is undesirable, this module defines equivalent types that implement the full desired behavior of a pointer range. These are primarily used as crate internals, but may also be of interest to users.

[`Step`]: core::iter::Step

bitvec-1.0.1/doc/ptr/read.md

# Single-Bit Read

This reads the bit out of `src` directly.
## Original [`ptr::read`](core::ptr::read) ## Safety Because this performs a dereference of memory, it inherits the original `ptr::read`’s requirements: - `src` must be valid to read. - `src` must be properly aligned. This is an invariant of the `BitPtr` type as well as of the memory access. - `src` must point to an initialized value of `T`. ## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let data = 128u8; let ptr = BitPtr::<_, _, Msb0>::from_ref(&data); assert!(unsafe { bv_ptr::read(ptr) }); ``` bitvec-1.0.1/doc/ptr/read_unaligned.md000064400000000000000000000013421046102023000157340ustar 00000000000000# Single-Bit Unaligned Read This reads the bit out of `src` directly. It uses compiler intrinsics to tolerate an unaligned `T` address. However, because `BitPtr` has a type invariant that addresses are always well-aligned (and non-null), this has no benefit or purpose. ## Original [`ptr::read_unaligned`](core::ptr::read_unaligned) ## Safety Because this performs a dereference of memory, it inherits the original `ptr::read_unaligned`’s requirements: - `src` must be valid to read. - `src` must point to an initialized value of `T`. ## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let data = 128u8; let ptr = BitPtr::<_, _, Msb0>::from_ref(&data); assert!(unsafe { bv_ptr::read_unaligned(ptr) }); ``` bitvec-1.0.1/doc/ptr/read_volatile.md000064400000000000000000000022451046102023000156100ustar 00000000000000# Single-Bit Volatile Read This reads the bit out of `src` directly, using a volatile I/O intrinsic to prevent compiler reördering or removal. You should not use `bitvec` to perform any volatile I/O operations. You should instead do volatile I/O work on integer values directly, or use a crate like [`voladdress`][0] to perform I/O transactions, and use `bitvec` only on stack locals that have no additional memory semantics. ## Original [`ptr::read_volatile`](core::ptr::read_volatile) ## Safety Because this performs a dereference of memory, it inherits the original `ptr::read_volatile`’s requirements: - `src` must be valid to read. - `src` must be properly aligned. This is an invariant of the `BitPtr` type as well as of the memory access. - `src` must point to an initialized value of `T`. Remember that volatile accesses are ordinary loads that the compiler cannot remove or reörder! They are *not* an atomic synchronizer. ## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let data = 128u8; let ptr = BitPtr::<_, _, Msb0>::from_ref(&data); assert!(unsafe { bv_ptr::read_volatile(ptr) }); ``` [0]: https://docs.rs/voladdress/latest/voladdress bitvec-1.0.1/doc/ptr/replace.md000064400000000000000000000015611046102023000144110ustar 00000000000000# Single-Bit Replacement This writes a new value into a location, and returns the bit-value previously stored there. It is semantically and behaviorally equivalent to [`BitRef::replace`][0], except that it works on bit-pointer structures rather than proxy references. Prefer to use a proxy reference or [`BitSlice::replace`][1] instead. ## Original [`ptr::replace`](core::ptr::replace) ## Safety This has the same safety requirements as [`ptr::read`][2] and [`ptr::write`][3], as it is required to use them in its implementation. 
## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let mut data = 4u8; let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data); assert!(unsafe { bv_ptr::replace(ptr.add(2), false) }); assert_eq!(data, 0); ``` [0]: crate::ptr::BitRef::replace [1]: crate::slice::BitSlice::replace [2]: crate::ptr::read [3]: crate::ptr::write bitvec-1.0.1/doc/ptr/single.md000064400000000000000000000005641046102023000142610ustar 00000000000000# Single-Bit Pointers This module defines single-bit pointers, which are required to be structures in their own right and do not have an encoded form. These pointers should generally not be used; [`BitSlice`] is more likely to be correct and have better performance. They are provided for consistency, not for hidden optimizations. [`BitSlice`]: crate::slice::BitSlice bitvec-1.0.1/doc/ptr/slice_from_raw_parts.md000064400000000000000000000004231046102023000171760ustar 00000000000000# Raw Bit-Slice Pointer Construction This is an alias for [`bitslice_from_raw_parts`][0], renamed for symbol compatibility. See its documentation instead. ## Original [`ptr::slice_from_raw_parts`](core::ptr::slice_from_raw_parts) [0]: crate::ptr::bitslice_from_raw_parts bitvec-1.0.1/doc/ptr/slice_from_raw_parts_mut.md000064400000000000000000000004431046102023000200650ustar 00000000000000# Raw Bit-Slice Pointer Construction This is an alias for [`bitslice_from_raw_parts_mut`][0], renamed for symbol compatibility. See its documentation instead. ## Original [`ptr::slice_from_raw_parts_mut`](core::ptr::slice_from_raw_parts_mut) [0]: crate::ptr::bitslice_from_raw_parts_mut bitvec-1.0.1/doc/ptr/span.md000064400000000000000000000033771046102023000137460ustar 00000000000000# Encoded Bit-Span Pointer This module implements the logic used to encode and operate on values of `*BitSlice`. It is the core operational module of the library. ## Theory Rust is slowly experimenting with allowing user-provided types to define metadata structures attached to raw-pointers and references in a structured manner. However, this is a fairly recent endeavour, much newer than `bitvec`’s work in the same area, so `bitvec` does not attempt to use it. The problem with bit-addressable memory is that it takes three more bits to select a *bit* than it does a *byte*. While AMD64 specifies (and AArch64 likely follows by fiat) that pointers are 64 bits wide but only contain 48 (or more recently, 57) bits of information, leaving the remainder available to store userspace information (as long as it is canonicalized before dereferencing), x86 and Arm32 have no such luxury space in their pointers. Since `bitvec` supports 32-bit targets, it instead opts to place the three bit-selector bits outside the pointer address. The only other space available in Rust pointers is in the length field of slice pointers. As such, `bitvec` encodes its span description information into `*BitSlice` and, by extension, `&/mut BitSlice`. The value underlying these language fundamentals is well-known (though theoretically opaque), and the standard library provides APIs that it promises will always be valid to manipulate them. Through careful use of these APIs, and following type-system rules to prevent undefined behavior, `bitvec` is able to define its span descriptions within the language fundamentals and appear fully idiomatic and compliant with existing Rust patterns. See the [`BitSpan`] type documentation for details on the encoding scheme used. 
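To make the arithmetic concrete, here is a rough model of that packing for a `u16` referent, written as plain integer math. This is an illustrative sketch with invented helper names, not `bitvec`’s actual implementation; it only demonstrates how an aligned address, a head-bit index, and a length can share two words.

```rust
// Hypothetical model of the two-word span encoding for `T = u16`.
// Alignment 2 frees one low address bit; the head index needs four
// bits total, so one lives in `ptr` and three live in `len`.
fn encode(addr: usize, head: usize, bits: usize) -> (usize, usize) {
    assert_eq!(addr % 2, 0, "`u16` addresses are 2-byte aligned");
    assert!(head < 16, "a head index selects one of 16 bits");
    let ptr = addr | (head >> 3);           // head[3] rides in the address
    let len = (bits << 3) | (head & 0b111); // head[2:0] ride in the length
    (ptr, len)
}

fn decode(ptr: usize, len: usize) -> (usize, usize, usize) {
    let addr = ptr & !1; // erase the borrowed bit before using the address
    let head = ((ptr & 1) << 3) | (len & 0b111);
    let bits = len >> 3;
    (addr, head, bits)
}

let (ptr, len) = encode(0x1000, 9, 20);
assert_eq!(decode(ptr, len), (0x1000, 9, 20));
```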
[`BitSpan`]: self::BitSpan bitvec-1.0.1/doc/ptr/swap.md000064400000000000000000000014461046102023000137520ustar 00000000000000# Bit Swap This exchanges the bit-values in two locations. It is semantically and behaviorally equivalent to [`BitRef::swap`][0], except that it works on bit-pointer structures rather than proxy references. Prefer to use a proxy reference or [`BitSlice::swap`][1] instead. ## Original [`ptr::swap`](core::ptr::swap) ## Safety This has the same safety requirements as [`ptr::read`][2] and [`ptr::write`][3], as it is required to use them in its implementation. ## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let mut data = 2u8; let x = BitPtr::<_, _, Lsb0>::from_mut(&mut data); let y = unsafe { x.add(1) }; unsafe { bv_ptr::swap(x, y); } assert_eq!(data, 1); ``` [0]: crate::ptr::BitRef::swap [1]: crate::slice::BitSlice::swap [2]: crate::ptr::read [3]: crate::ptr::write bitvec-1.0.1/doc/ptr/swap_nonoverlapping.md000064400000000000000000000014271046102023000170720ustar 00000000000000# Many-Bit Swap Exchanges the contents of two regions, which cannot overlap. ## Original [`ptr::swap_nonoverlapping`](core::ptr::swap_nonoverlapping) ## Safety Both `one` and `two` must be: - correct `BitPtr` instances (well-aligned, non-null) - valid to read and write for the next `count` bits Additionally, the ranges `one .. one + count` and `two .. two + count` must be entirely disjoint. They can be adjacent, but no bit can be in both. ## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let mut x = [0u8; 2]; let mut y = !0u16; let x_ptr = BitPtr::<_, _, Msb0>::from_slice_mut(&mut x); let y_ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut y); unsafe { bv_ptr::swap_nonoverlapping(x_ptr, y_ptr, 12); } assert_eq!(x, [!0, 0xF0]); assert_eq!(y, 0xF0_00); ``` bitvec-1.0.1/doc/ptr/write.md000064400000000000000000000016211046102023000141250ustar 00000000000000# Single-Bit Write This writes a bit into `dst` directly. ## Original [`ptr::write`](core::ptr::write) ## Safety Because this performs a dereference of memory, it inherits the original `ptr::write`’s requirements: - `dst` must be valid to write - `dst` must be properly aligned. This is an invariant of the `BitPtr` type as well as of the memory access. Additionally, `dst` must point to an initialized value of `T`. Integers cannot be initialized one bit at a time. ## Behavior This is required to perform a read/modify/write cycle on the memory location. LLVM *may or may not* emit a bit-write instruction on targets that have them in the ISA, but this is not specified in any way. ## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let mut data = 0u8; let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data); unsafe { bv_ptr::write(ptr.add(2), true); } assert_eq!(data, 4); ``` bitvec-1.0.1/doc/ptr/write_bits.md000064400000000000000000000020761046102023000151530ustar 00000000000000# Bit-wise `memset` This fills a region of memory with a bit value. It is equivalent to using `memset` with only `!0` or `0`, masked appropriately for the region edges. ## Original [`ptr::write_bytes`](core::ptr::write_bytes) ## Safety Because this performs a dereference of memory, it inherits the original `ptr::write_bytes`’ requirements: - `dst` must be valid to write - `dst` must be properly aligned. This is an invariant of the `BitPtr` type as well as of the memory access. Additionally, `dst` must point to an initialized value of `T`. Integers cannot be initialized one bit at a time. 
## Behavior

This function does not specify an implementation. You should assume the worst case (`O(n)` read/modify/write of each bit). The [`BitSlice::fill`][0] method will have equal or better performance.

## Examples

```rust
use bitvec::prelude::*;
use bitvec::ptr as bv_ptr;

let mut data = 0u8;
let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
unsafe {
    bv_ptr::write_bits(ptr.add(1), true, 5);
}
assert_eq!(data, 0b0011_1110);
```

[0]: crate::slice::BitSlice::fill

bitvec-1.0.1/doc/ptr/write_bytes.md

# Bit-wise `memset`

This is an alias for [`write_bits`][0], renamed for symbol compatibility. See its documentation instead.

## Original

[`ptr::write_bytes`](core::ptr::write_bytes)

[0]: crate::ptr::write_bits

bitvec-1.0.1/doc/ptr/write_unaligned.md

# Single-Bit Unaligned Write

This writes a bit into `dst` directly. It uses compiler intrinsics to tolerate an unaligned `T` address. However, because `BitPtr` has a type invariant that addresses are always well-aligned (and non-null), this has no benefit or purpose.

## Original

[`ptr::write_unaligned`](core::ptr::write_unaligned)

## Safety

- `dst` must be valid to write
- `dst` must be properly aligned. This is an invariant of the `BitPtr` type as well as of the memory access.

Additionally, `dst` must point to an initialized value of `T`. Integers cannot be initialized one bit at a time.

## Behavior

This is required to perform a read/modify/write cycle on the memory location. LLVM *may or may not* emit a bit-write instruction on targets that have them in the ISA, but this is not specified in any way.

## Examples

```rust
use bitvec::prelude::*;
use bitvec::ptr as bv_ptr;

let mut data = 0u8;
let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
unsafe {
    bv_ptr::write_unaligned(ptr.add(2), true);
}
assert_eq!(data, 4);
```

bitvec-1.0.1/doc/ptr/write_volatile.md

# Single-Bit Volatile Write

This writes a bit into `dst` directly, using a volatile I/O intrinsic to prevent compiler reördering or removal.

You should not use `bitvec` to perform any volatile I/O operations. You should instead do volatile I/O work on integer values directly, or use a crate like [`voladdress`][0] to perform I/O transactions, and use `bitvec` only on stack locals that have no additional memory semantics.

## Original

[`ptr::write_volatile`](core::ptr::write_volatile)

## Safety

Because this performs a dereference of memory, it inherits the original `ptr::write_volatile`’s requirements:

- `dst` must be valid to write
- `dst` must be properly aligned. This is an invariant of the `BitPtr` type as well as of the memory access.

Additionally, `dst` must point to an initialized value of `T`. Integers cannot be initialized one bit at a time.

## Behavior

This is required to perform a read/modify/write cycle on the memory location. LLVM *may or may not* emit a bit-write instruction on targets that have them in the ISA, but this is not specified in any way.
## Examples ```rust use bitvec::prelude::*; use bitvec::ptr as bv_ptr; let mut data = 0u8; let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data); unsafe { bv_ptr::write_volatile(ptr.add(2), true); } assert_eq!(data, 4); ``` [0]: https://docs.rs/voladdress/latest/voladdress bitvec-1.0.1/doc/ptr.md000064400000000000000000000015001046102023000127670ustar 00000000000000# Raw Pointer Implementation This provides `bitvec`-internal pointer types and a mirror of the [`core::ptr`] module. It contains the following types: - [`BitPtr`] is a raw-pointer to exactly one bit. - [`BitRef`] is a proxy reference to exactly one bit. - `BitSpan` is the encoded form of the `*BitSlice` pointer and `&BitSlice` reference. It is not publicly exposed, but it serves as the foundation of `bitvec`’s ability to describe memory regions. It also provides ports of the free functions available in `core::ptr`, as well as some utilities for bridging ordinary Rust pointers into `bitvec`. You should generally not use the contents of this module; `BitSlice` provides more convenience and has stronger abilities to optimize performance. [`BitPtr`]: self::BitPtr [`BitRef`]: self::BitRef [`core::ptr`]: core::ptr bitvec-1.0.1/doc/serdes/array.md000064400000000000000000000030401046102023000145660ustar 00000000000000# Bit-Array De/Serialization The Serde model distinguishes between *sequences*, which have a dynamic length which must always be transported with the data, and *tuples*, which have a fixed length known at compile-time that does not require transport. Serde handles arrays using its tuple model, not its sequence model, which means that `BitArray` cannot use the `BitSlice` Serde implementations (which must use the sequence model in order to handle `&[u8]` and `Vec` de/serialization). Instead, `BitArray` has a standalone implementation using the tuple model so that its wrapped array can be transported (nearly) as if it were unwrapped. For consistency, `BitArray` has the same wire format that `BitSlice` does; the only distinction is that the data buffer is a tuple rather than a sequence. Additionally, Serde’s support for old versions of Rust means that it only implements its traits on arrays `[T; 0 ..= 32]`. Since `bitvec` has a much higher MSRV that includes support for the const-generic `[T; N]` family, it reïmplements Serde’s behavior on a custom `Array` type in order to ensure that all possible `BitArray` storage types are transportable. Note, however, that *because* each `[T; N]` combination is a new implementation, de/serializing `BitArray`s directly is a great way to pessimize codegen. While it would be nice if `rustc` or LLVM could collapse the implementations and restore `N` as a run-time argument rather than a compile-time constant, neither `bitvec` nor Serde attempt to promise this in any way. Use at your discretion. bitvec-1.0.1/doc/serdes/slice.md000064400000000000000000000011561046102023000145550ustar 00000000000000# Bit-Slice De/Serialization Bit-slice references and containers serialize as sequences with additional metadata. Serde only provides a deserializer for `&[u8]`; wider integers and interior-mutability wrappers are not able to view a transport buffer without potentially modifying it, and the buffer is not modifiable while being used for deserialization. As such, only `&BitSlice` has a no-copy deserialization implementation. If you need other storage types, you will need to deserialize into a `BitBox` or `BitVec`. If you do not have an allocator, you must *serialize from* and deserialize into a `BitArray`. 
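As a concrete round-trip — assuming a build with `bitvec`’s `serde` feature plus the `serde_json` crate, neither of which this document mandates — transporting a `BitArray` looks like any other Serde value:

```rust
use bitvec::prelude::*;

// Sketch only: requires `bitvec = { features = ["serde"] }` and `serde_json`.
let arr = bitarr![u8, Msb0; 1, 0, 1, 1];
let json = serde_json::to_string(&arr).unwrap();
// Deserialization demands the same storage, ordering, and exact bit length.
let back: BitArr!(for 8, in u8, Msb0) = serde_json::from_str(&json).unwrap();
assert_eq!(arr, back);
```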
bitvec-1.0.1/doc/serdes/utils.md

# De/Serialization Assistants

This module contains types and implementations that assist in the de/serialization of the crate’s primary data structures.

## `BitIdx`

The `BitIdx` implementation serializes both the index value and also the bit-width of `T::Mem`, so that the deserializer can ensure that it only loads from a matching data buffer.

## `Array`

Serde only provides implementations for `[T; 0 ..= 32]`, because it must support much older Rust versions (at time of writing, 1.15+) that do not have const-generics. As `bitvec` has an MSRV of 1.56, it *does* have const-generics. This type reïmplements Serde’s array behavior for all arrays, so that `bitvec` can transport any `BitArray` rather than only small bit-arrays.

## `Domain`

`BitSlice` serializes its data buffer by using `Domain` to produce a sequence of elements. While the length is always known, and is additionally carried in the crate metadata ahead of the data buffer, `Domain` uses Serde’s sequence model in order to allow the major implementations to use the provided slice or vector deserializers, rather than rebuilding even more logic from scratch.

bitvec-1.0.1/doc/serdes.md

# Support for `serde`

`bitvec` structures are able to de/serialize their contents using `serde`. Because `bitvec` is itself designed to be a transport buffer and have memory-efficient storage properties, the implementations are somewhat strange and not necessarily suitable for transport across heterogeneous hosts.

`bitvec` always serializes its underlying memory storage as a sequence of raw memory. It also includes the necessary metadata to prevent deserialization into an incorrect type.

## Serialization

All data types serialize through `BitSlice`. While in version 0 `BitArray` had its own serialization logic, this is no longer the case.

`BitSlice` serializes the bit-width of `T::Mem` and the `any::type_name` of `O`. This may cause deserialization failures in the future if `any::type_name` changes its behavior, but as it is run while compiling `bitvec` itself, clients that rename `bitvec` when bringing it into their namespace should not be affected.

Note that because `LocalBits` is a reëxport rather than a type in its own right, it always serializes as the real type to which it forwards. This prevents accidental mismatch when transporting between machines with different destinations for this alias.

The next items serialized are the index of the starting bit within the starting element, and the total number of bits included in the bit-slice.

After these, the data buffer is serialized directly. Each element in the data buffer is loaded, has any dead bits cleared to `0`, and is then serialized directly into the collector. In particular, no byte-reördering for transport is performed here, so integers wider than a byte must use a de/serialization format that handles this if, for example, byte-order endian transforms are required.

## Deserialization

Serde only permits no-copy slice deserialization on `&'a [u8]` slices, so `bitvec` in turn can only deserialize into `&'a BitSlice` bit-slices. It can deserialize into `BitArray`s of any type, relying on the serialization layer to reverse any byte-order transforms.

`&BitSlice` will only deserialize if the transport format contains the bytes directly in it.
If you do have an allocator, and are serializing `BitBox` or `BitVec`, and expect to deserialize into a `BitArray`, then you will need to use `.force_align()` and ensure that you fully occupy the buffer being transported. `BitArray` will fail to deserialize if the data stream does not have a head-bit index of `0` and a length that exactly matches its own. This limitation is a consequence of the implementation, and likely will not be relaxed. `BitBox` and `BitVec`, however, are able to deserialize any bit-sequence without issue. ## Warnings `usize` *does* de/serialize! However, because it does not have a fixed width, `bitvec` always serializes it as the local fixed-width equivalent, and places the word width into the serialization stream. This will prevent roundtripping a `BitArray<[usize; N]>` between hosts with different `usize` widths, even though the types in the source code line up. This behavior was not present in version 0, and users were able to write programs that incorrectly handled de/serialization when used on heterogenous systems. In addition, remember that `bitvec` serializes its data buffer *directly* as 2’s-complement integers. You must ensure that your transport layer can handle them correctly. As an example, JSON is not required to transport 64-bit integers with perfect fidelity. `bitvec` has no way to detect inadequacy in the transport layer, and will not prevent you from using a serialization format that damages or alters the bit-stream you send through it. ## Transport Format All `bitvec` data structures produce the same basic format: a structure (named `BitSeq` for `BitSlice`, `BitBox`, and `BitVec`, or `BitArr` for `BitArray`) with four fields: 1. `order` is a string naming the `O: BitOrder` parameter. Because it uses [`any::type_name`][0], its value cannot be *guaranteed* to be stable. You cannot assume that transport buffers are compatible across versions of the compiler used to create applications exchanging them. 1. `head` is a `BitIdx` structure containing two fields: 1. `width` is a single byte containing `8`, `16`, `32`, or `64`, describing the bit-width of each element in the data buffer. `bitvec` structures will refuse to deserialize if the serialized bit-width does not match their `T::Mem` type. 1. `index` is a single byte containing the head-bit that begins the live `BitSlice` region. `BitArray` will refuse to deserialize if this is not zero. 1. `bits` is the number of live bits in the region, as a `u64`. `BitArray` fails to deserialize if it does not match [`mem::bits_of::()`][1]. 1. `data` is the actual data buffer containing the bits being transported. For `BitSeq` serialization, it is a sequence; for `BitArr`, it is a tuple. This may affect the transport representation, and so the two are not guaranteed to be interchangeable over all transports. As known examples, JSON does not have a fixed-size array type, so the contents of all `bitvec` structures have identical rendering, while the [`bincode`] crate does distinguish between run-length-encoded slices and non-length-encoded arrays. ## Implementation Details `bitvec` supports deserializing from both of Serde’s models for aggregate structures: dictionaries and sequences. It always serializes as a dictionary, but if your serialization layer does not want to include field names, you may emit only the values *in the temporal order that they are received* and `bitvec` will correctly deserialize from them. 
`BitSlice` (and `BitBox` and `BitVec`, which forward to it) transports its data buffer using Serde’s *sequence* model. `BitArray` uses Serde’s *tuple* model instead. These models might not be interchangeable in certain transport formats! You should always deserialize into the same container type that produced a serialized stream. [0]: core::any::type_name [1]: crate::mem::bits_of [`bincode`]: https://docs.rs/bincode/latest/bincode bitvec-1.0.1/doc/slice/BitSlice.md000064400000000000000000000364221046102023000147720ustar 00000000000000# Bit-Addressable Memory A slice of individual bits, anywhere in memory. `BitSlice` is an unsized region type; you interact with it through `&BitSlice` and `&mut BitSlice` references, which work exactly like all other Rust references. As with the standard slice’s relationship to arrays and vectors, this is `bitvec`’s primary working type, but you will probably hold it through one of the provided [`BitArray`], [`BitBox`], or [`BitVec`] containers. `BitSlice` is conceptually a `[bool]` slice, and provides a nearly complete mirror of `[bool]`’s API. Every bit-vector crate can give you an opaque type that hides shift/mask calculations from you. `BitSlice` does far more than this: it offers you the full Rust guarantees about reference behavior, including lifetime tracking, mutability and aliasing awareness, and explicit memory control, *as well as* the full set of tools and APIs available to the standard `[bool]` slice type. `BitSlice` can arbitrarily split and subslice, just like `[bool]`. You can write a linear consuming function and keep the patterns you already know. For example, to trim all the bits off either edge that match a condition, you could write ```rust use bitvec::prelude::*; fn trim( bits: &BitSlice, to_trim: bool, ) -> &BitSlice { let stop = |b: bool| b != to_trim; let front = bits.iter() .by_vals() .position(stop) .unwrap_or(0); let back = bits.iter() .by_vals() .rposition(stop) .map_or(0, |p| p + 1); &bits[front .. back] } # assert_eq!(trim(bits![0, 0, 1, 1, 0, 1, 0], false), bits![1, 1, 0, 1]); ``` to get behavior something like `trim(&BitSlice[0, 0, 1, 1, 0, 1, 0], false) == &BitSlice[1, 1, 0, 1]`. ## Documentation All APIs that mirror something in the standard library will have an `Original` section linking to the corresponding item. All APIs that have a different signature or behavior than the original will have an `API Differences` section explaining what has changed, and how to adapt your existing code to the change. These sections look like this: ## Original [`[bool]`](https://doc.rust-lang.org/stable/std/primitive.slice.html) ## API Differences The slice type `[bool]` has no type parameters. `BitSlice` has two: one for the integer type used as backing storage, and one for the order of bits within that integer type. `&BitSlice` is capable of producing `&bool` references to read bits out of its memory, but is not capable of producing `&mut bool` references to write bits *into* its memory. Any `[bool]` API that would produce a `&mut bool` will instead produce a [`BitRef`] proxy reference. ## Behavior `BitSlice` is a wrapper over `[T]`. It describes a region of memory, and must be handled indirectly. This is most commonly done through the reference types `&BitSlice` and `&mut BitSlice`, which borrow memory owned by some other value in the program. These buffers can be directly owned by the sibling types [`BitBox`], which behaves like [`Box<[T]>`](alloc::boxed::Box), and [`BitVec`], which behaves like [`Vec`]. 
It cannot be used as the type parameter to a pointer type such as `Box`, `Rc`, `Arc`, or any other indirection.

The `BitSlice` region provides access to each individual bit in the region, as if each bit had a memory address that you could use to dereference it. It packs each logical bit into exactly one bit of storage memory, just like [`std::bitset<N>`] and [`std::vector<bool>`] in C++.

## Type Parameters

`BitSlice` has two type parameters which propagate through nearly every public API in the crate. These are very important to its operation, and your choice of type arguments informs nearly every part of this library’s behavior.

### `T: BitStore`

[`BitStore`] is the simpler of the two parameters. It refers to the integer type used to hold bits. It must be one of the Rust unsigned integer fundamentals: `u8`, `u16`, `u32`, `usize`, and on 64-bit systems only, `u64`. In addition, it can also be an alias-safe wrapper over them (see the [`access`] module) in order to permit bit-slices to share underlying memory without interfering with each other.

`BitSlice` references can only be constructed over the integers, not over their aliasing wrappers. `BitSlice` will only use aliasing types in its `T` slots when you invoke APIs that produce them, such as [`.split_at_mut()`].

The default type argument is `usize`.

The argument you choose is used as the basis of a `[T]` slice, over which the `BitSlice` view is produced. `BitSlice` is subject to all of the rules about alignment that `[T]` is. If you are working with in-memory representation formats, chances are that you already have a `T` type with which you’ve been working, and should use it here.

If you are only using this crate to discard the seven wasted bits per `bool` in a collection of `bool`s, and are not too concerned about the in-memory representation, then you should use the default type argument of `usize`. This is because most processors work best when moving an entire `usize` between memory and the processor itself, and using a smaller type may cause it to slow down. Additionally, processor instructions are typically optimized for the whole register, and the processor might need to do additional clearing work for narrower types.

### `O: BitOrder`

[`BitOrder`] is the more complex parameter. It has a default argument which, like `usize`, is a good baseline choice when you do not explicitly need to control the representation of bits in memory.

This parameter determines how `bitvec` indexes the bits within a single `T` memory element. Computers all agree that in a slice of `T` elements, the element with the lower index has a lower memory address than the element with the higher index. But the individual bits within an element do not have addresses, and so there is no uniform standard of which bit is the zeroth, which is the first, which is the penultimate, and which is the last.

To make matters even more confusing, there are two predominant ideas of in-element ordering that often *correlate* with the in-element *byte* ordering of integer types, but are in fact wholly unrelated! `bitvec` provides these two main orderings as types for you, and if you need a different one, it also provides the tools you need to write your own.

#### Least Significant Bit Comes First

This ordering, named the [`Lsb0`] type, indexes bits within an element by placing the `0` index at the least significant bit (numeric value `1`) and the final index at the most significant bit (numeric value [`T::MIN`][minval] for signed integers on most machines).
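A quick check of that placement, using the crate’s own view API (a sketch, not part of the original text):

```rust
use bitvec::prelude::*;

let n = 1u8; // numeric value 1: only the least significant bit is set
let bits = n.view_bits::<Lsb0>();
assert!(bits[0]);  // `Lsb0` places index 0 at the least significant bit
assert!(!bits[7]); // ... and the final index at the most significant bit
```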
For example, this is the ordering used by most C compilers to lay out bit-field struct members on little-endian **byte**-ordered machines.

#### Most Significant Bit Comes First

This ordering, named the [`Msb0`] type, indexes bits within an element by placing the `0` index at the most significant bit (numeric value [`T::MIN`][minval] for most signed integers) and the final index at the least significant bit (numeric value `1`).

For example, this is the ordering used by the [TCP wire format][tcp], and by most C compilers to lay out bit-field struct members on big-endian **byte**-ordered machines.

#### Default Ordering

The default ordering is [`Lsb0`], as it typically produces shorter object code than [`Msb0`] does. If you are implementing a collection, then `Lsb0` will likely give you better performance; if you are implementing a buffer protocol, then your choice of ordering is dictated by the protocol definition.

## Safety

`BitSlice` is designed to never introduce new memory unsafety that you did not provide yourself, either before or during the use of this crate. However, safety bugs have been identified before, and you are welcome to submit any discovered flaws as a defect report.

The `&BitSlice` reference type uses a private encoding scheme to hold all of the information needed in its stack value. This encoding is **not** part of the public API of the library, and is not binary-compatible with `&[T]`. Furthermore, in order to satisfy Rust’s requirements about alias conditions, `BitSlice` performs type transformations on the `T` parameter to ensure that it never creates the potential for undefined behavior or data races.

You must never attempt to type-cast a reference to `BitSlice` in any way. You must not use [`mem::transmute`] with `BitSlice` anywhere in its type arguments. You must not use `as`-casting to convert between `*BitSlice` and any other type. You must not attempt to modify the binary representation of a `&BitSlice` reference value. These actions will all lead to runtime memory unsafety, are (hopefully) likely to induce a program crash, and may possibly cause undefined behavior at compile-time.

Everything in the `BitSlice` public API, even the `unsafe` parts, is guaranteed to have no more unsafety than their equivalent items in the standard library. All `unsafe` APIs will have documentation explicitly detailing what the API requires you to uphold in order for it to function safely and correctly. All safe APIs will do so themselves.

## Performance

Like the standard library’s `[T]` slice, `BitSlice` is designed to be very easy to use safely, while supporting `unsafe` usage when necessary. Rust has a powerful optimizing engine, and `BitSlice` will frequently be compiled to have zero runtime cost. Where it is slower, it will not be significantly slower than a manual replacement.

As the machine instructions operate on registers rather than bits, your choice of [`T: BitStore`] type parameter can influence your bit-slice’s performance. Using larger register types means that bit-slices can gallop over completely-used interior elements faster, while narrower register types permit more graceful handling of subslicing and aliased splits.

## Construction

`BitSlice` views of memory can be constructed over borrowed data in a number of ways. As this is a reference-only type, it can only ever be built by borrowing an existing memory buffer and taking temporary control of your program’s view of the region.
### Macro Constructor

`BitSlice` buffers can be constructed at compile-time through the [`bits!`] macro. This macro accepts a superset of the [`vec!`] arguments, and creates an appropriate buffer in the local scope. The macro expands to a borrowed [`BitArray`] temporary, which will live for the duration of the bound name.

```rust
use bitvec::prelude::*;

let immut = bits![u8, Lsb0; 0, 1, 0, 0, 1, 0, 0, 1];
let mutable: &mut BitSlice<_, _> = bits![mut u8, Msb0; 0; 8];

assert_ne!(immut, mutable);
mutable.clone_from_bitslice(immut);
assert_eq!(immut, mutable);
```

### Borrowing Constructors

You may borrow existing elements or slices with the following functions:

- [`from_element`] and [`from_element_mut`],
- [`from_slice`] and [`from_slice_mut`],
- [`try_from_slice`] and [`try_from_slice_mut`]

These take references to existing memory and construct `BitSlice` references from them. These are the most basic ways to borrow memory and view it as bits; however, you should prefer the [`BitView`] trait methods instead.

```rust
use bitvec::prelude::*;

let data = [0u16; 3];
let local_borrow = BitSlice::<_, Lsb0>::from_slice(&data);

let mut data = [0u8; 5];
let local_mut = BitSlice::<_, Lsb0>::from_slice_mut(&mut data);
```

### Trait Method Constructors

The [`BitView`] trait implements [`.view_bits::<O>()`] and [`.view_bits_mut::<O>()`] methods on elements, arrays, and slices. This trait, imported in the crate prelude, is *probably* the easiest way for you to borrow memory as bits.

```rust
use bitvec::prelude::*;

let data = [0u32; 5];
let trait_view = data.view_bits::<Lsb0>();

let mut data = 0usize;
let trait_mut = data.view_bits_mut::<Msb0>();
```

### Owned Bit Slices

If you wish to take ownership of a memory region and enforce that it is always viewed as a `BitSlice` by default, you can use one of the [`BitArray`], [`BitBox`], or [`BitVec`] types, rather than pairing ordinary buffer types with the borrowing constructors.

```rust
use bitvec::prelude::*;

let slice = bits![0; 27];
let array = bitarr![u8, LocalBits; 0; 10];
# #[cfg(feature = "alloc")] fn allocates() {
let boxed = bitbox![0; 10];
let vec = bitvec![0; 20];
# }
#[cfg(feature = "alloc")] allocates();

// arrays always round up
assert_eq!(array.as_bitslice(), slice[.. 16]);
# #[cfg(feature = "alloc")] fn allocates2() {
# let slice = bits![0; 27];
# let boxed = bitbox![0; 10];
# let vec = bitvec![0; 20];
assert_eq!(boxed.as_bitslice(), slice[.. 10]);
assert_eq!(vec.as_bitslice(), slice[.. 20]);
# }
#[cfg(feature = "alloc")] allocates2();
```

## Usage

`BitSlice` implements the full standard-library `[bool]` API. The documentation for these API surfaces is intentionally sparse, and forwards to the standard library rather than try to replicate it.

`BitSlice` also has a great deal of novel API surfaces. These are broken into separate `impl` blocks below. A short summary:

- Since there is no `BitSlice` literal, the constructor functions `::empty()`, `::from_element()`, `::from_slice()`, and `::try_from_slice()`, and their `_mut` counterparts, create bit-slices as needed.
- Since `bits[idx] = value` does not exist, you can use `.set()` or `.replace()` (as well as their `_unchecked` and `_aliased` counterparts) to write into a bit-slice.
- Raw memory can be inspected with `.domain()` and `.domain_mut()`, and a bit-slice can be split on aliasing lines with `.bit_domain()` and `.bit_domain_mut()`.
- The population can be queried for which indices have `0` or `1` bits by iterating across all such indices, counting them, or counting leading or trailing blocks.
  Additionally, `.any()`, `.all()`, `.not_any()`, `.not_all()`, and `.some()` test whether bit-slices satisfy aggregate Boolean qualities.
- Buffer contents can be relocated internally by shifting or rotating to the left or right.

## Trait Implementations

`BitSlice` adds trait implementations that `[bool]` and `[T]` do not necessarily have, including numeric formatting and Boolean arithmetic operators. Additionally, the [`BitField`] trait allows bit-slices to act as a buffer for wide-value storage.

[minval]: https://doc.rust-lang.org/stable/std/primitive.usize.html#associatedconstant.MIN
[tcp]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
[`BitArray`]: crate::array::BitArray
[`BitBox`]: crate::boxed::BitBox
[`BitField`]: crate::field::BitField
[`BitRef`]: crate::ptr::BitRef
[`BitOrder`]: crate::order::BitOrder
[`BitStore`]: crate::store::BitStore
[`BitVec`]: crate::vec::BitVec
[`BitView`]: crate::view::BitView
[`Cell`]: core::cell::Cell
[`Lsb0`]: crate::order::Lsb0
[`Msb0`]: crate::order::Msb0
[`T: BitStore`]: crate::store::BitStore
[`Vec`]: alloc::vec::Vec
[`access`]: crate::access
[`bits!`]: macro@crate::bits
[`bitvec::prelude::LocalBits`]: crate::order::LocalBits
[`from_element`]: Self::from_element
[`from_element_mut`]: Self::from_element_mut
[`from_slice`]: Self::from_slice
[`from_slice_mut`]: Self::from_slice_mut
[`mem::transmute`]: core::mem::transmute
[`std::bitset<N>`]: https://en.cppreference.com/w/cpp/utility/bitset
[`std::vector<bool>`]: https://en.cppreference.com/w/cpp/container/vector_bool
[`try_from_slice`]: Self::try_from_slice
[`try_from_slice_mut`]: Self::try_from_slice_mut
[`vec!`]: macro@alloc::vec
[`.split_at_mut()`]: Self::split_at_mut
[`.view_bits::<O>()`]: crate::view::BitView::view_bits
[`.view_bits_mut::<O>()`]: crate::view::BitView::view_bits_mut

bitvec-1.0.1/doc/slice/BitSliceIndex.md

# Bit-Slice Indexing

This trait, like its mirror in `core`, unifies various types that can be used to index within a bit-slice. Individual `usize` indices can refer to exactly one bit within a bit-slice, and `R: RangeBounds<usize>` ranges can refer to subslices of any length within a bit-slice.

The three operations (get, get unchecked, and index) reflect the three theories of lookup within a collection: fallible, pre-checked, and crashing on failure.

You will likely not use this trait directly; its methods all have corresponding methods on [`BitSlice`] that delegate to particular implementations of it.

## Original

[`slice::SliceIndex`](core::slice::SliceIndex)

## API Differences

The [`SliceIndex::Output`] type is not usable here, because `bitvec` cannot manifest a `&mut bool` reference. Work to unify referential values in the trait system is ongoing, and in the future this functionality *may* be approximated.

Instead, this uses two output types, [`Immut`] and [`Mut`], that are the referential structures produced by indexing immutably or mutably, respectively. This allows the range implementations to produce `&/mut BitSlice` as expected, while `usize` produces the proxy structure.

[`Immut`]: Self::Immut
[`Mut`]: Self::Mut
[`SliceIndex::Output`]: core::slice::SliceIndex::Output

bitvec-1.0.1/doc/slice/api.md

# Port of the `[bool]` Inherent API

This module provides a port of the standard-library’s slice primitive, and associated special-purpose items.
It is intended to contain the contents of every `impl [T]` block in the standard library (with a few exceptions due to impossibility or uselessness). The sibling modules `iter`, `ops`, and `traits` contain slice APIs that relate specifically to iteration, the sigil operators, or general-purpose traits.

Documentation for each ported API strives to be *inspired by*, but not a transliteration of, the documentation in the standard library. `bitvec` generally assumes that you are already familiar with the standard library, and links each ported item to the original in the event that you are not.

bitvec-1.0.1/doc/slice/bitop_assign.md

# Boolean Arithmetic

This merges another bit-slice into `self` with a Boolean arithmetic operation. If the other bit-slice is shorter than `self`, it is zero-extended. For `BitAnd`, this clears all excess bits of `self` to `0`; for `BitOr` and `BitXor`, it leaves them untouched.

## Behavior

The Boolean operation proceeds across each bit-slice in iteration order. This is `O(n)` in the length of the shorter of `self` and `rhs`. However, it can be accelerated if `rhs` has the same type parameters as `self`, and both are using one of the orderings provided by `bitvec`. In this case, the implementation specializes to use `BitField` batch operations to operate on the slices one word at a time, rather than one bit. Acceleration is not currently provided for custom bit-orderings that use the same storage type.

## Pre-`1.0` Behavior

In the `0.` development series, Boolean arithmetic was implemented against all `I: Iterator<Item = bool>`. This allowed code such as `bits |= [false, true];`, but forbade acceleration in the most common use case (combining two bit-slices) because `BitSlice` is not such an iterator.

Usage surveys indicate that it is better for the arithmetic operators to operate on bit-slices, and to allow the possibility of specialized acceleration, rather than to allow folding against any iterator of `bool`s.

If pre-`1.0` code relies on this behavior specifically, and has non-`BitSlice` arguments to the Boolean sigils, then they will need to be replaced with the equivalent loop.

## Examples

```rust
use bitvec::prelude::*;

let a = bits![mut 0, 0, 1, 1];
let b = bits![    0, 1, 0, 1];
*a ^= b;
assert_eq!(a, bits![0, 1, 1, 0]);

let c = bits![mut 0, 0, 1, 1];
let d = [false, true, false, true];
// no longer allowed
// c ^= d.into_iter().by_vals();
for (mut c, d) in c.iter_mut().zip(d.into_iter()) {
    *c ^= d;
}
assert_eq!(c, bits![0, 1, 1, 0]);
```

bitvec-1.0.1/doc/slice/format.md

# Bit-Slice Rendering

This implementation prints the contents of a `&BitSlice` in one of binary, octal, or hexadecimal. It is important to note that this does *not* render the raw underlying memory! It renders the semantically-ordered contents of the bit-slice as numerals. This distinction matters if you use type parameters that differ from those presumed by your debugger (which is usually `<u8, Msb0>`).

The output separates the `T` elements as individual list items, and renders each element as a base-2, 8, or 16 numeric string. When walking an element, the bits traversed by the bit-slice are considered to be stored in most-significant-bit-first ordering. This means that index `[0]` is the high bit of the left-most digit, and index `[n]` is the low bit of the right-most digit, in a given printed word.
In order to render according to expectations of the Arabic numeral system, an element being transcribed is chunked into digits from the least-significant end of its rendered form. This is most noticeable in octal, which will always have a smaller ceiling on the left-most digit in a printed word, while the right-most digit in that word is able to use the full `0 ..= 7` numeral range.

## Examples

```rust
# #[cfg(feature = "std")] {
use bitvec::prelude::*;

let data = [
    0b000000_10u8, // digits print LTR
    0b10_001_101,  // significance is computed RTL
    0b01_000000,
];
let bits = &data.view_bits::<Msb0>()[6 .. 18];

assert_eq!(format!("{:b}", bits), "[10, 10001101, 01]");
assert_eq!(format!("{:o}", bits), "[2, 215, 1]");
assert_eq!(format!("{:X}", bits), "[2, 8D, 1]");
# }
```

The `{:#}` format modifier causes the standard `0b`, `0o`, or `0x` prefix to be applied to each printed word. The other format specifiers are not interpreted by this implementation, and apply to the entire rendered text, not to individual words.

bitvec-1.0.1/doc/slice/from_raw_parts.md

# Raw Bit-Slice Construction

This produces an `&BitSlice` reference handle from a `BitPtr` bit-pointer and a length.

## Parameters

1. `data`: a bit-pointer to the starting bit of the produced bit-slice. This should generally have been produced by `BitSlice::as_ptr`, but you are able to construct these pointers directly if you wish.
1. `len`: the number of bits, beginning at `data`, that the produced bit-slice includes. This value cannot depart an allocation area, or exceed `BitSlice`’s encoding limitations.

## Returns

This returns a `Result`, because it can detect and gracefully fail if `len` is too large, or if `data` is ill-formed. This fails if it has an error while encoding the `&BitSlice`, and succeeds if it is able to produce a correctly encoded value. Note that this is not able to detect semantic violations of the memory model. You are responsible for upholding memory safety.

## Original

[`slice::from_raw_parts`](core::slice::from_raw_parts)

## API Differences

This takes a [`BitPtr`] instead of a hypothetical `*const Bit`, because `bitvec` is not able to express raw Rust pointers to individual bits. Additionally, it returns a `Result` rather than a direct bit-slice, because the given `len` argument may be invalid to encode into a `&BitSlice` reference.

## Safety

This has the same memory safety requirements as the standard-library function:

- `data` must be valid for reads and writes of at least `len` bits,
- The bits that the produced bit-slice refers to must be wholly unreachable by any other part of the program for the duration of the lifetime `'a`,

and additionally imposes some of its own:

- `len` cannot exceed [`BitSlice::MAX_BITS`].

## Examples

```rust
use bitvec::{
    prelude::*,
    index::BitIdx,
    ptr::Const,
    slice as bv_slice,
};

let elem = 6u16;
let addr = (&elem).into();
let head = BitIdx::new(1).unwrap();
let data: BitPtr<Const, u16, Lsb0> = BitPtr::new(addr, head).unwrap();

let bits = unsafe { bv_slice::from_raw_parts(data, 3) };
assert_eq!(bits.unwrap(), bits![1, 1, 0]);
```

[`BitSlice::MAX_BITS`]: crate::slice::BitSlice::MAX_BITS

bitvec-1.0.1/doc/slice/from_raw_parts_mut.md

# Raw Bit-Slice Construction

This produces an `&mut BitSlice` reference handle from a `BitPtr` bit-pointer and a length.

## Parameters

1. `data`: a bit-pointer to the starting bit of the produced bit-slice.
This should generally have been produced by `BitSlice::as_mut_ptr`, but you are able to construct these pointers directly if you wish. 1. `len`: the number of bits, beginning at `data`, that the produced bit-slice includes. This value cannot depart an allocation area, or exceed `BitSlice`’s encoding limitations. ## Returns This returns a `Result`, because it can detect and gracefully fail if `len` is too large, or if `data` is ill-formed. This fails if it has an error while encoding the `&mut BitSlice`, and succeeds if it is able to produce a correctly encoded value. Note that this is not able to detect semantic violations of the memory model. You are responsible for upholding memory safety. ## Original [`slice::from_raw_parts_mut`](core::slice::from_raw_parts_mut) ## API Differences This takes a [`BitPtr`] instead of a hypothetical `*mut Bit`, because `bitvec` is not able to express raw Rust pointers to individual bits. Additionally, it returns a `Result` rather than a direct bit-slice, because the given `len` argument may be invalid to encode into a `&mut BitSlice` reference. ## Safety This has the same memory safety requirements as the standard-library function: - `data` must be valid for reads and writes of at least `len` bits, - The bits that the produced bit-slice refers to must be wholly unreachable by any other part of the program for the duration of the lifetime `'a`, and additionally imposes some of its own: - `len` cannot exceed [`BitSlice::MAX_BITS`]. ## Examples ```rust use bitvec::{ prelude::*, index::BitIdx, ptr::Mut, slice as bv_slice, }; let mut elem = 0u16; let addr = (&mut elem).into(); let head = BitIdx::new(1).unwrap(); let data: BitPtr = BitPtr::new(addr, head).unwrap(); let bits = unsafe { bv_slice::from_raw_parts_mut(data, 3) }; bits.unwrap()[1 ..].fill(true); assert_eq!(elem, 12); ``` [`BitSlice::MAX_BITS`]: crate::slice::BitSlice::MAX_BITS bitvec-1.0.1/doc/slice/from_raw_parts_unchecked.md000064400000000000000000000017531046102023000203310ustar 00000000000000# Raw Bit-Slice Construction This is equivalent to [`slice::from_raw_parts()`], except that it does not check any of the encoding requirements. ## Safety Callers must both uphold the safety requirements of that function, as well as ensure that the arguments would not cause it to fail gracefully. Arguments that would cause `from_raw_parts` to return `Err` instead produce a bit-slice handle whose value is undefined. ## Parameters 1. `ptr`: A bit-pointer to a `T` memory element. The pointer’s data address must be well-aligned, the bit-index must be valid for `T`, the target region must be initialized for `len` bits. 1. `len`: A count of live bits beginning at `ptr`. It must not exceed [`MAX_BITS`]. ## Returns An exclusive `BitSlice` reference over the described region. If either of the parameters are invalid, then the value of the reference is library-level undefined. [`MAX_BITS`]: crate::slice::BitSlice::MAX_BITS [`slice::from_raw_parts()`]: crate::slice::from_raw_parts bitvec-1.0.1/doc/slice/from_raw_parts_unchecked_mut.md000064400000000000000000000022301046102023000212050ustar 00000000000000# Raw Bit-Slice Construction This is equivalent to [`slice::from_raw_parts_mut()`], except that it does not check any of the encoding requirements. ## Safety Callers must both uphold the safety requirements of that function, as well as ensure that the arguments would not cause it to fail gracefully. Arguments that would cause `from_raw_parts_mut` to return `Err` instead produce a bit-slice handle whose value is undefined. 
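One sound usage pattern is to recycle the pointer and length of a bit-slice that already exists, since those are guaranteed to satisfy the encoding rules that this function skips checking. A minimal sketch (using only public `bitvec` APIs):

```rust
use bitvec::prelude::*;
use bitvec::slice as bv_slice;

let mut elem = 0u16;
let bits = elem.view_bits_mut::<Lsb0>();
// A pointer/length pair taken from a live bit-slice is already a valid
// encoding, so skipping the construction checks is sound here.
let (ptr, len) = (bits.as_mut_bitptr(), bits.len());
let bits = unsafe { bv_slice::from_raw_parts_unchecked_mut(ptr, len) };
bits.fill(true);
assert_eq!(elem, u16::MAX);
```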
## Parameters 1. `ptr`: A bit-pointer to a `T` memory element. The pointer’s data address must be well-aligned, the bit-index must be valid for `T`, the target region must be initialized for `len` bits. 1. `len`: A count of live bits beginning at `ptr`. It must not exceed [`MAX_BITS`]. ## Returns An exclusive `BitSlice` reference over the described region. If either of the parameters are invalid, then the value of the reference is library-level undefined. If any other reference, `BitSlice` or not, targets any of the bits that this reference governs while it is alive, then behavior is language-level undefined. [`MAX_BITS`]: crate::slice::BitSlice::MAX_BITS [`slice::from_raw_parts_mut()`]: crate::slice::from_raw_parts_mut bitvec-1.0.1/doc/slice/iter/Chunks.md000064400000000000000000000014541046102023000154670ustar 00000000000000# Shared Bit-Slice Chunking This iterator yields successive non-overlapping chunks of a bit-slice. Chunking advances one subslice at a time, starting at the beginning of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the final chunk will be the remainder, and will be shorter than requested. It is created by the [`BitSlice::chunks`] method. ## Original [`slice::Chunks`](core::slice::Chunks) ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut chunks = bits.chunks(3); assert_eq!(chunks.next().unwrap(), bits![0; 3]); assert_eq!(chunks.next().unwrap(), bits![1; 3]); assert_eq!(chunks.next().unwrap(), bits![0, 1]); assert!(chunks.next().is_none()); ``` [`BitSlice::chunks`]: crate::slice::BitSlice::chunks bitvec-1.0.1/doc/slice/iter/ChunksExact.md000064400000000000000000000016201046102023000164470ustar 00000000000000# Shared Bit-Slice Exact Chunking This iterator yields successive non-overlapping chunks of a bit-slice. Chunking advances one sub-slice at a time, starting at the beginning of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the leftover segment at the back is not iterated, but can be accessed with the [`.remainder()`] method. It is created by the [`BitSlice::chunks_exact`] method. ## Original [`slice::ChunksExact`](core::slice::ChunksExact) ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut chunks = bits.chunks_exact(3); assert_eq!(chunks.next().unwrap(), bits![0; 3]); assert_eq!(chunks.next().unwrap(), bits![1; 3]); assert!(chunks.next().is_none()); assert_eq!(chunks.remainder(), bits![0, 1]); ``` [`BitSlice::chunks_exact`]: crate::slice::BitSlice::chunks_exact [`.remainder()`]: Self::remainder bitvec-1.0.1/doc/slice/iter/ChunksExactMut.md000064400000000000000000000023061046102023000171370ustar 00000000000000# Exclusive Bit-Slice Exact Chunking This iterator yields successive non-overlapping mutable chunks of a bit-slice. Chunking advances one sub-slice at a time, starting at the beginning of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the leftover segment at the back is not iterated, but can be accessed with the [`.into_remainder()`] or [`.take_remainder()`] methods. It is created by the [`BitSlice::chunks_exact_mut`] method. ## Original [`slice::ChunksExactMut`](core::slice::ChunksExactMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. 
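For illustration, a small sketch of the taint as it appears in a binding’s type, spelled with the associated-type projection (ordinary code can let inference handle this):

```rust
use bitvec::prelude::*;

let bits = bits![mut u8, Msb0; 0; 8];
let mut chunks = bits.chunks_exact_mut(3);
// Each yielded chunk carries the `::Alias` storage marker in its type.
let chunk: &mut BitSlice<<u8 as BitStore>::Alias, Msb0> =
	chunks.next().unwrap();
chunk.fill(true);
assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 0, 0]);
```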
## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut chunks = unsafe { bits.chunks_exact_mut(3).remove_alias() }; chunks.next().unwrap().fill(true); chunks.next().unwrap().fill(false); assert!(chunks.next().is_none()); chunks.take_remainder().copy_from_bitslice(bits![1, 0]); assert!(chunks.take_remainder().is_empty()); assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 1, 0]); ``` [`BitSlice::chunks_exact_mut`]: crate::slice::BitSlice::chunks_exact_mut [`.into_remainder()`]: Self::into_remainder [`.take_remainder()`]: Self::take_remainder bitvec-1.0.1/doc/slice/iter/ChunksMut.md000064400000000000000000000017331046102023000161550ustar 00000000000000# Exclusive Bit-Slice Chunking This iterator yields successive non-overlapping mutable chunks of a bit-slice. Chunking advances one subslice at a time, starting at the beginning of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the final chunk will be the remainder, and will be shorter than requested. It is created by the [`BitSlice::chunks_mut`] method. ## Original [`slice::ChunksMut`](core::slice::ChunksMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut chunks = unsafe { bits.chunks_mut(3).remove_alias() }; chunks.next().unwrap().fill(true); chunks.next().unwrap().fill(false); chunks.next().unwrap().copy_from_bitslice(bits![1, 0]); assert!(chunks.next().is_none()); assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 1, 0]); ``` [`BitSlice::chunks_mut`]: crate::slice::BitSlice::chunks_mut bitvec-1.0.1/doc/slice/iter/Iter.md000064400000000000000000000017271046102023000151420ustar 00000000000000# Shared Bit-Slice Iteration This view iterates each bit in the bit-slice by [proxy reference][0]. It is created by the [`BitSlice::iter`] method. ## Original [`slice::Iter`](core::slice::Iter) ## API Differences While this iterator can manifest `&bool` references, it instead yields the `bitvec` [proxy reference][0] for consistency with the [`IterMut`] type. It can be converted to yield true references with [`.by_refs()`]. Additionally, because it does not yield `&bool`, the [`Iterator::copied`] method does not apply. It can be converted to an iterator of `bool` values with [`.by_vals()`]. ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 1]; for bit in bits.iter() { # #[cfg(feature = "std")] { println!("{}", bit); # } } ``` [`BitSlice::iter`]: crate::slice::BitSlice::iter [`IterMut`]: crate::slice::IterMut [`Iterator::copied`]: core::iter::Iterator::copied [`.by_refs()`]: Self::by_refs [`.by_vals()`]: Self::by_vals [0]: crate::ptr::BitRef bitvec-1.0.1/doc/slice/iter/IterMut.md000064400000000000000000000013171046102023000156230ustar 00000000000000# Exclusive Bit-Slice Iteration This view iterates each bit in the bit-slice by exclusive proxy reference. It is created by the [`BitSlice::iter_mut`] method. ## Original [`slice::IterMut`](core::slice::IterMut) ## API Differences Because `bitvec` cannot manifest `&mut bool` references, this instead yields the crate [proxy reference][0]. Because the proxy is a true type, rather than an `&mut` reference, its name must be bound with `mut` in order to write through it. 
## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 1]; for mut bit in bits.iter_mut() { *bit = !*bit; } assert_eq!(bits, bits![1, 0]); ``` [`BitSlice::iter_mut`]: crate::slice::BitSlice::iter_mut [0]: crate::ptr::BitRef bitvec-1.0.1/doc/slice/iter/IterOnes.md000064400000000000000000000011031046102023000157530ustar 00000000000000# Bit Seeking This iterator yields indices of bits set to `1`, rather than bit-values themselves. It is essentially the inverse of indexing: rather than applying a `usize` to the bit-slice to get a `bool`, this applies a `bool` to get a `usize`. It is created by the [`.iter_ones()`] method on bit-slices. ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 1, 0, 0, 1]; let mut ones = bits.iter_ones(); assert_eq!(ones.next(), Some(1)); assert_eq!(ones.next(), Some(4)); assert!(ones.next().is_none()); ``` [`.iter_ones()`]: crate::slice::BitSlice::iter_ones bitvec-1.0.1/doc/slice/iter/IterZeros.md000064400000000000000000000011171046102023000161560ustar 00000000000000# Bit Seeking This iterator yields indices of bits cleared to `0`, rather than bit-values themselves. It is essentially the inverse of indexing: rather than applying a `usize` to the bit-slice to get a `bool`, this applies a `bool` to get a `usize`. It is created by the [`.iter_zeros()`] method on bit-slices. ## Examples ```rust use bitvec::prelude::*; let bits = bits![1, 0, 1, 1, 0]; let mut zeros = bits.iter_zeros(); assert_eq!(zeros.next(), Some(1)); assert_eq!(zeros.next(), Some(4)); assert!(zeros.next().is_none()); ``` [`.iter_zeros()`]: crate::slice::BitSlice::iter_zeros bitvec-1.0.1/doc/slice/iter/NoAlias.md000064400000000000000000000100231046102023000155520ustar 00000000000000# Anti-Aliasing Iterator Adapter This structure is an adapter over a corresponding `&mut BitSlice` iterator. It removes the `::Alias` taint marker, allowing mutations through each yielded bit reference to skip any costs associated with aliasing. ## Safety The default `&mut BitSlice` iterators attach an `::Alias` taint for a reason: the iterator protocol does not mandate that yielded items have a narrower lifespan than the iterator that produced them! As such, it is completely possible to pull multiple yielded items out into the same scope, where they have overlapping lifetimes. The `BitStore` principles require that whenever two write-capable handles to the same memory region have overlapping lifetimes, they *must* be `::Alias` tainted. This adapter removes the `::Alias` taint, but is not able to enforce strictly non-overlapping lifetimes of yielded items. As such, this adapter is **unsafe to construct**, and you **must** only use it in a `for`-loop where each yielded item does not escape the loop body. In order to help enforce this limitation, this adapter structure is *not* `Send` or `Sync`. It must be consumed in the scope where it was created. ## Usage If you are using a loop that satisfies the safety requirement, you can use the `.remove_alias()` method on your mutable iterator and configure it to yield handles that do not impose additional alias-protection costs when accessing the underlying memory. Note that this adapter does not go to `T::Unalias`: it only takes an iterator that yields `T::Alias` and unwinds it to `T`. If the source bit-slice was *already* alias-tainted, the original protection is not removed. You are responsible for doing so by using [`.bit_domain_mut()`]. ## Examples This example shows using `.chunks_mut()` without incurring alias protection. 
This documentation is replicated on all `NoAlias` types; the examples will work for all of them, but are not specialized in the text. ```rust use bitvec::prelude::*; use bitvec::slice::{ChunksMut, ChunksMutNoAlias}; type Alias8 = ::Alias; let mut data: BitArr!(for 40, in u8, Msb0) = bitarr![u8, Msb0; 0; 40]; let mut chunks: ChunksMut = data.chunks_mut(5); let _chunk: &mut BitSlice = chunks.next().unwrap(); let mut chunks: ChunksMutNoAlias = unsafe { chunks.remove_alias() }; let _chunk: &mut BitSlice = chunks.next().unwrap(); ``` This example shows how use of [`.split_at_mut()`] forces the `.remove_alias()` to still retain a layer of alias protection. ```rust use bitvec::prelude::*; use bitvec::slice::{ChunksMut, ChunksMutNoAlias}; type Alias8 = ::Alias; type Alias8Alias = ::Alias; let mut data: BitArr!(for 40, in u8, Msb0) = bitarr!(u8, Msb0; 0; 40); let (_head, rest): (_, &mut BitSlice) = data.split_at_mut(5); let mut chunks: ChunksMut = rest.chunks_mut(5); let _chunk: &mut BitSlice = chunks.next().unwrap(); let mut chunks: ChunksMutNoAlias = unsafe { chunks.remove_alias() }; let _chunk: &mut BitSlice = chunks.next().unwrap(); ``` And this example shows how to use `.bit_domain_mut()` in order to undo the effects of `.split_at_mut()`, so that `.remove_alias()` can complete its work. ```rust use bitvec::prelude::*; use bitvec::slice::{ChunksMut, ChunksMutNoAlias}; type Alias8 = ::Alias; let mut data: BitArr!(for 40, in u8, Msb0) = bitarr!(u8, Msb0; 0; 40); let (_head, rest): (_, &mut BitSlice) = data.split_at_mut(5); let (head, body, tail): ( &mut BitSlice, &mut BitSlice, &mut BitSlice, ) = rest.bit_domain_mut().region().unwrap(); let mut chunks: ChunksMut = body.chunks_mut(5); let _chunk: &mut BitSlice = chunks.next().unwrap(); let mut chunks: ChunksMutNoAlias = unsafe { chunks.remove_alias() }; let _chunk: &mut BitSlice = chunks.next().unwrap(); ``` [`.bit_domain_mut()`]: crate::slice::BitSlice::bit_domain_mut [`.split_at_mut()`]: crate::slice::BitSlice::split_at_mut bitvec-1.0.1/doc/slice/iter/RChunks.md000064400000000000000000000014641046102023000156120ustar 00000000000000# Shared Bit-Slice Reverse Chunking This iterator yields successive non-overlapping chunks of a bit-slice. Chunking advances one subslice at a time, starting at the end of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the final chunk will be the remainder, and will be shorter than requested. It is created by the [`BitSlice::rchunks`] method. ## Original [`slice::RChunks`](core::slice::RChunks) ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 1, 0, 0, 0, 1, 1, 1]; let mut chunks = bits.rchunks(3); assert_eq!(chunks.next().unwrap(), bits![1; 3]); assert_eq!(chunks.next().unwrap(), bits![0; 3]); assert_eq!(chunks.next().unwrap(), bits![0, 1]); assert!(chunks.next().is_none()); ``` [`BitSlice::rchunks`]: crate::slice::BitSlice::rchunks bitvec-1.0.1/doc/slice/iter/RChunksExact.md000064400000000000000000000016311046102023000165730ustar 00000000000000# Shared Bit-Slice Reverse Exact Chunking This iterator yields successive non-overlapping chunks of a bit-slice. Chunking advances one sub-slice at a time, starting at the end of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the leftover segment at the front is not iterated, but can be accessed with the [`.remainder()`] method. It is created by the [`BitSlice::rchunks_exact`] method. 
## Original [`slice::RChunksExact`](core::slice::RChunksExact) ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 1, 0, 0, 0, 1, 1, 1]; let mut chunks = bits.rchunks_exact(3); assert_eq!(chunks.next().unwrap(), bits![1; 3]); assert_eq!(chunks.next().unwrap(), bits![0; 3]); assert!(chunks.next().is_none()); assert_eq!(chunks.remainder(), bits![0, 1]); ``` [`BitSlice::rchunks_exact`]: crate::slice::BitSlice::rchunks_exact [`.remainder()`]: Self::remainder bitvec-1.0.1/doc/slice/iter/RChunksExactMut.md000064400000000000000000000023171046102023000172630ustar 00000000000000# Exclusive Bit-Slice Reverse Exact Chunking This iterator yields successive non-overlapping mutable chunks of a bit-slice. Chunking advances one sub-slice at a time, starting at the end of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the leftover segment at the front is not iterated, but can be accessed with the [`.into_remainder()`] or [`.take_remainder()`] methods. It is created by the [`BitSlice::rchunks_exact_mut`] method. ## Original [`slice::RChunksExactMut`](core::slice::RChunksExactMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 1, 0, 0, 0, 1, 1, 1]; let mut chunks = unsafe { bits.rchunks_exact_mut(3).remove_alias() }; chunks.next().unwrap().fill(false); chunks.next().unwrap().fill(true); assert!(chunks.next().is_none()); chunks.take_remainder().copy_from_bitslice(bits![1, 0]); assert!(chunks.take_remainder().is_empty()); assert_eq!(bits, bits![1, 0, 1, 1, 1, 0, 0, 0]); ``` [`BitSlice::rchunks_exact_mut`]: crate::slice::BitSlice::rchunks_exact_mut [`.into_remainder()`]: Self::into_remainder [`.take_remainder()`]: Self::take_remainder bitvec-1.0.1/doc/slice/iter/RChunksMut.md000064400000000000000000000017261046102023000163010ustar 00000000000000# Exclusive Bit-Slice Reverse Chunking This iterator yields successive non-overlapping mutable chunks of a bit-slice. Chunking advances one subslice at a time, starting at the end of the bit-slice. If the original bit-slice’s length is not evenly divided by the chunk width, then the final chunk will be the remainder, and will be shorter than requested. It is created by the [`BitSlice::rchunks_mut`] method. ## Original [`slice::RChunksMut`](core::slice::RChunksMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 1, 0, 0, 0, 1, 1, 1]; let mut chunks = unsafe { bits.rchunks_mut(3).remove_alias() }; chunks.next().unwrap().fill(false); chunks.next().unwrap().fill(true); chunks.next().unwrap().copy_from_bitslice(bits![1, 0]); assert!(chunks.next().is_none()); assert_eq!(bits, bits![1, 0, 1, 1, 1, 0, 0, 0]); ``` [`BitSlice::rchunks_mut`]: crate::slice::BitSlice::rchunks_mut bitvec-1.0.1/doc/slice/iter/RSplit.md000064400000000000000000000017401046102023000154470ustar 00000000000000# Shared Bit-Slice Reverse Splitting This iterator yields successive non-overlapping segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the end of the bit-slice. The matched bit is **not** included in the yielded segment. It is created by the [`BitSlice::rsplit`] method.
## Original [`slice::RSplit`](core::slice::RSplit) ## API Differences The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.rsplit(|idx, _bit| idx % 3 == 2); assert_eq!(split.next().unwrap(), bits![0, 1]); assert_eq!(split.next().unwrap(), bits![1; 2]); assert_eq!(split.next().unwrap(), bits![0; 2]); assert!(split.next().is_none()); ``` [`BitSlice::rsplit`]: crate::slice::BitSlice::rsplit bitvec-1.0.1/doc/slice/iter/RSplitMut.md000064400000000000000000000021731046102023000161360ustar 00000000000000# Exclusive Bit-Slice Reverse Splitting This iterator yields successive non-overlapping mutable segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the end of the bit-slice. The matched bit is **not** included in the yielded segment. It is created by the [`BitSlice::rsplit_mut`] method. ## Original [`slice::RSplitMut`](core::slice::RSplitMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut split = unsafe { bits.rsplit_mut(|idx, _bit| idx % 3 == 2).remove_alias() }; split.next().unwrap().copy_from_bitslice(bits![1, 0]); split.next().unwrap().fill(false); split.next().unwrap().fill(true); assert!(split.next().is_none()); assert_eq!(bits, bits![1, 1, 0, 0, 0, 1, 1, 0]); ``` [`BitSlice::rsplit_mut`]: crate::slice::BitSlice::rsplit_mut bitvec-1.0.1/doc/slice/iter/RSplitN.md000064400000000000000000000020601046102023000155610ustar 00000000000000# Shared Bit-Slice Reverse Splitting This iterator yields `n` successive non-overlapping segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the end of the bit-slice. The matched bit is **not** included in the yielded segment. The `n`th yielded segment does not attempt any further splits, and extends to the front of the bit-slice. It is created by the [`BitSlice::rsplitn`] method. ## Original [`slice::RSplitN`](core::slice::RSplitN) ## API Differences The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.rsplitn(2, |idx, _bit| idx % 3 == 2); assert_eq!(split.next().unwrap(), bits![0, 1]); assert_eq!(split.next().unwrap(), bits![0, 0, 0, 1, 1]); assert!(split.next().is_none()); ``` [`BitSlice::rsplitn`]: crate::slice::BitSlice::rsplitn bitvec-1.0.1/doc/slice/iter/RSplitNMut.md000064400000000000000000000022361046102023000162540ustar 00000000000000# Exclusive Bit-Slice Reverse Splitting This iterator yields `n` successive non-overlapping mutable segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the end of the bit-slice. The matched bit is **not** included in the yielded segment. 
The `n`th yielded segment does not attempt any further splits, and extends to the front of the bit-slice. It is created by the [`BitSlice::rsplitn_mut`] method. ## Original [`slice::RSplitNMut`](core::slice::RSplitNMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.rsplitn_mut(2, |idx, _bit| idx % 3 == 2); split.next().unwrap().fill(false); split.next().unwrap().fill(false); assert!(split.next().is_none()); assert_eq!(bits, bits![0, 0, 0, 0, 0, 1, 0, 0]); ``` [`BitSlice::rsplitn_mut`]: crate::slice::BitSlice::rsplitn_mut bitvec-1.0.1/doc/slice/iter/Split.md000064400000000000000000000017301046102023000153240ustar 00000000000000# Shared Bit-Slice Splitting This iterator yields successive non-overlapping segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the beginning of the bit-slice. The matched bit is **not** included in the yielded segment. It is created by the [`BitSlice::split`] method. ## Original [`slice::Split`](core::slice::Split) ## API Differences The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.split(|idx, _bit| idx % 3 == 2); assert_eq!(split.next().unwrap(), bits![0; 2]); assert_eq!(split.next().unwrap(), bits![1; 2]); assert_eq!(split.next().unwrap(), bits![0, 1]); assert!(split.next().is_none()); ``` [`BitSlice::split`]: crate::slice::BitSlice::split bitvec-1.0.1/doc/slice/iter/SplitInclusive.md000064400000000000000000000015011046102023000172020ustar 00000000000000# Shared Bit-Slice Splitting This iterator yields successive non-overlapping segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the beginning of the bit-slice. The matched bit **is** included in the yielded segment. It is created by the [`BitSlice::split_inclusive`] method. ## Original [`slice::SplitInclusive`](core::slice::SplitInclusive) ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.split_inclusive(|idx, _bit| idx % 3 == 2); assert_eq!(split.next().unwrap(), bits![0; 3]); assert_eq!(split.next().unwrap(), bits![1; 3]); assert_eq!(split.next().unwrap(), bits![0, 1]); assert!(split.next().is_none()); ``` [`BitSlice::split_inclusive`]: crate::slice::BitSlice::split_inclusive bitvec-1.0.1/doc/slice/iter/SplitInclusiveMut.md000064400000000000000000000016401046102023000176740ustar 00000000000000# Exclusive Bit-Slice Splitting This iterator yields successive non-overlapping mutable segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the beginning of the bit-slice. The matched bit **is** included in the yielded segment. It is created by the [`BitSlice::split_inclusive_mut`] method.
## Original [`slice::SplitInclusiveMut`](core::slice::SplitInclusiveMut) ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut split = unsafe { bits.split_inclusive_mut(|idx, _bit| idx % 3 == 2).remove_alias() }; split.next().unwrap().fill(true); split.next().unwrap().fill(false); split.next().unwrap().copy_from_bitslice(bits![1, 0]); assert!(split.next().is_none()); assert_eq!(bits, bits![1, 1, 1, 0, 0, 0, 1, 0]); ``` [`BitSlice::split_inclusive_mut`]: crate::slice::BitSlice::split_inclusive_mut bitvec-1.0.1/doc/slice/iter/SplitMut.md000064400000000000000000000021631046102023000160130ustar 00000000000000# Exclusive Bit-Slice Splitting This iterator yields successive non-overlapping mutable segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the beginning of the bit-slice. The matched bit is **not** included in the yielded segment. It is created by the [`BitSlice::split_mut`] method. ## Original [`slice::SplitMut`](core::slice::SplitMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut split = unsafe { bits.split_mut(|idx, _bit| idx % 3 == 2).remove_alias() }; split.next().unwrap().fill(true); split.next().unwrap().fill(false); split.next().unwrap().copy_from_bitslice(bits![1, 0]); assert!(split.next().is_none()); assert_eq!(bits, bits![1, 1, 0, 0, 0, 1, 1, 0]); ``` [`BitSlice::split_mut`]: crate::slice::BitSlice::split_mut bitvec-1.0.1/doc/slice/iter/SplitN.md000064400000000000000000000020461046102023000154430ustar 00000000000000# Shared Bit-Slice Splitting This iterator yields `n` successive non-overlapping segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the beginning of the bit-slice. The matched bit is **not** included in the yielded segment. The `n`th yielded segment does not attempt any further splits, and extends to the end of the bit-slice. It is created by the [`BitSlice::splitn`] method. ## Original [`slice::SplitN`](core::slice::SplitN) ## API Differences The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.splitn(2, |idx, _bit| idx % 3 == 2); assert_eq!(split.next().unwrap(), bits![0; 2]); assert_eq!(split.next().unwrap(), bits![1, 1, 1, 0, 1]); assert!(split.next().is_none()); ``` [`BitSlice::splitn`]: crate::slice::BitSlice::splitn bitvec-1.0.1/doc/slice/iter/SplitNMut.md000064400000000000000000000022251046102023000161300ustar 00000000000000# Exclusive Bit-Slice Splitting This iterator yields `n` successive non-overlapping mutable segments of a bit-slice, separated by bits that match a predicate function. Splitting advances one segment at a time, starting at the beginning of the bit-slice. The matched bit is **not** included in the yielded segment. The `n`th yielded segment does not attempt any further splits, and extends to the end of the bit-slice. It is created by the [`BitSlice::splitn_mut`] method. 
## Original [`slice::SplitNMut`](core::slice::SplitNMut) ## API Differences This iterator marks all yielded bit-slices as `::Alias`ed. The predicate function receives both the index within the bit-slice, as well as the bit value, in order to allow the predicate to have more than one bit of information when splitting. ## Examples ```rust use bitvec::prelude::*; let bits = bits![mut 0, 0, 0, 1, 1, 1, 0, 1]; let mut split = bits.splitn_mut(2, |idx, _bit| idx % 3 == 2); split.next().unwrap().fill(true); split.next().unwrap().fill(false); assert!(split.next().is_none()); assert_eq!(bits, bits![1, 1, 0, 0, 0, 0, 0, 0]); ``` [`BitSlice::splitn_mut`]: crate::slice::BitSlice::splitn_mut bitvec-1.0.1/doc/slice/iter/Windows.md000064400000000000000000000015601046102023000156640ustar 00000000000000# Bit-Slice Windowing This iterator yields successive overlapping windows into a bit-slice. Windowing advances one bit at a time, so for any given window width `N`, most bits will appear in `N` windows. Windows do not “extend” past either edge of the bit-slice: the first window has its front edge at the front of the bit-slice, and the last window has its back edge at the back of the bit-slice. It is created by the [`BitSlice::windows`] method. ## Original [`slice::Windows`](core::slice::Windows) ## Examples ```rust use bitvec::prelude::*; let bits = bits![0, 0, 1, 1, 0]; let mut windows = bits.windows(2); let expected = &[ bits![0, 0], bits![0, 1], bits![1, 1], bits![1, 0], ]; assert_eq!(windows.len(), 4); for (window, expected) in windows.zip(expected) { assert_eq!(window, expected); } ``` [`BitSlice::windows`]: crate::slice::BitSlice::windows bitvec-1.0.1/doc/slice/iter.md000064400000000000000000000007641046102023000142370ustar 00000000000000# Bit-Slice Iteration Like the standard-library slice, this module contains a great deal of specialized iterators. In addition to the ports of the iterators in [`core::slice`], this also defines iterators that seek out indices of set or cleared bits in sparse collections. Each iterator here is documented most extensively on the [`BitSlice`] method that produces it, and has only light documentation on its own type or inherent methods. [`BitSlice`]: super::BitSlice [`core::slice`]: core::slice bitvec-1.0.1/doc/slice/ops.md000064400000000000000000000014141046102023000140660ustar 00000000000000# Bit-Slice Operator Implementations The standard-library slices only implement the indexing operator `[]`. `BitSlice` additionally implements the Boolean operators `&`, `|`, `^`, and `!`. The dyadic Boolean arithmetic operators all take any `bitvec` container as their second argument, and apply the operation in-place to the left-hand bit-slice. If the second argument exhausts before `self` does, then it is implicitly zero-extended. This means that `&=` zeros excess bits in `self`, while `|=` and `^=` do not modify them. The monadic operator `!` inverts the entire bit-slice at once. Its API requires *taking* a `&mut BitSlice` reference and returning it, so you will need to structure your code accordingly. [`BitSlice::domain_mut`]: crate::slice::BitSlice::domain_mut bitvec-1.0.1/doc/slice/specialization.md000064400000000000000000000031171046102023000163050ustar 00000000000000# Bit-Slice Specialization This module provides specialized implementations for `BitSlice` and `BitSlice`. These implementations are able to use knowledge of their bit-ordering behavior to be faster and operate in batches. 
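A standalone sketch of the dispatch pattern that the next paragraph describes (the names here are illustrative stand-ins, not the crate’s internal symbols):

```rust
use core::any::TypeId;

/// A stand-in for the crate’s `O: BitOrder` type parameter.
trait Order: 'static {}
struct Lsb0Like;
impl Order for Lsb0Like {}

fn count_ones_spec<O: Order>(word: usize) -> u32 {
	// During monomorphization this comparison is a compile-time constant,
	// so the dead branch is discarded and only one body survives.
	if TypeId::of::<O>() == TypeId::of::<Lsb0Like>() {
		// Specialized: a single batch processor instruction.
		word.count_ones()
	} else {
		// Generic fallback: inspect each bit position individually.
		(0 .. usize::BITS).filter(|&i| word >> i & 1 != 0).count() as u32
	}
}

assert_eq!(count_ones_spec::<Lsb0Like>(0b1011), 3);
```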
Since true specialization is not available in the language yet, this uses the `any::TypeId` system to detect if a type parameter is identical to a known type and conditionally force a cast and branch. Since type identifiers are compiler intrinsics produced during compilation, during monomorphization each branch has its conditional replaced with a compile-time constant value. The `if true` branch is retained, the `if false` branches are discarded, and the monomorphization proceeds with the specialized function replacing the generic body. The `.coerce()` and `.coerce_mut()` methods detect whether a bit-slice with generic type parameters matches statically-known type parameters, and return an `Option` of a value-identical bit-slice reference with the statically-known type parameters which can then invoke a specialization method. Generic methods can be specialized whenever their implementation is dependent on the `O` type parameter and the map of positions the ordering produces is easily legible to processor instructions. Because language-level specialization is unavailable, dispatch is only done in `bitvec` and cannot be extended to third-party crates. The `lsb0` and `msb0` modules should have identical symbols present. For implementation, remember that `Lsb0` and `Msb0` orderings **are** correlated with little-endian and big-endian byte operations! bitvec-1.0.1/doc/slice/threadsafe.md000064400000000000000000000020471046102023000153760ustar 00000000000000# Bit-Slice Thread Safety This allows bit-slice references to be moved across thread boundaries only when the underlying `T` element can tolerate concurrency. All `BitSlice` references, shared or exclusive, are only threadsafe if the `T` element type is `Send`, because any given bit-slice reference may only have partial control of a memory element that is also being shared by a bit-slice reference on another thread. As such, this is never implemented for `Cell`, but always implemented for `AtomicU` and `U` for a given unsigned integer type `U`. Atomic integers safely handle concurrent writes, cells do not allow concurrency at all, so the only missing piece is `&mut BitSlice<_, U: Unsigned>`. This is handled by the aliasing system that the mutable splitters employ: a mutable reference to an unsynchronized bit-slice can only cross threads when no other handle is able to exist to the elements it governs. Splitting a mutable bit-slice causes the split halves to change over to either atomics or cells, so concurrency is either safe or impossible. bitvec-1.0.1/doc/slice/traits.md000064400000000000000000000006501046102023000145740ustar 00000000000000# `Bit-Slice` Trait Implementations `BitSlice` implements all of the traits that `[bool]` does, as well as a number that it does not but are useful for bit-slices. These additions include numeric formatting, so that any bit-slice can have its memory representation printed, as well as a permutation of `PartialEq` and `PartialOrd` implementations so that various `bitvec` containers can be easily compared with each other. bitvec-1.0.1/doc/slice.md000064400000000000000000000032161046102023000132670ustar 00000000000000# Bit-Addressable Memory Regions This module defines the [`BitSlice`] region, which forms the primary export item of the crate. It is a region of memory that addresses each bit individually, and is analogous to the slice language item. See `BitSlice`’s documentation for information on its use. 
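A brief taste of the region type (see `BitSlice`’s own documentation for the full API):

```rust
use bitvec::prelude::*;

let byte = 0b0110u8;
let bits: &BitSlice<u8, Lsb0> = byte.view_bits::<Lsb0>();
// Each bit is individually addressable, just like elements of `[T]`.
assert!(!bits[0] && bits[1] && bits[2] && !bits[3]);
assert_eq!(bits.count_ones(), 2);
```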
The other data structures `bitvec` offers are built atop `BitSlice`, and follow the development conventions outlined in this module. Because the API surface for `bitvec` data structures is so large, they are broken into a number of common submodules: - `slice` defines the `BitSlice` data structure, its inherent methods that are original to `bitvec`, as well as some free functions. - `slice::api` defines ports of the `impl [T]` inherent blocks from `core::slice`. - `slice::iter` contains all the logic used to iterate across `BitSlices`, including ports of `core::slice` iterators. - `slice::ops` contains implementations of `core::ops` traits that power operator sigils. - `slice::traits` contains all the other trait implementations. - `slice::tests` contains unit tests for `BitSlice` inherent methods. Additionally, `slice` has a submodule unique to it: `specialization` contains override functions that provide faster behavior on known `BitOrder` implementations. Since the other data structures `Deref` to it, they do not need to implement bit-order specializations of their own. All ports of language or standard-library items have an `## Original` section in their documentation that links to the item they are porting, and possibly an `## API Differences` that explains why the `bitvec` item is not a drop-in replacement. [`BitSlice`]: self::BitSlice bitvec-1.0.1/doc/store/BitStore.md000064400000000000000000000027241046102023000150620ustar 00000000000000# Bit Storage This trait drives `bitvec`’s ability to view memory as a collection of discrete bits. It combines awareness of storage element width, memory-bus access requirements, element contention, and buffer management, into a type-system graph that the rest of the crate can use to abstract away concerns about memory representation or access rules. It is responsible for extending the standard Rust `&`/`&mut` shared/exclusion rules to apply to individual bits while avoiding violating those rules when operating on real memory so that Rust and LLVM cannot find fault with the object code it produces. ## Implementors This is implemented on three type families: - all [`BitRegister`] raw integer fundamentals - all [`Cell`] wrappers of them - all [atomic] variants of them The [`BitSlice`] region, and all structures composed atop it, can be built out of regions of memory that have this trait implementation. ## Associated Types The associated types attached to each implementation create a closed graph of type transitions used to manage alias conditions. When a bit-slice region determines that an aliasing or unaliasing event has occurred, it transitions along the type graph in order to maintain correct operations in memory. The methods that cause type transitions can be found in [`BitSlice`] and [`domain`]. [`BitRegister`]: crate::mem::BitRegister [`BitSlice`]: crate::slice::BitSlice [`Cell`]: core::cell::Cell [`domain`]: crate::domain [atomic]: core::sync::atomic bitvec-1.0.1/doc/store.md000064400000000000000000000113601046102023000133230ustar 00000000000000# Storage Memory Description This module defines the `bitvec` memory model used to interface bit-slice regions to raw memory, and manage type-state changes as demanded by the region descriptor. The [`BitStore`] trait is the primary type-level description of `bitvec` views of the memory space and provides the runtime system that drives the crate memory model. 
## Memory Model `bitvec` considers all memory within [`BitSlice`] regions as if it were composed of discrete bits, each divisible and independent from its neighbors, just as the Rust memory model considers elements `T` in a slice `[T]`. Much as ordinary byte slices `[u8]` provide an API where each byte is distinct and independent from its neighbors, but the underlying processor silicon clusters them in words and cachelines, both the processor silicon *and* the Rust compiler require that bits in a `BitSlice` be grouped into memory elements, and collectively subjected to aliasing rules within their batch. `bitvec` manages this through the `BitStore` trait. It is implemented on three type families available from the Rust standard libraries: - [unsigned integers] - [atomic] unsigned integers - [`Cell`] wrappers of unsigned integers `bitvec` receives memory regions typed with one of these families and wraps it in one of its data structures based on the `BitSlice` region. The target processor is responsible for handling any contention between `T: BitStore` memory elements; this is irrelevant to the `bitvec` model. `bitvec` is solely responsible for proving to the Rust compiler that all memory accesses through its types are correctly managed according to the `&`/`&mut` shared/exclusion reference model, and the [`UnsafeCell`] shared-mutation model. Through `BitStore`, `bitvec` is able to demonstrate that `&mut BitSlice` references to a region of *bits* have no other `BitSlice` references capable of viewing those bits. However, multiple `&mut BitSlice` references may view the same underlying memory element, which is undefined behavior in the Rust compiler unless additional synchronization and mutual exclusion is provided to prevent racing writes and unsynchronized reads. As such, `BitStore` provides a closed type-system graph that the `BitSlice` region API uses to mark events that can induce aliasing over memory locations. When a `&mut BitSlice<_, T>` typed with an ordinary unsigned integer uses any of the APIs that call [`.split_at_mut()`], it transitions its `BitStore` parameter to `&mut BitSlice<_, T::Alias>`. The [`::Alias`] associated type is always a type that manages aliasing references to a single memory location: either an [atomic] unsigned integer `T` or a [`Cell`][`Cell`]. The Rust standard library guarantees that these types will behave correctly when multiple references to a single location attempt to perform memory transactions. The atomic and `Cell` types stay as themselves when [`BitSlice`] introduces aliasing conditions, as they are already alias-aware. Foreign implementations of `BitStore` are required to follow the conventions used here: unsynchronized storage types must create marker newtypes over an appropriate synchronized type for `::Alias` and uphold the “only `&mut` has write permission” rule, while synchronized storage types do not need to perform these transitions, but may never transition to an unsynchronized type either. The `bitvec` memory description model as implemented in the [`domain`] module is able to perform the inverse transition: where a `BitSlice` can demonstrate a static awareness that the `&`/`&mut` exclusion rules are satisfied for a particular element slice `[T]`, it may apply the [`::Unalias`] marker to undo any `::Alias`ing, and present a type that has no more aliasing protection than that with which the memory region was initially declared. 
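A sketch of the aliasing transition as it appears through public APIs (the alias marker is written here via the associated-type projection; the unaliasing inverse runs through the `domain` module and is not shown):

```rust
use bitvec::prelude::*;

let mut data = 0u8;
let bits = data.view_bits_mut::<Lsb0>();
// Splitting one element’s bits into two exclusive halves makes the halves
// alias each other in memory, so both are re-typed with the alias marker.
let (left, right): (
	&mut BitSlice<<u8 as BitStore>::Alias, Lsb0>,
	&mut BitSlice<<u8 as BitStore>::Alias, Lsb0>,
) = bits.split_at_mut(4);
left.fill(true);
right.fill(false);
assert_eq!(data, 0x0F);
```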
Namely, this means that the [atomic] and [`Cell`] wrappers will *never* be removed from a region that had them before it was given to `bitvec`, while a region of ordinary integers may regain the ability to be viewed without synchrony guards if `bitvec` can prove safety in the `domain` module. In order to retain `bitvec`’s promise that an `&mut BitSlice<_, T>` has the sole right of observation for all bits in its region, the unsigned integers alias to a crate-internal wrapper over the alias-capable standard-library types. This wrapper forbids mutation through shared references, so two [`BitSlice`] references that alias a memory location, but do not overlap in bits, may not be coërced to interfere with each other. [atomic]: core::sync::atomic [unsigned integers]: core::primitive [`BitSlice`]: crate::slice::BitSlice [`BitStore`]: self::BitStore [`Cell`]: core::cell::Cell [`UnsafeCell`]: core::cell::UnsafeCell [`domain`]: crate::domain [`::Alias`]: self::BitStore::Alias [`::Unalias`]: self::BitStore::Unalias bitvec-1.0.1/doc/vec/BitVec.md000064400000000000000000000141541046102023000141240ustar 00000000000000# Bit-Precision Dynamic Array This is an analogue to `Vec` that stores its data using a compaction scheme to ensure that each `bool` takes exactly one bit of memory. It is similar to the C++ type [`std::vector`], but uses `bitvec`’s type parameter system to provide more detailed control over the in-memory representation. This is *always* a heap allocation. If you know your sizes at compile-time, you may prefer to use [`BitArray`] instead, which is able to store its data as an immediate value rather than through an indirection. ## Documentation Practices `BitVec` exactly replicates the API of the standard-library `Vec` type, including inherent methods, trait implementations, and relationships with the [`BitSlice`] slice analogue. Items that are either direct ports, or renamed variants, of standard-library APIs will have a `## Original` section that links to their standard-library documentation. Items that map to standard-library APIs but have a different API signature will also have an `## API Differences` section that describes what the difference is, why it exists, and how to transform your code to fit it. For example: ## Original [`Vec`](alloc::vec::Vec) ## API Differences As with all `bitvec` data structures, this takes two type parameters `` that govern the bit-vector’s storage representation in the underlying memory, and does *not* take a type parameter to govern what data type it stores (always `bool`) ## Suggested Uses `BitVec` is able to act as a compacted `usize => bool` dictionary, and is useful for holding large collections of truthiness. For instance, you might replace a `Vec>` with a `(BitVec, Vec>`) to cut down on the resident size of the discriminant. Through the [`BitField`] trait, `BitVec` is also able to act as a transport buffer for data that can be marshalled as integers. Serializing data to a narrower compacted form, or deserializing data *from* that form, can be easily accomplished by viewing subsets of a bit-vector and storing integers into, or loading integers out of, that subset. As an example, transporting four ten-bit integers can be done in five bytes instead of eight like so: ```rust use bitvec::prelude::*; let mut bv = bitvec![u8, Msb0; 0; 40]; bv[0 .. 10].store::(0x3A8); bv[10 .. 20].store::(0x2F9); bv[20 .. 30].store::(0x154); bv[30 .. 
40].store::(0x06D); ``` If you wish to use bit-field memory representations as `struct` fields rather than a transport buffer, consider `BitArray` instead: that type keeps its data as an immediate, and is more likely to act like a C struct with bitfields. ## Examples `BitVec` has exactly the same API as `Vec`, and even extends it with some of `Vec`’s behaviors. As a brief tour: ### Push and Pop ```rust use bitvec::prelude::*; let mut bv: BitVec = BitVec::new(); bv.push(false); bv.push(true); assert_eq!(bv.len(), 2); assert_eq!(bv[0], false); assert_eq!(bv.pop(), Some(true)); assert_eq!(bv.len(), 1); ``` ### Writing Into a Bit-Vector The only `Vec` API that `BitVec` does *not* implement is `IndexMut`, because that is not yet possible. Instead, [`.get_mut()`] can produce a proxy reference, or [`.set()`] can take an index and a value to write. ```rust use bitvec::prelude::*; let mut bv: BitVec = BitVec::new(); bv.push(false); *bv.get_mut(0).unwrap() = true; assert!(bv[0]); bv.set(0, false); assert!(!bv[0]); ``` ### Macro Construction Like `Vec`, `BitVec` also has a macro constructor: [`bitvec!`] takes a sequence of bit expressions and encodes them at compile-time into a suitable buffer. At run-time, this buffer is copied into the heap as a `BitVec` with no extra cost beyond the allocation. ```rust use bitvec::prelude::*; let bv = bitvec![0; 10]; let bv = bitvec![0, 1, 0, 0, 1]; let bv = bitvec![u16, Msb0; 1; 20]; ``` ### Borrowing as `BitSlice` `BitVec` lends its buffer as a `BitSlice`, so you can freely give permission to view or modify the contained data without affecting the allocation: ```rust use bitvec::prelude::*; fn read_bitslice(bits: &BitSlice) { // … } let bv = bitvec![0; 30]; read_bitslice(&bv); let bs: &BitSlice = &bv; ``` ## Other Notes The default type parameters are ``. This is the most performant pair when operating on memory, but likely does not match your needs if you are using `BitVec` to represent a transport buffer. See [the user guide][book] for more details on how the type parameters govern memory representation. Applications, or single-purpose libraries, built atop `bitvec` will likely want to create a `type` alias with specific type parameters for their usage. `bitvec` is fully generic over the ordering/storage types, but this generality is rarely useful for client crates to propagate. `` is fastest; `` matches what most debugger views of memory will print, and the rest are documented in the guide. ## Safety Unlike the other data structures in this crate, `BitVec` is uniquely able to hold uninitialized memory and produce pointers into it. As described in the [`BitAccess`] documentation, this crate is categorically unable to operate on uninitialized memory in any way. In particular, you may not allocate a buffer using [`::with_capacity()`], then use [`.as_mut_bitptr()`] to create a pointer used to write into the uninitialized buffer. You must always initialize the buffer contents of a `BitVec` before attempting to view its contents. You can accomplish this through safe APIs such as `.push()`, `.extend()`, or `.reserve()`. These are all guaranteed to safely initialize the memory elements underlying the `BitVec` buffer without incurring undefined behavior in their operation. 
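A short sketch of the safe path, using the initializing APIs named above:

```rust
use bitvec::prelude::*;

let mut bv: BitVec<u8, Msb0> = BitVec::with_capacity(100);
// The allocation exists, but none of its contents are initialized yet.
assert!(bv.is_empty());
// Safe APIs such as `.extend()` initialize memory before exposing it.
bv.extend(core::iter::repeat(false).take(100));
assert_eq!(bv.count_zeros(), 100);
```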
[book]: https://bitvecto-rs.github.io/bitvec/type-parameters.html [`BitAccess`]: crate::access::BitAccess [`BitArray`]: crate::array::BitArray [`BitField`]: crate::field::BitField [`BitSlice`]: crate::slice::BitSlice [`bitvec!`]: macro@crate::bitvec [`std::vector`]: https://en.cppreference.com/w/cpp/container/vector_bool [`.as_mut_bitptr()`]: crate::slice::BitSlice::as_mut_bitptr [`.get_mut()`]: crate::slice::BitSlice::get_mut [`.set()`]: crate::slice::BitSlice::set [`::with_capacity()`]: Self::with_capacity bitvec-1.0.1/doc/vec/iter/Drain.md000064400000000000000000000011011046102023000147340ustar 00000000000000# Draining Iteration This structure iterates over a subset of a bit-vector, yielding each bit and removing it completely from the source. Each drain locks the bit-vector that created it until the drain is either destroyed or forgotten. If a drain is leaked rather than being allowed to drop normally, the source bit-vector is only guaranteed to have contents up to the original start of the drain. All further contents are unspecified. See [`BitVec::drain()`] for more details. ## Original [`vec::Drain`](alloc::vec::Drain) [`BitVec::drain()`]: crate::vec::BitVec::drain bitvec-1.0.1/doc/vec/iter/Extend_BitRef.md000064400000000000000000000006701046102023000163730ustar 00000000000000# Bit-Vector Extension by Proxy References **DO NOT** use this. You *clearly* have a bit-slice. Use [`.extend_from_bitslice()`] instead! Iterating over a bit-slice requires loading from memory and constructing a proxy reference for each bit. This is needlessly slow; the specialized method is able to avoid this per-bit cost and possibly even use batched operations. [`.extend_from_bitslice()`]: crate::vec::BitVec::extend_from_bitslice bitvec-1.0.1/doc/vec/iter/Extend_bool.md000064400000000000000000000014541046102023000161540ustar 00000000000000# Bit-Vector Extension This extends a bit-vector from anything that produces individual bits. ## Original [`impl Extend for Vec`][orig] ## Notes This `.extend()` call is the second-slowest possible way to append bits into a bit-vector, faster only than calling `iter.for_each(|bit| bv.push(bit))`. **DO NOT** use this if you have any other choice. If you are extending a bit-vector from the contents of a bit-slice, then you should use [`.extend_from_bitslice()`] instead. That method is specialized to perform upfront allocation and, where possible, use a batch copy rather than copying each bit individually from the source into the bit-vector. [orig]: https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-Extend%3CT%3E [`.extend_from_bitslice()`]: crate::vec::BitVec::extend_from_bitslice bitvec-1.0.1/doc/vec/iter/FillStatus.md000064400000000000000000000011141046102023000157750ustar 00000000000000# Fill Status The standard library uses a `bool` flag to indicate whether a splicing operation exhausted the source or filled the target, which is not very clear about what is being signaled. This enum replaces it. ## Variants - `FullSpan`: This marks that a drain span has been completely filled with replacement bits, and any further replacement would require insertion rather than overwriting dead storage. - `EmptyInput`: This marks that a replacement source has been run to completion, but dead bits remain in a drain span, and the dead range will need to be overwritten. bitvec-1.0.1/doc/vec/iter/FromIterator_BitRef.md000064400000000000000000000006501046102023000175570ustar 00000000000000# Bit-Vector Collection from Proxy References **DO NOT** use this. You *clearly* have a bit-slice. 
Use [`::from_bitslice()`] instead! Iterating over a bit-slice requires loading from memory and constructing a proxy reference for each bit. This is needlessly slow; the specialized method is able to avoid this per-bit cost and possibly even use batched operations. [`::from_bitslice()`]: crate::vec::BitVec::from_bitslice bitvec-1.0.1/doc/vec/iter/FromIterator_bool.md000064400000000000000000000014611046102023000173400ustar 00000000000000# Bit-Vector Collection This collects a bit-vector from anything that produces individual bits. ## Original [`impl FromIterator for Vec`][orig] ## Notes This `.collect()` call is the second-slowest possible way to collect bits into a bit-vector, faster only than calling `iter.for_each(|bit| bv.push(bit))`. **DO NOT** use this if you have any other choice. If you are collecting a bit-vector from the contents of a bit-slice, then you should use [`::from_bitslice()`] instead. That method is specialized to perform upfront allocation and, where possible, use a batch copy rather than copying each bit individually from the source into the bit-vector. [orig]: https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-FromIterator%3CT%3E [`::from_bitslice()`]: crate::vec::BitVec::extend_from_bitslice bitvec-1.0.1/doc/vec/iter/IntoIterator.md000064400000000000000000000005131046102023000163300ustar 00000000000000# Bit-Vector Iteration Bit-vectors have the advantage that iteration consumes the whole structure, so they can simply freeze the allocation into a bit-box, then use its iteration and destructor. ## Original [`impl IntoIterator for Vec`][orig] [orig]: https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-IntoIterator bitvec-1.0.1/doc/vec/iter/Splice.md000064400000000000000000000007731046102023000151340ustar 00000000000000# Splicing Iteration This adapts a [`Drain`] to overwrite the drained section with the contents of another iterator. When this splice is destroyed, the drained section of the source bit-vector is replaced with the contents of the replacement iterator. If the replacement is not the same length as the drained section, then the bit-vector is resized to fit. See [`BitVec::splice()`] for more information. ## Original [`vec::Splice`](alloc::vec::Splice) [`BitVec::splice()`]: crate::vec::BitVec::splice bitvec-1.0.1/doc/vec/iter.md000064400000000000000000000010641046102023000137070ustar 00000000000000# Bit-Vector Iteration This module provides iteration protocols for `BitVec`, including: - extension of existing bit-vectors with new data - collection of data into new bit-vectors - iteration over the contents of a bit-vector - draining and splicing iteration over parts of a bit-vector. `BitVec` implements `Extend` and `FromIterator` for both sources of individual bits and sources of `T` memory elements. The by-value `bool` iterator is defined in `boxed::iter`, rather than here. The `Drain` and `Splice` iterators remain here in their original location. bitvec-1.0.1/doc/vec.md000064400000000000000000000010741046102023000127450ustar 00000000000000# Dynamically-Allocated, Adjustable-Size, Bit Buffer This module defines the [`BitVec`] buffer and its associated support code. `BitVec` is analogous to [`Vec`] in its use of dynamic memory allocation and its relationship to the [`BitSlice`] type. Most of the interesting work to be done on a bit sequence is actually implemented in `BitSlice`, with `BitVec` itself largely only containing interfaces to the memory allocator. 
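A small sketch of that division of labor:

```rust
use bitvec::prelude::*;

let mut bv = bitvec![0, 1, 0, 1];
// Growing the buffer is `BitVec`’s own job…
bv.push(true);
// …while inspection comes from `BitSlice`, reached through `Deref`.
assert_eq!(bv.count_ones(), 3);
assert_eq!(bv.as_bitslice(), bits![0, 1, 0, 1, 1]);
```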
## Original

[`vec`](mod@alloc::vec)

[`BitVec`]: crate::vec::BitVec
[`BitSlice`]: crate::slice::BitSlice
[`Vec`]: alloc::vec::Vec
bitvec-1.0.1/doc/view/AsBits.md000064400000000000000000000017301046102023000143260ustar 00000000000000
# Immutable Bit View

This trait is an analogue to the [`AsRef`] trait, in that it enables any type
to provide a view of an immutable bit-slice.

It does not require an `AsRef<[T: BitStore]>` implementation, but a blanket
implementation for all `AsRef<[T: BitStore]>` is provided. This allows you to
choose whether to implement only one of `AsBits` or `AsRef<[T]>`, and gain a
bit-slice view through either choice.

## Usage

The `.as_bits::<_>()` method has the same usage patterns as
[`BitView::view_bits`][0].

## Notes

You are not *forbidden* from creating multiple views with different element
types to the same region, but doing so is likely to cause inconsistent and
surprising behavior.

Refrain from implementing this trait with more than one storage argument
unless you are sure that you can uphold the memory region requirements of all
of them, and are aware of the behavior conflicts that may arise.

[0]: crate::view::BitView::view_bits
[`AsRef`]: core::convert::AsRef
bitvec-1.0.1/doc/view/AsMutBits.md000064400000000000000000000017421046102023000150170ustar 00000000000000
# Mutable Bit View

This trait is an analogue to the [`AsMut`] trait, in that it enables any type
to provide a view of a mutable bit-slice.

It does not require an `AsMut<[T: BitStore]>` implementation, but a blanket
implementation for all `AsMut<[T: BitStore]>` is provided. This allows you to
choose whether to implement only one of `AsMutBits` or `AsMut<[T]>`, and gain
a bit-slice view through either choice.

## Usage

The `.as_mut_bits::<_>()` method has the same usage patterns as
[`BitView::view_bits_mut`][0].

## Notes

You are not *forbidden* from creating multiple views with different element
types to the same region, but doing so is likely to cause inconsistent and
surprising behavior.

Refrain from implementing this trait with more than one storage argument
unless you are sure that you can uphold the memory region requirements of all
of them, and are aware of the behavior conflicts that may arise.

[0]: crate::view::BitView::view_bits_mut
[`AsMut`]: core::convert::AsMut
bitvec-1.0.1/doc/view/BitView.md000064400000000000000000000015701046102023000145140ustar 00000000000000
# Bit View

This trait describes a region of memory that can be viewed as its constituent
bits. It is blanket-implemented on all [`BitStore`] implementors, as well as
slices and arrays of them. It should not be implemented on any other types.

The contained extension methods allow existing memory to be easily viewed as
[`BitSlice`]s using dot-call method syntax rather than the more cumbersome
constructor functions in `BitSlice`’s inherent API.

Since the element type is already known to the implementor, the only type
parameter you need to provide when calling these methods is the bit-ordering.

## Examples

```rust
use bitvec::prelude::*;

let a = 0u16;
let a_bits: &BitSlice<u16, Lsb0> = a.view_bits::<Lsb0>();

let mut b = [0u8; 4];
let b_bits: &mut BitSlice<u8, Msb0> = b.view_bits_mut::<Msb0>();
```

[`BitSlice`]: crate::slice::BitSlice
[`BitStore`]: crate::store::BitStore
bitvec-1.0.1/doc/view.md000064400000000000000000000016201046102023000131370ustar 00000000000000
# Bit View Adapters

This module provides extension traits that view ordinary memory as
bit-addressable.

The [`&BitSlice`][0] type is a reference view over memory managed elsewhere.
The inherent constructors are awkward to call, as they require function syntax and a redundant type argument (the `T: BitStore` parameter is already known by the data being viewed). As an alternative, the [`BitView`] trait provides methods on `BitStore` scalars and arrays that are more convenient to create `BitSlice` reference views. Additionally, [`BitViewSized`], [`AsBits`], and [`AsMutBits`] inform the type system about types that can be used as [`BitArray`] storage, immutably viewed as bits, or mutably viewed as bits, respectively. [0]: crate::slice::BitSlice [`AsBits`]: self::AsBits [`AsMutBits`]: self::AsMutBits [`BitArray`]: crate::array::BitArray [`BitView`]: self::BitView [`BitViewSized`]: self::BitViewSized bitvec-1.0.1/src/access.rs000064400000000000000000000173371046102023000135100ustar 00000000000000#![doc = include_str!("../doc/access.md")] use core::sync::atomic::Ordering; use funty::Integral; use radium::Radium; use crate::{ index::{ BitIdx, BitMask, }, mem::BitRegister, order::BitOrder, }; #[doc = include_str!("../doc/access/BitAccess.md")] pub trait BitAccess: Radium where ::Item: BitRegister { /// Clears bits within a memory element to `0`. /// /// The mask provided to this method must be constructed from indices that /// are valid in the caller’s context. As the mask is already computed by /// the caller, this does not take an ordering type parameter. /// /// ## Parameters /// /// - `mask`: A mask of any number of bits. This is a selection mask: all /// bits in the mask that are set to `1` will set the corresponding bit in /// `*self` to `0`. /// /// ## Returns /// /// The prior value of the memory element. /// /// ## Effects /// /// All bits in `*self` corresponding to `1` bits in the `mask` are cleared /// to `0`; all others retain their original value. /// /// Do not invert the `mask` prior to calling this function. [`BitMask`] is /// a selection type, not a bitwise-operation argument. /// /// [`BitMask`]: crate::index::BitMask #[inline] fn clear_bits(&self, mask: BitMask) -> Self::Item { self.fetch_and(!mask.into_inner(), Ordering::Relaxed) } /// Sets bits within a memory element to `1`. /// /// The mask provided to this method must be constructed from indices that /// are valid in the caller’s context. As the mask is already computed by /// the caller, this does not take an ordering type parameter. /// /// ## Parameters /// /// - `mask`: A mask of any number of bits. This is a selection mask: all /// bits in the mask that are set to `1` will set the corresponding bit in /// `*self` to `1`. /// /// ## Returns /// /// The prior value of the memory element. /// /// ## Effects /// /// All bits in `*self` corresponding to `1` bits in the `mask` are set to /// `1`; all others retain their original value. #[inline] fn set_bits(&self, mask: BitMask) -> Self::Item { self.fetch_or(mask.into_inner(), Ordering::Relaxed) } /// Inverts bits within a memory element. /// /// The mask provided to this method must be constructed from indices that /// are valid in the caller’s context. As the mask is already computed by /// the caller, this does not take an ordering type parameter. /// /// ## Parameters /// /// - `mask`: A mask of any number of bits. This is a selection mask: all /// bits in the mask that are set to `1` will invert the corresponding bit /// in `*self`. /// /// ## Returns /// /// The prior value of the memory element. /// /// ## Effects /// /// All bits in `*self` corresponding to `1` bits in the `mask` are /// inverted; all others retain their original value. 
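	///
	/// ## Examples
	///
	/// A minimal sketch of the semantics: at the integer level, inverting
	/// under a selection mask is a plain XOR.
	///
	/// ```rust
	/// let value = 0b1010_0110u8;
	/// let mask = 0b0000_1111u8; // selects the low nibble for inversion
	/// assert_eq!(value ^ mask, 0b1010_1001);
	/// ```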
#[inline] fn invert_bits(&self, mask: BitMask) -> Self::Item { self.fetch_xor(mask.into_inner(), Ordering::Relaxed) } /// Writes a value to one bit in a memory element, returning the previous /// value. /// /// ## Type Parameters /// /// - `O`: An ordering of bits in a memory element that translates the /// `index` into a real position. /// /// ## Parameters /// /// - `index`: The semantic index of the bit in `*self` to modify. /// - `value`: The new bit value to write into `*self` at the `index`. /// /// ## Returns /// /// The bit previously stored in `*self` at `index`. These operations are /// required to load the `*self` value from memory in order to operate, and /// so always have the prior value available for use. This can reduce /// spurious loads throughout the crate. /// /// ## Effects /// /// `*self` is updated with the bit at `index` set to `value`; all other /// bits remain unchanged. #[inline] fn write_bit(&self, index: BitIdx, value: bool) -> bool where O: BitOrder { let select = index.select::().into_inner(); select & if value { self.fetch_or(select, Ordering::Relaxed) } else { self.fetch_and(!select, Ordering::Relaxed) } != ::ZERO } /// Gets the function that will write `value` into all bits under a mask. /// /// This is useful for preparing bulk operations that all write the same /// data into memory, and only need to provide the shape of memory to write. /// /// ## Parameters /// /// - `value`: The bit that will be written by the returned function. /// /// ## Returns /// /// A function which writes `value` into memory at a given address and under /// a given mask. If `value` is `false`, then this produces [`clear_bits`]; /// if it is `true`, then this produces [`set_bits`]. /// /// [`clear_bits`]: Self::clear_bits /// [`set_bits`]: Self::set_bits #[inline] fn get_writers( value: bool, ) -> for<'a> fn(&'a Self, BitMask) -> Self::Item { if value { Self::set_bits } else { Self::clear_bits } } } impl BitAccess for A where A: Radium, A::Item: BitRegister, { } #[doc = include_str!("../doc/access/BitSafe.md")] pub trait BitSafe { /// The element type being guarded against improper mutation. /// /// This is only present as an extra proof that the type graph has a /// consistent view of the underlying memory. type Mem: BitRegister; /// The memory-access type this guards. /// /// This is exposed as an associated type so that `BitStore` can name it /// without having to re-select it based on crate configuration. type Rad: Radium; /// The zero constant. const ZERO: Self; /// Loads the value from memory, allowing for the possibility that other /// handles have write permissions to it. fn load(&self) -> Self::Mem; } /// Constructs a shared-mutable guard type that disallows mutation *through it*. macro_rules! safe { ($($t:ident => $w:ident => $r:ty);+ $(;)?) => { $( #[derive(Debug)] #[repr(transparent)] #[doc = include_str!("../doc/access/impl_BitSafe.md")] pub struct $w { inner: ::Rad, } impl $w { /// Allow construction of the safed value by forwarding to its /// interior constructor. /// /// This type is not public API, and general use has no reason to /// construct values of it directly. It is provided for convenience /// as a crate internal. 
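			///
			/// Depending on the `atomic` crate feature, the interior `Rad`
			/// type selected below is either an atomic integer or a
			/// `core::cell::Cell`; both offer `const` constructors, which
			/// keeps this function `const` in either configuration.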
pub(crate) const fn new(value: $t) -> Self { Self { inner: <::Rad>::new(value) } } } impl BitSafe for $w { type Mem = $t; #[cfg(feature = "atomic")] type Rad = $r; #[cfg(not(feature = "atomic"))] type Rad = core::cell::Cell<$t>; const ZERO: Self = Self::new(0); #[inline] fn load(&self) -> Self::Mem { self.inner.load(Ordering::Relaxed) } } )+ }; } safe! { u8 => BitSafeU8 => radium::types::RadiumU8; u16 => BitSafeU16 => radium::types::RadiumU16; u32 => BitSafeU32 => radium::types::RadiumU32; } #[cfg(target_pointer_width = "64")] safe!(u64 => BitSafeU64 => radium::types::RadiumU64); safe!(usize => BitSafeUsize => radium::types::RadiumUsize); #[cfg(test)] mod tests { use core::cell::Cell; use super::*; use crate::prelude::*; #[test] fn touch_memory() { let data = Cell::new(0u8); let accessor = &data; let aliased = unsafe { &*(&data as *const _ as *const BitSafeU8) }; assert!(!BitAccess::write_bit::( accessor, BitIdx::new(1).unwrap(), true )); assert_eq!(aliased.load(), 2); assert!(BitAccess::write_bit::( accessor, BitIdx::new(1).unwrap(), false )); assert_eq!(aliased.load(), 0); } #[test] #[cfg(not(miri))] fn sanity_check_prefetch() { use core::cell::Cell; assert_eq!( as BitAccess>::get_writers(false) as *const (), as BitAccess>::clear_bits as *const () ); assert_eq!( as BitAccess>::get_writers(true) as *const (), as BitAccess>::set_bits as *const () ); } } bitvec-1.0.1/src/array/api.rs000064400000000000000000000032331046102023000141240ustar 00000000000000#![doc = include_str!("../../doc/array/api.md")] use super::BitArray; use crate::{ order::BitOrder, slice::BitSlice, view::BitViewSized, }; impl BitArray where A: BitViewSized, O: BitOrder, { /// Returns a bit-slice containing the entire bit-array. Equivalent to /// `&a[..]`. /// /// Because `BitArray` can be viewed as a slice of bits or as a slice of /// elements with equal ease, you should switch to using [`.as_bitslice()`] /// or [`.as_raw_slice()`] to make your choice explicit. /// /// ## Original /// /// [`array::as_slice`](https://doc.rust-lang.org/std/primitive.array.html#method.as_slice) /// /// [`.as_bitslice()`]: Self::as_bitslice /// [`.as_raw_slice()`]: Self::as_raw_slice #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice()` or `.as_raw_slice()` instead"] pub fn as_slice(&self) -> &BitSlice { self.as_bitslice() } /// Returns a mutable bit-slice containing the entire bit-array. Equivalent /// to `&mut a[..]`. /// /// Because `BitArray` can be viewed as a slice of bits or as a slice of /// elements with equal ease, you should switch to using /// [`.as_mut_bitslice()`] or [`.as_raw_mut_slice()`] to make your choice /// explicit. 
/// /// ## Original /// /// [`array::as_mut_slice`](https://doc.rust-lang.org/std/primitive.array.html#method.as_mut_slice) /// /// [`.as_mut_bitslice()`]: Self::as_mut_bitslice /// [`.as_raw_mut_slice()`]: Self::as_raw_mut_slice #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_mut_bitslice()` or `.as_raw_mut_slice()` instead"] pub fn as_mut_slice(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } bitvec-1.0.1/src/array/iter.rs000064400000000000000000000115131046102023000143160ustar 00000000000000#![doc = include_str!("../../doc/array/iter.md")] use core::{ fmt::{ self, Debug, Formatter, }, iter::FusedIterator, ops::Range, }; use tap::Pipe; use wyz::comu::Const; use super::BitArray; use crate::{ mem, order::BitOrder, ptr::BitPtr, slice::BitSlice, view::BitViewSized, }; /// [Original](https://doc.rust-lang.org/std/primitive.array.html#impl-IntoIterator) impl IntoIterator for BitArray where A: BitViewSized, O: BitOrder, { type IntoIter = IntoIter; type Item = as Iterator>::Item; #[inline] fn into_iter(self) -> Self::IntoIter { IntoIter::new(self) } } /// [Original](https://doc.rust-lang.org/std/primitive.array.html#impl-IntoIterator-1) #[cfg(not(tarpaulin_include))] impl<'a, A, O> IntoIterator for &'a BitArray where O: BitOrder, A: 'a + BitViewSized, { type IntoIter = <&'a BitSlice as IntoIterator>::IntoIter; type Item = <&'a BitSlice as IntoIterator>::Item; #[inline] fn into_iter(self) -> Self::IntoIter { self.as_bitslice().into_iter() } } /// [Original](https://doc.rust-lang.org/std/primitive.array.html#impl-IntoIterator-2) #[cfg(not(tarpaulin_include))] impl<'a, A, O> IntoIterator for &'a mut BitArray where O: BitOrder, A: 'a + BitViewSized, { type IntoIter = <&'a mut BitSlice as IntoIterator>::IntoIter; type Item = <&'a mut BitSlice as IntoIterator>::Item; #[inline] fn into_iter(self) -> Self::IntoIter { self.as_mut_bitslice().into_iter() } } #[derive(Clone)] #[doc = include_str!("../../doc/array/IntoIter.md")] pub struct IntoIter where A: BitViewSized, O: BitOrder, { /// The bit-array being iterated. array: BitArray, /// The indices in `.array` that have not yet been yielded. /// /// This range is always a strict subset of `0 .. self.array.len()`. alive: Range, } impl IntoIter where A: BitViewSized, O: BitOrder, { /// Converts a bit-array into its iterator. /// /// The [`.into_iter()`] method on bit-arrays forwards to this. While /// `BitArray` does deref to `&/mut BitSlice`, which also has /// `.into_iter()`, this behavior has always been present alongside /// `BitArray` and there is no legacy forwarding to preserve. /// /// ## Original /// /// [`IntoIter::new`](core::array::IntoIter::new)s #[inline] pub fn new(array: BitArray) -> Self { Self { array, alive: 0 .. mem::bits_of::(), } } /// Views the remaining unyielded bits in the iterator. /// /// ## Original /// /// [`IntoIter::as_slice`](core::array::IntoIter::as_slice) #[inline] pub fn as_bitslice(&self) -> &BitSlice { unsafe { self.array.as_bitslice().get_unchecked(self.alive.clone()) } } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_slice(&self) -> &BitSlice { self.as_bitslice() } /// Mutably views the remaining unyielded bits in the iterator. 
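	///
	/// Writes through this view affect the bits that the iterator has not
	/// yet yielded. A minimal sketch:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut iter = bitarr![0, 1].into_iter();
	/// iter.as_mut_bitslice().set(1, false);
	/// assert_eq!(iter.next(), Some(false));
	/// assert_eq!(iter.next(), Some(false));
	/// ```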
/// /// ## Original /// /// [`IntoIter::as_mut_slice`](core::array::IntoIter::as_mut_slice) #[inline] pub fn as_mut_bitslice(&mut self) -> &mut BitSlice { unsafe { self.array .as_mut_bitslice() .get_unchecked_mut(self.alive.clone()) } } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice_mut()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_mut_slice(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } /// Gets a bit from the bit-array. #[inline] fn get(&self, index: usize) -> bool { unsafe { self.array .as_raw_slice() .pipe(BitPtr::::from_slice) .add(index) .read() } } } #[cfg(not(tarpaulin_include))] impl Debug for IntoIter where A: BitViewSized, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_tuple("IntoIter") .field(&self.as_bitslice()) .finish() } } impl Iterator for IntoIter where A: BitViewSized, O: BitOrder, { type Item = bool; easy_iter!(); #[inline] fn next(&mut self) -> Option { self.alive.next().map(|idx| self.get(idx)) } #[inline] fn nth(&mut self, n: usize) -> Option { self.alive.nth(n).map(|idx| self.get(idx)) } } impl DoubleEndedIterator for IntoIter where A: BitViewSized, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { self.alive.next_back().map(|idx| self.get(idx)) } #[inline] fn nth_back(&mut self, n: usize) -> Option { self.alive.nth_back(n).map(|idx| self.get(idx)) } } impl ExactSizeIterator for IntoIter where A: BitViewSized, O: BitOrder, { #[inline] fn len(&self) -> usize { self.alive.len() } } impl FusedIterator for IntoIter where A: BitViewSized, O: BitOrder, { } bitvec-1.0.1/src/array/ops.rs000064400000000000000000000102051046102023000141510ustar 00000000000000//! Operator trait implementations for bit-arrays. use core::ops::{ BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Deref, DerefMut, Index, IndexMut, Not, }; use super::BitArray; use crate::{ order::BitOrder, slice::BitSlice, store::BitStore, view::BitViewSized, }; #[cfg(not(tarpaulin_include))] impl BitAndAssign> for BitSlice where A: BitViewSized, O: BitOrder, { #[inline] fn bitand_assign(&mut self, rhs: BitArray) { *self &= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitAndAssign<&BitArray> for BitSlice where A: BitViewSized, O: BitOrder, { #[inline] fn bitand_assign(&mut self, rhs: &BitArray) { *self &= rhs.as_bitslice() } } impl BitAnd for BitArray where A: BitViewSized, O: BitOrder, BitSlice: BitAndAssign, { type Output = Self; #[inline] fn bitand(mut self, rhs: Rhs) -> Self::Output { self &= rhs; self } } impl BitAndAssign for BitArray where A: BitViewSized, O: BitOrder, BitSlice: BitAndAssign, { #[inline] fn bitand_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() &= rhs; } } #[cfg(not(tarpaulin_include))] impl BitOrAssign> for BitSlice where A: BitViewSized, O: BitOrder, { #[inline] fn bitor_assign(&mut self, rhs: BitArray) { *self |= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitOrAssign<&BitArray> for BitSlice where A: BitViewSized, O: BitOrder, { #[inline] fn bitor_assign(&mut self, rhs: &BitArray) { *self |= rhs.as_bitslice() } } impl BitOr for BitArray where A: BitViewSized, O: BitOrder, BitSlice: BitOrAssign, { type Output = Self; #[inline] fn bitor(mut self, rhs: Rhs) -> Self::Output { self |= rhs; self } } impl BitOrAssign for BitArray where A: BitViewSized, O: BitOrder, BitSlice: BitOrAssign, { #[inline] fn bitor_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() |= rhs; } } #[cfg(not(tarpaulin_include))] impl BitXorAssign> for 
BitSlice where A: BitViewSized, O: BitOrder, { #[inline] fn bitxor_assign(&mut self, rhs: BitArray) { *self ^= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitXorAssign<&BitArray> for BitSlice where A: BitViewSized, O: BitOrder, { #[inline] fn bitxor_assign(&mut self, rhs: &BitArray) { *self ^= rhs.as_bitslice() } } impl BitXor for BitArray where A: BitViewSized, O: BitOrder, BitSlice: BitXorAssign, { type Output = Self; #[inline] fn bitxor(mut self, rhs: Rhs) -> Self::Output { self ^= rhs; self } } impl BitXorAssign for BitArray where A: BitViewSized, O: BitOrder, BitSlice: BitXorAssign, { #[inline] fn bitxor_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() ^= rhs; } } impl Deref for BitArray where A: BitViewSized, O: BitOrder, { type Target = BitSlice; #[inline] fn deref(&self) -> &Self::Target { self.as_bitslice() } } impl DerefMut for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_bitslice() } } impl Index for BitArray where A: BitViewSized, O: BitOrder, BitSlice: Index, { type Output = as Index>::Output; #[inline] fn index(&self, index: Idx) -> &Self::Output { &self.as_bitslice()[index] } } impl IndexMut for BitArray where A: BitViewSized, O: BitOrder, BitSlice: IndexMut, { #[inline] fn index_mut(&mut self, index: Idx) -> &mut Self::Output { &mut self.as_mut_bitslice()[index] } } impl Not for BitArray where A: BitViewSized, O: BitOrder, { type Output = Self; #[inline] fn not(mut self) -> Self::Output { for elem in self.as_raw_mut_slice() { elem.store_value(!elem.load_value()); } self } } bitvec-1.0.1/src/array/tests.rs000064400000000000000000000067121046102023000145220ustar 00000000000000//! Unit tests for bit-arrays. #![cfg(test)] use core::{ borrow::{ Borrow, BorrowMut, }, cell::Cell, convert::TryFrom, fmt::Debug, hash::Hash, ops::{ BitAnd, BitOr, BitXor, Index, IndexMut, Range, }, }; use static_assertions::*; use crate::prelude::*; #[test] fn core_impl() { assert_impl_all!( BitArray: AsMut, AsRef, Borrow, BorrowMut, Debug, Default, Eq, Hash, Index, Index>, IndexMut>, IntoIterator, Ord, PartialEq<&'static BitSlice>, PartialEq<&'static mut BitSlice>, PartialOrd<&'static BitSlice>, TryFrom<&'static BitSlice>, ); assert_impl_all!(&'static BitArray: TryFrom<&'static BitSlice>); assert_impl_all!(&'static mut BitArray: TryFrom<&'static mut BitSlice>); } #[test] fn bonus_impl() { assert_impl_all!( BitArray: BitAnd<&'static BitSlice>, BitAnd, BitOr<&'static BitSlice>, BitOr, BitXor<&'static BitSlice>, BitXor, ); } #[test] fn make_and_view() { let data = [1u8, 2, 3, 4]; let bits = BitArray::<_, Msb0>::new(data); assert_eq!(bits.as_bitslice(), data.view_bits::()); assert_eq!(bits.len(), data.view_bits::().len()); assert!(!bits.is_empty()); assert_eq!(bits.into_inner(), data); } #[test] fn ops() { let a = bitarr![0, 0, 1, 1]; let b = bitarr![0, 1, 0, 1]; let c = a & b; assert_eq!(c, bitarr![0, 0, 0, 1]); let d = a | b; assert_eq!(d, bitarr![0, 1, 1, 1]); let e = a ^ b; assert_eq!(e, bitarr![0, 1, 1, 0]); let mut f = !e; assert_eq!(f[.. 4], bitarr![1, 0, 0, 1][.. 4]); let _: &BitSlice = &a; let _: &mut BitSlice = &mut f; } #[test] fn traits() { let a = BitArray::<[Cell; 3], Msb0>::default(); let b = a.clone(); assert_eq!(a, b); let mut c = rand::random::<[u8; 4]>(); let d = c.view_bits_mut::(); assert!(<&BitArray<[u8; 4], Lsb0>>::try_from(&*d).is_ok()); assert!(<&mut BitArray<[u8; 4], Lsb0>>::try_from(&mut *d).is_ok()); assert!(<&BitArray<[u8; 3], Lsb0>>::try_from(&d[4 .. 
28]).is_err()); assert!(<&mut BitArray<[u8; 3], Lsb0>>::try_from(&mut d[4 .. 28]).is_err()); assert_eq!(BitArray::<[u8; 4], Lsb0>::try_from(&*d).unwrap(), *d); } #[test] fn iter() { let data = rand::random::<[u32; 4]>(); let bits = data.into_bitarray::(); let view = data.view_bits::(); assert!( bits.into_iter() .zip(view.iter().by_vals()) .all(|(a, b)| a == b) ); let mut iter = bits.into_iter(); assert!(iter.next().is_some()); assert!(iter.next_back().is_some()); assert!(iter.nth(6).is_some()); assert!(iter.nth_back(6).is_some()); assert_eq!(iter.len(), 112); assert_eq!(iter.as_bitslice(), &view[8 .. 120]); assert_eq!(iter.as_mut_bitslice(), &view[8 .. 120]); } #[cfg(feature = "alloc")] mod format { #[cfg(not(feature = "std"))] use alloc::format; use core::{ any, convert::TryFrom, }; use super::{ BitArray, Lsb0, }; #[test] fn render() { let render = format!("{:?}", BitArray::::ZERO); assert!(render.starts_with(&format!( "BitArray", any::type_name::(), ))); assert!(render.ends_with("[0, 0, 0, 0, 0, 0, 0, 0]")); assert_eq!( format!( "{:?}", BitArray::::try_from(bits![u8, Lsb0; 0, 1]) .unwrap_err(), ), "TryFromBitSliceError::UnequalLen(2 != 8)", ); assert_eq!( format!( "{:?}", BitArray::::try_from(&bits![u8, Lsb0; 0; 9][1 ..]) .unwrap_err(), ), "TryFromBitSliceError::Misaligned", ); } } bitvec-1.0.1/src/array/traits.rs000064400000000000000000000163051046102023000146650ustar 00000000000000//! Additional trait implementations on bit-arrays. use core::{ borrow::{ Borrow, BorrowMut, }, cmp, convert::TryFrom, fmt::{ self, Debug, Display, Formatter, }, hash::{ Hash, Hasher, }, marker::Unpin, }; use tap::TryConv; use super::BitArray; use crate::{ index::BitIdx, mem, order::BitOrder, slice::BitSlice, store::BitStore, view::BitViewSized, }; #[cfg(not(tarpaulin_include))] impl Borrow> for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn borrow(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BorrowMut> for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn borrow_mut(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } impl Clone for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn clone(&self) -> Self { let mut out = Self::ZERO; for (dst, src) in out.as_raw_mut_slice().iter_mut().zip(self.as_raw_slice()) { dst.store_value(src.load_value()); } out } } impl Eq for BitArray where A: BitViewSized, O: BitOrder, { } #[cfg(not(tarpaulin_include))] impl Ord for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_bitslice().cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl PartialEq> for BitSlice where O1: BitOrder, O2: BitOrder, A: BitViewSized, T: BitStore, { #[inline] fn eq(&self, other: &BitArray) -> bool { self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq for BitArray where A: BitViewSized, O: BitOrder, Rhs: ?Sized, BitSlice: PartialEq, { #[inline] fn eq(&self, other: &Rhs) -> bool { self.as_bitslice() == other } } #[cfg(not(tarpaulin_include))] impl PartialOrd> for BitSlice where A: BitViewSized, T: BitStore, O: BitOrder, { #[inline] fn partial_cmp(&self, other: &BitArray) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl PartialOrd for BitArray where A: BitViewSized, O: BitOrder, Rhs: ?Sized, BitSlice: PartialOrd, { #[inline] fn partial_cmp(&self, other: &Rhs) -> Option { self.as_bitslice().partial_cmp(other) } } #[cfg(not(tarpaulin_include))] impl AsRef> for BitArray where A: BitViewSized, O: 
BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl AsMut> for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn as_mut(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } #[cfg(not(tarpaulin_include))] impl From for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn from(data: A) -> Self { Self::new(data) } } impl TryFrom<&BitSlice> for BitArray where A: BitViewSized, O: BitOrder, { type Error = TryFromBitSliceError; #[inline] fn try_from(src: &BitSlice) -> Result { src.try_conv::<&Self>().map(|this| this.clone()) } } impl TryFrom<&BitSlice> for &BitArray where A: BitViewSized, O: BitOrder, { type Error = TryFromBitSliceError; #[inline] fn try_from(src: &BitSlice) -> Result { TryFromBitSliceError::new::(src).map(|()| unsafe { &*src .as_bitspan() .address() .to_const() .cast::>() }) } } impl TryFrom<&mut BitSlice> for &mut BitArray where A: BitViewSized, O: BitOrder, { type Error = TryFromBitSliceError; #[inline] fn try_from(src: &mut BitSlice) -> Result { TryFromBitSliceError::new::(src).map(|()| unsafe { &mut *src .as_mut_bitspan() .address() .to_mut() .cast::>() }) } } impl Default for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn default() -> Self { Self::ZERO } } impl Debug for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.as_bitspan().render(fmt, "Array", None)?; fmt.write_str(" ")?; Display::fmt(self, fmt) } } easy_fmt! { impl Binary impl Display impl LowerHex impl Octal impl UpperHex for BitArray } #[cfg(not(tarpaulin_include))] impl Hash for BitArray where A: BitViewSized, O: BitOrder, { #[inline] fn hash(&self, hasher: &mut H) where H: Hasher { self.as_bitslice().hash(hasher); } } impl Copy for BitArray where O: BitOrder, A: BitViewSized + Copy, { } impl Unpin for BitArray where A: BitViewSized, O: BitOrder, { } #[repr(transparent)] #[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] #[doc = include_str!("../../doc/array/TryFromBitSliceError.md")] pub struct TryFromBitSliceError(InnerError); impl TryFromBitSliceError { /// Checks whether a bit-slice can be viewed as a bit-array. #[inline] fn new(bits: &BitSlice) -> Result<(), Self> where O: BitOrder, A: BitViewSized, { InnerError::new::(bits).map_err(Self) } } impl Debug for TryFromBitSliceError { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.write_str("TryFromBitSliceError::")?; match self.0 { InnerError::UnequalLen { actual, expected } => { write!(fmt, "UnequalLen({} != {})", actual, expected) }, InnerError::Misaligned => fmt.write_str("Misaligned"), } } } #[cfg(not(tarpaulin_include))] impl Display for TryFromBitSliceError { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { match self.0 { InnerError::UnequalLen { actual, expected } => write!( fmt, "bit-slice with length {} cannot be viewed as bit-array with \ length {}", actual, expected, ), InnerError::Misaligned => fmt.write_str( "a bit-slice must begin at the front edge of a storage element \ in order to be viewed as a bit-array", ), } } } #[cfg(feature = "std")] impl std::error::Error for TryFromBitSliceError {} /// Opaque error type for bit-slice to bit-array view conversions. #[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] enum InnerError { /// A bit-slice did not match the length of the destination bit-array. UnequalLen { /// The length of the bit-slice that produced this error. actual: usize, /// The length of the destination bit-array type. 
expected: usize, }, /// A bit-slice did not begin at `BitIdx::MIN`. Misaligned, } impl InnerError { /// Checks whether a bit-slice is suitable to view as a bit-array. #[inline] fn new(bits: &BitSlice) -> Result<(), Self> where O: BitOrder, A: BitViewSized, { let bitspan = bits.as_bitspan(); let actual = bitspan.len(); let expected = mem::bits_of::(); if actual != expected { return Err(Self::UnequalLen { actual, expected }); } if bitspan.head() != BitIdx::<::Mem>::MIN { return Err(Self::Misaligned); } Ok(()) } } bitvec-1.0.1/src/array.rs000064400000000000000000000044521046102023000133570ustar 00000000000000#![doc = include_str!("../doc/array.md")] use core::marker::PhantomData; use crate::{ mem, order::{ BitOrder, Lsb0, }, slice::BitSlice, view::BitViewSized, }; mod api; mod iter; mod ops; mod tests; mod traits; pub use self::iter::IntoIter; #[repr(transparent)] #[doc = include_str!("../doc/array/BitArray.md")] pub struct BitArray where A: BitViewSized, O: BitOrder, { /// The ordering of bits within an `A::Store` element. pub _ord: PhantomData, /// The wrapped data buffer. pub data: A, } impl BitArray where A: BitViewSized, O: BitOrder, { /// A bit-array with all bits initialized to zero. pub const ZERO: Self = Self { _ord: PhantomData, data: A::ZERO, }; /// Wraps an existing buffer as a bit-array. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let data = [0u16, 1, 2, 3]; /// let bits = BitArray::<_, Msb0>::new(data); /// assert_eq!(bits.len(), 64); /// ``` #[inline] pub fn new(data: A) -> Self { Self { data, ..Self::ZERO } } /// Removes the bit-array wrapper, returning the contained buffer. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bitarr![0; 30]; /// let native: [usize; 1] = bits.into_inner(); /// ``` #[inline] pub fn into_inner(self) -> A { self.data } /// Explicitly views the bit-array as a bit-slice. #[inline] pub fn as_bitslice(&self) -> &BitSlice { self.data.view_bits::() } /// Explicitly views the bit-array as a mutable bit-slice. #[inline] pub fn as_mut_bitslice(&mut self) -> &mut BitSlice { self.data.view_bits_mut::() } /// Views the bit-array as a slice of its underlying memory elements. #[inline] pub fn as_raw_slice(&self) -> &[A::Store] { self.data.as_raw_slice() } /// Views the bit-array as a mutable slice of its underlying memory /// elements. #[inline] pub fn as_raw_mut_slice(&mut self) -> &mut [A::Store] { self.data.as_raw_mut_slice() } /// Gets the length (in bits) of the bit-array. /// /// This method is a compile-time constant. #[inline] pub fn len(&self) -> usize { mem::bits_of::() } /// Tests whether the array is empty. /// /// This method is a compile-time constant. #[inline] pub fn is_empty(&self) -> bool { mem::bits_of::() == 0 } } bitvec-1.0.1/src/boxed/api.rs000064400000000000000000000075731046102023000141220ustar 00000000000000//! Port of the `Box<[T]>` inherent API. use core::mem; use tap::Tap; use super::BitBox; use crate::{ order::BitOrder, ptr::BitSpan, slice::BitSlice, store::BitStore, vec::BitVec, }; impl BitBox where T: BitStore, O: BitOrder, { /// Constructs a bit-box from a raw bit-slice pointer. /// /// This converts a `*mut BitSlice` pointer that had previously been /// produced by either [`::into_raw()`] or [`::leak()`] and restores the /// bit-box containing it. 
/// /// ## Original /// /// [`Box::from_raw`](alloc::boxed::Box::from_raw) /// /// ## Safety /// /// You must only call this function on pointers produced by leaking a prior /// `BitBox`; you may not modify the value of a pointer returned by /// [`::into_raw()`], nor may you conjure pointer values of your own. Doing /// so will corrupt the allocator state. /// /// You must only call this function on any given leaked pointer at most /// once. Not calling it at all will merely render the allocated memory /// unreachable for the duration of the program runtime, a normal (and safe) /// memory leak. Calling it once restores ordinary functionality, and /// ensures ordinary destruction at or before program termination. However, /// calling it more than once on the same pointer will introduce data races, /// use-after-free, and/or double-free errors. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bb = bitbox![0; 80]; /// let ptr: *mut BitSlice = BitBox::into_raw(bb); /// let bb = unsafe { BitBox::from_raw(ptr) }; /// // unsafe { BitBox::from_raw(ptr) }; // UAF crash! /// ``` /// /// [`::into_raw()`]: Self::into_raw /// [`::leak()`]: Self::leak #[inline] pub unsafe fn from_raw(raw: *mut BitSlice) -> Self { Self { bitspan: BitSpan::from_bitslice_ptr_mut(raw), } } /// Consumes the bit-box, returning a raw bit-slice pointer. /// /// Bit-slice pointers are always correctly encoded and non-null. The /// referent region is dereferenceäble *as a `BitSlice` for the remainder of /// the program, or until it is first passed to [`::from_raw()`], whichever /// comes first. Once the pointer is first passed to `::from_raw()`, all /// copies of that pointer become invalid to dereference. /// /// ## Original /// /// [`Box::into_raw`](alloc::boxed::Box::into_raw) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bb = bitbox![0; 80]; /// let ptr = BitBox::into_raw(bb); /// let bb = unsafe { BitBox::from_raw(ptr) }; /// ``` /// /// You **may not** deällocate pointers produced by this function through /// any other means. /// /// [`::from_raw()`]: Self::from_raw #[inline] pub fn into_raw(this: Self) -> *mut BitSlice { Self::leak(this) } /// Deliberately leaks the allocated memory, returning an /// `&'static mut BitSlice` reference. /// /// This differs from [`::into_raw()`] in that the reference is safe to use /// and can be tracked by the Rust borrow-checking system. Like the /// bit-slice pointer produced by `::into_raw()`, this reference can be /// un-leaked by passing it into [`::from_raw()`] to reclaim the memory. 
/// /// ## Original /// /// [`Box::leak`](alloc::boxed::Box::leak) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bb = bitbox![0; 80]; /// let static_ref: &'static mut BitSlice = BitBox::leak(bb); /// /// static_ref.set(0, true); /// assert!(static_ref[0]); /// let _ = unsafe { /// BitBox::from_raw(static_ref) /// }; /// ``` /// /// [`::from_raw()`]: Self::from_raw /// [`::into_raw()`]: Self::into_raw #[inline] pub fn leak<'a>(this: Self) -> &'a mut BitSlice where T: 'a { unsafe { this.bitspan.into_bitslice_mut() }.tap(|_| mem::forget(this)) } #[inline] #[doc(hidden)] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.into_bitvec()` instead"] pub fn into_vec(self) -> BitVec { self.into_bitvec() } } bitvec-1.0.1/src/boxed/iter.rs000064400000000000000000000111401046102023000142750ustar 00000000000000#![doc = include_str!("../../doc/boxed/iter.md")] use core::{ fmt::{ self, Debug, Formatter, }, iter::FusedIterator, ops::Range, }; use super::BitBox; use crate::{ order::{ BitOrder, Lsb0, }, slice::BitSlice, store::BitStore, }; /// [Original](alloc::vec::IntoIter) impl IntoIterator for BitBox where T: BitStore, O: BitOrder, { type IntoIter = IntoIter; type Item = bool; #[inline] fn into_iter(self) -> Self::IntoIter { IntoIter::new(self) } } /** An iterator over a `BitBox`. ## Original [`vec::IntoIter`](alloc::vec::IntoIter) **/ pub struct IntoIter where T: BitStore, O: BitOrder, { /// The original `BitBox`, kept so it can correctly drop. _buf: BitBox, /// A range of indices yet to be iterated. // TODO(myrrlyn): Race this against `BitPtrRange`. iter: Range, } impl IntoIter where T: BitStore, O: BitOrder, { /// Wraps a bit-array in an iterator view. This is irreversible. #[inline] fn new(this: BitBox) -> Self { let iter = 0 .. this.len(); Self { _buf: this, iter } } /// Views the remaining unyielded bits as a bit-slice. /// /// ## Original /// /// [`IntoIter::as_slice`](alloc::vec::IntoIter::as_slice) #[inline] pub fn as_bitslice(&self) -> &BitSlice { // While the memory is never actually deïnitialized, this is still a // good habit to do. unsafe { self._buf .as_bitptr() .add(self.iter.start) .span_unchecked(self.iter.len()) .into_bitslice_ref() } } #[inline] #[doc(hidden)] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_slice(&self) -> &BitSlice { self.as_bitslice() } /// Views the remaining unyielded bits as a mutable bit-slice. 
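	///
	/// Mutations through this view affect the bits that later calls to
	/// `next` and `next_back` will yield. A minimal sketch:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut iter = bitbox![0, 1, 1].into_iter();
	/// assert_eq!(iter.next(), Some(false));
	/// iter.as_mut_bitslice().set(0, false);
	/// assert_eq!(iter.next(), Some(false));
	/// ```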
/// /// ## Original /// /// [`IntoIter::as_mut_slice`](alloc::vec::IntoIter::as_mut_slice) #[inline] pub fn as_mut_bitslice(&mut self) -> &mut BitSlice { unsafe { self._buf .as_mut_bitptr() .add(self.iter.start) .span_unchecked(self.iter.len()) .into_bitslice_mut() } } #[inline] #[doc(hidden)] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_mut_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_mut_slice(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-AsRef%3C%5BT%5D%3E) #[cfg(not(tarpaulin_include))] impl AsRef> for IntoIter where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl Clone for IntoIter where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { Self { _buf: self._buf.clone(), iter: self.iter.clone(), } } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-Debug) #[cfg(not(tarpaulin_include))] impl Debug for IntoIter where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_tuple("IntoIter") .field(&self.as_bitslice()) .finish() } } impl Iterator for IntoIter where T: BitStore, O: BitOrder, { type Item = bool; easy_iter!(); #[inline] fn next(&mut self) -> Option { self.iter .next() .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() }) } #[inline] fn nth(&mut self, n: usize) -> Option { self.iter .nth(n) .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() }) } } impl DoubleEndedIterator for IntoIter where T: BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { self.iter .next_back() .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() }) } #[inline] fn nth_back(&mut self, n: usize) -> Option { self.iter .nth_back(n) .map(|idx| unsafe { self._buf.as_bitptr().add(idx).read() }) } } impl ExactSizeIterator for IntoIter where T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IntoIter where T: BitStore, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-Send) // #[allow(clippy::non_send_fields_in_send_ty)] unsafe impl Send for IntoIter where T: BitStore + Sync, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.IntoIter.html#impl-Sync) unsafe impl Sync for IntoIter where T: BitStore + Sync, O: BitOrder, { } bitvec-1.0.1/src/boxed/ops.rs000064400000000000000000000102431046102023000141360ustar 00000000000000//! Operator trait implementations for boxed bit-slices. 
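//!
//! A minimal usage sketch (these operators consume the bit-box and mutate it
//! in place through its `BitSlice` view):
//!
//! ```rust
//! use bitvec::prelude::*;
//!
//! let a = bitbox![0, 0, 1, 1];
//! let b = bitbox![0, 1, 0, 1];
//! assert_eq!(a & b, bitbox![0, 0, 0, 1]);
//! ```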
use core::{ mem::ManuallyDrop, ops::{ BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Deref, DerefMut, Index, IndexMut, Not, }, }; use super::BitBox; use crate::{ order::BitOrder, slice::BitSlice, store::BitStore, }; #[cfg(not(tarpaulin_include))] impl BitAndAssign> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitand_assign(&mut self, rhs: BitBox) { *self &= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitAndAssign<&BitBox> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitand_assign(&mut self, rhs: &BitBox) { *self &= rhs.as_bitslice() } } impl BitAnd for BitBox where T: BitStore, O: BitOrder, BitSlice: BitAndAssign, { type Output = Self; #[inline] fn bitand(mut self, rhs: Rhs) -> Self::Output { self &= rhs; self } } impl BitAndAssign for BitBox where T: BitStore, O: BitOrder, BitSlice: BitAndAssign, { #[inline] fn bitand_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() &= rhs; } } #[cfg(not(tarpaulin_include))] impl BitOrAssign> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitor_assign(&mut self, rhs: BitBox) { *self |= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitOrAssign<&BitBox> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitor_assign(&mut self, rhs: &BitBox) { *self |= rhs.as_bitslice() } } impl BitOr for BitBox where T: BitStore, O: BitOrder, BitSlice: BitOrAssign, { type Output = Self; #[inline] fn bitor(mut self, rhs: Rhs) -> Self::Output { self |= rhs; self } } impl BitOrAssign for BitBox where T: BitStore, O: BitOrder, BitSlice: BitOrAssign, { #[inline] fn bitor_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() |= rhs; } } #[cfg(not(tarpaulin_include))] impl BitXorAssign> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitxor_assign(&mut self, rhs: BitBox) { *self ^= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitXorAssign<&BitBox> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitxor_assign(&mut self, rhs: &BitBox) { *self ^= rhs.as_bitslice() } } impl BitXor for BitBox where T: BitStore, O: BitOrder, BitSlice: BitXorAssign, { type Output = Self; #[inline] fn bitxor(mut self, rhs: Rhs) -> Self::Output { self ^= rhs; self } } impl BitXorAssign for BitBox where T: BitStore, O: BitOrder, BitSlice: BitXorAssign, { #[inline] fn bitxor_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() ^= rhs; } } impl Deref for BitBox where T: BitStore, O: BitOrder, { type Target = BitSlice; #[inline] fn deref(&self) -> &Self::Target { self.as_bitslice() } } impl DerefMut for BitBox where T: BitStore, O: BitOrder, { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_bitslice() } } impl Drop for BitBox where T: BitStore, O: BitOrder, { #[inline] fn drop(&mut self) { self.with_box(|b| unsafe { ManuallyDrop::drop(b) }) } } #[cfg(not(tarpaulin_include))] impl Index for BitBox where T: BitStore, O: BitOrder, BitSlice: Index, { type Output = as Index>::Output; #[inline] fn index(&self, index: Idx) -> &Self::Output { &self.as_bitslice()[index] } } #[cfg(not(tarpaulin_include))] impl IndexMut for BitBox where T: BitStore, O: BitOrder, BitSlice: IndexMut, { #[inline] fn index_mut(&mut self, index: Idx) -> &mut Self::Output { &mut self.as_mut_bitslice()[index] } } impl Not for BitBox where T: BitStore, O: BitOrder, { type Output = Self; #[inline] fn not(mut self) -> Self::Output { for elem in self.as_raw_mut_slice().iter_mut() { elem.store_value(!elem.load_value()); } self } } 
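// A closing note on the `Not` implementation above, with a sketch in comment
// form (an illustrative addition):
//
//     let inverted = !bitbox![0, 1, 0];
//     assert_eq!(inverted, bitbox![1, 0, 1]);
//
// Because `Not` inverts every *storage element*, dead bits beyond the live
// region also flip; they remain unobservable through the bit-slice view.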
bitvec-1.0.1/src/boxed/tests.rs000064400000000000000000000073471046102023000145120ustar 00000000000000//! Unit tests for boxed bit-slices. #![cfg(test)] #[cfg(not(feature = "std"))] use alloc::vec; use alloc::{ borrow::Cow, boxed::Box, }; use core::{ any, borrow::{ Borrow, BorrowMut, }, convert::TryFrom, fmt::{ Debug, Display, Pointer, }, hash::Hash, iter::{ FromIterator, FusedIterator, }, ops::{ Deref, DerefMut, }, }; use static_assertions::*; use crate::prelude::*; #[test] fn inherents() { let bits = bits![0, 1, 0, 0, 1]; let mut boxed = BitBox::from_bitslice(&bits[1 ..]); assert_eq!(boxed, bits[1 ..]); assert_eq!(boxed.bitspan.head().into_inner(), 1); boxed.force_align(); assert_eq!(boxed.bitspan.head().into_inner(), 0); let data = vec![0u8, 1, 2, 3].into_boxed_slice(); let ptr = data.as_ptr(); let boxed: BitBox = BitBox::from_boxed_slice(data); assert_eq!(boxed.len(), 32); assert_eq!(boxed.count_ones(), 4); let data = boxed.into_boxed_slice(); assert_eq!(data.as_ptr(), ptr); let bv = BitBox::::from_boxed_slice(data).into_bitvec(); assert_eq!(bv.len(), 32); assert_eq!(bitbox![0, 1, 0, 0, 1].as_bitslice(), bits![0, 1, 0, 0, 1]); assert_eq!(bitbox![0; 5].as_mut_bitslice(), bits![0; 5]); let mut bb = bitbox![0; 5]; bb.fill_uninitialized(true); assert_eq!(bb.as_raw_slice(), &[!0usize << 5][..]); let ptr = BitBox::into_raw(bb); let bb = unsafe { BitBox::from_raw(ptr) }; assert_eq!(ptr as *const BitSlice, bb.as_bitslice() as *const BitSlice); } #[test] fn iter() { let bb = bitbox![0, 1, 1, 0, 0, 1]; let mut iter = bb.into_iter(); assert_eq!(iter.len(), 6); assert!(!iter.next().unwrap()); assert_eq!(iter.as_bitslice(), bits![1, 1, 0, 0, 1]); assert!(iter.next_back().unwrap()); assert_eq!(iter.as_mut_bitslice(), bits![1, 1, 0, 0]); assert!(iter.nth(1).unwrap()); assert!(!iter.nth_back(1).unwrap()); assert!(iter.next().is_none()); } #[test] fn traits() { assert_impl_all!( BitBox: AsMut, AsRef, Borrow, BorrowMut, Clone, Debug, Default, Deref, DerefMut, Display, Drop, Eq, From<&'static BitSlice>, From, From>, From>, From, FromIterator, Hash, Ord, PartialEq, PartialOrd, Pointer, TryFrom>, Unpin, ); assert_impl_all!( super::IntoIter: AsRef, Clone, Debug, DoubleEndedIterator, ExactSizeIterator, FusedIterator, Send, Sync, ); } #[test] fn conversions() { let bits = bits![0, 1, 0, 0, 1]; assert_eq!(BitBox::from(bits), bits); let arr: BitArray = BitArray::new(rand::random()); assert_eq!(BitBox::from(arr), arr); let boxed = Box::new(5usize); assert_eq!( BitBox::<_, Lsb0>::from(boxed.clone()), boxed.view_bits::() ); let cow = Cow::Borrowed([0usize, 1].view_bits::()); assert_eq!(BitBox::from(cow.clone()), &*cow); assert_eq!(BitBox::from(bitvec![0, 1]), bits![0, 1]); let boxed: Box<[usize]> = BitBox::from(cow.clone()).into(); assert_eq!(boxed[..], [0usize, 1][..]); assert!(BitBox::<_, Lsb0>::try_from(boxed).is_ok()); assert!(BitBox::::default().is_empty()); } #[test] fn ops() { let a = bitbox![0, 0, 1, 1]; let b = bitbox![0, 1, 0, 1]; let c = a.clone() & b.clone(); assert_eq!(c, bitbox![0, 0, 0, 1]); let d = a.clone() | b.clone(); assert_eq!(d, bitbox![0, 1, 1, 1]); let e = a.clone() ^ b; assert_eq!(e, bitbox![0, 1, 1, 0]); let mut f = !e; assert_eq!(f, bitbox![1, 0, 0, 1]); let _: &BitSlice = &a; let _: &mut BitSlice = &mut f; } #[test] fn format() { #[cfg(not(feature = "std"))] use alloc::format; let render = format!("{:?}", bitbox![0, 1, 0, 0, 1]); assert!( render.starts_with(&format!( "BitBox", any::type_name::(), )) ); assert!(render.ends_with("[0, 1, 0, 0, 1]")); } 
bitvec-1.0.1/src/boxed/traits.rs000064400000000000000000000146641046102023000146560ustar 00000000000000//! General trait implementations for boxed bit-slices. use alloc::{ borrow::Cow, boxed::Box, }; use core::{ borrow::{ Borrow, BorrowMut, }, cmp, convert::TryFrom, fmt::{ self, Debug, Display, Formatter, }, hash::{ Hash, Hasher, }, iter::FromIterator, }; use tap::Pipe; use super::BitBox; use crate::{ array::BitArray, order::BitOrder, slice::BitSlice, store::BitStore, vec::BitVec, view::BitViewSized, }; #[cfg(not(tarpaulin_include))] impl Borrow> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn borrow(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BorrowMut> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn borrow_mut(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } #[cfg(not(tarpaulin_include))] impl Clone for BitBox where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { self.as_bitslice().pipe(Self::from_bitslice) } } #[cfg(not(tarpaulin_include))] impl Eq for BitBox where T: BitStore, O: BitOrder, { } #[cfg(not(tarpaulin_include))] impl Ord for BitBox where T: BitStore, O: BitOrder, { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_bitslice().cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl PartialEq> for BitSlice where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn eq(&self, other: &BitBox) -> bool { self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq> for &BitSlice where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn eq(&self, other: &BitBox) -> bool { *self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq> for &mut BitSlice where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn eq(&self, other: &BitBox) -> bool { **self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq for BitBox where T: BitStore, O: BitOrder, Rhs: ?Sized + PartialEq>, { #[inline] fn eq(&self, other: &Rhs) -> bool { other == self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialOrd> for BitSlice where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn partial_cmp(&self, other: &BitBox) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl PartialOrd for BitBox where T: BitStore, O: BitOrder, Rhs: ?Sized + PartialOrd>, { #[inline] fn partial_cmp(&self, other: &Rhs) -> Option { other.partial_cmp(self.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl<'a, O1, O2, T1, T2> PartialOrd> for &'a BitSlice where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn partial_cmp(&self, other: &BitBox) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl<'a, O1, O2, T1, T2> PartialOrd> for &'a mut BitSlice where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn partial_cmp(&self, other: &BitBox) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl AsRef> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl AsMut> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn as_mut(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } impl From<&'_ BitSlice> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn from(slice: &BitSlice) -> Self { slice.pipe(Self::from_bitslice) } } impl From> for BitBox where A: 
BitViewSized, O: BitOrder, { #[inline] fn from(array: BitArray) -> Self { array.as_bitslice().pipe(Self::from_bitslice) } } impl From> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn from(elem: Box) -> Self { unsafe { Box::from_raw(Box::into_raw(elem).cast::<[T; 1]>() as *mut [T]) } .pipe(Self::from_boxed_slice) } } impl<'a, T, O> From>> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn from(cow: Cow<'a, BitSlice>) -> Self { cow.into_owned().into_boxed_bitslice() } } impl From> for BitBox where T: BitStore, O: BitOrder, { #[inline] fn from(bv: BitVec) -> Self { bv.into_boxed_bitslice() } } impl From> for Box<[T]> where T: BitStore, O: BitOrder, { #[inline] fn from(bb: BitBox) -> Self { bb.into_boxed_slice() } } impl TryFrom> for BitBox where T: BitStore, O: BitOrder, { type Error = Box<[T]>; #[inline] fn try_from(boxed: Box<[T]>) -> Result { Self::try_from_boxed_slice(boxed) } } impl Default for BitBox where T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { Self::from_bitslice(BitSlice::::empty()) } } impl Debug for BitBox where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.bitspan.render(fmt, "Box", None)?; fmt.write_str(" ")?; Display::fmt(self, fmt) } } easy_fmt! { impl Binary impl Display impl LowerHex impl Octal impl Pointer impl UpperHex for BitBox } #[cfg(not(tarpaulin_include))] impl FromIterator for BitBox where T: BitStore, O: BitOrder, BitVec: FromIterator, { #[inline] fn from_iter(iter: II) -> Self where II: IntoIterator { BitVec::from_iter(iter).into_boxed_bitslice() } } #[cfg(not(tarpaulin_include))] impl Hash for BitBox where T: BitStore, O: BitOrder, { #[inline] fn hash(&self, state: &mut H) where H: Hasher { self.as_bitslice().hash(state) } } unsafe impl Send for BitBox where T: BitStore, O: BitOrder, { } unsafe impl Sync for BitBox where T: BitStore, O: BitOrder, { } impl Unpin for BitBox where T: BitStore, O: BitOrder, { } bitvec-1.0.1/src/boxed.rs000064400000000000000000000262141046102023000133420ustar 00000000000000#![cfg(feature = "alloc")] #![doc = include_str!("../doc/boxed.md")] use alloc::boxed::Box; use core::{ mem::ManuallyDrop, slice, }; use tap::{ Pipe, Tap, }; use wyz::comu::Mut; use crate::{ index::BitIdx, mem, order::{ BitOrder, Lsb0, }, ptr::{ BitPtr, BitSpan, }, slice::BitSlice, store::BitStore, vec::BitVec, view::BitView, }; mod api; mod iter; mod ops; mod tests; mod traits; pub use self::iter::IntoIter; #[repr(transparent)] #[doc = include_str!("../doc/boxed/BitBox.md")] pub struct BitBox where T: BitStore, O: BitOrder, { /// Describes the region that the box owns. bitspan: BitSpan, } impl BitBox where T: BitStore, O: BitOrder, { /// Copies a bit-slice region into a new bit-box allocation. /// /// The referent memory is `memcpy`d into the heap, exactly preserving the /// original bit-slice’s memory layout and contents. This allows the /// function to run as fast as possible, but misaligned source bit-slices /// may result in decreased performance or unexpected layout behavior during /// use. You can use [`.force_align()`] to ensure that the referent /// bit-slice is aligned in memory. /// /// ## Notes /// /// Bits in the allocation of the source bit-slice, but outside its own /// description of that memory, have an **unspecified**, but initialized, /// value. You may not rely on their contents in any way, and you *should* /// call [`.force_align()`] and/or [`.fill_uninitialized()`] if you are /// going to inspect the underlying memory of the new allocation. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let data = 0b0101_1011u8; /// let bits = data.view_bits::(); /// let bb = BitBox::from_bitslice(&bits[2 ..]); /// assert_eq!(bb, bits[2 ..]); /// ``` /// /// [`.fill_uninitialized()`]: Self::fill_uninitialized /// [`.force_align()`]: Self::force_align #[inline] pub fn from_bitslice(slice: &BitSlice) -> Self { BitVec::from_bitslice(slice).into_boxed_bitslice() } /// Converts a `Box<[T]>` into a `BitBox`, in place. /// /// This does not affect the referent buffer, and only transforms the /// handle. /// /// ## Panics /// /// This panics if the provided `boxed` slice is too long to view as a /// bit-slice region. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let boxed: Box<[u8]> = Box::new([0; 40]); /// let addr = boxed.as_ptr(); /// let bb = BitBox::::from_boxed_slice(boxed); /// assert_eq!(bb, bits![0; 320]); /// assert_eq!(addr, bb.as_raw_slice().as_ptr()); /// ``` #[inline] pub fn from_boxed_slice(boxed: Box<[T]>) -> Self { Self::try_from_boxed_slice(boxed) .expect("slice was too long to be converted into a `BitBox`") } /// Attempts to convert an ordinary boxed slice into a boxed bit-slice. /// /// This does not perform a copy or reällocation; it only attempts to /// transform the handle. Because `Box<[T]>` can be longer than `BitBox`es, /// it may fail, and will return the original handle if it does. /// /// It is unlikely that you have a single `Box<[_]>` that is too large to /// convert into a bit-box. You can find the length restrictions as the /// bit-slice associated constants [`MAX_BITS`] and [`MAX_ELTS`]. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let boxed: Box<[u8]> = Box::new([0u8; 40]); /// let addr = boxed.as_ptr(); /// let bb = BitBox::::try_from_boxed_slice(boxed).unwrap(); /// assert_eq!(bb, bits![0; 320]); /// assert_eq!(addr, bb.as_raw_slice().as_ptr()); /// ``` /// /// [`MAX_BITS`]: crate::slice::BitSlice::MAX_BITS /// [`MAX_ELTS`]: crate::slice::BitSlice::MAX_ELTS #[inline] pub fn try_from_boxed_slice(boxed: Box<[T]>) -> Result> { let mut boxed = ManuallyDrop::new(boxed); BitPtr::from_mut_slice(boxed.as_mut()) .span(boxed.len() * mem::bits_of::()) .map(|bitspan| Self { bitspan }) .map_err(|_| ManuallyDrop::into_inner(boxed)) } /// Converts the bit-box back into an ordinary boxed element slice. /// /// This does not touch the allocator or the buffer contents; it is purely a /// handle transform. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bb = bitbox![0; 5]; /// let addr = bb.as_raw_slice().as_ptr(); /// let boxed = bb.into_boxed_slice(); /// assert_eq!(boxed[..], [0][..]); /// assert_eq!(addr, boxed.as_ptr()); /// ``` #[inline] pub fn into_boxed_slice(self) -> Box<[T]> { self.pipe(ManuallyDrop::new) .as_raw_mut_slice() .pipe(|slice| unsafe { Box::from_raw(slice) }) } /// Converts the bit-box into a bit-vector. /// /// This uses the Rust allocator API, and does not guarantee whether or not /// a reällocation occurs internally. /// /// The resulting bit-vector can be converted back into a bit-box via /// [`BitBox::into_boxed_bitslice`][0]. /// /// ## Original /// /// [`slice::into_vec`](https://doc.rust-lang.org/std/primitive.slice.html#method.into_vec) /// /// ## API Differences /// /// The original function is implemented in an `impl [T]` block, despite /// taking a `Box<[T]>` receiver. Since `BitBox` cannot be used as an /// explicit receiver outside its own `impl` blocks, the method is relocated /// here. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bb = bitbox![0, 1, 0, 0, 1]; /// let bv = bb.into_bitvec(); /// /// assert_eq!(bv, bitvec![0, 1, 0, 0, 1]); /// ``` /// /// [0]: crate::vec::BitVec::into_boxed_bitslice #[inline] pub fn into_bitvec(self) -> BitVec { let bitspan = self.bitspan; /* This pipeline converts the underlying `Box<[T]>` into a `Vec`, * then converts that into a `BitVec`. This handles any changes that * may occur in the allocator. Once done, the original head/span * values need to be written into the `BitVec`, since the conversion * from `Vec` always fully spans the live elements. */ self.pipe(ManuallyDrop::new) .with_box(|b| unsafe { ManuallyDrop::take(b) }) .into_vec() .pipe(BitVec::from_vec) .tap_mut(|bv| unsafe { // len first! Otherwise, the descriptor might briefly go out of // bounds. bv.set_len_unchecked(bitspan.len()); bv.set_head(bitspan.head()); }) } /// Explicitly views the bit-box as a bit-slice. #[inline] pub fn as_bitslice(&self) -> &BitSlice { unsafe { self.bitspan.into_bitslice_ref() } } /// Explicitly views the bit-box as a mutable bit-slice. #[inline] pub fn as_mut_bitslice(&mut self) -> &mut BitSlice { unsafe { self.bitspan.into_bitslice_mut() } } /// Views the bit-box as a slice of its underlying memory elements. /// /// Because bit-boxes uniquely own their buffer, they can safely view the /// underlying buffer without dealing with contending neighbors. #[inline] pub fn as_raw_slice(&self) -> &[T] { let (data, len) = (self.bitspan.address().to_const(), self.bitspan.elements()); unsafe { slice::from_raw_parts(data, len) } } /// Views the bit-box as a mutable slice of its underlying memory elements. /// /// Because bit-boxes uniquely own their buffer, they can safely view the /// underlying buffer without dealing with contending neighbors. #[inline] pub fn as_raw_mut_slice(&mut self) -> &mut [T] { let (data, len) = (self.bitspan.address().to_mut(), self.bitspan.elements()); unsafe { slice::from_raw_parts_mut(data, len) } } /// Sets the unused bits outside the `BitBox` buffer to a fixed value. /// /// This method modifies all bits that the allocated buffer owns but which /// are outside the `self.as_bitslice()` view. `bitvec` guarantees that all /// owned bits are initialized to *some* value, but does not guarantee /// *which* value. This method can be used to make all such unused bits have /// a known value after the call, so that viewing the underlying memory /// directly has consistent results. /// /// Note that the crate implementation guarantees that all bits owned by its /// handles are stably initialized according to the language and compiler /// rules! `bitvec` will never cause UB by using uninitialized memory. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = 0b1011_0101u8.view_bits::(); /// let mut bb = BitBox::from_bitslice(&bits[2 .. 6]); /// assert_eq!(bb.count_ones(), 3); /// // Remember, the two bits on each edge are unspecified, and cannot be /// // observed! They must be masked away for the test to be meaningful. 
/// assert_eq!(bb.as_raw_slice()[0] & 0x3C, 0b00_1101_00u8); /// /// bb.fill_uninitialized(false); /// assert_eq!(bb.as_raw_slice(), &[0b00_1101_00u8]); /// /// bb.fill_uninitialized(true); /// assert_eq!(bb.as_raw_slice(), &[0b11_1101_11u8]); /// ``` #[inline] pub fn fill_uninitialized(&mut self, value: bool) { let (_, head, bits) = self.bitspan.raw_parts(); let head = head.into_inner() as usize; let tail = head + bits; let all = self.as_raw_mut_slice().view_bits_mut::(); unsafe { all.get_unchecked_mut(.. head).fill(value); all.get_unchecked_mut(tail ..).fill(value); } } /// Ensures that the allocated buffer has no dead bits between the start of /// the buffer and the start of the live bit-slice. /// /// This is useful for ensuring a consistent memory layout in bit-boxes /// created by cloning an arbitrary bit-slice into the heap. As bit-slices /// can begin and end anywhere in memory, the [`::from_bitslice()`] function /// does not attempt to normalize them and only does a fast element-wise /// copy when creating the bit-box. /// /// The value of dead bits that are in the allocation but not in the live /// region are *initialized*, but do not have a *specified* value. After /// calling this method, you should use [`.fill_uninitialized()`] to set the /// excess bits in the buffer to a fixed value. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = &0b10_1101_01u8.view_bits::()[2 .. 6]; /// let mut bb = BitBox::from_bitslice(bits); /// // Remember, the two bits on each edge are unspecified, and cannot be /// // observed! They must be masked away for the test to be meaningful. /// assert_eq!(bb.as_raw_slice()[0] & 0x3C, 0b00_1101_00u8); /// /// bb.force_align(); /// bb.fill_uninitialized(false); /// assert_eq!(bb.as_raw_slice(), &[0b1101_0000u8]); /// ``` /// /// [`::from_bitslice()`]: Self::from_bitslice /// [`.fill_uninitialized()`]: Self::fill_uninitialized #[inline] pub fn force_align(&mut self) { let head = self.bitspan.head(); if head == BitIdx::MIN { return; } let head = head.into_inner() as usize; let last = self.len() + head; unsafe { self.bitspan.set_head(BitIdx::MIN); self.copy_within_unchecked(head .. last, 0); } } /// Permits a function to modify the `Box` backing storage of a `BitBox` /// handle. /// /// This produces a temporary `Box` view of the bit-box’s buffer and allows /// a function to have mutable access to it. After the callback returns, the /// `Box` is written back into `self` and forgotten. #[inline] fn with_box(&mut self, func: F) -> R where F: FnOnce(&mut ManuallyDrop>) -> R { self.as_raw_mut_slice() .pipe(|raw| unsafe { Box::from_raw(raw) }) .pipe(ManuallyDrop::new) .pipe_ref_mut(func) } } bitvec-1.0.1/src/devel.rs000064400000000000000000000045351046102023000133420ustar 00000000000000//! Support utilities for crate development. use core::any::TypeId; use crate::{ order::BitOrder, store::BitStore, }; /// Constructs formatting-trait implementations by delegating. macro_rules! 
easy_fmt { ($(impl $fmt:ident)+ for BitArray) => { $( impl core::fmt::$fmt for $crate::array::BitArray where O: $crate::order::BitOrder, A: $crate::view::BitViewSized, { #[inline] #[cfg(not(tarpaulin_include))] fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { core::fmt::$fmt::fmt(self.as_bitslice(), fmt) } } )+ }; ($(impl $fmt:ident)+ for $this:ident) => { $( impl core::fmt::$fmt for $this where O: $crate::order::BitOrder, T: $crate::store::BitStore, { #[inline] #[cfg(not(tarpaulin_include))] fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { core::fmt::$fmt::fmt(self.as_bitslice(), fmt) } } )+ }; } /// Implements some `Iterator` functions that have boilerplate behavior. macro_rules! easy_iter { () => { #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.len(); (len, Some(len)) } #[inline] fn count(self) -> usize { self.len() } #[inline] fn last(mut self) -> Option { self.next_back() } }; } /// Tests if two `BitOrder` implementors are the same. #[inline] pub fn match_order() -> bool where O: BitOrder, P: BitOrder, { eq_types::() } /// Tests if two `BitStore` implementors are the same. #[inline] pub fn match_store() -> bool where T: BitStore, U: BitStore, { eq_types::() } /// Tests if two `BitSlice` type parameter pairs match each other. #[inline] pub fn match_types() -> bool where O1: BitOrder, T1: BitStore, O2: BitOrder, T2: BitStore, { match_order::() && match_store::() } /// Tests if a type is known to be an unsigned integer. /// /// Returns `true` for `u{8,16,32,64,128,size}` and `false` for all others. #[inline] pub fn is_unsigned() -> bool where T: 'static { eq_types::() || eq_types::() || eq_types::() || eq_types::() || eq_types::() || eq_types::() } /// Tests if two types are identical, even through different names. #[inline] fn eq_types() -> bool where T: 'static, U: 'static, { TypeId::of::() == TypeId::of::() } bitvec-1.0.1/src/domain.rs000064400000000000000000000671421046102023000135150ustar 00000000000000#![doc = include_str!("../doc/domain.md")] use core::{ any, convert::{ TryFrom, TryInto, }, fmt::{ self, Binary, Debug, Display, Formatter, LowerHex, Octal, UpperHex, }, hash::{ Hash, Hasher, }, iter::FusedIterator, marker::PhantomData, }; use tap::{ Conv, Pipe, Tap, }; use wyz::{ comu::{ Address, Const, Mut, Mutability, Reference, Referential, SliceReferential, }, fmt::FmtForward, }; use crate::{ access::BitAccess, index::{ BitEnd, BitIdx, BitMask, }, order::{ BitOrder, Lsb0, }, ptr::BitSpan, slice::BitSlice, store::BitStore, }; #[doc = include_str!("../doc/domain/BitDomain.md")] pub enum BitDomain<'a, M = Const, T = usize, O = Lsb0> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address>: Referential<'a>, Address>: Referential<'a>, { /// Indicates that a bit-slice’s contents are entirely in the interior /// indices of a single memory element. /// /// The contained value is always the bit-slice that created this view. Enclave(Reference<'a, M, BitSlice>), /// Indicates that a bit-slice’s contents touch an element edge. /// /// This splits the bit-slice into three partitions, each of which may be /// empty: two partially-occupied edge elements, with their original type /// status, and one interior span, which is known to not have any other /// aliases derived from the bit-slice that created this view. Region { /// Any bits that partially-fill the first element of the underlying /// storage region. 
/// /// This does not modify its aliasing status, as it will already be /// appropriately marked before this view is constructed. head: Reference<'a, M, BitSlice>, /// Any bits that wholly-fill elements in the interior of the bit-slice. /// /// This is marked as unaliased, because it is statically impossible for /// any other handle derived from the source bit-slice to have /// conflicting access to the region of memory it describes. As such, /// even a bit-slice that was marked as `::Alias` can revert this /// protection on the known-unaliased interior. /// /// Proofs: /// /// - Rust’s `&`/`&mut` exclusion rules universally apply. If a /// reference exists, no other reference has unsynchronized write /// capability. /// - `BitStore::Unalias` only modifies unsynchronized types. `Cell` and /// atomic types unalias to themselves, and retain their original /// behavior. body: Reference<'a, M, BitSlice>, /// Any bits that partially-fill the last element of the underlying /// storage region. /// /// This does not modify its aliasing status, as it will already be /// appropriately marked before this view is constructed. tail: Reference<'a, M, BitSlice>, }, } impl<'a, M, T, O> BitDomain<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address>: Referential<'a>, Address>: Referential<'a>, { /// Attempts to unpack the bit-domain as an [`Enclave`] variant. This is /// just a shorthand for explicit destructuring. /// /// [`Enclave`]: Self::Enclave #[inline] pub fn enclave(self) -> Option>> { match self { Self::Enclave(bits) => Some(bits), _ => None, } } /// Attempts to unpack the bit-domain as a [`Region`] variant. This is just /// a shorthand for explicit destructuring. /// /// [`Region`]: Self::Region #[inline] pub fn region( self, ) -> Option<( Reference<'a, M, BitSlice>, Reference<'a, M, BitSlice>, Reference<'a, M, BitSlice>, )> { match self { Self::Region { head, body, tail } => Some((head, body, tail)), _ => None, } } } impl<'a, M, T, O> Default for BitDomain<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address>: Referential<'a>, Address>: Referential<'a>, Reference<'a, M, BitSlice>: Default, Reference<'a, M, BitSlice>: Default, { #[inline] fn default() -> Self { Self::Region { head: Default::default(), body: Default::default(), tail: Default::default(), } } } impl<'a, M, T, O> Debug for BitDomain<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address>: Referential<'a>, Address>: Referential<'a>, Reference<'a, M, BitSlice>: Debug, Reference<'a, M, BitSlice>: Debug, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!( fmt, "BitDomain::<{} {}, {}>::", M::RENDER, any::type_name::(), any::type_name::(), )?; match self { Self::Enclave(elem) => { fmt.debug_tuple("Enclave").field(elem).finish() }, Self::Region { head, body, tail } => fmt .debug_struct("Region") .field("head", head) .field("body", body) .field("tail", tail) .finish(), } } } #[cfg(not(tarpaulin_include))] impl Clone for BitDomain<'_, Const, T, O> where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { *self } } impl Copy for BitDomain<'_, Const, T, O> where T: BitStore, O: BitOrder, { } #[doc = include_str!("../doc/domain/Domain.md")] pub enum Domain<'a, M = Const, T = usize, O = Lsb0> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address: Referential<'a>, Address: SliceReferential<'a>, { /// Indicates that a bit-slice’s contents are entirely in the interior /// indices of a single memory element. 
/// /// The contained reference is only able to observe the bits governed by the /// generating bit-slice. Other handles to the element may exist, and may /// write to bits outside the range that this reference can observe. Enclave(PartialElement<'a, M, T, O>), /// Indicates that a bit-slice’s contents touch an element edge. /// /// This splits the bit-slice into three partitions, each of which may be /// empty: two partially-occupied edge elements, with their original type /// status, and one interior span, which is known not to have any other /// aliases derived from the bit-slice that created this view. Region { /// The first element in the bit-slice’s underlying storage, if it is /// only partially used. head: Option>, /// All fully-used elements in the bit-slice’s underlying storage. /// /// This is marked as unaliased, because it is statically impossible for /// any other handle derived from the source bit-slice to have /// conflicting access to the region of memory it describes. As such, /// even a bit-slice that was marked as `::Alias` can revert this /// protection on the known-unaliased interior. body: Reference<'a, M, [T::Unalias]>, /// The last element in the bit-slice’s underlying storage, if it is /// only partially used. tail: Option>, }, } impl<'a, M, T, O> Domain<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address: Referential<'a>, Address: SliceReferential<'a>, { /// Attempts to unpack the bit-domain as an [`Enclave`] variant. This is /// just a shorthand for explicit destructuring. /// /// [`Enclave`]: Self::Enclave #[inline] pub fn enclave(self) -> Option> { match self { Self::Enclave(elem) => Some(elem), _ => None, } } /// Attempts to unpack the bit-domain as a [`Region`] variant. This is just /// a shorthand for explicit destructuring. /// /// [`Region`]: Self::Region #[inline] pub fn region( self, ) -> Option<( Option>, Reference<'a, M, [T::Unalias]>, Option>, )> { match self { Self::Region { head, body, tail } => Some((head, body, tail)), _ => None, } } /// Converts the element-wise `Domain` into the equivalent `BitDomain`. /// /// This transform replaces each memory reference with an equivalent /// `BitSlice` reference. #[inline] pub fn into_bit_domain(self) -> BitDomain<'a, M, T, O> where Address>: Referential<'a>, Address>: Referential<'a>, Reference<'a, M, BitSlice>: Default, Reference<'a, M, BitSlice>: TryFrom>, { match self { Self::Enclave(elem) => BitDomain::Enclave(elem.into_bitslice()), Self::Region { head, body, tail } => BitDomain::Region { head: head.map_or_else( Default::default, PartialElement::into_bitslice, ), body: body.try_into().unwrap_or_else(|_| { match option_env!("CARGO_PKG_REPOSITORY") { Some(env) => unreachable!( "Construction of a slice with length {} should not \ be possible. If this assumption is outdated, \ please file an issue at {}", (isize::MIN as usize) >> 3, env, ), None => unreachable!( "Construction of a slice with length {} should not \ be possible. If this assumption is outdated, \ please consider filing an issue", (isize::MIN as usize) >> 3 ), } }), tail: tail.map_or_else( Default::default, PartialElement::into_bitslice, ), }, } } } /** Domain constructors. Only `Domain` and `Domain` are ever constructed, and they of course are only constructed from `&BitSlice` and `&mut BitSlice`, respectively. 
However, the Rust trait system does not have a way to express a closed set, so
these constructors are instead written to accept any `M: Mutability`.
**/
impl<'a, M, T, O> Domain<'a, M, T, O>
where
	M: Mutability,
	T: 'a + BitStore,
	O: BitOrder,
	Address<M, T>: Referential<'a>,
	Address<M, [T::Unalias]>:
		SliceReferential<'a, ElementAddr = Address<M, T::Unalias>>,
	Address<M, BitSlice<T, O>>: Referential<'a>,
	Reference<'a, M, [T::Unalias]>: Default,
{
	/// Creates a new `Domain` over a bit-slice.
	///
	/// ## Parameters
	///
	/// - `bits`: Either a `&BitSlice` or `&mut BitSlice` reference, depending
	///   on whether a `Domain<Const>` or `Domain<Mut>` is being produced.
	///
	/// ## Returns
	///
	/// A `Domain` description of the raw memory governed by `bits`.
	pub(crate) fn new(bits: Reference<'a, M, BitSlice<T, O>>) -> Self
	where BitSpan<M, T, O>: From<Reference<'a, M, BitSlice<T, O>>> {
		let bitspan = bits.conv::<BitSpan<M, T, O>>();
		let (head, elts, tail) =
			(bitspan.head(), bitspan.elements(), bitspan.tail());
		let base = bitspan.address();
		let (min, max) = (BitIdx::<T::Mem>::MIN, BitEnd::<T::Mem>::MAX);
		let ctor = match (head, elts, tail) {
			(_, 0, _) => Self::empty,
			(h, _, t) if h == min && t == max => Self::spanning,
			(_, _, t) if t == max => Self::partial_head,
			(h, ..) if h == min => Self::partial_tail,
			(_, 1, _) => Self::minor,
			_ => Self::major,
		};
		ctor(base, elts, head, tail)
	}

	/// Produces the canonical empty `Domain`.
	#[inline]
	fn empty(
		_: Address<M, T>,
		_: usize,
		_: BitIdx<T::Mem>,
		_: BitEnd<T::Mem>,
	) -> Self {
		Default::default()
	}

	/// Produces a `Domain::Region` that contains both `head` and `tail` partial
	/// elements as well as a `body` slice (which may be empty).
	#[inline]
	fn major(
		addr: Address<M, T>,
		elts: usize,
		head: BitIdx<T::Mem>,
		tail: BitEnd<T::Mem>,
	) -> Self {
		let h_elem = addr;
		let t_elem = unsafe { addr.add(elts - 1) };
		let body = unsafe {
			Address::<M, [T::Unalias]>::from_raw_parts(
				addr.add(1).cast::<T::Unalias>(),
				elts - 2,
			)
		};
		Self::Region {
			head: Some(PartialElement::new(h_elem, head, None)),
			body,
			tail: Some(PartialElement::new(t_elem, None, tail)),
		}
	}

	/// Produces a `Domain::Enclave`.
	#[inline]
	fn minor(
		addr: Address<M, T>,
		_: usize,
		head: BitIdx<T::Mem>,
		tail: BitEnd<T::Mem>,
	) -> Self {
		let elem = addr;
		Self::Enclave(PartialElement::new(elem, head, tail))
	}

	/// Produces a `Domain::Region` with a partial `head` and a `body`, but no
	/// `tail`.
	#[inline]
	fn partial_head(
		addr: Address<M, T>,
		elts: usize,
		head: BitIdx<T::Mem>,
		_: BitEnd<T::Mem>,
	) -> Self {
		let elem = addr;
		let body = unsafe {
			Address::<M, [T::Unalias]>::from_raw_parts(
				addr.add(1).cast::<T::Unalias>(),
				elts - 1,
			)
		};
		Self::Region {
			head: Some(PartialElement::new(elem, head, None)),
			body,
			tail: None,
		}
	}

	/// Produces a `Domain::Region` with a partial `tail` and a `body`, but no
	/// `head`.
	#[inline]
	fn partial_tail(
		addr: Address<M, T>,
		elts: usize,
		_: BitIdx<T::Mem>,
		tail: BitEnd<T::Mem>,
	) -> Self {
		let elem = unsafe { addr.add(elts - 1) };
		let body = unsafe {
			Address::<M, [T::Unalias]>::from_raw_parts(
				addr.cast::<T::Unalias>(),
				elts - 1,
			)
		};
		Self::Region {
			head: None,
			body,
			tail: Some(PartialElement::new(elem, None, tail)),
		}
	}

	/// Produces a `Domain::Region` with neither `head` nor `tail`, but only a
	/// `body`.
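	///
	/// As a sketch of how `new` dispatches among these constructors, observed
	/// through the public `.domain()` API:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = [0u8; 3];
	/// let bits = data.view_bits::<Lsb0>();
	/// // A span strictly interior to one element is built by `minor` and
	/// // yields `Domain::Enclave`.
	/// assert!(bits[2 .. 6].domain().enclave().is_some());
	/// // A span covering only whole elements is built by `spanning` and
	/// // yields a `Region` with no partial edge elements.
	/// let (head, body, tail) = bits.domain().region().unwrap();
	/// assert!(head.is_none() && tail.is_none());
	/// assert_eq!(body.len(), 3);
	/// ```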
#[inline] fn spanning( addr: Address, elts: usize, _: BitIdx, _: BitEnd, ) -> Self { Self::Region { head: None, body: unsafe { as SliceReferential>::from_raw_parts( addr.cast::(), elts, ) }, tail: None, } } } impl<'a, M, T, O> Default for Domain<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address: Referential<'a>, Address: SliceReferential<'a>, Reference<'a, M, [T::Unalias]>: Default, { #[inline] fn default() -> Self { Self::Region { head: None, body: Reference::::default(), tail: None, } } } impl<'a, M, T, O> Debug for Domain<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, Address: Referential<'a>, Address: SliceReferential<'a>, Reference<'a, M, [T::Unalias]>: Debug, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!( fmt, "Domain::<{} {}, {}>::", M::RENDER, any::type_name::(), any::type_name::(), )?; match self { Self::Enclave(elem) => { fmt.debug_tuple("Enclave").field(elem).finish() }, Self::Region { head, body, tail } => fmt .debug_struct("Region") .field("head", head) .field("body", body) .field("tail", tail) .finish(), } } } #[cfg(not(tarpaulin_include))] impl Clone for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { *self } } impl Iterator for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, { type Item = T::Mem; #[inline] fn next(&mut self) -> Option { match self { Self::Enclave(elem) => { elem.load_value().tap(|_| *self = Default::default()).into() }, Self::Region { head, body, tail } => { if let Some(elem) = head.take() { return elem.load_value().into(); } if let Some((elem, rest)) = body.split_first() { *body = rest; return elem.load_value().into(); } if let Some(elem) = tail.take() { return elem.load_value().into(); } None }, } } } impl DoubleEndedIterator for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { match self { Self::Enclave(elem) => { elem.load_value().tap(|_| *self = Default::default()).into() }, Self::Region { head, body, tail } => { if let Some(elem) = tail.take() { return elem.load_value().into(); } if let Some((elem, rest)) = body.split_last() { *body = rest; return elem.load_value().into(); } if let Some(elem) = head.take() { return elem.load_value().into(); } None }, } } } impl ExactSizeIterator for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { match self { Self::Enclave(_) => 1, Self::Region { head, body, tail } => { head.is_some() as usize + body.len() + tail.is_some() as usize }, } } } impl FusedIterator for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, { } impl Copy for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, { } /// Implements numeric formatting by rendering each element. macro_rules! fmt { ($($fmt:ty => $fwd:ident),+ $(,)?) => { $( impl<'a, T, O> $fmt for Domain<'a, Const, T, O> where O: BitOrder, T: BitStore, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_list() .entries(self.into_iter().map(FmtForward::$fwd)) .finish() } } )+ }; } fmt! { Binary => fmt_binary, Display => fmt_display, LowerHex => fmt_lower_hex, Octal => fmt_octal, UpperHex => fmt_upper_hex, } #[doc = include_str!("../doc/domain/PartialElement.md")] pub struct PartialElement<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, { /// The address of the memory element being partially viewed. 
/// /// This must be stored as a pointer, not a reference, because it must /// retain mutability permissions but cannot have an `&mut` reference to /// a shared element. /// /// Similarly, it must remain typed as `T`, not `T::Access`, to allow the /// `` case not to inappropriately produce a `>` /// even if no write is performed. elem: Address, /// Cache the selector mask, so it never needs to be recomputed. mask: BitMask, /// The starting index. head: BitIdx, /// The ending index. tail: BitEnd, /// Preserve the originating bit-order _ord: PhantomData, /// This type acts as-if it were a shared-mutable reference. _ref: PhantomData<&'a T::Access>, } impl<'a, M, T, O> PartialElement<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, { /// Constructs a new partial-element guarded reference. /// /// ## Parameters /// /// - `elem`: the element to which this partially points. /// - `head`: the index at which the partial region begins. /// - `tail`: the index at which the partial region ends. #[inline] fn new( elem: Address, head: impl Into>>, tail: impl Into>>, ) -> Self { let (head, tail) = ( head.into().unwrap_or(BitIdx::MIN), tail.into().unwrap_or(BitEnd::MAX), ); Self { elem, mask: O::mask(head, tail), head, tail, _ord: PhantomData, _ref: PhantomData, } } /// Fetches the value stored through `self` and masks away extra bits. /// /// ## Returns /// /// A bit-map containing any bits set to `1` in the governed bits. All other /// bits are cleared to `0`. #[inline] pub fn load_value(&self) -> T::Mem { self.elem .pipe(|addr| unsafe { &*addr.to_const() }) .load_value() & self.mask.into_inner() } /// Gets the starting index of the live bits in the element. #[inline] #[cfg(not(tarpaulin_include))] pub fn head(&self) -> BitIdx { self.head } /// Gets the ending index of the live bits in the element. #[inline] #[cfg(not(tarpaulin_include))] pub fn tail(&self) -> BitEnd { self.tail } /// Gets the semantic head and tail indices that constrain which bits of the /// referent element may be accessed. #[inline] #[cfg(not(tarpaulin_include))] pub fn bounds(&self) -> (BitIdx, BitEnd) { (self.head, self.tail) } /// Gets the bit-mask over all accessible bits. #[inline] #[cfg(not(tarpaulin_include))] pub fn mask(&self) -> BitMask { self.mask } /// Converts the partial element into a bit-slice over its governed bits. #[inline] pub fn into_bitslice(self) -> Reference<'a, M, BitSlice> where Address>: Referential<'a> { unsafe { BitSpan::new_unchecked( self.elem, self.head, (self.tail.into_inner() - self.head.into_inner()) as usize, ) } .to_bitslice() } } impl<'a, T, O> PartialElement<'a, Mut, T, O> where T: BitStore, O: BitOrder, Address: Referential<'a>, { /// Stores a value through `self` after masking away extra bits. /// /// ## Parameters /// /// - `&mut self` /// - `value`: A bit-map which will be written into the governed bits. This /// is a bit-map store, not an integer store; the value will not be /// shifted into position and will only be masked directly against the /// bits that this partial-element governs. /// /// ## Returns /// /// The previous value of the governed bits. #[inline] pub fn store_value(&mut self, value: T::Mem) -> T::Mem { let this = self.access(); let prev = this.clear_bits(self.mask); this.set_bits(self.mask & value); prev & self.mask.into_inner() } /// Inverts the value of each bit governed by the partial-element. /// /// ## Returns /// /// The previous value of the governed bits. 
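	///
	/// ## Examples
	///
	/// A sketch, reaching the partial-element through the public
	/// `.domain_mut()` accessor:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = 0u8;
	/// let bits = data.view_bits_mut::<Lsb0>();
	/// let mut partial = bits[2 .. 6].domain_mut().enclave().unwrap();
	/// // All four governed bits were zero, and are now one.
	/// assert_eq!(partial.invert(), 0);
	/// assert_eq!(data, 0b0011_1100);
	/// ```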
#[inline] #[cfg(not(tarpaulin_include))] pub fn invert(&mut self) -> T::Mem { self.access().invert_bits(self.mask) & self.mask.into_inner() } /// Clears all bits governed by the partial-element to `0`. /// /// ## Returns /// /// The previous value of the governed bits. #[inline] #[cfg(not(tarpaulin_include))] pub fn clear(&mut self) -> T::Mem { self.access().clear_bits(self.mask) & self.mask.into_inner() } /// Sets all bits governed by the partial-element to `1`. /// /// ## Returns /// /// The previous value of the governed bits. #[inline] #[cfg(not(tarpaulin_include))] pub fn set(&mut self) -> T::Mem { self.access().set_bits(self.mask) & self.mask.into_inner() } /// Produces a reference capable of tolerating other handles viewing the /// same *memory element*. #[inline] fn access(&self) -> &T::Access { unsafe { &*self.elem.to_const().cast::() } } } impl<'a, M, T, O> PartialElement<'a, M, T, O> where M: Mutability, O: BitOrder, T: 'a + BitStore + radium::Radium, { /// Performs a store operation on a partial-element whose bits might be /// observed by another handle. #[inline] pub fn store_value_aliased(&self, value: T::Mem) -> T::Mem { let this = unsafe { &*self.elem.to_const().cast::() }; let prev = this.clear_bits(self.mask); this.set_bits(self.mask & value); prev & self.mask.into_inner() } } #[cfg(not(tarpaulin_include))] impl<'a, T, O> Clone for PartialElement<'a, Const, T, O> where T: BitStore, O: BitOrder, Address: Referential<'a>, { #[inline] fn clone(&self) -> Self { *self } } impl<'a, M, T, O> Debug for PartialElement<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!( fmt, "PartialElement<{} {}, {}>", M::RENDER, any::type_name::(), any::type_name::(), )?; fmt.debug_struct("") .field("elem", &self.load_value()) .field("mask", &self.mask.fmt_display()) .field("head", &self.head.fmt_display()) .field("tail", &self.tail.fmt_display()) .finish() } } #[cfg(not(tarpaulin_include))] impl<'a, M, T, O> Hash for PartialElement<'a, M, T, O> where M: Mutability, T: 'a + BitStore, O: BitOrder, { #[inline] fn hash(&self, hasher: &mut H) where H: Hasher { self.load_value().hash(hasher); self.mask.hash(hasher); self.head.hash(hasher); self.tail.hash(hasher); } } impl Copy for PartialElement<'_, Const, T, O> where T: BitStore, O: BitOrder, { } #[cfg(test)] mod tests { use rand::random; use super::*; use crate::prelude::*; #[test] fn bit_domain() { let data = BitArray::<[u32; 3], Msb0>::new(random()); let bd = data.bit_domain(); assert!(bd.enclave().is_none()); let (head, body, tail) = bd.region().unwrap(); assert_eq!(data, body); assert!(head.is_empty()); assert!(tail.is_empty()); let bd = data[2 ..].bit_domain(); let (head, body, tail) = bd.region().unwrap(); assert_eq!(head, &data[2 .. 32]); assert_eq!(body, &data[32 ..]); assert!(tail.is_empty()); let bd = data[.. 94].bit_domain(); let (head, body, tail) = bd.region().unwrap(); assert!(head.is_empty()); assert_eq!(body, &data[.. 64]); assert_eq!(tail, &data[64 .. 94]); let bd = data[2 .. 94].bit_domain(); let (head, body, tail) = bd.region().unwrap(); assert_eq!(head, &data[2 .. 32]); assert_eq!(body, &data[32 .. 64]); assert_eq!(tail, &data[64 .. 94]); let bd = data[34 .. 62].bit_domain(); assert!(bd.region().is_none()); assert_eq!(bd.enclave().unwrap(), data[34 .. 
62]); let (head, body, tail) = BitDomain::::default().region().unwrap(); assert!(head.is_empty()); assert!(body.is_empty()); assert!(tail.is_empty()); } #[test] fn domain() { let data: [u32; 3] = random(); let bits = data.view_bits::(); let d = bits.domain(); assert!(d.enclave().is_none()); let (head, body, tail) = d.region().unwrap(); assert!(head.is_none()); assert!(tail.is_none()); assert_eq!(body, data); let d = bits[2 ..].domain(); let (head, body, tail) = d.region().unwrap(); assert_eq!(head.unwrap().load_value(), (data[0] << 2) >> 2); assert_eq!(body, &data[1 ..]); assert!(tail.is_none()); let d = bits[.. 94].domain(); let (head, body, tail) = d.region().unwrap(); assert!(head.is_none()); assert_eq!(body, &data[.. 2]); assert_eq!(tail.unwrap().load_value(), (data[2] >> 2) << 2); let d = bits[2 .. 94].domain(); let (head, body, tail) = d.region().unwrap(); assert_eq!(head.unwrap().load_value(), (data[0] << 2) >> 2); assert_eq!(body, &data[1 .. 2]); assert_eq!(tail.unwrap().load_value(), (data[2] >> 2) << 2); let d = bits[34 .. 62].domain(); assert!(d.region().is_none()); assert_eq!( d.enclave().unwrap().load_value(), ((data[1] << 2) >> 4) << 2, ); assert!(matches!(bits![].domain(), Domain::Region { head: None, body: &[], tail: None, })); assert!(matches!( Domain::::default(), Domain::Region { head: None, body: &[], tail: None, }, )); let data = core::cell::Cell::new(0u8); let partial = data.view_bits::()[2 .. 6].domain().enclave().unwrap(); assert_eq!(partial.store_value_aliased(!0), 0); assert_eq!(data.get(), 0b00_1111_00); } #[test] fn iter() { let bits = [0x12u8, 0x34, 0x56].view_bits::(); let mut domain = bits[4 .. 12].domain(); assert_eq!(domain.len(), 2); assert_eq!(domain.next().unwrap(), 0x10); assert_eq!(domain.next_back().unwrap(), 0x04); assert!(domain.next().is_none()); assert!(domain.next_back().is_none()); assert_eq!(bits[2 .. 6].domain().len(), 1); assert_eq!(bits[18 .. 22].domain().next_back().unwrap(), 0b00_0101_00); let mut domain = bits[4 .. 20].domain(); assert_eq!(domain.next_back().unwrap(), 0x06); assert_eq!(domain.next_back().unwrap(), 0x34); assert_eq!(domain.next_back().unwrap(), 0x10); } #[test] #[cfg(feature = "alloc")] fn render() { #[cfg(not(feature = "std"))] use alloc::format; let data = BitArray::::new(random()); let render = format!("{:?}", data.bit_domain()); let expected = format!( "BitDomain::<*const u32, {}>::Region {{ head: {:?}, body: {:?}, \ tail: {:?} }}", any::type_name::(), BitSlice::::empty(), data.as_bitslice(), BitSlice::::empty(), ); assert_eq!(render, expected); let render = format!("{:?}", data[2 .. 30].bit_domain()); let expected = format!( "BitDomain::<*const u32, {}>::Enclave({:?})", any::type_name::(), &data[2 .. 30], ); assert_eq!(render, expected); let render = format!("{:?}", data.domain()); let expected = format!( "Domain::<*const u32, {}>::Region {{ head: None, body: {:?}, tail: \ None }}", any::type_name::(), data.as_raw_slice(), ); assert_eq!(render, expected); let render = format!("{:?}", data[2 .. 30].domain()); let expected = format!( "Domain::<*const u32, {}>::Enclave", any::type_name::(), ); assert!(render.starts_with(&expected)); let partial = 0x3Cu8.view_bits::()[2 .. 
6] .domain() .enclave() .unwrap(); let render = format!("{:?}", partial); assert_eq!( render, format!( "PartialElement<*const u8, {}> {{ elem: 60, mask: {}, head: \ {}, tail: {} }}", any::type_name::(), partial.mask, partial.head, partial.tail, ), ); } } bitvec-1.0.1/src/field/io.rs000064400000000000000000000042631046102023000137330ustar 00000000000000#![cfg(feature = "std")] #![doc = include_str!("../../doc/field/io.md")] use core::mem; use std::io::{ self, Read, Write, }; use super::BitField; use crate::{ mem::bits_of, order::BitOrder, slice::BitSlice, store::BitStore, vec::BitVec, }; #[doc = include_str!("../../doc/field/io/Read_BitSlice.md")] impl Read for &BitSlice where T: BitStore, O: BitOrder, BitSlice: BitField, { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { let mut count = 0; self.chunks_exact(bits_of::()) .zip(buf.iter_mut()) .for_each(|(byte, slot)| { *slot = byte.load_be(); count += 1; }); *self = unsafe { self.get_unchecked(count * bits_of::() ..) }; Ok(count) } } #[doc = include_str!("../../doc/field/io/Write_BitSlice.md")] impl Write for &mut BitSlice where T: BitStore, O: BitOrder, BitSlice: BitField, { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result { let mut count = 0; unsafe { self.chunks_exact_mut(bits_of::()).remove_alias() } .zip(buf.iter().copied()) .for_each(|(slot, byte)| { slot.store_be(byte); count += 1; }); *self = unsafe { mem::take(self).get_unchecked_mut(count * bits_of::() ..) }; Ok(count) } #[inline] #[cfg(not(tarpaulin_include))] fn flush(&mut self) -> io::Result<()> { Ok(()) } } #[doc = include_str!("../../doc/field/io/Read_BitVec.md")] impl Read for BitVec where T: BitStore, O: BitOrder, BitSlice: BitField, { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { let bytes_read = self.as_bitslice().read(buf)?; let bits = bytes_read * bits_of::(); self.shift_left(bits); self.truncate(self.len() - bits); Ok(bytes_read) } } #[doc = include_str!("../../doc/field/io/Write_BitVec.md")] impl Write for BitVec where O: BitOrder, T: BitStore, BitSlice: BitField, { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result { let len = self.len(); self.resize(len + buf.len() * bits_of::(), false); unsafe { self.get_unchecked_mut(len ..) }.write(buf) } #[inline] #[cfg(not(tarpaulin_include))] fn flush(&mut self) -> io::Result<()> { Ok(()) } } bitvec-1.0.1/src/field/tests.rs000064400000000000000000000162361046102023000144710ustar 00000000000000#![cfg(test)] #[cfg(feature = "std")] use std::io; use rand::prelude::*; use crate::prelude::*; #[test] fn lsb0_u8_any_u5() { let mut bits = BitArray::::ZERO; let val = random::() & 0x1Fu8; bits[2 .. 7].store_le(val); assert_eq!( bits.as_raw_slice()[0], val << 2, "{:08b} != {:08b}", bits.as_raw_slice()[0], val << 2, ); assert_eq!(bits[2 .. 7].load_le::(), val); let neg = val | 0xF0; bits[2 .. 7].store_le(neg); assert_eq!(bits[2 .. 7].load_le::(), neg as i8); let val = random::() & 0x1Fu8; bits[2 .. 7].store_be(val); assert_eq!( bits.as_raw_slice()[0], val << 2, "{:08b} != {:08b}", bits.as_raw_slice()[0], val << 2, ); assert_eq!(bits[2 .. 7].load_be::(), val); let neg = val | 0xF0; bits[2 .. 7].store_be(neg); assert_eq!(bits[2 .. 7].load_be::(), neg as i8); } #[test] fn lsb0_u8_le_u20() { let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO; let val = random::() & 0x00_0F_FF_FFu32; let bytes = (val << 2).to_le_bytes(); bits[2 .. 22].store_le(val); assert_eq!(bits.as_raw_slice(), &bytes[.. 3]); assert_eq!(bits[2 .. 22].load_le::(), val); let neg = val | 0xFF_F8_00_00u32; bits[2 .. 
22].store_le(neg); assert_eq!( bits[2 .. 22].load_le::(), neg as i32, "{:08x} != {:08x}", bits[2 .. 22].load_le::(), neg as i32, ); } #[test] fn lsb0_u8_be_u20() { let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO; let val = random::() & 0x00_0F_FF_FFu32; let mut bytes = (val << 2).to_be_bytes(); // Lsb0 _be has *weird* effects in raw memory. bytes[1] <<= 2; bytes[3] >>= 2; bits[2 .. 22].store_be(val); assert_eq!(bits.as_raw_slice(), &bytes[1 ..]); assert_eq!(bits[2 .. 22].load_be::(), val); let neg = val | 0xFF_F8_00_00u32; bits[2 .. 22].store_be(neg); assert_eq!( bits[2 .. 22].load_be::(), neg as i32, "{:08x} != {:08x}", bits[2 .. 22].load_le::(), neg as i32, ); } #[test] fn msb0_u8_any_u5() { let mut bits = BitArray::::ZERO; let val = random::() & 0x1Fu8; bits[2 .. 7].store_le(val); assert_eq!( bits.as_raw_slice()[0], val << 1, "{:08b} != {:08b}", bits.as_raw_slice()[0], val << 1, ); assert_eq!(bits[2 .. 7].load_le::(), val); let neg = val | 0xF0; bits[2 .. 7].store_le(neg); assert_eq!(bits[2 .. 7].load_le::(), neg as i8); let val = random::() & 0x1Fu8; bits[2 .. 7].store_be(val); assert_eq!( bits.as_raw_slice()[0], val << 1, "{:08b} != {:08b}", bits.as_raw_slice()[0], val << 1, ); assert_eq!(bits[2 .. 7].load_be::(), val); let neg = val | 0xF0; bits[2 .. 7].store_be(neg); assert_eq!(bits[2 .. 7].load_be::(), neg as i8); } #[test] fn msb0_u8_le_u20() { let mut bits = BitArray::<[u8; 3], Msb0>::ZERO; let val = random::() & 0x00_0F_FF_FFu32; let mut bytes = (val << 2).to_le_bytes(); // Msb0 _le has *weird* effects in raw memory. bytes[0] >>= 2; bytes[2] <<= 2; bits[2 .. 22].store_le(val); assert_eq!(bits.as_raw_slice(), &bytes[.. 3]); assert_eq!(bits[2 .. 22].load_le::(), val); let neg = val | 0xFF_F8_00_00u32; bits[2 .. 22].store_le(neg); assert_eq!( bits[2 .. 22].load_le::(), neg as i32, "{:08x} != {:08x}", bits[2 .. 22].load_le::(), neg as i32, ); } #[test] fn msb0_u8_be_u20() { let mut bits = BitArray::<[u8; 3], Msb0>::ZERO; let val = random::() & 0x00_0F_FF_FFu32; let bytes = (val << 2).to_be_bytes(); bits[2 .. 22].store_be(val); assert_eq!(bits.as_raw_slice(), &bytes[1 ..]); assert_eq!(bits[2 .. 22].load_be::(), val); let neg = val | 0xFF_F8_00_00u32; bits[2 .. 22].store_be(neg); assert_eq!( bits[2 .. 22].load_be::(), neg as i32, "{:08x} != {:08x}", bits[2 .. 22].load_le::(), neg as i32, ); } #[test] fn lsb0_u8_le_u24() { let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO; let val = random::() & 0x00_FF_FF_FFu32; let bytes = val.to_le_bytes(); bits.store_le(val); assert_eq!(bits.as_raw_slice(), &bytes[.. 3]); assert_eq!( bits.load_le::(), val, "{:08x} != {:08x}", bits.load_le::(), val, ); let neg = val | 0xFF_80_00_00u32; bits.store_le(neg); assert_eq!( bits.load_le::(), neg as i32, "{:08x} != {:08x}", bits.load_le::(), neg as i32, ); } #[test] fn lsb0_u8_be_u24() { let mut bits = BitArray::<[u8; 3], Lsb0>::ZERO; let val = random::() & 0x00_FF_FF_FFu32; let bytes = val.to_be_bytes(); bits.store_be(val); assert_eq!(bits.as_raw_slice(), &bytes[1 ..]); assert_eq!(bits.load_be::(), val); let neg = val | 0xFF_80_00_00u32; bits.store_be(neg); assert_eq!( bits.load_be::(), neg as i32, "{:08x} != {:08x}", bits.load_be::(), neg as i32, ); } #[test] fn msb0_u8_le_u24() { let mut bits = BitArray::<[u8; 3], Msb0>::ZERO; let val = random::() & 0x00_FF_FF_FFu32; let bytes = val.to_le_bytes(); bits.store_le(val); assert_eq!(bits.as_raw_slice(), &bytes[.. 
3]); assert_eq!(bits.load_le::(), val); let neg = val | 0xFF_80_00_00u32; bits.store_le(neg); assert_eq!( bits.load_le::(), neg as i32, "{:08x} != {:08x}", bits.load_le::(), neg as i32, ); } #[test] fn msb0_u8_be_u24() { let mut bits = BitArray::<[u8; 3], Msb0>::ZERO; let val = random::() & 0x00_FF_FF_FFu32; let bytes = val.to_be_bytes(); bits.store_be(val); assert_eq!(bits.as_raw_slice(), &bytes[1 ..]); assert_eq!(bits.load_be::(), val); let neg = val | 0xFF_80_00_00u32; bits.store_be(neg); assert_eq!( bits.load_be::(), neg as i32, "{:08x} != {:08x}", bits.load_be::(), neg as i32, ); } #[test] #[cfg(feature = "std")] fn read_bits() { let data = [0x136Cu16, 0x8C63]; let base = data.view_bits::().as_bitptr(); let mut bits = &data.view_bits::()[4 ..]; assert_eq!(unsafe { bits.as_bitptr().offset_from(base) }, 4); assert_eq!(bits.len(), 28); let mut transfer = [0u8; 4]; let last_ptr = &mut transfer[3] as *mut _; let mut transfer_handle = &mut transfer[..]; assert_eq!(io::copy(&mut bits, &mut transfer_handle).unwrap(), 3); assert_eq!(unsafe { bits.as_bitptr().offset_from(base) }, 28); assert_eq!(transfer_handle.as_mut_ptr() as *mut _, last_ptr); assert_eq!(transfer[.. 3], [0x36, 0xC8, 0xC6][..]); let mut bv = data.view_bits::()[4 ..].to_bitvec(); let mut transfer = [0u8; 3]; assert_eq!(io::copy(&mut bv, &mut &mut transfer[..]).unwrap(), 3); assert_eq!(bv, bits![0, 0, 1, 1]); assert_eq!(transfer, [0x36, 0xC8, 0xC6]); } #[test] #[cfg(feature = "std")] fn write_bits() { let mut bv = bitvec![usize, Msb0; 0; 4]; assert_eq!( io::copy(&mut &[0xC3u8, 0xF0, 0x69][..], &mut bv).unwrap(), 3, ); assert_eq!(bv, bits![ 0, 0, 0, 0, // original 1, 1, 0, 0, 0, 0, 1, 1, // byte 0 1, 1, 1, 1, 0, 0, 0, 0, // byte 1 0, 1, 1, 0, 1, 0, 0, 1, // byte 2 ]); let mut data = [0u8; 4]; let base = data.view_bits_mut::().as_mut_bitptr(); let mut bits = &mut data.view_bits_mut::()[4 ..]; assert_eq!(unsafe { bits.as_mut_bitptr().offset_from(base) }, 4); assert_eq!(bits.len(), 28); assert_eq!( io::copy(&mut &[0xA5u8, 0xB4, 0x3C][..], &mut bits).unwrap(), 3, ); assert_eq!(unsafe { bits.as_mut_bitptr().offset_from(base) }, 28); assert_eq!(bits.len(), 4); assert_eq!(data, [0b1010_0000, 0b1011_0101, 0b0011_0100, 0b0000_1100]); } bitvec-1.0.1/src/field.rs000064400000000000000000000347031046102023000133260ustar 00000000000000#![doc = include_str!("../doc/field.md")] use core::{ mem, ptr, }; use funty::Integral; use tap::Pipe; use wyz::comu::{ Const, Mut, }; use crate::{ array::BitArray, devel as dvl, domain::{ Domain, PartialElement, }, mem::bits_of, order::{ BitOrder, Lsb0, Msb0, }, slice::BitSlice, store::BitStore, view::BitViewSized, }; #[cfg(feature = "alloc")] use crate::{ boxed::BitBox, vec::BitVec, }; mod io; mod tests; #[doc = include_str!("../doc/field/BitField.md")] pub trait BitField { #[inline] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/field/BitField_load.md")] fn load(&self) -> I where I: Integral { if cfg!(target_endian = "little") { self.load_le::() } else if cfg!(target_endian = "big") { self.load_be::() } else { match option_env!("CARGO_PKG_REPOSITORY") { Some(env) => unreachable!( "This architecture is not supported! Please consider \ filing an issue at {}", env ), None => unreachable!( "This architecture is not supported! 
Please consider \ filing an issue" ), } } } #[inline] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/field/BitField_store.md")] fn store(&mut self, value: I) where I: Integral { if cfg!(target_endian = "little") { self.store_le::(value); } else if cfg!(target_endian = "big") { self.store_be::(value); } else { match option_env!("CARGO_PKG_REPOSITORY") { Some(env) => unreachable!( "This architecture is not supported! Please consider \ filing an issue at {}", env ), None => unreachable!( "This architecture is not supported! Please consider \ filing an issue" ), } } } #[doc = include_str!("../doc/field/BitField_load_le.md")] fn load_le(&self) -> I where I: Integral; #[doc = include_str!("../doc/field/BitField_load_be.md")] fn load_be(&self) -> I where I: Integral; #[doc = include_str!("../doc/field/BitField_store_le.md")] fn store_le(&mut self, value: I) where I: Integral; #[doc = include_str!("../doc/field/BitField_store_be.md")] fn store_be(&mut self, value: I) where I: Integral; } #[doc = include_str!("../doc/field/BitField_Lsb0.md")] impl BitField for BitSlice where T: BitStore { #[inline] #[doc = include_str!("../doc/field/BitField_Lsb0_load_le.md")] fn load_le(&self) -> I where I: Integral { let len = self.len(); check::("load", len); match self.domain() { // In Lsb0, the head counts distance from LSedge to first live bit. Domain::Enclave(elem) => get(elem, elem.head().into_inner()), Domain::Region { head, body, tail } => { let mut accum = I::ZERO; if let Some(elem) = tail { accum = get(elem, 0); } for elem in body.iter().rev().map(BitStore::load_value) { maybe_shift_left(&mut accum, bits_of::()); accum |= resize::(elem); } if let Some(elem) = head { let shamt = elem.head().into_inner(); maybe_shift_left( &mut accum, bits_of::() - shamt as usize, ); accum |= get::<_, _, I>(elem, shamt); } accum }, } .pipe(|elem| sign(elem, len)) } #[inline] #[doc = include_str!("../doc/field/BitField_Lsb0_load_be.md")] fn load_be(&self) -> I where I: Integral { let len = self.len(); check::("load", len); match self.domain() { Domain::Enclave(elem) => get(elem, elem.head().into_inner()), Domain::Region { head, body, tail } => { let mut accum = I::ZERO; if let Some(elem) = head { accum = get(elem, elem.head().into_inner()); } for elem in body.iter().map(BitStore::load_value) { maybe_shift_left(&mut accum, bits_of::()); accum |= resize::(elem); } if let Some(elem) = tail { let shamt = elem.tail().into_inner() as usize; maybe_shift_left(&mut accum, shamt); accum |= get::<_, _, I>(elem, 0); } accum }, } .pipe(|elem| sign(elem, len)) } #[inline] #[doc = include_str!("../doc/field/BitField_Lsb0_store_le.md")] fn store_le(&mut self, mut value: I) where I: Integral { check::("store", self.len()); match self.domain_mut() { Domain::Enclave(elem) => { let shamt = elem.head().into_inner(); set(elem, value, shamt); }, Domain::Region { head, body, tail } => { if let Some(elem) = head { let shamt = elem.head().into_inner(); set(elem, value, shamt); let rshamt = bits_of::() - shamt as usize; maybe_shift_right(&mut value, rshamt); } for elem in body.iter_mut() { elem.store_value(resize(value)); maybe_shift_right(&mut value, bits_of::()); } if let Some(elem) = tail { set(elem, value, 0); } }, } } #[inline] #[doc = include_str!("../doc/field/BitField_Lsb0_store_be.md")] fn store_be(&mut self, mut value: I) where I: Integral { check::("store", self.len()); match self.domain_mut() { Domain::Enclave(elem) => { let shamt = elem.head().into_inner(); set(elem, value, shamt); }, Domain::Region { head, body, tail } => 
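			// Big-endian element ordering places the most-significant fragment
			// of `value` in the lowest-addressed element, so the walk starts at
			// the highest address (`tail`) with the least-significant fragment
			// and shifts `value` down as it advances toward `head`.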
{ if let Some(elem) = tail { let shamt = elem.tail().into_inner() as usize; set(elem, value, 0); maybe_shift_right(&mut value, shamt); } for elem in body.iter_mut().rev() { elem.store_value(resize(value)); maybe_shift_right(&mut value, bits_of::()); } if let Some(elem) = head { let shamt = elem.head().into_inner(); set(elem, value, shamt); } }, } } } #[doc = include_str!("../doc/field/BitField_Msb0.md")] impl BitField for BitSlice where T: BitStore { #[inline] #[doc = include_str!("../doc/field/BitField_Msb0_load_le.md")] fn load_le(&self) -> I where I: Integral { let len = self.len(); check::("load", len); match self.domain() { Domain::Enclave(elem) => { let shamt = bits_of::() as u8 - elem.tail().into_inner(); get(elem, shamt) }, Domain::Region { head, body, tail } => { let mut accum = I::ZERO; if let Some(elem) = tail { let shamt = bits_of::() as u8 - elem.tail().into_inner(); accum = get(elem, shamt); } for elem in body.iter().rev().map(BitStore::load_value) { maybe_shift_left(&mut accum, bits_of::()); accum |= resize::(elem); } if let Some(elem) = head { let shamt = bits_of::() - elem.head().into_inner() as usize; maybe_shift_left(&mut accum, shamt); accum |= get::<_, _, I>(elem, 0); } accum }, } .pipe(|elem| sign(elem, len)) } #[inline] #[doc = include_str!("../doc/field/BitField_Msb0_load_be.md")] fn load_be(&self) -> I where I: Integral { let len = self.len(); check::("load", len); match self.domain() { Domain::Enclave(elem) => { let shamt = bits_of::() as u8 - elem.tail().into_inner(); get(elem, shamt) }, Domain::Region { head, body, tail } => { let mut accum = I::ZERO; if let Some(elem) = head { accum = get(elem, 0); } for elem in body.iter().map(BitStore::load_value) { maybe_shift_left(&mut accum, bits_of::()); accum |= resize::(elem); } if let Some(elem) = tail { let shamt = elem.tail().into_inner(); maybe_shift_left(&mut accum, shamt as usize); accum |= get::<_, _, I>(elem, bits_of::() as u8 - shamt); } accum }, } .pipe(|elem| sign(elem, len)) } #[inline] #[doc = include_str!("../doc/field/BitField_Msb0_store_le.md")] fn store_le(&mut self, mut value: I) where I: Integral { check::("store", self.len()); match self.domain_mut() { Domain::Enclave(elem) => { let shamt = bits_of::() as u8 - elem.tail().into_inner(); set(elem, value, shamt); }, Domain::Region { head, body, tail } => { if let Some(elem) = head { let shamt = bits_of::() - elem.head().into_inner() as usize; set(elem, value, 0); maybe_shift_right(&mut value, shamt); } for elem in body.iter_mut() { elem.store_value(resize(value)); maybe_shift_right(&mut value, bits_of::()); } if let Some(elem) = tail { let shamt = bits_of::() as u8 - elem.tail().into_inner(); set(elem, value, shamt); } }, } } #[inline] #[doc = include_str!("../doc/field/BitField_Msb0_store_be.md")] fn store_be(&mut self, mut value: I) where I: Integral { check::("store", self.len()); match self.domain_mut() { Domain::Enclave(elem) => { let shamt = bits_of::() as u8 - elem.tail().into_inner(); set(elem, value, shamt); }, Domain::Region { head, body, tail } => { if let Some(elem) = tail { let tail = elem.tail().into_inner() as usize; let shamt = bits_of::() - tail; set(elem, value, shamt as u8); maybe_shift_right(&mut value, tail); } for elem in body.iter_mut().rev() { elem.store_value(resize(value)); maybe_shift_right(&mut value, bits_of::()); } if let Some(elem) = head { set(elem, value, 0); } }, } } } #[doc = include_str!("../doc/field/impl_BitArray.md")] impl BitField for BitArray where O: BitOrder, A: BitViewSized, BitSlice: BitField, { 
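	// A `BitArray` always occupies whole memory elements with no partial
	// edges, so these implementations skip the `Domain` decomposition and
	// walk the raw element slice directly.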
#[inline(always)] fn load_le(&self) -> I where I: Integral { let mut accum = I::ZERO; for elem in self.as_raw_slice().iter().map(BitStore::load_value).rev() { maybe_shift_left(&mut accum, bits_of::()); accum |= resize::<_, I>(elem); } sign(accum, self.len()) } #[inline(always)] fn load_be(&self) -> I where I: Integral { let mut accum = I::ZERO; for elem in self.as_raw_slice().iter().map(BitStore::load_value) { maybe_shift_left(&mut accum, bits_of::()); accum |= resize::<_, I>(elem); } sign(accum, self.len()) } #[inline(always)] fn store_le(&mut self, mut value: I) where I: Integral { for slot in self.as_raw_mut_slice() { slot.store_value(resize(value)); maybe_shift_right(&mut value, bits_of::()); } } #[inline(always)] fn store_be(&mut self, mut value: I) where I: Integral { for slot in self.as_raw_mut_slice().iter_mut().rev() { slot.store_value(resize(value)); maybe_shift_right(&mut value, bits_of::()); } } } #[cfg(feature = "alloc")] #[cfg(not(tarpaulin_include))] impl BitField for BitBox where T: BitStore, O: BitOrder, BitSlice: BitField, { #[inline(always)] fn load_le(&self) -> I where I: Integral { self.as_bitslice().load_le() } #[inline(always)] fn load_be(&self) -> I where I: Integral { self.as_bitslice().load_be() } #[inline(always)] fn store_le(&mut self, value: I) where I: Integral { self.as_mut_bitslice().store_le(value) } #[inline(always)] fn store_be(&mut self, value: I) where I: Integral { self.as_mut_bitslice().store_be(value) } } #[cfg(feature = "alloc")] #[cfg(not(tarpaulin_include))] impl BitField for BitVec where T: BitStore, O: BitOrder, BitSlice: BitField, { #[inline(always)] fn load_le(&self) -> I where I: Integral { self.as_bitslice().load_le() } #[inline(always)] fn load_be(&self) -> I where I: Integral { self.as_bitslice().load_be() } #[inline(always)] fn store_le(&mut self, value: I) where I: Integral { self.as_mut_bitslice().store_le(value) } #[inline(always)] fn store_be(&mut self, value: I) where I: Integral { self.as_mut_bitslice().store_be(value) } } /** Asserts that a bit-slice is not longer than a memory element. ## Type Parameters - `I`: The integer type being stored into or loaded out of a bit-slice. ## Parameters - `action`: the verb being performed. One of `"load"` or `"store"`. - `len`: the length of the bit-slice under test. ## Panics This panics if `len` is not in `1 ..= U::BITS`. **/ fn check(action: &'static str, len: usize) where I: Integral { assert!( (1 ..= bits_of::()).contains(&len), "cannot {} {} bits from a {}-bit region", action, bits_of::(), len, ); } /// Shifts a value to the left, if it can support the shift amount. fn maybe_shift_left(elem: &mut T, shamt: usize) { if bits_of::() > shamt { *elem <<= shamt; } } /// Shifts a value to the right, if it can support the shift amount. fn maybe_shift_right(elem: &mut T, shamt: usize) { if bits_of::() > shamt { *elem >>= shamt; } } #[doc = include_str!("../doc/field/get.md")] fn get(elem: PartialElement, shamt: u8) -> I where T: BitStore, O: BitOrder, I: Integral, { resize::(elem.load_value() >> shamt) } #[doc = include_str!("../doc/field/set.md")] fn set(mut elem: PartialElement, value: I, shamt: u8) where T: BitStore, O: BitOrder, I: Integral, { elem.store_value(resize::(value) << shamt); } #[doc = include_str!("../doc/field/sign.md")] fn sign(elem: I, width: usize) -> I where I: Integral { if dvl::is_unsigned::() { return elem; } // Find the number of high bits that are not loaded. let shamt = bits_of::() - width; // Shift left, so that the highest loaded bit is now in the sign position. 
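	// For example, sign-extending the four bits `1010` loaded into an `i8`:
	// shamt = 8 - 4 = 4, so 0b0000_1010 << 4 == 0b1010_0000, and the
	// arithmetic shift back down yields 0b1111_1010, i.e. -6.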
let shl: I = elem << shamt; // Shift right with sign extension back to the original place. shl >> shamt } #[doc = include_str!("../doc/field/resize.md")] fn resize(value: T) -> U where T: Integral, U: Integral, { let mut out = U::ZERO; let size_t = mem::size_of::(); let size_u = mem::size_of::(); unsafe { resize_inner::(&value, &mut out, size_t, size_u); } out } /// Performs little-endian byte-order register resizing. #[cfg(target_endian = "little")] unsafe fn resize_inner( src: &T, dst: &mut U, size_t: usize, size_u: usize, ) { // In LE, the least-significant byte is the base address, so resizing is // just a `memmove` into a zeroed slot, taking only the lesser width. ptr::copy_nonoverlapping( src as *const T as *const u8, dst as *mut U as *mut u8, size_t.min(size_u), ); } /// Performs big-endian byte-order register resizing. #[cfg(target_endian = "big")] unsafe fn resize_inner( src: &T, dst: &mut U, size_t: usize, size_u: usize, ) { let src = src as *const T as *const u8; let dst = dst as *mut U as *mut u8; // In BE, shrinking a value requires moving the source base-pointer up in // memory (to a higher address, lower significance), if size_t > size_u { ptr::copy_nonoverlapping(src.add(size_t - size_u), dst, size_u); } // While expanding a value requires moving the *destination* base-pointer // up (and leaving the lower address, higher significance bytes zeroed). else { ptr::copy_nonoverlapping(src, dst.add(size_u - size_t), size_t); } } bitvec-1.0.1/src/index.rs000064400000000000000000001016351046102023000133510ustar 00000000000000#![doc = include_str!("../doc/index.md")] use core::{ any, fmt::{ self, Binary, Debug, Display, Formatter, }, iter::{ FusedIterator, Sum, }, marker::PhantomData, ops::{ BitAnd, BitOr, Not, }, }; use crate::{ mem::{ bits_of, BitRegister, }, order::BitOrder, }; #[repr(transparent)] #[doc = include_str!("../doc/index/BitIdx.md")] #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitIdx where R: BitRegister { /// Semantic index counter within a register, constrained to `0 .. R::BITS`. idx: u8, /// Marker for the register type. _ty: PhantomData, } impl BitIdx where R: BitRegister { /// The inclusive maximum index within an `R` element. pub const MAX: Self = Self { idx: R::MASK, _ty: PhantomData, }; /// The inclusive minimum index within an `R` element. pub const MIN: Self = Self { idx: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good index into an `R` register. /// /// ## Parameters /// /// - `idx`: The counter value to mark as an index. This must be in the /// range `0 .. R::BITS`. /// /// ## Returns /// /// This returns `idx`, either marked as a valid `BitIdx` or an invalid /// `BitIdxError` by whether it is within the valid range `0 .. R::BITS`. #[inline] pub fn new(idx: u8) -> Result> { if idx >= bits_of::() as u8 { return Err(BitIdxError::new(idx)); } Ok(unsafe { Self::new_unchecked(idx) }) } /// Wraps a counter value as an assumed-good index into an `R` register. /// /// ## Parameters /// /// - `idx`: The counter value to mark as an index. This must be in the /// range `0 .. R::BITS`. /// /// ## Returns /// /// This unconditionally marks `idx` as a valid bit-index. /// /// ## Safety /// /// If the `idx` value is outside the valid range, then the program is /// incorrect. Debug builds will panic; release builds do not inspect the /// value or specify a behavior. 
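	/// ## Examples
	///
	/// A sketch of sound usage, where the invariant is upheld by construction:
	///
	/// ```rust
	/// use bitvec::index::BitIdx;
	///
	/// // 7 is a valid bit-index within `u8`, so this call is sound.
	/// let idx = unsafe { BitIdx::<u8>::new_unchecked(7) };
	/// assert_eq!(idx.into_inner(), 7);
	/// ```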
#[inline] pub unsafe fn new_unchecked(idx: u8) -> Self { debug_assert!( idx < bits_of::() as u8, "Bit index {} cannot exceed type width {}", idx, bits_of::(), ); Self { idx, _ty: PhantomData, } } /// Removes the index wrapper, leaving the internal counter. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_inner(self) -> u8 { self.idx } /// Increments an index counter, wrapping at the back edge of the register. /// /// ## Parameters /// /// - `self`: The index to increment. /// /// ## Returns /// /// - `.0`: The next index after `self`. /// - `.1`: Indicates whether the new index is in the next memory address. #[inline] pub fn next(self) -> (Self, bool) { let next = self.idx + 1; ( unsafe { Self::new_unchecked(next & R::MASK) }, next == bits_of::() as u8, ) } /// Decrements an index counter, wrapping at the front edge of the register. /// /// ## Parameters /// /// - `self`: The index to decrement. /// /// ## Returns /// /// - `.0`: The previous index before `self`. /// - `.1`: Indicates whether the new index is in the previous memory /// address. #[inline] pub fn prev(self) -> (Self, bool) { let prev = self.idx.wrapping_sub(1); ( unsafe { Self::new_unchecked(prev & R::MASK) }, self.idx == 0, ) } /// Computes the bit position corresponding to `self` under some ordering. /// /// This forwards to [`O::at::`], which is the only public, safe, /// constructor for a position counter. /// /// [`O::at::`]: crate::order::BitOrder::at #[inline] #[cfg(not(tarpaulin_include))] pub fn position(self) -> BitPos where O: BitOrder { O::at::(self) } /// Computes the bit selector corresponding to `self` under an ordering. /// /// This forwards to [`O::select::`], which is the only public, safe, /// constructor for a bit selector. /// /// [`O::select::`]: crate::order::BitOrder::select #[inline] #[cfg(not(tarpaulin_include))] pub fn select(self) -> BitSel where O: BitOrder { O::select::(self) } /// Computes the bit selector for `self` as an accessor mask. /// /// This is a type-cast over [`Self::select`]. /// /// [`Self::select`]: Self::select #[inline] #[cfg(not(tarpaulin_include))] pub fn mask(self) -> BitMask where O: BitOrder { self.select::().mask() } /// Iterates over all indices between an inclusive start and exclusive end /// point. /// /// Because implementation details of the range type family, including the /// [`RangeBounds`] trait, are not yet stable, and heterogeneous ranges are /// not supported, this must be an opaque iterator rather than a direct /// [`Range>`]. /// /// # Parameters /// /// - `from`: The inclusive low bound of the range. This will be the first /// index produced by the iterator. /// - `upto`: The exclusive high bound of the range. The iterator will halt /// before yielding an index of this value. /// /// # Returns /// /// An opaque iterator that is equivalent to the range `from .. upto`. /// /// # Requirements /// /// `from` must be no greater than `upto`. /// /// [`RangeBounds`]: core::ops::RangeBounds /// [`Range>`]: core::ops::Range #[inline] pub fn range( self, upto: BitEnd, ) -> impl Iterator + DoubleEndedIterator + ExactSizeIterator + FusedIterator { let (from, upto) = (self.into_inner(), upto.into_inner()); debug_assert!(from <= upto, "Ranges must run from low to high"); (from .. upto).map(|val| unsafe { Self::new_unchecked(val) }) } /// Iterates over all possible index values. 
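	///
	/// For example (a sketch, not from the original docs): a `u8` register
	/// has exactly the eight indices `0 ..= 7`.
	///
	/// ```rust
	/// use bitvec::index::BitIdx;
	///
	/// assert_eq!(BitIdx::<u8>::range_all().count(), 8);
	/// ```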
#[inline] pub fn range_all() -> impl Iterator + DoubleEndedIterator + ExactSizeIterator + FusedIterator { BitIdx::MIN.range(BitEnd::MAX) } /// Computes the jump distance for some number of bits away from a starting /// index. /// /// This computes the number of elements by which to adjust a base pointer, /// and then the bit index of the destination bit in the new referent /// register element. /// /// # Parameters /// /// - `self`: An index within some element, from which the offset is /// computed. /// - `by`: The distance by which to jump. Negative values move lower in the /// index and element-pointer space; positive values move higher. /// /// # Returns /// /// - `.0`: The number of elements `R` by which to adjust a base pointer. /// This value can be passed directly into [`ptr::offset`]. /// - `.1`: The index of the destination bit within the destination element. /// /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub(crate) fn offset(self, by: isize) -> (isize, Self) { /* Signed-add `self.idx` to the jump distance. This will almost * certainly not wrap (as the crate imposes restrictions well below * `isize::MAX`), but correctness never hurts. The resulting sum is a * bit distance that is then broken into an element distance and final * bit index. */ let far = by.wrapping_add(self.into_inner() as isize); let (elts, head) = (far >> R::INDX, far as u8 & R::MASK); (elts, unsafe { Self::new_unchecked(head) }) } /// Computes the span information for a region beginning at `self` for `len` /// bits. /// /// The span information is the number of elements in the region that hold /// live bits, and the position of the tail marker after the live bits. /// /// This forwards to [`BitEnd::span`], as the computation is identical for /// the two types. Beginning a span at any `Idx` is equivalent to beginning /// it at the tail of a previous span. /// /// # Parameters /// /// - `self`: The start bit of the span. /// - `len`: The number of bits in the span. /// /// # Returns /// /// - `.0`: The number of elements, starting in the element that contains /// `self`, that contain live bits of the span. /// - `.1`: The tail counter of the span’s end point. /// /// [`BitEnd::span`]: crate::index::BitEnd::span pub(crate) fn span(self, len: usize) -> (usize, BitEnd) { unsafe { BitEnd::::new_unchecked(self.into_inner()) }.span(len) } } impl Binary for BitIdx where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{:0>1$b}", self.idx, R::INDX as usize) } } impl Debug for BitIdx where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "BitIdx<{}>({})", any::type_name::(), self) } } impl Display for BitIdx where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Binary::fmt(self, fmt) } } #[repr(transparent)] #[doc = include_str!("../doc/index/BitIdxError.md")] #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitIdxError where R: BitRegister { /// The value that is invalid as a [`BitIdx`]. /// /// [`BitIdx`]: crate::index::BitIdx err: u8, /// Marker for the register type. _ty: PhantomData, } impl BitIdxError where R: BitRegister { /// Marks a counter value as invalid to be an index for an `R` register. /// /// ## Parameters /// /// - `err`: The counter value to mark as an error. This must be greater /// than `R::BITS`. /// /// ## Returns /// /// This returns `err`, marked as an invalid index for `R`. 
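	///
	/// Downstream code typically encounters this value through
	/// `BitIdx::new`; a short sketch (not from the original docs):
	///
	/// ```rust
	/// use bitvec::index::BitIdx;
	///
	/// // `8` is out of range for a `u8` register, so the rejected counter
	/// // is preserved in the error wrapper.
	/// let err = BitIdx::<u8>::new(8).unwrap_err();
	/// assert_eq!(err.into_inner(), 8);
	/// ```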
/// /// ## Panics /// /// Debug builds panic when `err` is a valid index for `R`. pub(crate) fn new(err: u8) -> Self { debug_assert!( err >= bits_of::() as u8, "Bit index {} is valid for type width {}", err, bits_of::(), ); Self { err, _ty: PhantomData, } } /// Removes the error wrapper, leaving the internal counter. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_inner(self) -> u8 { self.err } } impl Debug for BitIdxError where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "BitIdxError<{}>({})", any::type_name::(), self.err) } } #[cfg(not(tarpaulin_include))] impl Display for BitIdxError where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!( fmt, "the value {} is too large to index into {} ({} bits wide)", self.err, any::type_name::(), bits_of::(), ) } } #[cfg(feature = "std")] impl std::error::Error for BitIdxError where R: BitRegister {} #[repr(transparent)] #[doc = include_str!("../doc/index/BitEnd.md")] #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitEnd where R: BitRegister { /// Semantic tail counter within or after a register, contained to `0 ..= /// R::BITS`. end: u8, /// Marker for the register type. _ty: PhantomData, } impl BitEnd where R: BitRegister { /// The inclusive maximum tail within (or after) an `R` element. pub const MAX: Self = Self { end: bits_of::() as u8, _ty: PhantomData, }; /// The inclusive minimum tail within (or after) an `R` element. pub const MIN: Self = Self { end: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good tail of an `R` register. /// /// ## Parameters /// /// - `end`: The counter value to mark as a tail. This must be in the range /// `0 ..= R::BITS`. /// /// ## Returns /// /// This returns `Some(end)` when it is in the valid range `0 ..= R::BITS`, /// and `None` when it is not. #[inline] pub fn new(end: u8) -> Option { if end > bits_of::() as u8 { return None; } Some(unsafe { Self::new_unchecked(end) }) } /// Wraps a counter value as an assumed-good tail of an `R` register. /// /// ## Parameters /// /// - `end`: The counter value to mark as a tail. This must be in the range /// `0 ..= R::BITS`. /// /// ## Returns /// /// This unconditionally marks `end` as a valid tail index. /// /// ## Safety /// /// If the `end` value is outside the valid range, then the program is /// incorrect. Debug builds will panic; release builds do not inspect the /// value or specify a behavior. pub(crate) unsafe fn new_unchecked(end: u8) -> Self { debug_assert!( end <= bits_of::() as u8, "Bit tail {} cannot exceed type width {}", end, bits_of::(), ); Self { end, _ty: PhantomData, } } /// Removes the tail wrapper, leaving the internal counter. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_inner(self) -> u8 { self.end } /// Iterates over all tail indices at and after an inclusive starting point. /// /// Because implementation details of the range type family, including the /// [`RangeBounds`] trait, are not yet stable, and heterogeneous ranges are /// not yet supported, this must be an opaque iterator rather than a direct /// [`Range>`]. /// /// # Parameters /// /// - `from`: The inclusive low bound of the range. This will be the first /// tail produced by the iterator. /// /// # Returns /// /// An opaque iterator that is equivalent to the range `from ..= /// Self::MAX`. 
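	///
	/// A sketch (not from the original docs), using a `u8` register whose
	/// tails span `0 ..= 8`:
	///
	/// ```rust
	/// use bitvec::index::{BitEnd, BitIdx};
	///
	/// let from = BitIdx::<u8>::new(1).unwrap();
	/// // Tails 1 through 8 inclusive: eight values in total.
	/// assert_eq!(BitEnd::<u8>::range_from(from).count(), 8);
	/// ```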
/// /// [`RangeBounds`]: core::ops::RangeBounds /// [`Range>`]: core::ops::Range #[inline] pub fn range_from( from: BitIdx, ) -> impl Iterator + DoubleEndedIterator + ExactSizeIterator + FusedIterator { (from.idx ..= Self::MAX.end) .map(|tail| unsafe { BitEnd::new_unchecked(tail) }) } /// Computes the span information for a region. /// /// The computed span of `len` bits begins at `self` and extends upwards in /// memory. The return value is the number of memory elements that contain /// bits of the span, and the first dead bit after the span. /// /// ## Parameters /// /// - `self`: A dead bit which is used as the first live bit of the new /// span. /// - `len`: The number of live bits in the span starting at `self`. /// /// ## Returns /// /// - `.0`: The number of `R` elements that contain live bits in the /// computed span. /// - `.1`: The dead-bit tail index ending the computed span. /// /// ## Behavior /// /// If `len` is `0`, this returns `(0, self)`, as the span has no live bits. /// If `self` is [`BitEnd::MAX`], then the new region starts at /// [`BitIdx::MIN`] in the next element. /// /// [`BitEnd::MAX`]: Self::MAX /// [`BitIdx::MIN`]: Self::MIN pub(crate) fn span(self, len: usize) -> (usize, Self) { if len == 0 { return (0, self); } let head = self.end & R::MASK; let bits_in_head = (bits_of::() as u8 - head) as usize; if len <= bits_in_head { return (1, unsafe { Self::new_unchecked(head + len as u8) }); } let bits_after_head = len - bits_in_head; let elts = bits_after_head >> R::INDX; let tail = bits_after_head as u8 & R::MASK; let is_zero = (tail == 0) as u8; let edges = 2 - is_zero as usize; (elts + edges, unsafe { Self::new_unchecked((is_zero << R::INDX) | tail) }) } } impl Binary for BitEnd where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{:0>1$b}", self.end, R::INDX as usize + 1) } } impl Debug for BitEnd where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "BitEnd<{}>({})", any::type_name::(), self) } } impl Display for BitEnd where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Binary::fmt(self, fmt) } } #[repr(transparent)] #[doc = include_str!("../doc/index/BitPos.md")] // #[rustc_layout_scalar_valid_range_end(R::BITS)] #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitPos where R: BitRegister { /// Electrical position counter within a register, constrained to `0 .. /// R::BITS`. pos: u8, /// Marker for the register type. _ty: PhantomData, } impl BitPos where R: BitRegister { /// The position value of the most significant bit in an `R` element. pub const MAX: Self = Self { pos: R::MASK as u8, _ty: PhantomData, }; /// The position value of the least significant bit in an `R` element. pub const MIN: Self = Self { pos: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good position within an `R` register. /// /// ## Parameters /// /// - `pos`: The counter value to mark as a position. This must be in the /// range `0 .. R::BITS`. /// /// ## Returns /// /// This returns `Some(pos)` when it is in the valid range `0 .. R::BITS`, /// and `None` when it is not. #[inline] pub fn new(pos: u8) -> Option { if pos >= bits_of::() as u8 { return None; } Some(unsafe { Self::new_unchecked(pos) }) } /// Wraps a counter value as an assumed-good position within an `R` /// register. /// /// ## Parameters /// /// - `value`: The counter value to mark as a position. This must be in the /// range `0 .. R::BITS`. 
/// /// ## Returns /// /// This unconditionally marks `pos` as a valid bit-position. /// /// ## Safety /// /// If the `pos` value is outside the valid range, then the program is /// incorrect. Debug builds will panic; release builds do not inspect the /// value or specify a behavior. #[inline] pub unsafe fn new_unchecked(pos: u8) -> Self { debug_assert!( pos < bits_of::() as u8, "Bit position {} cannot exceed type width {}", pos, bits_of::(), ); Self { pos, _ty: PhantomData, } } /// Removes the position wrapper, leaving the internal counter. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_inner(self) -> u8 { self.pos } /// Computes the bit selector corresponding to `self`. /// /// This is always `1 << self.pos`. #[inline] pub fn select(self) -> BitSel { unsafe { BitSel::new_unchecked(R::ONE << self.pos) } } /// Computes the bit selector for `self` as an accessor mask. /// /// This is a type-cast over [`Self::select`]. /// /// [`Self::select`]: Self::select #[inline] #[cfg(not(tarpaulin_include))] pub fn mask(self) -> BitMask { self.select().mask() } /// Iterates over all possible position values. pub(crate) fn range_all() -> impl Iterator + DoubleEndedIterator + ExactSizeIterator + FusedIterator { BitIdx::::range_all() .map(|idx| unsafe { Self::new_unchecked(idx.into_inner()) }) } } impl Binary for BitPos where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{:0>1$b}", self.pos, R::INDX as usize) } } impl Debug for BitPos where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "BitPos<{}>({})", any::type_name::(), self) } } impl Display for BitPos where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Binary::fmt(self, fmt) } } #[repr(transparent)] #[doc = include_str!("../doc/index/BitSel.md")] // #[rustc_layout_scalar_valid_range_end(R::BITS)] #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitSel where R: BitRegister { /// A one-hot selection mask. sel: R, } impl BitSel where R: BitRegister { /// Wraps a selector value as a known-good selection in an `R` register. /// /// ## Parameters /// /// - `sel`: A one-hot selection mask of a bit in an `R` register. /// /// ## Returns /// /// This returns `Some(sel)` when it is a power of two (exactly one bit set /// and all others cleared), and `None` when it is not. #[inline] pub fn new(sel: R) -> Option { if sel.count_ones() != 1 { return None; } Some(unsafe { Self::new_unchecked(sel) }) } /// Wraps a selector value as an assumed-good selection in an `R` register. /// /// ## Parameters /// /// - `sel`: A one-hot selection mask of a bit in an `R` register. /// /// ## Returns /// /// This unconditionally marks `sel` as a one-hot bit selector. /// /// ## Safety /// /// If the `sel` value has zero or multiple bits set, then it is invalid to /// be used as a `BitSel` and the program is incorrect. Debug builds will /// panic; release builds do not inspect the value or specify a behavior. #[inline] pub unsafe fn new_unchecked(sel: R) -> Self { debug_assert!( sel.count_ones() == 1, "Selections are required to have exactly one bit set: {:0>1$b}", sel, bits_of::() as usize, ); Self { sel } } /// Removes the one-hot selection wrapper, leaving the internal mask. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_inner(self) -> R { self.sel } /// Computes a bit-mask for `self`. This is a type-cast. 
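	///
	/// A sketch (not from the original docs): the cast preserves the
	/// underlying one-hot value.
	///
	/// ```rust
	/// use bitvec::index::BitSel;
	///
	/// let sel = BitSel::<u8>::new(0b0000_0100).unwrap();
	/// assert_eq!(sel.mask().into_inner(), 0b0000_0100);
	/// ```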
#[inline] #[cfg(not(tarpaulin_include))] pub fn mask(self) -> BitMask { BitMask::new(self.sel) } /// Iterates over all possible selector values. #[inline] pub fn range_all() -> impl Iterator + DoubleEndedIterator + ExactSizeIterator + FusedIterator { BitPos::::range_all().map(BitPos::select) } } impl Binary for BitSel where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{:0>1$b}", self.sel, bits_of::() as usize) } } impl Debug for BitSel where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "BitSel<{}>({})", any::type_name::(), self) } } impl Display for BitSel where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Binary::fmt(self, fmt) } } #[repr(transparent)] #[doc = include_str!("../doc/index/BitMask.md")] #[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask where R: BitRegister { /// A mask of any number of bits to select. mask: R, } impl BitMask where R: BitRegister { /// A full bit-mask with every bit set. pub const ALL: Self = Self { mask: R::ALL }; /// An empty bit-mask with every bit cleared. pub const ZERO: Self = Self { mask: R::ZERO }; /// Wraps any `R` value as a bit-mask. /// /// This constructor is provided to explicitly declare that an operation is /// discarding the numeric value of an integer and instead using it only as /// a bit-mask. /// /// ## Parameters /// /// - `mask`: Some integer to use as a bit-mask. /// /// ## Returns /// /// The `mask` value wrapped as a bit-mask, with its numeric context /// discarded. /// /// Prefer accumulating [`BitSel`] values using its `Sum` implementation. /// /// ## Safety /// /// The `mask` value must be computed from a set of valid bit positions in /// the caller’s context. /// /// [`BitSel`]: crate::index::BitSel #[inline] pub fn new(mask: R) -> Self { Self { mask } } /// Removes the mask wrapper, leaving the internal value. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_inner(self) -> R { self.mask } /// Tests if a mask contains a given selector bit. /// /// ## Parameters /// /// - `&self`: The mask being tested. /// - `sel`: A selector bit to test in `self`. /// /// ## Returns /// /// Whether `self` has set the bit that `sel` indicates. #[inline] pub fn test(&self, sel: BitSel) -> bool { self.mask & sel.sel != R::ZERO } /// Inserts a selector bit into a mask. /// /// ## Parameters /// /// - `&mut self`: The mask being modified. /// - `sel`: A selector bit to insert into `self`. /// /// ## Effects /// /// The `sel` bit is set in the mask. #[inline] pub fn insert(&mut self, sel: BitSel) { self.mask |= sel.sel; } /// Creates a new mask with a selector bit activated. /// /// ## Parameters /// /// - `self`: The original mask. /// - `sel`: The selector bit being added into the mask. /// /// ## Returns /// /// A new bit-mask with `sel` activated. 
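	///
	/// A sketch (not from the original docs) of accumulating two selectors:
	///
	/// ```rust
	/// use bitvec::index::{BitMask, BitSel};
	///
	/// let mask = BitMask::<u8>::ZERO
	/// 	.combine(BitSel::new(0b0000_0001).unwrap())
	/// 	.combine(BitSel::new(0b0000_0100).unwrap());
	/// assert_eq!(mask.into_inner(), 0b0000_0101);
	/// ```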
#[inline] pub fn combine(self, sel: BitSel) -> Self { Self { mask: self.mask | sel.sel, } } } impl Binary for BitMask where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{:0>1$b}", self.mask, bits_of::() as usize) } } impl Debug for BitMask where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "BitMask<{}>({})", any::type_name::(), self) } } impl Display for BitMask where R: BitRegister { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Binary::fmt(self, fmt) } } impl Sum> for BitMask where R: BitRegister { #[inline] fn sum(iter: I) -> Self where I: Iterator> { iter.fold(Self::ZERO, Self::combine) } } impl BitAnd for BitMask where R: BitRegister { type Output = Self; #[inline] fn bitand(self, rhs: R) -> Self::Output { Self { mask: self.mask & rhs, } } } impl BitOr for BitMask where R: BitRegister { type Output = Self; #[inline] fn bitor(self, rhs: R) -> Self::Output { Self { mask: self.mask | rhs, } } } impl Not for BitMask where R: BitRegister { type Output = Self; #[inline] fn not(self) -> Self::Output { Self { mask: !self.mask } } } #[cfg(test)] mod tests { use super::*; use crate::order::Lsb0; #[test] fn index_ctors() { for n in 0 .. 8 { assert!(BitIdx::::new(n).is_ok()); } assert!(BitIdx::::new(8).is_err()); for n in 0 .. 16 { assert!(BitIdx::::new(n).is_ok()); } assert!(BitIdx::::new(16).is_err()); for n in 0 .. 32 { assert!(BitIdx::::new(n).is_ok()); } assert!(BitIdx::::new(32).is_err()); #[cfg(target_pointer_width = "64")] { for n in 0 .. 64 { assert!(BitIdx::::new(n).is_ok()); } assert!(BitIdx::::new(64).is_err()); } for n in 0 .. bits_of::() as u8 { assert!(BitIdx::::new(n).is_ok()); } assert!(BitIdx::::new(bits_of::() as u8).is_err()); } #[test] fn tail_ctors() { for n in 0 ..= 8 { assert!(BitEnd::::new(n).is_some()); } assert!(BitEnd::::new(9).is_none()); for n in 0 ..= 16 { assert!(BitEnd::::new(n).is_some()); } assert!(BitEnd::::new(17).is_none()); for n in 0 ..= 32 { assert!(BitEnd::::new(n).is_some()); } assert!(BitEnd::::new(33).is_none()); #[cfg(target_pointer_width = "64")] { for n in 0 ..= 64 { assert!(BitEnd::::new(n).is_some()); } assert!(BitEnd::::new(65).is_none()); } for n in 0 ..= bits_of::() as u8 { assert!(BitEnd::::new(n).is_some()); } assert!(BitEnd::::new(bits_of::() as u8 + 1).is_none()); } #[test] fn position_ctors() { for n in 0 .. 8 { assert!(BitPos::::new(n).is_some()); } assert!(BitPos::::new(8).is_none()); for n in 0 .. 16 { assert!(BitPos::::new(n).is_some()); } assert!(BitPos::::new(16).is_none()); for n in 0 .. 32 { assert!(BitPos::::new(n).is_some()); } assert!(BitPos::::new(32).is_none()); #[cfg(target_pointer_width = "64")] { for n in 0 .. 64 { assert!(BitPos::::new(n).is_some()); } assert!(BitPos::::new(64).is_none()); } for n in 0 .. bits_of::() as u8 { assert!(BitPos::::new(n).is_some()); } assert!(BitPos::::new(bits_of::() as u8).is_none()); } #[test] fn select_ctors() { for n in 0 .. 8 { assert!(BitSel::::new(1 << n).is_some()); } assert!(BitSel::::new(0).is_none()); assert!(BitSel::::new(3).is_none()); for n in 0 .. 16 { assert!(BitSel::::new(1 << n).is_some()); } assert!(BitSel::::new(0).is_none()); assert!(BitSel::::new(3).is_none()); for n in 0 .. 32 { assert!(BitSel::::new(1 << n).is_some()); } assert!(BitSel::::new(0).is_none()); assert!(BitSel::::new(3).is_none()); #[cfg(target_pointer_width = "64")] { for n in 0 .. 
64 { assert!(BitSel::::new(1 << n).is_some()); } assert!(BitSel::::new(0).is_none()); assert!(BitSel::::new(3).is_none()); } for n in 0 .. bits_of::() as u8 { assert!(BitSel::::new(1 << n).is_some()); } assert!(BitSel::::new(0).is_none()); assert!(BitSel::::new(3).is_none()); } #[test] fn ranges() { let mut range = BitIdx::::range_all(); assert_eq!(range.next(), BitIdx::new(0).ok()); assert_eq!(range.next_back(), BitIdx::new(15).ok()); assert_eq!(range.count(), 14); let mut range = BitEnd::::range_from(BitIdx::new(1).unwrap()); assert_eq!(range.next(), BitEnd::new(1)); assert_eq!(range.next_back(), BitEnd::new(8)); assert_eq!(range.count(), 6); let mut range = BitPos::::range_all(); assert_eq!(range.next(), BitPos::new(0)); assert_eq!(range.next_back(), BitPos::new(7)); assert_eq!(range.count(), 6); let mut range = BitSel::::range_all(); assert_eq!(range.next(), BitSel::new(1)); assert_eq!(range.next_back(), BitSel::new(128)); assert_eq!(range.count(), 6); } #[test] fn index_cycle() { let six = BitIdx::::new(6).unwrap(); let (seven, step) = six.next(); assert_eq!(seven, BitIdx::new(7).unwrap()); assert!(!step); let (zero, step) = seven.next(); assert_eq!(zero, BitIdx::MIN); assert!(step); let (seven, step) = zero.prev(); assert_eq!(seven, BitIdx::new(7).unwrap()); assert!(step); let (six, step) = seven.prev(); assert_eq!(six, BitIdx::new(6).unwrap()); assert!(!step); let fourteen = BitIdx::::new(14).unwrap(); let (fifteen, step) = fourteen.next(); assert_eq!(fifteen, BitIdx::new(15).unwrap()); assert!(!step); let (zero, step) = fifteen.next(); assert_eq!(zero, BitIdx::MIN); assert!(step); let (fifteen, step) = zero.prev(); assert_eq!(fifteen, BitIdx::new(15).unwrap()); assert!(step); let (fourteen, step) = fifteen.prev(); assert_eq!(fourteen, BitIdx::new(14).unwrap()); assert!(!step); } #[test] fn jumps() { let (jump, head) = BitIdx::::new(1).unwrap().offset(2); assert_eq!(jump, 0); assert_eq!(head, BitIdx::new(3).unwrap()); let (jump, head) = BitIdx::::MAX.offset(1); assert_eq!(jump, 1); assert_eq!(head, BitIdx::MIN); let (jump, head) = BitIdx::::new(10).unwrap().offset(40); // 10 is in 0..16; 10+40 is in 48..64 assert_eq!(jump, 3); assert_eq!(head, BitIdx::new(2).unwrap()); // .offset() wraps at the `isize` boundary let (jump, head) = BitIdx::::MAX.offset(isize::MAX); assert_eq!(jump, -(((isize::MAX as usize + 1) >> 3) as isize)); assert_eq!(head, BitIdx::MAX.prev().0); let (elts, tail) = BitIdx::::new(4).unwrap().span(0); assert_eq!(elts, 0); assert_eq!(tail, BitEnd::new(4).unwrap()); let (elts, tail) = BitIdx::::new(3).unwrap().span(3); assert_eq!(elts, 1); assert_eq!(tail, BitEnd::new(6).unwrap()); let (elts, tail) = BitIdx::::new(10).unwrap().span(40); assert_eq!(elts, 4); assert_eq!(tail, BitEnd::new(2).unwrap()); } #[test] fn mask_operators() { let mut mask = BitIdx::::new(2) .unwrap() .range(BitEnd::new(5).unwrap()) .map(BitIdx::select::) .sum::>(); assert_eq!(mask, BitMask::new(28)); assert_eq!(mask & 25, BitMask::new(24)); assert_eq!(mask | 32, BitMask::new(60)); assert_eq!(!mask, BitMask::new(!28)); let yes = BitSel::::new(16).unwrap(); let no = BitSel::::new(64).unwrap(); assert!(mask.test(yes)); assert!(!mask.test(no)); mask.insert(no); assert!(mask.test(no)); } #[test] #[cfg(feature = "alloc")] fn render() { #[cfg(not(feature = "std"))] use alloc::format; assert_eq!(format!("{:?}", BitIdx::::MAX), "BitIdx(111)"); assert_eq!(format!("{:?}", BitIdx::::MAX), "BitIdx(1111)"); assert_eq!(format!("{:?}", BitIdx::::MAX), "BitIdx(11111)"); assert_eq!( format!("{:?}", 
BitIdx::::new(8).unwrap_err()), "BitIdxError(8)" ); assert_eq!( format!("{:?}", BitIdx::::new(16).unwrap_err()), "BitIdxError(16)" ); assert_eq!( format!("{:?}", BitIdx::::new(32).unwrap_err()), "BitIdxError(32)" ); assert_eq!(format!("{:?}", BitEnd::::MAX), "BitEnd(1000)"); assert_eq!(format!("{:?}", BitEnd::::MAX), "BitEnd(10000)"); assert_eq!(format!("{:?}", BitEnd::::MAX), "BitEnd(100000)"); assert_eq!(format!("{:?}", BitPos::::MAX), "BitPos(111)"); assert_eq!(format!("{:?}", BitPos::::MAX), "BitPos(1111)"); assert_eq!(format!("{:?}", BitPos::::MAX), "BitPos(11111)"); assert_eq!( format!("{:?}", BitSel::::new(1).unwrap()), "BitSel(00000001)", ); assert_eq!( format!("{:?}", BitSel::::new(1).unwrap()), "BitSel(0000000000000001)", ); assert_eq!( format!("{:?}", BitSel::::new(1).unwrap()), "BitSel(00000000000000000000000000000001)", ); assert_eq!( format!("{:?}", BitMask::::new(1 | 4 | 32)), "BitMask(00100101)", ); assert_eq!( format!("{:?}", BitMask::::new(1 | 4 | 32)), "BitMask(0000000000100101)", ); assert_eq!( format!("{:?}", BitMask::::new(1 | 4 | 32)), "BitMask(00000000000000000000000000100101)", ); #[cfg(target_pointer_width = "64")] { assert_eq!( format!("{:?}", BitIdx::::MAX), "BitIdx(111111)", ); assert_eq!( format!("{:?}", BitIdx::::new(64).unwrap_err()), "BitIdxError(64)", ); assert_eq!( format!("{:?}", BitEnd::::MAX), "BitEnd(1000000)", ); assert_eq!( format!("{:?}", BitPos::::MAX), "BitPos(111111)", ); assert_eq!( format!("{:?}", BitSel::::new(1).unwrap()), "BitSel(0000000000000000000000000000000000000000000000000000000000000001)", ); assert_eq!( format!("{:?}", BitMask::::new(1 | 4 | 32)), "BitMask(0000000000000000000000000000000000000000000000000000000000100101)", ); } } } bitvec-1.0.1/src/lib.rs000064400000000000000000000022551046102023000130060ustar 00000000000000#![doc = include_str!("../README.md")] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr( debug_assertions, warn(missing_docs, clippy::missing_docs_in_private_items) )] #![cfg_attr( not(debug_assertions), deny(missing_docs, clippy::missing_docs_in_private_items) )] #![deny(unconditional_recursion)] #![allow( clippy::declare_interior_mutable_const, clippy::type_complexity, unknown_lints )] #[cfg(feature = "alloc")] extern crate alloc; #[macro_use] mod devel; #[macro_use] pub mod macros; pub mod access; pub mod array; pub mod boxed; pub mod domain; pub mod field; pub mod index; pub mod mem; pub mod order; pub mod ptr; mod serdes; pub mod slice; pub mod store; pub mod vec; pub mod view; #[doc = include_str!("../doc/prelude.md")] pub mod prelude { pub use crate::{ array::BitArray, bitarr, bits, field::BitField as _, order::{ BitOrder, LocalBits, Lsb0, Msb0, }, ptr::{ BitPtr, BitPtrRange, BitRef, }, slice::BitSlice, store::BitStore, view::{ AsBits, AsMutBits, BitView as _, BitViewSized as _, }, BitArr, }; #[cfg(feature = "alloc")] pub use crate::{ bitbox, bitvec, boxed::BitBox, vec::BitVec, }; } bitvec-1.0.1/src/macros/internal.rs000064400000000000000000000272071046102023000153440ustar 00000000000000#![doc(hidden)] #![doc = include_str!("../../doc/macros/internal.md")] // Provide known mount-points of dependency crates. #[doc(hidden)] pub use core; #[doc(hidden)] pub use funty; #[doc(hidden)] #[macro_export] #[doc = include_str!("../../doc/macros/encode_bits.md")] macro_rules! __encode_bits { /* ENTRY POINTS * * These arms match the syntax provided by the public macros, and dispatch * by storage type width. 
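 *
 * For example (illustrative): a public-macro invocation such as
 * `bits![u8, Msb0; 1, 0]` bottoms out in the `(u8, $ord:tt; …)` arm here,
 * which re-dispatches to the width-generic `u8 as u8` encoder below.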
*/ (u8, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(u8 as u8, $ord; $($val),*) }; (Cell, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(Cell as u8, $ord; $($val),*) }; (AtomicU8, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(AtomicU8 as u8, $ord; $($val),*) }; (RadiumU8, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(RadiumU8 as u8, $ord; $($val),*) }; (u16, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(u16 as u16, $ord; $($val),*) }; (Cell, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(Cell as u16, $ord; $($val),*) }; (AtomicU16, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(AtomicU16 as u16, $ord; $($val),*) }; (RadiumU16, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(RadiumU16 as u16, $ord; $($val),*) }; (u32, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(u32 as u32, $ord; $($val),*) }; (Cell, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(Cell as u32, $ord; $($val),*) }; (AtomicU32, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(AtomicU32 as u32, $ord; $($val),*) }; (RadiumU32, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(RadiumU32 as u32, $ord; $($val),*) }; (u64, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(u64 as u64, $ord; $($val),*) }; (Cell, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(Cell as u64, $ord; $($val),*) }; (AtomicU64, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(AtomicU64 as u64, $ord; $($val),*) }; (RadiumU64, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(RadiumU64 as u64, $ord; $($val),*) }; (usize, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(usize as usize, $ord; $($val),*) }; (Cell, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(Cell as usize, $ord; $($val),*) }; (AtomicUsize, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(AtomicUsize as usize, $ord; $($val),*) }; (RadiumUsize, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!(RadiumUsize as usize, $ord; $($val),*) }; // This arm routes `usize` into `u32` or `u64`, depending on target, and // marks them to return to `usize` after chunking. ($typ:ty as usize, $ord:tt; $($val:expr),*) => {{ const LEN: usize = $crate::__count_elts!(usize; $($val),*); let out: [$typ; LEN]; #[cfg(target_pointer_width = "32")] { out = $crate::__encode_bits!($typ as u32 as usize, $ord; $($val),*); } #[cfg(target_pointer_width = "64")] { out = $crate::__encode_bits!($typ as u64 as usize, $ord; $($val),*); } out }}; // ZERO EXTENSION: Supply literal `0, ` tokens to ensure that elements can // be completely filled with bits. ($typ:ty as $uint:ident $(as $usz:ident)?, $ord:tt; $($val:expr),*) => { $crate::__encode_bits!( $typ as $uint $(as $usz)?, $ord; []; $($val,)* 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 32 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 48 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 64 ) }; /* EXIT POINT. * * This arm enters once the only remaining bit-expression tokens are the * literal `0, `s provided above. It does not enter while any opaque * user-provided bit expressions remain, and matching falls through to the * chunkers, below. * * Once entered, this converts each chunk of bit expressions into the * requested storage element, then emits an array of the encoded elements. * This array is the final value of the originally-invoked macro. The * invoker is responsible for turning the array into a `bitvec` type. 
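 *
 * For example (illustrative): `__encode_bits!(u8 as u8, Lsb0; 1, 0, 1)`
 * zero-extends the three bits with the literal `0`s supplied above, chunks
 * the first eight into one group, and emits the one-element array `[5u8]`
 * (bits 0 and 2 set, reading least-significant-first).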
*/ ( $typ:ty as $uint:ident as usize, $ord:tt; [$([$($bit:tt,)+],)*]; $(0,)* ) => { [$($crate::__make_elem!($typ as $uint as usize, $ord; $($bit,)+),)*] }; ( $typ:ty as $uint:ident, $ord:tt; [$([$($bit:tt,)+],)*]; $(0,)* ) => { [$($crate::__make_elem!($typ as $uint, $ord; $($bit,)+),)*] }; /* CHUNKERS * * These arms munch through the token stream, creating a sequence of chunks * of bits. Each chunk contains bits to exactly fill one element, and gets * passed into `__make_elem!` for final encoding. */ ( $typ:ty as u8, $ord:tt; [$($elem:tt)*]; $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt, $($t:tt)* ) => { $crate::__encode_bits!( $typ as u8, $ord; [$($elem)* [ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0, ],]; $($t)* ) }; ( $typ:ty as u16, $ord:tt; [$($elem:tt)*]; $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt, $a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt, $($t:tt)* ) => { $crate::__encode_bits!( $typ as u16, $ord; [$($elem)* [ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0, $a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1, ],]; $($t)* ) }; ( $typ:ty as u32 $(as $usz:ident)?, $ord:tt; [$($elem:tt)*]; $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt, $a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt, $a2:tt, $b2:tt, $c2:tt, $d2:tt, $e2:tt, $f2:tt, $g2:tt, $h2:tt, $a3:tt, $b3:tt, $c3:tt, $d3:tt, $e3:tt, $f3:tt, $g3:tt, $h3:tt, $($t:tt)* ) => { $crate::__encode_bits!( $typ as u32 $(as $usz)?, $ord; [$($elem)* [ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0, $a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1, $a2, $b2, $c2, $d2, $e2, $f2, $g2, $h2, $a3, $b3, $c3, $d3, $e3, $f3, $g3, $h3, ],]; $($t)* ) }; ( $typ:ty as u64 $(as $usz:ident)?, $ord:tt; [$($elem:tt)*]; $a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt, $a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt, $a2:tt, $b2:tt, $c2:tt, $d2:tt, $e2:tt, $f2:tt, $g2:tt, $h2:tt, $a3:tt, $b3:tt, $c3:tt, $d3:tt, $e3:tt, $f3:tt, $g3:tt, $h3:tt, $a4:tt, $b4:tt, $c4:tt, $d4:tt, $e4:tt, $f4:tt, $g4:tt, $h4:tt, $a5:tt, $b5:tt, $c5:tt, $d5:tt, $e5:tt, $f5:tt, $g5:tt, $h5:tt, $a6:tt, $b6:tt, $c6:tt, $d6:tt, $e6:tt, $f6:tt, $g6:tt, $h6:tt, $a7:tt, $b7:tt, $c7:tt, $d7:tt, $e7:tt, $f7:tt, $g7:tt, $h7:tt, $($t:tt)* ) => { $crate::__encode_bits!( $typ as u64 $(as $usz)?, $ord; [$($elem)* [ $a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0, $a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1, $a2, $b2, $c2, $d2, $e2, $f2, $g2, $h2, $a3, $b3, $c3, $d3, $e3, $f3, $g3, $h3, $a4, $b4, $c4, $d4, $e4, $f4, $g4, $h4, $a5, $b5, $c5, $d5, $e5, $f5, $g5, $h5, $a6, $b6, $c6, $d6, $e6, $f6, $g6, $h6, $a7, $b7, $c7, $d7, $e7, $f7, $g7, $h7, ],]; $($t)* ) }; } /// Counts the number of expression tokens in a repetition sequence. #[doc(hidden)] #[macro_export] macro_rules! __count { (@ $val:expr) => { 1 }; ($($val:expr),* $(,)?) => {{ const LEN: usize = 0 $(+ $crate::__count!(@ $val))*; LEN }}; } /// Counts the number of storage elements needed to store a bit sequence. #[doc(hidden)] #[macro_export] macro_rules! __count_elts { ($t:ty; $($val:expr),*) => { $crate::mem::elts::<$t>($crate::__count!($($val),*)) }; } #[doc(hidden)] #[macro_export] #[doc = include_str!("../../doc/macros/make_elem.md")] macro_rules! __make_elem { // Token-matching ordering names can use specialized work. 
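	// For example (illustrative, mirroring this crate's unit tests):
	// `__make_elem!(u8 as u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0)` assembles the
	// byte `0b0011_0101 == 53` entirely in `const` position.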
	($typ:ty as $uint:ident $(as $usz:ident)?, Lsb0; $(
		$a:expr, $b:expr, $c:expr, $d:expr,
		$e:expr, $f:expr, $g:expr, $h:expr,
	)*) => {{
		const ELEM: $uint = $crate::__ty_from_bytes!(
			$uint, Lsb0, [$($crate::macros::internal::u8_from_le_bits(
				$a != 0, $b != 0, $c != 0, $d != 0,
				$e != 0, $f != 0, $g != 0, $h != 0,
			)),*]
		);
		$crate::mem::BitElement::<$typ>::new(ELEM $(as $usz)?).elem
	}};
	($typ:ty as $uint:ident $(as $usz:ident)?, Msb0; $(
		$a:expr, $b:expr, $c:expr, $d:expr,
		$e:expr, $f:expr, $g:expr, $h:expr,
	)*) => {{
		const ELEM: $uint = $crate::__ty_from_bytes!(
			$uint, Msb0, [$($crate::macros::internal::u8_from_be_bits(
				$a != 0, $b != 0, $c != 0, $d != 0,
				$e != 0, $f != 0, $g != 0, $h != 0,
			)),*]
		);
		$crate::mem::BitElement::<$typ>::new(ELEM $(as $usz)?).elem
	}};
	($typ:ty as $uint:ident $(as $usz:ident)?, LocalBits; $(
		$a:expr, $b:expr, $c:expr, $d:expr,
		$e:expr, $f:expr, $g:expr, $h:expr,
	)*) => {{
		const ELEM: $uint = $crate::__ty_from_bytes!(
			$uint, LocalBits, [$($crate::macros::internal::u8_from_ne_bits(
				$a != 0, $b != 0, $c != 0, $d != 0,
				$e != 0, $f != 0, $g != 0, $h != 0,
			)),*]
		);
		$crate::mem::BitElement::<$typ>::new(ELEM $(as $usz)?).elem
	}};
	// Otherwise, invoke `BitOrder` for each bit and accumulate.
	($typ:ty as $uint:ident $(as $usz:ident)?, $ord:tt; $($bit:expr),* $(,)?) => {{
		let mut tmp: $uint = 0;
		let _bits = $crate::slice::BitSlice::<$uint, $ord>::from_element_mut(
			&mut tmp
		);
		let mut _idx = 0;
		$( _bits.set(_idx, $bit != 0); _idx += 1; )*
		$crate::mem::BitElement::<$typ>::new(tmp $(as $usz)?).elem
	}};
}

/// Translates `false` into `0` and `true` into `!0`.
#[doc(hidden)]
#[macro_export]
macro_rules! __extend_bool {
	($val:expr, $typ:tt) => {{
		type Mem = <$typ as $crate::store::BitStore>::Mem;
		if $val != 0 {
			<Mem as $crate::mem::BitRegister>::ALL
		}
		else {
			<Mem as $crate::macros::internal::funty::Integral>::ZERO
		}
	}};
}

/// Constructs an unsigned integer from a list of *bytes*.
#[doc(hidden)]
#[macro_export]
macro_rules! __ty_from_bytes {
	(u8, Msb0, [$($byte:expr),*]) => { u8::from_be_bytes([$($byte),*]) };
	(u8, Lsb0, [$($byte:expr),*]) => { u8::from_le_bytes([$($byte),*]) };
	(u8, LocalBits, [$($byte:expr),*]) => { u8::from_ne_bytes([$($byte),*]) };
	(u16, Msb0, [$($byte:expr),*]) => { u16::from_be_bytes([$($byte),*]) };
	(u16, Lsb0, [$($byte:expr),*]) => { u16::from_le_bytes([$($byte),*]) };
	(u16, LocalBits, [$($byte:expr),*]) => { u16::from_ne_bytes([$($byte),*]) };
	(u32, Msb0, [$($byte:expr),*]) => { u32::from_be_bytes([$($byte),*]) };
	(u32, Lsb0, [$($byte:expr),*]) => { u32::from_le_bytes([$($byte),*]) };
	(u32, LocalBits, [$($byte:expr),*]) => { u32::from_ne_bytes([$($byte),*]) };
	(u64, Msb0, [$($byte:expr),*]) => { u64::from_be_bytes([$($byte),*]) };
	(u64, Lsb0, [$($byte:expr),*]) => { u64::from_le_bytes([$($byte),*]) };
	(u64, LocalBits, [$($byte:expr),*]) => { u64::from_ne_bytes([$($byte),*]) };
	(usize, Msb0, [$($byte:expr),*]) => { usize::from_be_bytes([$($byte),*]) };
	(usize, Lsb0, [$($byte:expr),*]) => { usize::from_le_bytes([$($byte),*]) };
	(usize, LocalBits, [$($byte:expr),*]) => { usize::from_ne_bytes([$($byte),*]) };
}

/// Constructs a `u8` from bits applied in `Lsb0` order (`a` low, `h` high).
#[doc(hidden)]
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub const fn u8_from_le_bits(
	a: bool,
	b: bool,
	c: bool,
	d: bool,
	e: bool,
	f: bool,
	g: bool,
	h: bool,
) -> u8 {
	(a as u8)
		| ((b as u8) << 1)
		| ((c as u8) << 2)
		| ((d as u8) << 3)
		| ((e as u8) << 4)
		| ((f as u8) << 5)
		| ((g as u8) << 6)
		| ((h as u8) << 7)
}

/// Constructs a `u8` from bits applied in `Msb0` order (`a` high, `h` low).
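///
/// For example (illustrative): `u8_from_be_bits(true, false, false, false,
/// false, false, false, true)` sets the highest and lowest bits, producing
/// `0b1000_0001`.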
#[doc(hidden)] #[inline(always)] #[cfg(not(tarpaulin_include))] pub const fn u8_from_be_bits( a: bool, b: bool, c: bool, d: bool, e: bool, f: bool, g: bool, h: bool, ) -> u8 { (h as u8) | ((g as u8) << 1) | ((f as u8) << 2) | ((e as u8) << 3) | ((d as u8) << 4) | ((c as u8) << 5) | ((b as u8) << 6) | ((a as u8) << 7) } #[doc(hidden)] #[cfg(target_endian = "big")] pub use self::u8_from_be_bits as u8_from_ne_bits; #[doc(hidden)] #[cfg(target_endian = "little")] pub use self::u8_from_le_bits as u8_from_ne_bits; bitvec-1.0.1/src/macros/tests.rs000064400000000000000000000564151046102023000146750ustar 00000000000000//! Invocation tests of each supported constructor-macro syntax. #![cfg(test)] use core::{ cell::Cell, sync::atomic::*, }; use radium::types::*; use crate::{ mem::bits_of, prelude::*, }; #[test] fn compile_bitarr_typedef() { #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] struct Slots { all: BitArr!(for 10, in u8, Msb0), typ: BitArr!(for 10, in u8), def: BitArr!(for 10), } static SLOTS: Slots = Slots { all: bitarr!(const u8, Msb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), typ: bitarr!(const u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), def: bitarr!(const 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), }; let slots = Slots { all: bitarr!(u8, Msb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), typ: bitarr!(u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), def: bitarr!(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), }; assert_eq!(SLOTS, slots); assert_eq!(slots.all.into_inner(), [!0u8, 192]); assert_eq!(slots.typ.into_inner(), [!0u8, 3]); let def: [usize; 1] = slots.def.into_inner(); assert_eq!(def[0].count_ones(), 10); } #[test] fn constexpr_macros() { const A: BitArr!(for 20, in Cell, Lsb0) = bitarr!(const Cell, Lsb0; 1; 20); let a = A; assert_eq!(a.len(), 24); assert!(a.all()); const B: BitArr!(for 20) = bitarr!(const 1; 20); let b = B; assert_eq!(b.len(), bits_of::()); assert!(b.all()); const C: BitArr!(for 5, in Cell, Msb0) = bitarr!(const Cell, Msb0; 1, 0, 1, 1, 0); let c = C; assert_eq!(c[.. 5], bits![1, 0, 1, 1, 0]); const D: BitArr!(for 5, in u32, Lsb0) = bitarr!(const u32, Lsb0; 1, 0, 1, 1, 0); let d = D; assert_eq!(d[.. 
5], bits![1, 0, 1, 1, 0]); let _: &'static mut BitSlice, Msb0> = unsafe { bits!(static mut Cell, Msb0; 1; 20) }; let _: &'static mut BitSlice = unsafe { bits!(static mut u32, Lsb0; 1; 20) }; let _: &'static mut BitSlice = unsafe { bits!(static mut 1; 20) }; let _: &'static mut BitSlice, Msb0> = unsafe { bits!(static mut Cell, Msb0; 1, 0, 1, 1, 0) }; let _: &'static mut BitSlice, Msb0> = unsafe { bits!(static mut Cell, Msb0; 1, 0, 1, 1, 0) }; let _: &'static mut BitSlice = unsafe { bits!(static mut 1, 0, 1, 1, 0) }; let _: &'static BitSlice, Msb0> = bits!(static Cell, Msb0; 1; 20); let _: &'static BitSlice = bits!(static u32, Lsb0; 1, 0, 1, 1, 0); let _: &'static BitSlice = bits!(static 1; 20); let _: &'static BitSlice, Msb0> = bits!(static Cell, Msb0; 1, 0, 1, 1, 0); let _: &'static BitSlice = bits!(static u32, Msb0; 1, 0, 1, 1, 0); let _: &'static BitSlice = bits!(static 1, 0, 1, 1, 0); } #[test] fn compile_bitarr() { let uint: BitArray<[u8; 1], Lsb0> = bitarr![u8, Lsb0; 1, 0, 1, 0]; assert_eq!(uint.into_inner(), [5u8]); let cell: BitArray<[Cell; 1], Lsb0> = bitarr![Cell, Lsb0; 1, 0, 1, 0]; assert_eq!(cell.into_inner()[0].get(), 5u8); let uint: BitArray<[u16; 2], Msb0> = bitarr![u16, Msb0; 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, ]; assert_eq!(uint.into_inner(), [0x5569, 0x6E74]); let cell: BitArray<[Cell; 2], Msb0> = bitarr![Cell, Msb0; 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, ]; let cells = cell.into_inner(); assert_eq!(cells[0].get(), 0x5569); assert_eq!(cells[1].get(), 0x6E74); let uint: BitArray<[u32; 1], Lsb0> = bitarr![u32, crate::order::Lsb0; 1, 0, 1, 1]; assert_eq!(uint.into_inner(), [13u32]); let cell: BitArray<[Cell; 1], Lsb0> = bitarr![Cell, crate::order::Lsb0; 1, 0, 1, 1]; assert_eq!(cell.into_inner()[0].get(), 13u32); #[cfg(target_pointer_width = "64")] { let uint: BitArray<[u64; 2], LocalBits> = bitarr![u64, LocalBits; 1; 70]; assert_eq!(uint.into_inner(), [!0u64; 2]); let cell: BitArray<[Cell; 2], LocalBits> = bitarr![Cell, LocalBits; 1; 70]; assert_eq!(cell.clone().into_inner()[0].get(), !0u64); assert_eq!(cell.into_inner()[1].get(), !0u64); } let uint: BitArray<[usize; 1], Lsb0> = bitarr![1, 0, 1]; assert_eq!(uint.into_inner(), [5usize]); let uint: BitArray<[usize; 1], Lsb0> = bitarr![1;30]; assert_eq!(uint.into_inner(), [!0usize]); } #[test] #[allow(clippy::many_single_char_names)] fn compile_bits() { let a: &mut BitSlice, Lsb0> = bits![mut Cell, Lsb0; 1, 0, 1]; let b: &mut BitSlice = bits![mut u8, Lsb0; 1, 0, 1]; let c: &mut BitSlice, Msb0> = bits![mut Cell, crate::order::Msb0; 1, 0, 1]; let d: &mut BitSlice = bits![mut u8, crate::order::Msb0; 1, 0, 1]; assert_eq!(a, c); assert_eq!(b, d); let e: &mut BitSlice, Lsb0> = bits![mut Cell, Lsb0; 1; 100]; let f: &mut BitSlice = bits![mut u8, Lsb0; 1; 100]; let g: &mut BitSlice, Msb0> = bits![mut Cell, crate::order::Msb0; 1; 100]; let h: &mut BitSlice = bits![mut u8, crate::order::Msb0; 1; 100]; assert_eq!(e, g); assert_eq!(f, h); assert!(h.domain().take(12).all(|e| e == !0)); assert_eq!(h.domain().next_back().unwrap(), 0b1111_0000); assert_eq!(h.domain().len(), 13); let i: &mut BitSlice = bits![mut 1, 0, 1]; let j: &mut BitSlice = bits![mut 1; 3]; j.set(1, false); assert_eq!(i, j); let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1, 0, 1]; let _: &BitSlice = bits![u8, Lsb0; 1, 0, 1]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice = bits![u8, crate::order::Msb0; 1, 0, 1]; let _: 
&BitSlice, Lsb0> = bits![Cell, Lsb0; 1; 100]; let _: &BitSlice = bits![u8, Lsb0; 1; 100]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1; 100]; let _: &BitSlice = bits![u8, crate::order::Msb0; 1; 100]; let _: &BitSlice = bits![1, 0, 1]; let _: &BitSlice = bits![1; 100]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1, 0, 1]; let _: &BitSlice = bits![u16, Lsb0; 1, 0, 1]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice = bits![u16, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1; 100]; let _: &BitSlice = bits![u16, Lsb0; 1; 100]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1; 100]; let _: &BitSlice = bits![u16, crate::order::Msb0; 1; 100]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1, 0, 1]; let _: &BitSlice = bits![u32, Lsb0; 1, 0, 1]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice = bits![u32, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1; 100]; let _: &BitSlice = bits![u32, Lsb0; 1; 100]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1; 100]; let _: &BitSlice = bits![u32, crate::order::Msb0; 1; 100]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1, 0, 1]; let _: &BitSlice = bits![usize, Lsb0; 1, 0, 1]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice = bits![usize, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1; 100]; let _: &BitSlice = bits![usize, Lsb0; 1; 100]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1; 100]; let _: &BitSlice = bits![usize, crate::order::Msb0; 1; 100]; #[cfg(target_pointer_width = "64")] { let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1, 0, 1]; let _: &BitSlice = bits![u64, Lsb0; 1, 0, 1]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice = bits![u64, crate::order::Msb0; 1, 0, 1]; let _: &BitSlice, Lsb0> = bits![Cell, Lsb0; 1; 100]; let _: &BitSlice = bits![u64, Lsb0; 1; 100]; let _: &BitSlice, Msb0> = bits![Cell, crate::order::Msb0; 1; 100]; let _: &BitSlice = bits![u64, crate::order::Msb0; 1; 100]; } radium::if_atomic! { if atomic(8) { let _: &BitSlice = bits![AtomicU8, LocalBits; 0, 1]; let _: &BitSlice = bits![AtomicU8, Lsb0; 0, 1]; let _: &BitSlice = bits![AtomicU8, Msb0; 0, 1]; let _: &BitSlice = bits![RadiumU8, LocalBits; 1; 100]; let _: &BitSlice = bits![RadiumU8, Lsb0; 1; 100]; let _: &BitSlice = bits![RadiumU8, Msb0; 1; 100]; } if atomic(16) { let _: &BitSlice = bits![AtomicU16, LocalBits; 0, 1]; let _: &BitSlice = bits![AtomicU16, Lsb0; 0, 1]; let _: &BitSlice = bits![AtomicU16, Msb0; 0, 1]; let _: &BitSlice = bits![RadiumU16, LocalBits; 1; 100]; let _: &BitSlice = bits![RadiumU16, Lsb0; 1; 100]; let _: &BitSlice = bits![RadiumU16, Msb0; 1; 100]; } if atomic(32) { let _: &BitSlice = bits![AtomicU32, LocalBits; 0, 1]; let _: &BitSlice = bits![AtomicU32, Lsb0; 0, 1]; let _: &BitSlice = bits![AtomicU32, Msb0; 0, 1]; let _: &BitSlice = bits![RadiumU32, LocalBits; 1; 100]; let _: &BitSlice = bits![RadiumU32, Lsb0; 1; 100]; let _: &BitSlice = bits![RadiumU32, Msb0; 1; 100]; } if atomic(size) { let _: &BitSlice = bits![AtomicUsize, LocalBits; 0, 1]; let _: &BitSlice = bits![AtomicUsize, Lsb0; 0, 1]; let _: &BitSlice = bits![AtomicUsize, Msb0; 0, 1]; let _: &BitSlice = bits![RadiumUsize, LocalBits; 1; 100]; let _: &BitSlice = bits![RadiumUsize, Lsb0; 1; 100]; let _: &BitSlice = bits![RadiumUsize, Msb0; 1; 100]; } } #[cfg(target_pointer_width = "64")] radium::if_atomic! 
{ if atomic(64) { let _: &BitSlice = bits![AtomicU64, LocalBits; 0, 1]; let _: &BitSlice = bits![AtomicU64, Lsb0; 0, 1]; let _: &BitSlice = bits![AtomicU64, Msb0; 0, 1]; let _: &BitSlice = bits![RadiumU64, LocalBits; 1; 100]; let _: &BitSlice = bits![RadiumU64, Lsb0; 1; 100]; let _: &BitSlice = bits![RadiumU64, Msb0; 1; 100]; } } } #[test] #[cfg(feature = "alloc")] fn compile_bitvec() { let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1, 0, 1]; let _: BitVec = bitvec![u8, Lsb0; 1, 0, 1]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitVec = bitvec![u8, crate::order::Msb0; 1, 0, 1]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1; 100]; let _: BitVec = bitvec![u8, Lsb0; 1; 100]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1; 100]; let _: BitVec = bitvec![u8, crate::order::Msb0; 1; 100]; let _: BitVec = bitvec![1, 0, 1]; let _: BitVec = bitvec![1; 100]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1, 0, 1]; let _: BitVec = bitvec![u16, Lsb0; 1, 0, 1]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitVec = bitvec![u16, crate::order::Msb0; 1, 0, 1]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1; 100]; let _: BitVec = bitvec![u16, Lsb0; 1; 100]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1; 100]; let _: BitVec = bitvec![u16, crate::order::Msb0; 1; 100]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1, 0, 1]; let _: BitVec = bitvec![u32, Lsb0; 1, 0, 1]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitVec = bitvec![u32, crate::order::Msb0; 1, 0, 1]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1; 100]; let _: BitVec = bitvec![u32, Lsb0; 1; 100]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1; 100]; let _: BitVec = bitvec![u32, crate::order::Msb0; 1; 100]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1, 0, 1]; let _: BitVec = bitvec![usize, Lsb0; 1, 0, 1]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitVec = bitvec![usize, crate::order::Msb0; 1, 0, 1]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1; 100]; let _: BitVec = bitvec![usize, Lsb0; 1; 100]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1; 100]; let _: BitVec = bitvec![usize, crate::order::Msb0; 1; 100]; #[cfg(target_pointer_width = "64")] { let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1, 0, 1]; let _: BitVec = bitvec![u64, Lsb0; 1, 0, 1]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitVec = bitvec![u64, crate::order::Msb0; 1, 0, 1]; let _: BitVec, Lsb0> = bitvec![Cell, Lsb0; 1; 100]; let _: BitVec = bitvec![u64, Lsb0; 1; 100]; let _: BitVec, Msb0> = bitvec![Cell, crate::order::Msb0; 1; 100]; let _: BitVec = bitvec![u64, crate::order::Msb0; 1; 100]; } radium::if_atomic! 
{ if atomic(8) { let _: BitVec =bitvec![AtomicU8, LocalBits; 0, 1]; let _: BitVec =bitvec![AtomicU8, Lsb0; 0, 1]; let _: BitVec =bitvec![AtomicU8, Msb0; 0, 1]; let _: BitVec =bitvec![RadiumU8, LocalBits; 1; 100]; let _: BitVec =bitvec![RadiumU8, Lsb0; 1; 100]; let _: BitVec =bitvec![RadiumU8, Msb0; 1; 100]; } if atomic(16) { let _: BitVec =bitvec![AtomicU16, LocalBits; 0, 1]; let _: BitVec =bitvec![AtomicU16, Lsb0; 0, 1]; let _: BitVec =bitvec![AtomicU16, Msb0; 0, 1]; let _: BitVec =bitvec![RadiumU16, LocalBits; 1; 100]; let _: BitVec =bitvec![RadiumU16, Lsb0; 1; 100]; let _: BitVec =bitvec![RadiumU16, Msb0; 1; 100]; } if atomic(32) { let _: BitVec =bitvec![AtomicU32, LocalBits; 0, 1]; let _: BitVec =bitvec![AtomicU32, Lsb0; 0, 1]; let _: BitVec =bitvec![AtomicU32, Msb0; 0, 1]; let _: BitVec =bitvec![RadiumU32, LocalBits; 1; 100]; let _: BitVec =bitvec![RadiumU32, Lsb0; 1; 100]; let _: BitVec =bitvec![RadiumU32, Msb0; 1; 100]; } if atomic(size) { let _: BitVec =bitvec![AtomicUsize, LocalBits; 0, 1]; let _: BitVec =bitvec![AtomicUsize, Lsb0; 0, 1]; let _: BitVec =bitvec![AtomicUsize, Msb0; 0, 1]; let _: BitVec =bitvec![RadiumUsize, LocalBits; 1; 100]; let _: BitVec =bitvec![RadiumUsize, Lsb0; 1; 100]; let _: BitVec =bitvec![RadiumUsize, Msb0; 1; 100]; } } #[cfg(target_pointer_width = "64")] radium::if_atomic! { if atomic(64) { let _: BitVec =bitvec![AtomicU64, LocalBits; 0, 1]; let _: BitVec =bitvec![AtomicU64, Lsb0; 0, 1]; let _: BitVec =bitvec![AtomicU64, Msb0; 0, 1]; let _: BitVec =bitvec![RadiumU64, LocalBits; 1; 100]; let _: BitVec =bitvec![RadiumU64, Lsb0; 1; 100]; let _: BitVec =bitvec![RadiumU64, Msb0; 1; 100]; } } } #[test] #[cfg(feature = "alloc")] fn compile_bitbox() { let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1, 0, 1]; let _: BitBox = bitbox![u8, Lsb0; 1, 0, 1]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitBox = bitbox![u8, crate::order::Msb0; 1, 0, 1]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1; 100]; let _: BitBox = bitbox![u8, Lsb0; 1; 100]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1; 100]; let _: BitBox = bitbox![u8, crate::order::Msb0; 1; 100]; let _: BitBox = bitbox![1, 0, 1]; let _: BitBox = bitbox![1; 100]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1, 0, 1]; let _: BitBox = bitbox![u16, Lsb0; 1, 0, 1]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitBox = bitbox![u16, crate::order::Msb0; 1, 0, 1]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1; 100]; let _: BitBox = bitbox![u16, Lsb0; 1; 100]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1; 100]; let _: BitBox = bitbox![u16, crate::order::Msb0; 1; 100]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1, 0, 1]; let _: BitBox = bitbox![u32, Lsb0; 1, 0, 1]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitBox = bitbox![u32, crate::order::Msb0; 1, 0, 1]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1; 100]; let _: BitBox = bitbox![u32, Lsb0; 1; 100]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1; 100]; let _: BitBox = bitbox![u32, crate::order::Msb0; 1; 100]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1, 0, 1]; let _: BitBox = bitbox![usize, Lsb0; 1, 0, 1]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitBox = bitbox![usize, crate::order::Msb0; 1, 0, 1]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1; 100]; let _: BitBox = bitbox![usize, Lsb0; 1; 100]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1; 100]; let _: BitBox = bitbox![usize, crate::order::Msb0; 1; 100]; 
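	// `u64`-backed storage is only usable on targets whose pointer width is
	// 64 bits, so the remaining cases are gated accordingly (matching the
	// other constructor tests in this module).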
#[cfg(target_pointer_width = "64")] { let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1, 0, 1]; let _: BitBox = bitbox![u64, Lsb0; 1, 0, 1]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1, 0, 1]; let _: BitBox = bitbox![u64, crate::order::Msb0; 1, 0, 1]; let _: BitBox, Lsb0> = bitbox![Cell, Lsb0; 1; 100]; let _: BitBox = bitbox![u64, Lsb0; 1; 100]; let _: BitBox, Msb0> = bitbox![Cell, crate::order::Msb0; 1; 100]; let _: BitBox = bitbox![u64, crate::order::Msb0; 1; 100]; } radium::if_atomic! { if atomic(8) { let _: BitBox =bitbox![AtomicU8, LocalBits; 0, 1]; let _: BitBox =bitbox![AtomicU8, Lsb0; 0, 1]; let _: BitBox =bitbox![AtomicU8, Msb0; 0, 1]; let _: BitBox =bitbox![RadiumU8, LocalBits; 1; 100]; let _: BitBox =bitbox![RadiumU8, Lsb0; 1; 100]; let _: BitBox =bitbox![RadiumU8, Msb0; 1; 100]; } if atomic(16) { let _: BitBox =bitbox![AtomicU16, LocalBits; 0, 1]; let _: BitBox =bitbox![AtomicU16, Lsb0; 0, 1]; let _: BitBox =bitbox![AtomicU16, Msb0; 0, 1]; let _: BitBox =bitbox![RadiumU16, LocalBits; 1; 100]; let _: BitBox =bitbox![RadiumU16, Lsb0; 1; 100]; let _: BitBox =bitbox![RadiumU16, Msb0; 1; 100]; } if atomic(32) { let _: BitBox =bitbox![AtomicU32, LocalBits; 0, 1]; let _: BitBox =bitbox![AtomicU32, Lsb0; 0, 1]; let _: BitBox =bitbox![AtomicU32, Msb0; 0, 1]; let _: BitBox =bitbox![RadiumU32, LocalBits; 1; 100]; let _: BitBox =bitbox![RadiumU32, Lsb0; 1; 100]; let _: BitBox =bitbox![RadiumU32, Msb0; 1; 100]; } if atomic(size) { let _: BitBox =bitbox![AtomicUsize, LocalBits; 0, 1]; let _: BitBox =bitbox![AtomicUsize, Lsb0; 0, 1]; let _: BitBox =bitbox![AtomicUsize, Msb0; 0, 1]; let _: BitBox =bitbox![RadiumUsize, LocalBits; 1; 100]; let _: BitBox =bitbox![RadiumUsize, Lsb0; 1; 100]; let _: BitBox =bitbox![RadiumUsize, Msb0; 1; 100]; } } #[cfg(target_pointer_width = "64")] radium::if_atomic! { if atomic(64) { let _: BitBox =bitbox![AtomicU64, LocalBits; 0, 1]; let _: BitBox =bitbox![AtomicU64, Lsb0; 0, 1]; let _: BitBox =bitbox![AtomicU64, Msb0; 0, 1]; let _: BitBox =bitbox![RadiumU64, LocalBits; 1; 100]; let _: BitBox =bitbox![RadiumU64, Lsb0; 1; 100]; let _: BitBox =bitbox![RadiumU64, Msb0; 1; 100]; } } } #[test] fn encode_bits() { let uint: [u8; 1] = __encode_bits!(u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0); assert_eq!(uint, [53]); let cell: [Cell; 1] = __encode_bits!(Cell, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0); assert_eq!(cell[0].get(), 53); let uint: [u16; 1] = __encode_bits!(u16, Msb0; 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1 ); assert_eq!(uint, [0x4869]); let cell: [Cell; 1] = __encode_bits!(Cell, Msb0; 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1 ); assert_eq!(cell[0].get(), 0x4869); let uint: [u32; 1] = __encode_bits!(u32, LocalBits; 1, 0, 1); assert_eq!(uint.view_bits::()[.. 3], bits![1, 0, 1]); let cell: [Cell; 1] = __encode_bits!(Cell, LocalBits; 1, 0, 1); assert_eq!(cell.view_bits::()[.. 3], bits![1, 0, 1]); } #[test] fn make_elem() { let uint: u8 = __make_elem!(u8 as u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0); assert_eq!(uint, 53); let cell: Cell = __make_elem!(Cell as u8, Lsb0; 1, 0, 1, 0, 1, 1, 0, 0); assert_eq!(cell.get(), 53); let uint: u16 = __make_elem!(u16 as u16, Msb0; 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1 ); assert_eq!(uint, 0x4869); let cell: Cell = __make_elem!(Cell as u16, Msb0; 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1 ); assert_eq!(cell.get(), 0x4869); let uint: u32 = __make_elem!(u32 as u32, LocalBits; 1, 0, 1); assert_eq!(uint.view_bits::()[.. 
3], bits![1, 0, 1]); let cell: Cell = __make_elem!(Cell as u32, LocalBits; 1, 0, 1); assert_eq!(cell.view_bits::()[.. 3], bits![1, 0, 1]); /* `__make_elem!` is only invoked after `$ord` has already been made * opaque to matchers as a single `:tt`. Invoking it directly with a path * will fail the `:tt`, so this macro wraps it as one and forwards the * rest. */ macro_rules! invoke_make_elem { (Cell<$typ:ident> as $sto:ident, $ord:path; $($rest:tt)*) => { __make_elem!(Cell<$typ> as $sto, $ord; $($rest)*) }; ($typ:ident as $sto:ident, $ord:path; $($rest:tt)*) => { __make_elem!($typ as $sto, $ord; $($rest)*) }; } let uint: usize = invoke_make_elem!(usize as usize, crate::order::Lsb0; 0, 0, 1, 1); assert_eq!(uint, 12); let cell: Cell = invoke_make_elem!(Cell as usize, crate::order::Lsb0; 0, 0, 1, 1); assert_eq!(cell.get(), 12); } bitvec-1.0.1/src/macros.rs000064400000000000000000000304521046102023000135240ustar 00000000000000#![allow(deprecated)] #![doc = include_str!("../doc/macros.md")] #[macro_use] #[doc(hidden)] pub mod internal; mod tests; #[macro_export] #[doc = include_str!("../doc/macros/BitArr_type.md")] macro_rules! BitArr { (for $len:expr, in $store:ty, $order:ty $(,)?) => { $crate::array::BitArray::< [$store; $crate::mem::elts::<$store>($len)], $order > }; (for $len:expr, in $store:ty $(,)?) => { $crate::BitArr!(for $len, in $store, $crate::order::Lsb0) }; (for $len:expr) => { $crate::BitArr!(for $len, in usize) }; } #[macro_export] #[doc = include_str!("../doc/macros/bitarr_value.md")] macro_rules! bitarr { /* `const`-expression constructors. * * These arms expand to expressions which are guaranteed to be valid in * `const` position: initializing `static` or `const`, or arguments to * `const fn`. * * > Other arms *may* be valid in `const`s, but do not guarantee it. * * They are more restricted than the general variants below, because the * trait system is not yet usable in `const` contexts and thus these * expansions can only use codepaths defined in this module, and cannot use * the rest of `bitvec`’s systems. * * All valid invocations with a leading `const` will remain valid if the * `const` is removed, though their expansion may change to no longer be * valid in `const` contexts. */ // Bit-sequencing requires detecting `Cell` separately from other types. // See below. (const Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{ const ELTS: usize = $crate::__count_elts!($store; $($val),*); type Data = [Cell<$store>; ELTS]; const DATA: Data = $crate::__encode_bits!(Cell<$store>, $order; $($val),*); type This = $crate::array::BitArray; This { data: DATA, ..This::ZERO } }}; (const $store:ident, $order:ident; $($val:expr),* $(,)?) => {{ const ELTS: usize = $crate::__count_elts!($store; $($val),*); type Data = [$store; ELTS]; const DATA: Data = $crate::__encode_bits!($store, $order; $($val),*); type This = $crate::array::BitArray; This { data: DATA, ..This::ZERO } }}; // Bit-repetition is agnostic to types, so it only needs two arms. (const $store:ty, $order:ty; $val:expr; $len:expr) => {{ use $crate::macros::internal::core; type Mem = <$store as $crate::store::BitStore>::Mem; const ELTS: usize = $crate::mem::elts::<$store>($len); const ELEM: Mem = $crate::__extend_bool!($val, $store); const DATA: [Mem; ELTS] = [ELEM; ELTS]; type This = $crate::array::BitArray<[$store; ELTS], $order>; unsafe { core::mem::transmute::<_, This>(DATA) } }}; (const $val:expr; $len:expr) => {{ $crate::bitarr!(const usize, $crate::order::Lsb0; $val; $len) }}; (const $($val:expr),* $(,)?) 
=> {{ $crate::bitarr!(const usize, Lsb0; $($val),*) }}; /* Non-`const` constructors. * * These expansions are allowed to produce code that does not run in `const` * contexts. While it is *likely* that the expansions will be evaluated at * compile-time, they won’t do so while the `const` engine is active. */ /* Bit-sequence encoding. * * This requires four arms to the `const` section’s one, because of how both * the ordering and storage arguments may be provided. As macros operate * syntactically, before the type system begins, they have to accept any * syntax that could later be accepted as the name of a satisfying type. * * The `$order:ident` matcher uses the fact that `:ident` matches remain * matchable across deeper macro invocations, so that the bottom of the * macro stack can detect the magic tokens `LocalBits`, `Lsb0`, and `Msb0`, * and operate accordingly. The `$order:path` matcher is always opaque, and * serves as a fallback for complex type-names. * * `Cell<$store>` uses literal detection to extract the interior type width. * This cannot be done by `:ty` or `:path`, as these are opaque, and * `:ident` does not match `Cell<_>`. */ (Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{ use $crate::macros::internal::core; type Celled = core::cell::Cell<$store>; const ELTS: usize = $crate::__count_elts!($store; $($val),*); type Data = [Celled; ELTS]; type This = $crate::array::BitArray; This::new($crate::__encode_bits!(Cell<$store>, $order; $($val),*)) }}; (Cell<$store:ident>, $order:path; $($val:expr),* $(,)?) => {{ use $crate::macros::internal::core; type Celled = core::cell::Cell<$store>; const ELTS: usize = $crate::__count_elts!($store; $($val),*); type This = $crate::array::BitArray<[Celled; ELTS], $order>; This::new($crate::__encode_bits!(Cell<$store>, $order; $($val),*)) }}; ($store:ident, $order:ident; $($val:expr),* $(,)?) => {{ const ELTS: usize = $crate::__count_elts!($store; $($val),*); type This = $crate::array::BitArray<[$store; ELTS], $order>; This::new($crate::__encode_bits!($store, $order; $($val),*)) }}; ($store:ident, $order:path; $($val:expr),* $(,)?) => {{ const ELTS: usize = $crate::__count_elts!($store; $($val),*); type This = $crate::array::BitArray<[$store; ELTS], $order>; This::new($crate::__encode_bits!($store, $order; $($val),*)) }}; ($store:ty, $order:ty; $val:expr; $len:expr) => {{ $crate::bitarr!(const $store, $order; $val; $len) }}; ($val:expr; $len:expr) => {{ $crate::bitarr!(const $val; $len) }}; ($($val:expr),* $(,)?) => { $crate::bitarr!(usize, Lsb0; $($val),*) }; } #[macro_export] #[doc = include_str!("../doc/macros/bits.md")] macro_rules! bits { /* `&'static` constructors. * * Like the `bitarr!(const …)` arms, these arms must expand to code that is * valid in `const` contexts. As such, they can only accept `$order` * arguments that are one of the `LocalBits`, `Lsb0`, or `Msb0` literals. * Once the underlying `static BitArray` is created, */ (static mut Cell<$store:ident>, $order:ty; $val:expr; $len:expr) => {{ use $crate::macros::internal::core; type Celled = core::cell::Cell<$store>; static mut DATA: $crate::BitArr!(for $len, in Celled, $order) = $crate::bitarr!(const Cell<$store>, $order; $val; $len); &mut DATA[.. $len] }}; (static mut $store:ident, $order:ident; $val:expr; $len:expr) => {{ static mut DATA: $crate::BitArr!(for $len, in $store, $order) = $crate::bitarr!(const $store, $order; $val; $len); DATA.get_unchecked_mut(.. $len) }}; (static mut Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) 
=> {{ use $crate::macros::internal::core; type Celled = core::cell::Cell<$store>; const BITS: usize = $crate::__count!($($val),*); static mut DATA: $crate::BitArr!(for BITS, in $store, $order) = $crate::bitarr!(const $store, $order; $($val),*); &mut *( DATA.get_unchecked_mut(.. BITS) as *mut $crate::slice::BitSlice<$store, $order> as *mut $crate::slice::BitSlice ) }}; (static mut $store:ident, $order:ident; $($val:expr),* $(,)?) => {{ const BITS: usize = $crate::__count!($($val),*); static mut DATA: $crate::BitArr!(for BITS, in $store, $order) = $crate::bitarr!(const $store, $order; $($val),*); DATA.get_unchecked_mut(.. BITS) }}; (static mut $val:expr; $len:expr) => {{ static mut DATA: $crate::BitArr!(for $len) = $crate::bitarr!(const usize, $crate::order::Lsb0; $val; $len); DATA.get_unchecked_mut(.. $len) }}; (static mut $($val:expr),* $(,)?) => {{ $crate::bits!(static mut usize, Lsb0; $($val),*) }}; (static Cell<$store:ident>, $order:ty; $val:expr; $len:expr) => {{ use $crate::macros::internal::core; type Celled = core::cell::Cell<$store>; static DATA: $crate::BitArr!(for $len, in $store, $order) = $crate::bitarr!(const $store, $order; $val; $len); unsafe { &*( DATA.get_unchecked(.. $len) as *const $crate::slice::BitSlice<$store, $order> as *const $crate::slice::BitSlice ) } }}; (static Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{ use $crate::macros::internal::core; type Celled = core::cell::Cell<$store>; const BITS: usize = $crate::__count!($($val),*); static DATA: $crate::BitArr!(for BITS, in $store, $order) = $crate::bitarr!(const $store, $order; $($val),*); unsafe { &*( DATA.get_unchecked(.. BITS) as *const $crate::slice::BitSlice<$store, $order> as *const $crate::slice::BitSlice ) } }}; (static $store:ident, $order:ident; $val:expr; $len:expr) => {{ static DATA: $crate::BitArr!(for $len, in $store, $order) = $crate::bitarr!(const $store, $order; $val; $len); unsafe { DATA.get_unchecked(.. $len) } }}; (static $val:expr; $len:expr) => {{ static DATA: $crate::BitArr!(for $len) = $crate::bitarr!(const usize, $crate::order::Lsb0; $val; $len); unsafe { DATA.get_unchecked(.. $len) } }}; (static $store:ident, $order:ident; $($val:expr),* $(,)?) => {{ const BITS: usize = $crate::__count!($($val),*); static DATA: $crate::BitArr!(for BITS, in $store, $order) = $crate::bitarr!(const $store, $order; $($val),*); unsafe { DATA.get_unchecked(.. BITS) } }}; (static $($val:expr),* $(,)?) => {{ $crate::bits!(static usize, Lsb0; $($val),*) }}; // Repetition syntax `[bit ; count]`. // NOTE: `count` must be a `const`, as this is a non-allocating macro. // Sequence syntax `[bit (, bit)*]` or `[(bit ,)*]`. // Explicit order and store. (mut Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{ const BITS: usize = $crate::__count!($($val),*); &mut $crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS] }}; (mut Cell<$store:ident>, $order:path; $($val:expr),* $(,)?) => {{ const BITS: usize = $crate::__count!($($val),*); &mut $crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS] }}; (mut $store:ident, $order:ident; $($val:expr),* $(,)?) => {{ const BITS: usize = $crate::__count!($($val),*); &mut $crate::bitarr!($store, $order; $($val),*)[.. BITS] }}; (mut $store:ident, $order:path; $($val:expr),* $(,)?) => {{ const BITS: usize = $crate::__count!($($val),*); &mut $crate::bitarr!($store, $order; $($val),*)[.. BITS] }}; // Explicit order and store. (mut $store:ty, $order:ty; $val:expr; $len:expr) => {{ &mut $crate::bitarr!($store, $order; $val; $len)[.. 
$len] }};
	// Default order and store.
	(mut $val:expr; $len:expr) => {
		$crate::bits!(mut usize, $crate::order::Lsb0; $val; $len)
	};
	// Default order and store.
	(mut $($val:expr),* $(,)?) => {
		$crate::bits!(mut usize, Lsb0; $($val),*)
	};

	// Repeat everything from above, but now immutable.

	($store:ty, $order:ty; $val:expr; $len:expr) => {{
		&$crate::bitarr!($store, $order; $val; $len)[.. $len]
	}};
	(Cell<$store:ident>, $order:ident; $($val:expr),* $(,)?) => {{
		const BITS: usize = $crate::__count!($($val),*);
		&$crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS]
	}};
	($store:ident, $order:ident; $($val:expr),* $(,)?) => {{
		const BITS: usize = $crate::__count!($($val),*);
		&$crate::bitarr!($store, $order; $($val),*)[.. BITS]
	}};
	(Cell<$store:ident>, $order:path; $($val:expr),* $(,)?) => {{
		const BITS: usize = $crate::__count!($($val),*);
		&$crate::bitarr!(Cell<$store>, $order; $($val),*)[.. BITS]
	}};
	($store:ident, $order:path; $($val:expr),* $(,)?) => {{
		const BITS: usize = $crate::__count!($($val),*);
		&$crate::bitarr!($store, $order; $($val),*)[.. BITS]
	}};
	// Default order and store.
	($val:expr; $len:expr) => {
		$crate::bits!(usize, $crate::order::Lsb0; $val; $len)
	};
	($($val:expr),* $(,)?) => {
		$crate::bits!(usize, Lsb0; $($val),*)
	};
}

#[macro_export]
#[cfg(feature = "alloc")]
#[doc = include_str!("../doc/macros/bitvec.md")]
macro_rules! bitvec {
	// First, capture the repetition syntax, as it is permitted to use
	// runtime values for the repetition count.
	($store:ty, $order:ty; $val:expr; $len:expr) => {
		$crate::vec::BitVec::<$store, $order>::repeat($val != 0, $len)
	};
	// Capture `Cell<T>` patterns and prevent them from being parsed as
	// comparisons. Guess we didn’t escape Most Vexing Parse after all.
	(Cell<$store:ident>, $order:ident $($rest:tt)*) => {
		$crate::vec::BitVec::from_bitslice($crate::bits!(
			Cell<$store>, $order $($rest)*
		))
	};
	($val:expr; $len:expr) => {
		$crate::bitvec!(usize, $crate::order::Lsb0; $val; $len)
	};
	// Delegate all others to the `bits!` macro.
	($($arg:tt)*) => {
		$crate::vec::BitVec::from_bitslice($crate::bits!($($arg)*))
	};
}

#[macro_export]
#[cfg(feature = "alloc")]
#[doc = include_str!("../doc/macros/bitbox.md")]
macro_rules! bitbox {
	($($arg:tt)*) => {
		$crate::bitvec!($($arg)*).into_boxed_bitslice()
	};
}
bitvec-1.0.1/src/mem.rs000064400000000000000000000074471046102023000130220ustar 00000000000000#![doc = include_str!("../doc/mem.md")]

use core::{
	cell::Cell,
	mem,
};

use funty::Unsigned;
use radium::marker::BitOps;

#[doc = include_str!("../doc/mem/BitRegister.md")]
pub trait BitRegister: Unsigned + BitOps {
	/// The number of bits required to store an index in the range
	/// `0 .. BITS`.
	const INDX: u8 = bits_of::<Self>().trailing_zeros() as u8;
	/// A mask over all bits that can be used as an index within the
	/// element. This is the value with the least significant `INDX`-many
	/// bits set high.
	const MASK: u8 = bits_of::<Self>() as u8 - 1;
	/// The literal `!0`.
	const ALL: Self;
}

/// Marks certain fundamentals as processor registers.
macro_rules! register {
	($($t:ty),+ $(,)?) => { $(
		impl BitRegister for $t {
			const ALL: Self = !0;
		}
	)+ };
}

register!(u8, u16, u32);

/** `u64` can only be used as a register on processors whose word size is at
least 64 bits.

This implementation is not present on targets with 32-bit processor words.
**/
#[cfg(target_pointer_width = "64")]
impl BitRegister for u64 {
	const ALL: Self = !0;
}

register!(usize);

/// Counts the number of bits in a value of type `T`.
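///
/// A quick illustration (added here; assumes the function remains exported
/// as `bitvec::mem::bits_of`):
///
/// ```rust
/// use bitvec::mem::bits_of;
///
/// assert_eq!(bits_of::<u8>(), 8);
/// assert_eq!(bits_of::<u32>(), 32);
/// // Compound types count every bit of their storage.
/// assert_eq!(bits_of::<[u16; 4]>(), 64);
/// ```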
pub const fn bits_of<T>() -> usize {
	core::mem::size_of::<T>().saturating_mul(<u8>::BITS as usize)
}

#[doc = include_str!("../doc/mem/elts.md")]
pub const fn elts<T>(bits: usize) -> usize {
	let width = bits_of::<T>();
	if width == 0 {
		return 0;
	}
	bits / width + (bits % width != 0) as usize
}

/// Tests if a type has alignment equal to its size.
#[doc(hidden)]
#[cfg(not(tarpaulin_include))]
pub const fn aligned_to_size<T>() -> bool {
	mem::align_of::<T>() == mem::size_of::<T>()
}

/// Tests if two types have identical layouts (size and alignment are equal).
#[doc(hidden)]
#[cfg(not(tarpaulin_include))]
pub const fn layout_eq<T, U>() -> bool {
	mem::align_of::<T>() == mem::align_of::<U>()
		&& mem::size_of::<T>() == mem::size_of::<U>()
}

#[doc(hidden)]
#[repr(transparent)]
#[doc = include_str!("../doc/mem/BitElement.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitElement<T> {
	pub elem: T,
}

/// Creates a `BitElement` implementation for an integer and its atomic/cell
/// variants.
macro_rules! element {
	($($size:tt, $bare:ty => $atom:ident);+ $(;)?) => { $(
		impl BitElement<$bare> {
			/// Creates a new element wrapper from a raw integer.
			pub const fn new(elem: $bare) -> Self {
				Self { elem }
			}
		}
		impl BitElement<Cell<$bare>> {
			/// Creates a new element wrapper from a raw integer.
			pub const fn new(elem: $bare) -> Self {
				Self {
					elem: Cell::new(elem),
				}
			}
		}
		radium::if_atomic!(if atomic($size) {
			use core::sync::atomic::$atom;
			impl BitElement<$atom> {
				/// Creates a new element wrapper from a raw integer.
				pub const fn new(elem: $bare) -> Self {
					Self {
						elem: <$atom>::new(elem),
					}
				}
			}
		});
	)+ };
}

element! {
	8, u8 => AtomicU8;
	16, u16 => AtomicU16;
	32, u32 => AtomicU32;
}

#[cfg(target_pointer_width = "64")]
element!(64, u64 => AtomicU64);
element!(size, usize => AtomicUsize);

#[cfg(test)]
mod tests {
	use super::*;
	use crate::access::*;

	#[test]
	fn integer_properties() {
		assert!(aligned_to_size::<u8>());
		assert!(aligned_to_size::<BitSafeU8>());
		assert!(layout_eq::<u8, BitSafeU8>());

		assert!(aligned_to_size::<u16>());
		assert!(aligned_to_size::<BitSafeU16>());
		assert!(layout_eq::<u16, BitSafeU16>());

		assert!(aligned_to_size::<u32>());
		assert!(aligned_to_size::<BitSafeU32>());
		assert!(layout_eq::<u32, BitSafeU32>());

		assert!(aligned_to_size::<usize>());
		assert!(aligned_to_size::<BitSafeUsize>());
		assert!(layout_eq::<usize, BitSafeUsize>());

		#[cfg(target_pointer_width = "64")]
		{
			assert!(aligned_to_size::<u64>());
			assert!(aligned_to_size::<BitSafeU64>());
			assert!(layout_eq::<u64, BitSafeU64>());
		}
	}
}
bitvec-1.0.1/src/order.rs000064400000000000000000000335701046102023000133570ustar 00000000000000#![doc = include_str!("../doc/order.md")]

use crate::{
	index::{
		BitEnd,
		BitIdx,
		BitMask,
		BitPos,
		BitSel,
	},
	mem::{
		bits_of,
		BitRegister,
	},
};

#[doc = include_str!("../doc/order/BitOrder.md")]
pub unsafe trait BitOrder: 'static {
	/// Translates a semantic bit index into a real bit position.
	///
	/// This function is the basis of the trait, and must adhere to a number
	/// of requirements in order for an implementation to be correct.
	///
	/// ## Type Parameters
	///
	/// - `R`: The memory element type that the index and position govern.
	///
	/// ## Parameters
	///
	/// - `index`: A semantic bit-index within some `R` element.
	///
	/// ## Returns
	///
	/// The real position of the indexed bit within an `R` element. See the
	/// `BitPos` documentation for what these positions are considered to
	/// mean.
	///
	/// ## Requirements
	///
	/// This function must satisfy the following requirements for all
	/// possible input and output values, for all possible `R` type
	/// parameters:
	///
	/// - Totality: The implementation must be able to accept every input in
	///   [`BitIdx::<R>::range_all()`], and produce some `BitPos` value for
	///   each.
	/// - Bijection: There must be an exactly one-to-one correspondence
	///   between input and output values. No input index may choose its
	///   output from a set of more than one position, and no output
	///   position may be produced by more than one input index.
	/// - Purity: The translation from index to position must be consistent
	///   for the lifetime of *at least* all data structures in the program.
	///   This function *may* refer to global state, but that state **must**
	///   be immutable while any `bitvec` data structures exist, and must
	///   not be used to violate the totality or bijection requirements.
	/// - Validity: The produced `BitPos` value must be within the valid
	///   range of its type. This is enforced by [`BitPos::new`], but not by
	///   the unsafe constructor [`BitPos::new_unchecked`].
	///
	/// [`BitIdx::<R>::range_all()`]: crate::index::BitIdx::range_all
	/// [`BitPos::new`]: crate::index::BitPos::new
	/// [`BitPos::new_unchecked`]: crate::index::BitPos::new_unchecked
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister;

	/// Produces a single-bit selection mask from a bit-index.
	///
	/// This is an optional function: it is implemented as, and must always
	/// be exactly identical to, `BitOrder::at(index).select()`. If your
	/// ordering has a faster implementation, you may provide it, but it
	/// must be exactly numerically equivalent.
	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		Self::at::<R>(index).select()
	}

	/// Produces a multi-bit selection mask from a range of bit-indices.
	///
	/// This is an optional function: it is implemented as, and must always
	/// be exactly identical to,
	/// `BitIdx::range(from, upto).map(BitOrder::select).sum()`. If your
	/// ordering has a faster implementation, you may provide it, but it
	/// must be exactly numerically equivalent.
	///
	/// ## Parameters
	///
	/// - `from`: The inclusive starting value of the indices being
	///   selected. Defaults to [`BitIdx::MIN`].
	/// - `upto`: The exclusive ending value of the indices being selected.
	///   Defaults to [`BitEnd::MAX`].
	///
	/// ## Returns
	///
	/// A selection mask with all bit-positions corresponding to
	/// `from .. upto` selected.
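	///
	/// ## Examples
	///
	/// An added illustration (assuming `BitIdx`, `BitEnd`, and `BitMask`
	/// remain publicly constructible through `bitvec::index`):
	///
	/// ```rust,ignore
	/// use bitvec::index::{BitEnd, BitIdx, BitMask};
	/// use bitvec::order::{BitOrder, Lsb0};
	///
	/// let from = BitIdx::<u8>::new(2).unwrap();
	/// let upto = BitEnd::<u8>::new(5).unwrap();
	/// // Lsb0 numbers bits from the least significant edge, so indices
	/// // 2, 3, and 4 select the mask 0b0001_1100.
	/// assert_eq!(Lsb0::mask(from, upto), BitMask::new(0b0001_1100));
	/// ```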
	///
	/// [`BitEnd::MAX`]: crate::index::BitEnd::MAX
	/// [`BitIdx::MIN`]: crate::index::BitIdx::MIN
	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where R: BitRegister {
		let (from, upto) = match (from.into(), upto.into()) {
			(None, None) => return BitMask::ALL,
			(Some(from), None) => (from, BitEnd::MAX),
			(None, Some(upto)) => (BitIdx::MIN, upto),
			(Some(from), Some(upto)) => (from, upto),
		};
		from.range(upto).map(Self::select::<R>).sum()
	}
}

#[doc = include_str!("../doc/order/Lsb0.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Lsb0;

#[doc = include_str!("../doc/order/Msb0.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Msb0;

unsafe impl BitOrder for Lsb0 {
	#[inline]
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		unsafe { BitPos::new_unchecked(index.into_inner()) }
	}

	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		unsafe { BitSel::new_unchecked(R::ONE << index.into_inner()) }
	}

	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where R: BitRegister {
		let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
		let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
		debug_assert!(
			from <= upto,
			"Ranges must run from low index ({}) to high ({})",
			from,
			upto,
		);
		let ct = upto - from;
		if ct == bits_of::<R>() as u8 {
			return BitMask::ALL;
		}
		/* This expression does the following work:
		 * 1. Set all bits in the mask to `1`.
		 * 2. Shift left by the number of bits in the mask. The mask bits
		 *    are now at LSedge and `0`.
		 * 3. Invert the mask. The mask bits are now at LSedge and `1`; all
		 *    else are `0`.
		 * 4. Shift left by the `from` distance from LSedge. The mask bits
		 *    now begin at `from` left of LSedge and extend to `upto` left
		 *    of LSedge.
		 */
		BitMask::new(!(R::ALL << ct) << from)
	}
}

unsafe impl BitOrder for Msb0 {
	#[inline]
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		unsafe { BitPos::new_unchecked(R::MASK - index.into_inner()) }
	}

	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		/* Shift the MSbit down by the index count. This is not equivalent
		 * to the expression `1 << (mask - index)`, because that is required
		 * to perform a runtime subtraction before the shift, while this
		 * produces a constant that is shifted.
		 */
		let msbit: R = R::ONE << R::MASK;
		unsafe { BitSel::new_unchecked(msbit >> index.into_inner()) }
	}

	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where R: BitRegister {
		let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
		let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
		debug_assert!(
			from <= upto,
			"ranges must run from low index ({}) to high ({})",
			from,
			upto,
		);
		let ct = upto - from;
		if ct == bits_of::<R>() as u8 {
			return BitMask::ALL;
		}
		/* This expression does the following work:
		 * 1. Set all bits in the mask to `1`.
		 * 2. Shift right by the number of bits in the mask. The mask bits
		 *    are now at MSedge and `0`.
		 * 3. Invert the mask. The mask bits are now at MSedge and `1`; all
		 *    else are `0`.
		 * 4. Shift right by the `from` distance from MSedge. The mask bits
		 *    now begin at `from` right of MSedge and extend to `upto` right
		 *    of MSedge.
		 */
		BitMask::new(!(R::ALL >> ct) >> from)
	}
}

#[cfg(target_endian = "little")]
#[doc = include_str!("../doc/order/LocalBits.md")]
pub use self::Lsb0 as LocalBits;

#[cfg(target_endian = "big")]
#[doc = include_str!("../doc/order/LocalBits.md")]
pub use self::Msb0 as LocalBits;

#[cfg(not(any(target_endian = "big", target_endian = "little")))]
compile_error!(
	"This architecture is not supported! Please consider filing an issue"
);

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/order/verify.md")]
pub fn verify<O>(verbose: bool)
where O: BitOrder {
	verify_for_type::<u8, O>(verbose);
	verify_for_type::<u16, O>(verbose);
	verify_for_type::<u32, O>(verbose);
	verify_for_type::<usize, O>(verbose);

	#[cfg(target_pointer_width = "64")]
	verify_for_type::<u64, O>(verbose);
}

/// Verification does not access memory, and is both useless and slow in
/// Miri.
#[cfg(miri)]
pub fn verify_for_type<R, O>(_: bool)
where
	R: BitRegister,
	O: BitOrder,
{
}

#[cfg(not(miri))]
#[doc = include_str!("../doc/order/verify_for_type.md")]
pub fn verify_for_type<R, O>(verbose: bool)
where
	R: BitRegister,
	O: BitOrder,
{
	use core::any::type_name;
	let mut accum = BitMask::<R>::ZERO;
	let ord_name = type_name::<O>();
	let reg_name = type_name::<R>();

	for n in 0 .. bits_of::<R>() as u8 {
		// Wrap the counter as an index.
		let idx = unsafe { BitIdx::<R>::new_unchecked(n) };
		// Compute the bit position for the index.
		let pos = O::at::<R>(idx);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::at::<{}>({})` produces {}",
				ord_name,
				reg_name,
				n,
				pos.into_inner(),
			);
		}
		// If the computed position exceeds the valid range, fail.
		assert!(
			pos.into_inner() < bits_of::<R>() as u8,
			"Error when verifying the implementation of `BitOrder` for \
			 `{}`: Index {} produces a bit position ({}) that exceeds the \
			 type width {}",
			ord_name,
			n,
			pos.into_inner(),
			bits_of::<R>(),
		);
		// Check `O`’s implementation of `select`
		let sel = O::select::<R>(idx);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::select::<{}>({})` produces {:b}",
				ord_name, reg_name, n, sel,
			);
		}
		// If the selector bit is not one-hot, fail.
		assert_eq!(
			sel.into_inner().count_ones(),
			1,
			"Error when verifying the implementation of `BitOrder` for \
			 `{}`: Index {} produces a bit selector ({:b}) that is not a \
			 one-hot mask",
			ord_name, n, sel,
		);
		// Check that the selection computed from the index matches the
		// selection computed from the position.
		let shl = pos.select();
		// If `O::select(idx)` does not produce `1 << pos`, fail.
		assert_eq!(
			sel,
			shl,
			"Error when verifying the implementation of `BitOrder` for \
			 `{}`: Index {} produces a bit selector ({:b}) that is not \
			 equal to `1 << {}` ({:b})",
			ord_name, n, sel, pos.into_inner(), shl,
		);
		// Check that the produced selector bit has not already been added
		// to the accumulator.
		assert!(
			!accum.test(sel),
			"Error when verifying the implementation of `BitOrder` for \
			 `{}`: Index {} produces a bit position ({}) that has already \
			 been produced by a prior index",
			ord_name, n, pos.into_inner(),
		);
		accum.insert(sel);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::at::<{}>({})` accumulates {:b}",
				ord_name, reg_name, n, accum,
			);
		}
	}

	// Check that all indices produced all positions.
	assert_eq!(
		accum,
		BitMask::ALL,
		"Error when verifying the implementation of `BitOrder` for `{}`: \
		 The bit positions marked with a `0` here were never produced from \
		 an index, despite all possible indices being passed in for \
		 translation: {:b}",
		ord_name, accum,
	);

	// Check that `O::mask` is correct for all range combinations.
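	// (Added note: this sweep is quadratic in the bit-width of `R` — every
	// `from` index is paired with every `upto` endpoint at or above it —
	// and it checks the possibly-optimized `mask` against the sum of the
	// one-hot selectors that define it.)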
	for from in BitIdx::<R>::range_all() {
		for upto in BitEnd::<R>::range_from(from) {
			let mask = O::mask(from, upto);
			let check = from
				.range(upto)
				.map(O::at)
				.map(BitPos::select)
				.sum::<BitMask<R>>();
			assert_eq!(
				mask,
				check,
				"Error when verifying the implementation of `BitOrder` for \
				 `{o}`: `{o}::mask::<{m}>({f}, {u})` produced {bad:b}, but \
				 expected {good:b}",
				o = ord_name,
				m = reg_name,
				f = from,
				u = upto,
				bad = mask,
				good = check,
			);
		}
	}
}

/// An ordering that does not provide a contiguous index map or `BitField`
/// acceleration.
#[cfg(test)]
pub struct HiLo;

#[cfg(test)]
unsafe impl BitOrder for HiLo {
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		BitPos::new(index.into_inner() ^ 4).unwrap()
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn default_impl() {
		assert_eq!(Lsb0::mask(None, None), BitMask::<u8>::ALL);
		assert_eq!(Msb0::mask(None, None), BitMask::<u8>::ALL);
		assert_eq!(HiLo::mask(None, None), BitMask::<u8>::ALL);

		assert_eq!(
			HiLo::mask(None, BitEnd::<u8>::new(3).unwrap()),
			BitMask::new(0b0111_0000),
		);
		assert_eq!(
			HiLo::mask(BitIdx::<u8>::new(3).unwrap(), None),
			BitMask::new(0b1000_1111),
		);
	}

	// Split these out into individual test functions so they can
	// parallelize.
	mod lsb0 {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, Lsb0>(cfg!(feature = "verbose"));
		}
	}

	mod msb0 {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, Msb0>(cfg!(feature = "verbose"));
		}
	}

	mod hilo {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, HiLo>(cfg!(feature = "verbose"));
		}
	}
}
bitvec-1.0.1/src/ptr/addr.rs000064400000000000000000000076361046102023000137660ustar 00000000000000#![doc = include_str!("../../doc/ptr/addr.md")]

use core::{
	any,
	fmt::{
		self,
		Debug,
		Display,
		Formatter,
		Pointer,
	},
	mem,
	ptr::NonNull,
};

use tap::{
	Pipe,
	TryConv,
};
use wyz::{
	comu::{
		Address,
		Const,
		Mut,
		Mutability,
	},
	fmt::FmtForward,
};

/// Ensures that an address is well-aligned to its referent type width.
#[inline]
pub fn check_alignment<T, M>(
	addr: Address<M, T>,
) -> Result<Address<M, T>, MisalignError<T>>
where M: Mutability {
	let ptr = addr.to_const();
	let mask = mem::align_of::<T>() - 1;
	if ptr as usize & mask != 0 {
		Err(MisalignError { ptr })
	}
	else {
		Ok(addr)
	}
}

/// Extension methods for raw pointers.
pub(crate) trait AddressExt {
	/// Tracks the original mutation capability of the source pointer.
	type Permission: Mutability;
	/// The type to which the pointer points.
	type Referent: Sized;

	/// Forcibly wraps a raw pointer as an `Address`, without handling
	/// errors.
	///
	/// In debug builds, this panics on null or misaligned pointers. In
	/// release builds, it is permitted to remove the error-handling
	/// codepaths and assume these invariants are upheld by the caller.
	///
	/// ## Safety
	///
	/// The caller must ensure that this is only called on non-null,
	/// well-aligned pointers. Pointers derived from Rust references or
	/// calls to the Rust allocator API will always satisfy this.
	unsafe fn into_address(self) -> Address<Self::Permission, Self::Referent>;
}

#[cfg(not(tarpaulin_include))]
impl<T> AddressExt for *const T {
	type Permission = Const;
	type Referent = T;

	unsafe fn into_address(self) -> Address<Const, T> {
		if cfg!(debug_assertions) {
			self.try_conv::<Address<Const, T>>()
				.unwrap_or_else(|err| panic!("{}", err))
				.pipe(check_alignment)
				.unwrap_or_else(|err| panic!("{}", err))
		}
		else {
			Address::new(NonNull::new_unchecked(self as *mut T))
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> AddressExt for *mut T {
	type Permission = Mut;
	type Referent = T;

	unsafe fn into_address(self) -> Address<Mut, T> {
		if cfg!(debug_assertions) {
			self.try_conv::<Address<Mut, T>>()
				.unwrap_or_else(|err| panic!("{}", err))
				.pipe(check_alignment)
				.unwrap_or_else(|err| panic!("{}", err))
		}
		else {
			Address::new(NonNull::new_unchecked(self))
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> AddressExt for &T {
	type Permission = Const;
	type Referent = T;

	unsafe fn into_address(self) -> Address<Const, T> {
		self.into()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> AddressExt for &mut T {
	type Permission = Mut;
	type Referent = T;

	unsafe fn into_address(self) -> Address<Mut, T> {
		self.into()
	}
}

/// The error produced when an address is insufficiently aligned to the
/// width of its type.
#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct MisalignError<T> {
	/// The misaligned pointer.
	ptr: *const T,
}

impl<T> MisalignError<T> {
	/// The minimum address alignment of `T` values.
	const ALIGN: usize = mem::align_of::<T>();
	/// The number of least-significant-bits of an address that must be `0`
	/// in order for it to be validly aligned for `T`.
	const CTTZ: usize = Self::ALIGN.trailing_zeros() as usize;
}

#[cfg(not(tarpaulin_include))]
impl<T> Debug for MisalignError<T> {
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		fmt.debug_tuple("MisalignError")
			.field(&self.ptr.fmt_pointer())
			.field(&Self::ALIGN)
			.finish()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> Display for MisalignError<T> {
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(
			fmt,
			"Type {} requires {}-byte alignment: address ",
			any::type_name::<T>(),
			Self::ALIGN,
		)?;
		Pointer::fmt(&self.ptr, fmt)?;
		write!(fmt, " must clear its least {} bits", Self::CTTZ)
	}
}

unsafe impl<T> Send for MisalignError<T> {}

unsafe impl<T> Sync for MisalignError<T> {}

#[cfg(feature = "std")]
impl<T> std::error::Error for MisalignError<T> {}
bitvec-1.0.1/src/ptr/proxy.rs000064400000000000000000000212761046102023000142310ustar 00000000000000#![doc = include_str!("../../doc/ptr/proxy.md")]

use core::{
	cell::UnsafeCell,
	cmp,
	fmt::{
		self,
		Debug,
		Display,
		Formatter,
		Pointer,
	},
	hash::{
		Hash,
		Hasher,
	},
	marker::PhantomData,
	mem,
	ops::{
		Deref,
		DerefMut,
		Not,
	},
};

use wyz::comu::{
	Const,
	Mut,
	Mutability,
};

use super::BitPtr;
use crate::{
	order::{
		BitOrder,
		Lsb0,
	},
	store::BitStore,
};

#[doc = include_str!("../../doc/ptr/BitRef.md")]
// Restore alignment and sizing properties, as `BitPtr` lacks them.
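// (Added note: `BitPtr` is `repr(C, packed)` and one byte larger than a raw
// pointer, so a struct containing it would otherwise be byte-aligned; the
// attributes below pad `BitRef` back out to word alignment.)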
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))] #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))] #[cfg_attr( not(any(target_pointer_width = "32", target_pointer_width = "64")), repr(C) )] pub struct BitRef<'a, M = Const, T = usize, O = Lsb0> where M: Mutability, T: BitStore, O: BitOrder, { /// The proxied bit-address. bitptr: BitPtr, /// A local cache of the proxied bit that can be referenced. data: bool, /// Attach the lifetime and reflect the possibility of mutation. _ref: PhantomData<&'a UnsafeCell>, } impl BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { /// Converts a bit-pointer into a proxy bit-reference. /// /// This reads through the pointer in order to cache the current bit value /// in the proxy. /// /// ## Original /// /// The syntax `unsafe { &* ptr }`. /// /// ## Safety /// /// This is equivalent to (and is!) dereferencing a raw pointer. The pointer /// must be well-constructed, refer to a live memory location in the program /// context, and not be aliased beyond its typing indicators. #[inline] pub unsafe fn from_bitptr(bitptr: BitPtr) -> Self { let data = bitptr.read(); Self { bitptr, data, _ref: PhantomData, } } /// Decays the bit-reference to an ordinary bit-pointer. /// /// ## Original /// /// The syntax `&val as *T`. #[inline] #[cfg(not(tarpaulin_include))] pub fn into_bitptr(self) -> BitPtr { self.bitptr } /// Removes a layer of `::Alias` marking from a bit-reference. /// /// ## Safety /// /// The caller must ensure that no element-level aliasing *by `bitvec`* /// occurs in the scope for which the produced de-aliased proxy is alive. #[cfg(not(tarpaulin_include))] pub(crate) unsafe fn remove_alias(this: BitRef) -> Self { Self { bitptr: this.bitptr.cast::(), data: this.data, _ref: PhantomData, } } } impl BitRef<'_, Mut, T, O> where T: BitStore, O: BitOrder, { /// Moves `src` into the referenced bit, returning the previous value. /// /// ## Original /// /// [`mem::replace`](core::mem::replace) #[inline] pub fn replace(&mut self, src: bool) -> bool { mem::replace(&mut self.data, src) } /// Swaps the bit values of two proxies. /// /// ## Original /// /// [`mem::swap`](core::mem::swap) #[inline] pub fn swap(&mut self, other: &mut BitRef) where T2: BitStore, O2: BitOrder, { mem::swap(&mut self.data, &mut other.data) } /// Commits a bit into the proxied location. /// /// This function writes `value` directly into the proxied location, /// bypassing the cache and destroying the proxy. This eliminates the second /// write done in the destructor, and allows code to be slightly faster. #[inline] pub fn commit(self, value: bool) { unsafe { self.bitptr.write(value); } mem::forget(self); } /// Writes `value` into the proxy. /// /// This does not write into the proxied location; that is deferred until /// the proxy destructor runs. 
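	///
	/// ## Examples
	///
	/// An added illustration; the cached value is committed back to memory
	/// when the proxy drops:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = 0u8;
	/// let bits = data.view_bits_mut::<Lsb0>();
	/// let mut bit = bits.get_mut(0).unwrap();
	/// bit.set(true);
	/// drop(bit); // the destructor writes the cached bit into memory
	/// assert!(bits[0]);
	/// ```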
#[inline] pub fn set(&mut self, value: bool) { self.data = value; } } #[cfg(not(tarpaulin_include))] impl Clone for BitRef<'_, Const, T, O> where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { Self { ..*self } } } impl Eq for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { } #[cfg(not(tarpaulin_include))] impl Ord for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.data.cmp(&other.data) } } #[cfg(not(tarpaulin_include))] impl PartialEq> for BitRef<'_, M1, T1, O1> where M1: Mutability, M2: Mutability, T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline(always)] fn eq(&self, other: &BitRef<'_, M2, T2, O2>) -> bool { self.data == other.data } } #[cfg(not(tarpaulin_include))] impl PartialEq for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline(always)] fn eq(&self, other: &bool) -> bool { self.data == *other } } #[cfg(not(tarpaulin_include))] impl PartialEq> for bool where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn eq(&self, other: &BitRef<'_, M, T, O>) -> bool { other == self } } #[cfg(not(tarpaulin_include))] impl PartialEq<&bool> for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline(always)] fn eq(&self, other: &&bool) -> bool { self.data == **other } } #[cfg(not(tarpaulin_include))] impl PartialEq> for &bool where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn eq(&self, other: &BitRef<'_, M, T, O>) -> bool { other == *self } } #[cfg(not(tarpaulin_include))] impl PartialOrd> for BitRef<'_, M1, T1, O1> where M1: Mutability, M2: Mutability, T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp( &self, other: &BitRef<'_, M2, T2, O2>, ) -> Option { self.data.partial_cmp(&other.data) } } #[cfg(not(tarpaulin_include))] impl PartialOrd for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn partial_cmp(&self, other: &bool) -> Option { self.data.partial_cmp(other) } } #[cfg(not(tarpaulin_include))] impl PartialOrd<&bool> for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn partial_cmp(&self, other: &&bool) -> Option { self.data.partial_cmp(*other) } } #[cfg(not(tarpaulin_include))] impl AsRef for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &bool { &self.data } } #[cfg(not(tarpaulin_include))] impl AsMut for BitRef<'_, Mut, T, O> where T: BitStore, O: BitOrder, { #[inline] fn as_mut(&mut self) -> &mut bool { &mut self.data } } impl Debug for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { unsafe { self.bitptr.span_unchecked(1) } .render(fmt, "Ref", &[("bit", &self.data as &dyn Debug)]) } } #[cfg(not(tarpaulin_include))] impl Display for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Display::fmt(&self.data, fmt) } } #[cfg(not(tarpaulin_include))] impl Pointer for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Pointer::fmt(&self.bitptr, fmt) } } #[cfg(not(tarpaulin_include))] impl Hash for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn hash(&self, state: &mut H) where H: Hasher { self.bitptr.hash(state); } } // #[allow(clippy::non_send_fields_in_send_ty)] // I know 
what I’m doing unsafe impl Send for BitRef<'_, M, T, O> where M: Mutability, T: BitStore + Sync, O: BitOrder, { } unsafe impl Sync for BitRef<'_, M, T, O> where M: Mutability, T: BitStore + Sync, O: BitOrder, { } // This cannot be implemented until `Drop` is specialized to only // ``. // impl Copy for BitRef<'_, Const, T, O> // where O: BitOrder, T: BitStore {} impl Deref for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { type Target = bool; #[inline] fn deref(&self) -> &Self::Target { &self.data } } impl DerefMut for BitRef<'_, Mut, T, O> where T: BitStore, O: BitOrder, { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.data } } impl Drop for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn drop(&mut self) { // `Drop` cannot specialize on type parameters, but only mutable // proxies can commit to memory. if M::CONTAINS_MUTABILITY { unsafe { self.bitptr.to_mut().write(self.data); } } } } impl Not for BitRef<'_, M, T, O> where M: Mutability, T: BitStore, O: BitOrder, { type Output = bool; #[inline] fn not(self) -> Self::Output { !self.data } } bitvec-1.0.1/src/ptr/range.rs000064400000000000000000000210711046102023000141360ustar 00000000000000#![doc = include_str!("../../doc/ptr/range.md")] use core::{ fmt::{ self, Debug, Formatter, }, hash::{ Hash, Hasher, }, iter::FusedIterator, ops::{ Bound, Range, RangeBounds, }, }; use wyz::comu::{ Const, Mutability, }; use super::{ BitPtr, BitSpan, }; use crate::{ devel as dvl, order::{ BitOrder, Lsb0, }, store::BitStore, }; #[repr(C)] #[doc = include_str!("../../doc/ptr/BitPtrRange.md")] pub struct BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { /// The lower, inclusive, bound of the range. The bit to which this points /// is considered live. pub start: BitPtr, /// The higher, exclusive, bound of the range. The bit to which this points /// is considered dead, and the pointer may be one bit beyond the bounds of /// an allocation region. /// /// Because Rust and LLVM both define the address of `base + (len * width)` /// as being within the provenance of `base`, even though that address may /// itself be the base address of another region in a different provenance, /// and bit-pointers are always composed of an ordinary memory address and a /// bit-counter, the ending bit-pointer is always valid. pub end: BitPtr, } impl BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { /// The canonical empty range. All ranges with zero length (equal `.start` /// and `.end`) are equally empty. pub const EMPTY: Self = Self { start: BitPtr::DANGLING, end: BitPtr::DANGLING, }; /// Explicitly converts a `Range` into a `BitPtrRange`. #[inline] pub fn from_range(Range { start, end }: Range>) -> Self { Self { start, end } } /// Explicitly converts a `BitPtrRange` into a `Range`. #[inline] pub fn into_range(self) -> Range> { let Self { start, end } = self; start .. end } /// Tests if the range is empty (the distance between bit-pointers is `0`). /// /// ## Original /// /// [`Range::is_empty`](core::ops::Range::is_empty) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// use bitvec::ptr::BitPtrRange; /// /// let data = 0u8; /// let bp = BitPtr::<_, _, Lsb0>::from_ref(&data); /// let mut range = BitPtrRange::from_range(bp .. 
bp.wrapping_add(1)); /// /// assert!(!range.is_empty()); /// assert_ne!(range.start, range.end); /// /// range.next(); /// /// assert!(range.is_empty()); /// assert_eq!(range.start, range.end); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.start == self.end } /// Tests if a given bit-pointer is contained within the range. /// /// Bit-pointer ordering is defined when the types have the same exact /// `BitOrder` type parameter and the same `BitStore::Mem` associated type /// (but are free to differ in alias condition!). Inclusion in a range /// occurs when the bit-pointer is not strictly less than the range start, /// and is strictly less than the range end. /// /// ## Original /// /// [`Range::contains`](core::ops::Range::contains) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// use bitvec::ptr::BitPtrRange; /// use core::cell::Cell; /// /// let data = 0u16; /// let bp = BitPtr::<_, _, Lsb0>::from_ref(&data); /// /// let mut range = BitPtrRange::from_range(bp .. bp.wrapping_add(16)); /// range.nth(2); /// range.nth_back(2); /// /// assert!(bp < range.start); /// assert!(!range.contains(&bp)); /// /// let mid = bp.wrapping_add(8); /// /// let same_mem = mid.cast::>(); /// assert!(range.contains(&mid)); /// ``` /// /// Casting to a different `BitStore` type whose `Mem` parameter differs /// from the range always results in a `false` response, even if the pointer /// being tested is numerically within the range. #[inline] pub fn contains(&self, pointer: &BitPtr) -> bool where M2: Mutability, T2: BitStore, { dvl::match_store::() && self.start <= *pointer && *pointer < self.end } /// Converts the range into a span descriptor over all live bits. /// /// The produced bit-span does *not* include the bit addressed by `.end`. /// /// ## Safety /// /// The `.start` and `.end` bit-pointers must both be derived from the same /// provenance region. `BitSpan` draws its provenance from the `.start` /// element pointer, and incorrectly extending it beyond the source /// provenance is undefined behavior. pub(crate) unsafe fn into_bitspan(self) -> BitSpan { self.start.span_unchecked(self.len()) } /// Snapshots `.start`, then increments it. /// /// This method is only safe to call when the range is non-empty. #[inline] fn take_front(&mut self) -> BitPtr { let start = self.start; self.start = start.wrapping_add(1); start } /// Decrements `.end`, then returns it. /// /// The bit-pointer returned by this method is always to an alive bit. /// /// This method is only safe to call when the range is non-empty. 
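	///
	/// (Added note: because `.end` is exclusive, decrementing *before*
	/// returning yields the last live bit, mirroring how `take_front`
	/// returns `.start` before advancing it.)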
#[inline] fn take_back(&mut self) -> BitPtr { let prev = self.end.wrapping_sub(1); self.end = prev; prev } } #[cfg(not(tarpaulin_include))] impl Clone for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { Self { ..*self } } } impl Eq for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { } impl PartialEq> for BitPtrRange where M1: Mutability, M2: Mutability, O: BitOrder, T1: BitStore, T2: BitStore, { #[inline] fn eq(&self, other: &BitPtrRange) -> bool { // Pointers over different element types are never equal dvl::match_store::() && self.start == other.start && self.end == other.end } } #[cfg(not(tarpaulin_include))] impl Default for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { Self::EMPTY } } #[cfg(not(tarpaulin_include))] impl From>> for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn from(range: Range>) -> Self { Self::from_range(range) } } #[cfg(not(tarpaulin_include))] impl From> for Range> where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn from(range: BitPtrRange) -> Self { range.into_range() } } #[cfg(not(tarpaulin_include))] impl Debug for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { let Range { start, end } = self.clone().into_range(); Debug::fmt(&start, fmt)?; write!(fmt, "{0}..{0}", if fmt.alternate() { " " } else { "" })?; Debug::fmt(&end, fmt) } } #[cfg(not(tarpaulin_include))] impl Hash for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn hash(&self, state: &mut H) where H: Hasher { self.start.hash(state); self.end.hash(state); } } impl Iterator for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { type Item = BitPtr; easy_iter!(); #[inline] fn next(&mut self) -> Option { if Self::is_empty(&*self) { return None; } Some(self.take_front()) } #[inline] fn nth(&mut self, n: usize) -> Option { if n >= self.len() { self.start = self.end; return None; } self.start = unsafe { self.start.add(n) }; Some(self.take_front()) } } impl DoubleEndedIterator for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { if Self::is_empty(&*self) { return None; } Some(self.take_back()) } #[inline] fn nth_back(&mut self, n: usize) -> Option { if n >= self.len() { self.end = self.start; return None; } let out = unsafe { self.end.sub(n.wrapping_add(1)) }; self.end = out; Some(out) } } impl ExactSizeIterator for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { (unsafe { self.end.offset_from(self.start) }) as usize } } impl FusedIterator for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { } #[cfg(not(tarpaulin_include))] impl RangeBounds> for BitPtrRange where M: Mutability, T: BitStore, O: BitOrder, { #[inline] fn start_bound(&self) -> Bound<&BitPtr> { Bound::Included(&self.start) } #[inline] fn end_bound(&self) -> Bound<&BitPtr> { Bound::Excluded(&self.end) } } bitvec-1.0.1/src/ptr/single.rs000064400000000000000000001202771046102023000143330ustar 00000000000000#![doc = include_str!("../../doc/ptr/single.md")] use core::{ any, cmp, convert::TryFrom, fmt::{ self, Debug, Display, Formatter, Pointer, }, hash::{ Hash, Hasher, }, marker::PhantomData, ptr, }; use tap::{ Pipe, TryConv, }; use wyz::{ comu::{ Address, Const, Frozen, Mut, Mutability, NullPtrError, }, fmt::FmtForward, }; use super::{ check_alignment, AddressExt, BitPtrRange, BitRef, 
BitSpan, BitSpanError, MisalignError, }; use crate::{ access::BitAccess, devel as dvl, index::BitIdx, mem, order::{ BitOrder, Lsb0, }, store::BitStore, }; #[repr(C, packed)] #[doc = include_str!("../../doc/ptr/BitPtr.md")] pub struct BitPtr where M: Mutability, T: BitStore, O: BitOrder, { /// Memory addresses must be well-aligned and non-null. /// /// This is not actually a requirement of `BitPtr`, but it is a requirement /// of `BitSpan`, and it is extended across the entire crate for /// consistency. ptr: Address, /// The index of the referent bit within `*addr`. bit: BitIdx, /// The ordering used to select the bit at `head` in `*addr`. _or: PhantomData, } impl BitPtr where M: Mutability, T: BitStore, O: BitOrder, { /// The canonical dangling pointer. This selects the starting bit of the /// canonical dangling pointer for `T`. pub const DANGLING: Self = Self { ptr: Address::DANGLING, bit: BitIdx::MIN, _or: PhantomData, }; /// Loads the address field, sidestepping any alignment problems. /// /// This is the only safe way to access `(&self).ptr`. Do not perform field /// access on `.ptr` through a reference except through this method. #[inline] fn get_addr(&self) -> Address { unsafe { ptr::addr_of!(self.ptr).read_unaligned() } } /// Tries to construct a `BitPtr` from a memory location and a bit index. /// /// ## Parameters /// /// - `ptr`: The address of a memory element. `Address` wraps raw pointers /// or references, and enforces that they are not null. `BitPtr` /// additionally requires that the address be well-aligned to its type; /// misaligned addresses cause this to return an error. /// - `bit`: The index of the selected bit within `*ptr`. /// /// ## Returns /// /// This returns an error if `ptr` is not aligned to `T`; otherwise, it /// returns a new bit-pointer structure to the given element and bit. /// /// You should typically prefer to use constructors that take directly from /// a memory reference or pointer, such as the `TryFrom<*T>` /// implementations, the `From<&/mut T>` implementations, or the /// [`::from_ref()`], [`::from_mut()`], [`::from_slice()`], or /// [`::from_slice_mut()`] functions. /// /// [`::from_mut()`]: Self::from_mut /// [`::from_ref()`]: Self::from_ref /// [`::from_slice()`]: Self::from_slice /// [`::from_slice_mut()`]: Self::from_slice_mut #[inline] pub fn new( ptr: Address, bit: BitIdx, ) -> Result> { Ok(Self { ptr: check_alignment(ptr)?, bit, ..Self::DANGLING }) } /// Constructs a `BitPtr` from an address and head index, without checking /// the address for validity. /// /// ## Parameters /// /// - `addr`: The memory address to use in the bit-pointer. See the Safety /// section. /// - `head`: The index of the bit in `*addr` that this bit-pointer selects. /// /// ## Returns /// /// A new bit-pointer composed of the parameters. No validity checking is /// performed. /// /// ## Safety /// /// The `Address` type imposes a non-null requirement. `BitPtr` additionally /// requires that `addr` is well-aligned for `T`, and presumes that the /// caller has ensured this with [`bv_ptr::check_alignment`][0]. If this is /// not the case, then the program is incorrect, and subsequent behavior is /// not specified. /// /// [0]: crate::ptr::check_alignment. #[inline] pub unsafe fn new_unchecked( ptr: Address, bit: BitIdx, ) -> Self { if cfg!(debug_assertions) { Self::new(ptr, bit).unwrap() } else { Self { ptr, bit, ..Self::DANGLING } } } /// Gets the address of the base storage element. 
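	///
	/// ## Examples
	///
	/// An added sketch (`.to_const()` comes from the re-exported `wyz`
	/// `Address` type):
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 0u8;
	/// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
	/// assert_eq!(ptr.address().to_const(), &data as *const u8);
	/// ```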
#[inline] pub fn address(self) -> Address { self.get_addr() } /// Gets the `BitIdx` that selects the bit within the memory element. #[inline] pub fn bit(self) -> BitIdx { self.bit } /// Decomposes a bit-pointer into its element address and bit index. /// /// ## Parameters /// /// - `self` /// /// ## Returns /// /// - `.0`: The memory address in which the referent bit is located. /// - `.1`: The index of the referent bit in `*.0` according to the `O` type /// parameter. #[inline] pub fn raw_parts(self) -> (Address, BitIdx) { (self.address(), self.bit()) } /// Converts a bit-pointer into a span descriptor by attaching a length /// counter (in bits). /// /// ## Parameters /// /// - `self`: The base address of the produced span. /// - `bits`: The length, in bits, of the span. /// /// ## Returns /// /// A span descriptor beginning at `self` and ending (exclusive) at `self + /// bits`. This fails if it is unable to encode the requested span into a /// descriptor. pub(crate) fn span( self, bits: usize, ) -> Result, BitSpanError> { BitSpan::new(self.ptr, self.bit, bits) } /// Converts a bit-pointer into a span descriptor, without performing /// encoding validity checks. /// /// ## Parameters /// /// - `self`: The base address of the produced span. /// - `bits`: The length, in bits, of the span. /// /// ## Returns /// /// An encoded span descriptor of `self` and `bits`. Note that no validity /// checks are performed! /// /// ## Safety /// /// The caller must ensure that the rules of `BitSpan::new` are not /// violated. Typically this method should only be used on parameters that /// have already passed through `BitSpan::new` and are known to be good. pub(crate) unsafe fn span_unchecked(self, bits: usize) -> BitSpan { BitSpan::new_unchecked(self.get_addr(), self.bit, bits) } /// Produces a bit-pointer range beginning at `self` (inclusive) and ending /// at `self + count` (exclusive). /// /// ## Safety /// /// `self + count` must be within the same provenance region as `self`. The /// first bit past the end of an allocation is included in provenance /// regions, though it is not dereferenceable and will not be dereferenced. /// /// It is unsound to *even construct* a pointer that departs the provenance /// region, even if that pointer is never dereferenced! pub(crate) unsafe fn range(self, count: usize) -> BitPtrRange { (self .. self.add(count)).into() } /// Removes write permissions from a bit-pointer. #[inline] pub fn to_const(self) -> BitPtr { let Self { ptr: addr, bit: head, .. } = self; BitPtr { ptr: addr.immut(), bit: head, ..BitPtr::DANGLING } } /// Adds write permissions to a bit-pointer. /// /// ## Safety /// /// This pointer must have been derived from a `*mut` pointer. #[inline] pub unsafe fn to_mut(self) -> BitPtr { let Self { ptr: addr, bit: head, .. } = self; BitPtr { ptr: addr.assert_mut(), bit: head, ..BitPtr::DANGLING } } /// Freezes a bit-pointer, forbidding direct mutation. /// /// This is used as a necessary prerequisite to all mutation of memory. /// `BitPtr` uses an implementation scoped to `Frozen<_>` to perform /// alias-aware writes; see below. pub(crate) fn freeze(self) -> BitPtr, T, O> { let Self { ptr: addr, bit: head, .. } = self; BitPtr { ptr: addr.freeze(), bit: head, ..BitPtr::DANGLING } } } impl BitPtr where T: BitStore, O: BitOrder, { /// Constructs a `BitPtr` to the zeroth bit in a single element. 
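	///
	/// ## Examples
	///
	/// An added illustration:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 1u8;
	/// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
	/// // Bit 0 (in Lsb0 ordering) of `1u8` is set.
	/// assert!(unsafe { ptr.read() });
	/// ```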
#[inline] pub fn from_ref(elem: &T) -> Self { unsafe { Self::new_unchecked(elem.into(), BitIdx::MIN) } } /// Constructs a `BitPtr` to the zeroth bit in the zeroth element of a /// slice. /// /// This method is distinct from `Self::from_ref(&elem[0])`, because it /// ensures that the returned bit-pointer has provenance over the entire /// slice. Indexing within a slice narrows the provenance range, and makes /// departure from the subslice, *even within the original slice*, illegal. #[inline] pub fn from_slice(slice: &[T]) -> Self { unsafe { Self::new_unchecked(slice.as_ptr().into_address(), BitIdx::MIN) } } /// Gets a raw pointer to the memory element containing the selected bit. #[inline] #[cfg(not(tarpaulin_include))] pub fn pointer(&self) -> *const T { self.get_addr().to_const() } } impl BitPtr where T: BitStore, O: BitOrder, { /// Constructs a mutable `BitPtr` to the zeroth bit in a single element. #[inline] pub fn from_mut(elem: &mut T) -> Self { unsafe { Self::new_unchecked(elem.into(), BitIdx::MIN) } } /// Constructs a `BitPtr` to the zeroth bit in the zeroth element of a /// mutable slice. /// /// This method is distinct from `Self::from_mut(&mut elem[0])`, because it /// ensures that the returned bit-pointer has provenance over the entire /// slice. Indexing within a slice narrows the provenance range, and makes /// departure from the subslice, *even within the original slice*, illegal. #[inline] pub fn from_mut_slice(slice: &mut [T]) -> Self { unsafe { Self::new_unchecked(slice.as_mut_ptr().into_address(), BitIdx::MIN) } } /// Constructs a mutable `BitPtr` to the zeroth bit in the zeroth element of /// a slice. /// /// This method is distinct from `Self::from_mut(&mut elem[0])`, because it /// ensures that the returned bit-pointer has provenance over the entire /// slice. Indexing within a slice narrows the provenance range, and makes /// departure from the subslice, *even within the original slice*, illegal. #[inline] pub fn from_slice_mut(slice: &mut [T]) -> Self { unsafe { Self::new_unchecked(slice.as_mut_ptr().into_address(), BitIdx::MIN) } } /// Gets a raw pointer to the memory location containing the selected bit. #[inline] #[cfg(not(tarpaulin_include))] pub fn pointer(&self) -> *mut T { self.get_addr().to_mut() } } /// Port of the `*bool` inherent API. impl BitPtr where M: Mutability, T: BitStore, O: BitOrder, { /// Tests if a bit-pointer is the null value. /// /// This is always false, as a `BitPtr` is a `NonNull` internally. Use /// `Option` to express the potential for a null pointer. /// /// ## Original /// /// [`pointer::is_null`](https://doc.rust-lang.org/std/primitive.pointer.html#method.is_null) #[inline] #[deprecated = "`BitPtr` is never null"] pub fn is_null(self) -> bool { false } /// Casts to a `BitPtr` with a different storage parameter. /// /// This is not free! In order to maintain value integrity, it encodes a /// `BitSpan` encoded descriptor with its value, casts that, then decodes /// into a `BitPtr` of the target type. If `T` and `U` have different /// `::Mem` associated types, then this may change the selected bit in /// memory. This is an unavoidable cost of the addressing and encoding /// schemes. 
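	///
	/// (Added note: when `T::Mem` and `U::Mem` have the same width — for
	/// example casting `u8` to `Cell<u8>` or to its alias-safe wrapper —
	/// the round-trip through the span encoding is lossless and the
	/// selected bit does not move.)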
/// Port of the `*bool` inherent API.
impl<M, T, O> BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Tests if a bit-pointer is the null value.
	///
	/// This is always false, as a `BitPtr` is a `NonNull` internally. Use
	/// `Option<BitPtr>` to express the potential for a null pointer.
	///
	/// ## Original
	///
	/// [`pointer::is_null`](https://doc.rust-lang.org/std/primitive.pointer.html#method.is_null)
	#[inline]
	#[deprecated = "`BitPtr` is never null"]
	pub fn is_null(self) -> bool {
		false
	}

	/// Casts to a `BitPtr` with a different storage parameter.
	///
	/// This is not free! In order to maintain value integrity, it encodes a
	/// `BitSpan` encoded descriptor with its value, casts that, then decodes
	/// into a `BitPtr` of the target type. If `T` and `U` have different
	/// `::Mem` associated types, then this may change the selected bit in
	/// memory. This is an unavoidable cost of the addressing and encoding
	/// schemes.
	///
	/// ## Original
	///
	/// [`pointer::cast`](https://doc.rust-lang.org/std/primitive.pointer.html#method.cast)
	#[inline]
	pub fn cast<U>(self) -> BitPtr<M, U, O>
	where U: BitStore {
		let (addr, head, _) =
			unsafe { self.span_unchecked(1) }.cast::<U>().raw_parts();
		unsafe { BitPtr::new_unchecked(addr, head) }
	}

	/// Decomposes a bit-pointer into its address and head-index components.
	///
	/// ## Original
	///
	/// [`pointer::to_raw_parts`](https://doc.rust-lang.org/std/primitive.pointer.html#method.to_raw_parts)
	///
	/// ## API Differences
	///
	/// The original method is unstable as of 1.54.0; however, because
	/// `BitPtr` already has a similar API, the name is optimistically
	/// stabilized here. Prefer [`.raw_parts()`] until the original inherent
	/// stabilizes.
	///
	/// [`.raw_parts()`]: Self::raw_parts
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn to_raw_parts(self) -> (Address<M, T>, BitIdx<T::Mem>) {
		self.raw_parts()
	}

	/// Produces a proxy reference to the referent bit.
	///
	/// Because `BitPtr` guarantees that it is non-null and well-aligned, this
	/// never returns `None`. However, this is still unsafe to call on any
	/// bit-pointers created from conjured values rather than known
	/// references.
	///
	/// ## Original
	///
	/// [`pointer::as_ref`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref)
	///
	/// ## API Differences
	///
	/// This produces a proxy type rather than a true reference. The proxy
	/// implements `Deref<Target = bool>`, and can be converted to `&bool`
	/// with a reborrow `&*`.
	///
	/// ## Safety
	///
	/// Since `BitPtr` does not permit null or misaligned pointers, this
	/// method will always dereference the pointer in order to create the
	/// proxy. As such, you must ensure the following conditions are met:
	///
	/// - the pointer must be dereferenceable as defined in the standard
	///   library documentation
	/// - the pointer must point to an initialized instance of `T`
	/// - you must ensure that no other pointer will race to modify the
	///   referent location while this call is reading from memory to produce
	///   the proxy
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 1u8;
	/// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
	/// let val = unsafe { ptr.as_ref() }.unwrap();
	/// assert!(*val);
	/// ```
	#[inline]
	pub unsafe fn as_ref<'a>(self) -> Option<BitRef<'a, Const, T, O>> {
		Some(BitRef::from_bitptr(self.to_const()))
	}

	/// Creates a new bit-pointer at a specified offset from the original.
	///
	/// `count` is in units of bits.
	///
	/// ## Original
	///
	/// [`pointer::offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset)
	///
	/// ## Safety
	///
	/// `BitPtr` is implemented with Rust raw pointers internally, and is
	/// subject to all of Rust’s rules about provenance and permission
	/// tracking. You must abide by the safety rules established in the
	/// original method, to which this internally delegates.
	///
	/// Additionally, `bitvec` imposes its own rules: while Rust cannot
	/// observe provenance beyond an element or byte level, `bitvec` demands
	/// that `&mut BitSlice` have exclusive view over all bits it observes.
	/// You must not produce a bit-pointer that departs a `BitSlice` region
	/// and intrudes on any `&mut BitSlice`’s handle, and you must not produce
	/// a write-capable bit-pointer that intrudes on a `&BitSlice` handle that
	/// expects its contents to be immutable.
	///
	/// Note that it is illegal to *construct* a bit-pointer that invalidates
	/// any of these rules.
	/// If you wish to defer safety-checking to the point of dereferencing,
	/// and allow the temporary construction *but not dereference* of illegal
	/// `BitPtr`s, use [`.wrapping_offset()`] instead.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 5u8;
	/// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
	/// unsafe {
	///   assert!(ptr.read());
	///   assert!(!ptr.offset(1).read());
	///   assert!(ptr.offset(2).read());
	/// }
	/// ```
	///
	/// [`.wrapping_offset()`]: Self::wrapping_offset
	#[inline]
	#[must_use = "returns a new bit-pointer rather than modifying its argument"]
	pub unsafe fn offset(self, count: isize) -> Self {
		let (elts, head) = self.bit.offset(count);
		Self::new_unchecked(self.ptr.offset(elts), head)
	}

	/// Creates a new bit-pointer at a specified offset from the original.
	///
	/// `count` is in units of bits.
	///
	/// ## Original
	///
	/// [`pointer::wrapping_offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_offset)
	///
	/// ## API Differences
	///
	/// `bitvec` makes it explicitly illegal to wrap a pointer around the high
	/// end of the address space, because it is incapable of representing a
	/// null pointer.
	///
	/// However, `<*T>::wrapping_offset` has additional properties as a result
	/// of its tolerance for wrapping the address space: it tolerates
	/// departing a provenance region, and is not unsafe to use to *create* a
	/// bit-pointer that is outside the bounds of its original provenance.
	///
	/// ## Safety
	///
	/// This function is safe to use because the bit-pointers it creates defer
	/// their provenance checks until the point of dereference. As such, you
	/// can safely use this to perform arbitrary pointer arithmetic that Rust
	/// considers illegal in ordinary arithmetic, as long as you do not
	/// dereference the bit-pointer until it has been brought in bounds of the
	/// originating provenance region.
	///
	/// This means that, to the Rust rule engine,
	/// `let z = x.wrapping_add(y as usize).wrapping_sub(x as usize);` is not
	/// equivalent to `y`, but `z` is safe to construct, and
	/// `z.wrapping_add(x as usize).wrapping_sub(y as usize)` produces a
	/// bit-pointer that *is* equivalent to `x`.
	///
	/// See the documentation of the original method for more details about
	/// provenance regions, and the distinctions that the optimizer makes
	/// about them.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 0u32;
	/// let mut ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
	/// let end = ptr.wrapping_offset(32);
	/// while ptr < end {
	///   # #[cfg(feature = "std")] {
	///   println!("{}", unsafe { ptr.read() });
	///   # }
	///   ptr = ptr.wrapping_offset(3);
	/// }
	/// ```
	#[inline]
	#[must_use = "returns a new bit-pointer rather than modifying its argument"]
	pub fn wrapping_offset(self, count: isize) -> Self {
		let (elts, head) = self.bit.offset(count);
		unsafe { Self::new_unchecked(self.ptr.wrapping_offset(elts), head) }
	}

	/// Calculates the distance (in bits) between two bit-pointers.
	///
	/// This method is the inverse of [`.offset()`].
	///
	/// ## Original
	///
	/// [`pointer::offset_from`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset_from)
	///
	/// ## API Differences
	///
	/// The base pointer may have a different `BitStore` type parameter, as
	/// long as they share an underlying memory type. This is necessary in
	/// order to accommodate aliasing markers introduced between when an
	/// origin pointer was taken and when `self` is compared against it.
	///
	/// ## Safety
	///
	/// Both `self` and `origin` **must** be drawn from the same provenance
	/// region. This means that they must be created from the same Rust
	/// allocation, whether with `let` or the allocator API, and must be in
	/// the (inclusive) range `base ..= base + len`. The first bit past the
	/// end of a region can be addressed, just not dereferenced.
	///
	/// See the original `<*T>::offset_from` for more details on region
	/// safety.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 0u32;
	/// let base = BitPtr::<_, _, Lsb0>::from_ref(&data);
	/// let low = unsafe { base.add(10) };
	/// let high = unsafe { low.add(15) };
	/// unsafe {
	///   assert_eq!(high.offset_from(low), 15);
	///   assert_eq!(low.offset_from(high), -15);
	///   assert_eq!(low.offset(15), high);
	///   assert_eq!(high.offset(-15), low);
	/// }
	/// ```
	///
	/// While this method is safe to *construct* bit-pointers that depart a
	/// provenance region, it remains illegal to *dereference* those pointers!
	///
	/// This usage is incorrect, and a program that contains it is not
	/// well-formed.
	///
	/// ```rust,no_run
	/// use bitvec::prelude::*;
	///
	/// let a = 0u8;
	/// let b = !0u8;
	///
	/// let a_ptr = BitPtr::<_, _, Lsb0>::from_ref(&a);
	/// let b_ptr = BitPtr::<_, _, Lsb0>::from_ref(&b);
	/// let diff = (b_ptr.pointer() as isize)
	///   .wrapping_sub(a_ptr.pointer() as isize)
	///   // Remember: raw pointers are byte-stepped,
	///   // but bit-pointers are bit-stepped.
	///   .wrapping_mul(8);
	/// // This pointer to `b` has `a`’s provenance:
	/// let b_ptr_2 = a_ptr.wrapping_offset(diff);
	///
	/// // They are *arithmetically* equal:
	/// assert_eq!(b_ptr, b_ptr_2);
	/// // But it is still undefined behavior to cross provenances!
	/// assert_eq!(0, unsafe { b_ptr_2.offset_from(b_ptr) });
	/// ```
	///
	/// [`.offset()`]: Self::offset
	#[inline]
	pub unsafe fn offset_from<U>(self, origin: BitPtr<M, U, O>) -> isize
	where U: BitStore<Mem = T::Mem> {
		self.get_addr()
			.cast::<T::Mem>()
			.offset_from(origin.get_addr().cast::<T::Mem>())
			.wrapping_mul(mem::bits_of::<T::Mem>() as isize)
			.wrapping_add(self.bit.into_inner() as isize)
			.wrapping_sub(origin.bit.into_inner() as isize)
	}

	/// Adjusts a bit-pointer upwards in memory. This is equivalent to
	/// `.offset(count as isize)`.
	///
	/// `count` is in units of bits.
	///
	/// ## Original
	///
	/// [`pointer::add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.add)
	///
	/// ## Safety
	///
	/// See [`.offset()`](Self::offset).
	#[inline]
	#[must_use = "returns a new bit-pointer rather than modifying its argument"]
	pub unsafe fn add(self, count: usize) -> Self {
		self.offset(count as isize)
	}

	/// Adjusts a bit-pointer downwards in memory. This is equivalent to
	/// `.offset((count as isize).wrapping_neg())`.
	///
	/// `count` is in units of bits.
	///
	/// ## Original
	///
	/// [`pointer::sub`](https://doc.rust-lang.org/std/primitive.pointer.html#method.sub)
	///
	/// ## Safety
	///
	/// See [`.offset()`](Self::offset).
	#[inline]
	#[must_use = "returns a new bit-pointer rather than modifying its argument"]
	pub unsafe fn sub(self, count: usize) -> Self {
		self.offset((count as isize).wrapping_neg())
	}

	/// Adjusts a bit-pointer upwards in memory, using wrapping semantics.
	/// This is equivalent to `.wrapping_offset(count as isize)`.
	///
	/// `count` is in units of bits.
	///
	/// ## Original
	///
	/// [`pointer::wrapping_add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add)
	///
	/// ## Safety
	///
	/// See [`.wrapping_offset()`](Self::wrapping_offset).
	#[inline]
	#[must_use = "returns a new bit-pointer rather than modifying its argument"]
	pub fn wrapping_add(self, count: usize) -> Self {
		self.wrapping_offset(count as isize)
	}

	/// Adjusts a bit-pointer downwards in memory, using wrapping semantics.
	/// This is equivalent to
	/// `.wrapping_offset((count as isize).wrapping_neg())`.
	///
	/// `count` is in units of bits.
	///
	/// ## Original
	///
	/// [`pointer::wrapping_sub`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub)
	///
	/// ## Safety
	///
	/// See [`.wrapping_offset()`](Self::wrapping_offset).
	#[inline]
	#[must_use = "returns a new bit-pointer rather than modifying its argument"]
	pub fn wrapping_sub(self, count: usize) -> Self {
		self.wrapping_offset((count as isize).wrapping_neg())
	}

	/// Reads the bit from `*self`.
	///
	/// ## Original
	///
	/// [`pointer::read`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read)
	///
	/// ## Safety
	///
	/// See [`ptr::read`](crate::ptr::read).
	#[inline]
	pub unsafe fn read(self) -> bool {
		(*self.ptr.to_const()).load_value().get_bit::<O>(self.bit)
	}

	/// Reads the bit from `*self` using a volatile load.
	///
	/// Prefer using a crate such as [`voladdress`][0] to manage volatile I/O
	/// and use `bitvec` only on the local objects it provides. Individual I/O
	/// operations for individual bits are likely not the behavior you want.
	///
	/// ## Original
	///
	/// [`pointer::read_volatile`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile)
	///
	/// ## Safety
	///
	/// See [`ptr::read_volatile`](crate::ptr::read_volatile).
	///
	/// [0]: https://docs.rs/voladdress/latest/voladdress
	#[inline]
	pub unsafe fn read_volatile(self) -> bool {
		self.ptr.to_const().read_volatile().get_bit::<O>(self.bit)
	}

	/// Reads the bit from `*self` using an unaligned memory access.
	///
	/// `BitPtr` forbids unaligned addresses. If you have such an address, you
	/// must perform your memory accesses on the raw element, and only use
	/// `bitvec` on a well-aligned stack temporary. This method should never
	/// be necessary.
	///
	/// ## Original
	///
	/// [`pointer::read_unaligned`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned)
	///
	/// ## Safety
	///
	/// See [`ptr::read_unaligned`](crate::ptr::read_unaligned)
	#[inline]
	#[deprecated = "`BitPtr` does not have unaligned addresses"]
	pub unsafe fn read_unaligned(self) -> bool {
		self.ptr.to_const().read_unaligned().get_bit::<O>(self.bit)
	}

	/// Copies `count` bits from `self` to `dest`. The source and destination
	/// may overlap.
	///
	/// Note that overlap is only defined when `O` and `O2` are the same type.
	/// If they differ, then `bitvec` does not define overlap, and assumes
	/// that they are wholly discrete in memory.
	///
	/// ## Original
	///
	/// [`pointer::copy_to`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to)
	///
	/// ## Safety
	///
	/// See [`ptr::copy`](crate::ptr::copy).
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub unsafe fn copy_to<T2, O2>(self, dest: BitPtr<Mut, T2, O2>, count: usize)
	where
		T2: BitStore,
		O2: BitOrder,
	{
		super::copy(self.to_const(), dest, count);
	}

	/// Copies `count` bits from `self` to `dest`. The source and destination
	/// may *not* overlap.
	///
	/// ## Original
	///
	/// [`pointer::copy_to_nonoverlapping`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to_nonoverlapping)
	///
	/// ## Safety
	///
	/// See [`ptr::copy_nonoverlapping`](crate::ptr::copy_nonoverlapping).
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub unsafe fn copy_to_nonoverlapping<T2, O2>(
		self,
		dest: BitPtr<Mut, T2, O2>,
		count: usize,
	) where
		T2: BitStore,
		O2: BitOrder,
	{
		super::copy_nonoverlapping(self.to_const(), dest, count);
	}

	/// Computes the offset (in bits) that needs to be applied to the
	/// bit-pointer in order to make it aligned to the given *byte* alignment.
	///
	/// “Alignment” here means that the bit-pointer selects the starting bit
	/// of a memory location whose address satisfies the requested alignment.
	///
	/// `align` is measured in **bytes**. If you wish to align your
	/// bit-pointer to a specific fraction (½, ¼, or ⅛ of one byte), please
	/// file an issue and I will work on adding this functionality.
	///
	/// ## Original
	///
	/// [`pointer::align_offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.align_offset)
	///
	/// ## Notes
	///
	/// If the base-element address of the bit-pointer is already aligned to
	/// `align`, then this will return the bit-offset required to select the
	/// first bit of the successor element.
	///
	/// If it is not possible to align the bit-pointer, then the
	/// implementation returns `usize::MAX`.
	///
	/// The return value is measured in bits, not `T` elements or bytes. The
	/// only thing you can do with it is pass it into [`.add()`] or
	/// [`.wrapping_add()`].
	///
	/// Note from the standard library: It is permissible for the
	/// implementation to *always* return `usize::MAX`. Only your algorithm’s
	/// performance can depend on getting a usable offset here; it must be
	/// correct independently of this function providing a useful value.
	///
	/// ## Safety
	///
	/// There are no guarantees whatsoëver that offsetting the bit-pointer
	/// will not overflow or go beyond the allocation that the bit-pointer
	/// selects. It is up to the caller to ensure that the returned offset is
	/// correct in all terms other than alignment.
	///
	/// ## Panics
	///
	/// This method panics if `align` is not a power of two.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = [0u8; 3];
	/// let ptr = BitPtr::<_, _, Lsb0>::from_slice(&data);
	/// let ptr = unsafe { ptr.add(2) };
	/// let count = ptr.align_offset(2);
	/// assert!(count >= 6);
	/// ```
	///
	/// [`.add()`]: Self::add
	/// [`.wrapping_add()`]: Self::wrapping_add
	#[inline]
	pub fn align_offset(self, align: usize) -> usize {
		let width = mem::bits_of::<T::Mem>();
		match (
			self.ptr.to_const().align_offset(align),
			self.bit.into_inner() as usize,
		) {
			(0, 0) => 0,
			(0, head) => align * mem::bits_of::<u8>() - head,
			(usize::MAX, _) => usize::MAX,
			(elts, head) => elts.wrapping_mul(width).wrapping_sub(head),
		}
	}
}
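//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: it demonstrates the `cast` caveat documented above. Between
//  storage types that share a `::Mem` type, the selected bit is unchanged.
#[cfg(test)]
mod cast_sketch {
	use core::cell::Cell;

	use crate::prelude::*;

	#[test]
	fn cast_between_same_width_stores() {
		let data = 0b0100u8;
		let ptr = unsafe { BitPtr::<_, _, Lsb0>::from_ref(&data).add(2) };
		assert!(unsafe { ptr.read() });
		//  `u8` and `Cell<u8>` have the same `::Mem`, so the bit selection
		//  survives the encode-cast-decode round trip.
		let cell_ptr = ptr.cast::<Cell<u8>>();
		assert!(unsafe { cell_ptr.read() });
	}
}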
/// Port of the `*mut bool` inherent API.
impl<T, O> BitPtr<Mut, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	/// Produces a proxy reference to the referent bit.
	///
	/// Because `BitPtr` guarantees that it is non-null and well-aligned, this
	/// never returns `None`. However, this is still unsafe to call on any
	/// bit-pointers created from conjured values rather than known
	/// references.
	///
	/// ## Original
	///
	/// [`pointer::as_mut`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_mut)
	///
	/// ## API Differences
	///
	/// This produces a proxy type rather than a true reference. The proxy
	/// implements `DerefMut`, and can be converted to `&mut bool` with a
	/// reborrow `&mut *`.
	///
	/// Writes to the proxy are not reflected in the proxied location until
	/// the proxy is destroyed, either through `Drop` or its [`.commit()`]
	/// method.
	///
	/// ## Safety
	///
	/// Since `BitPtr` does not permit null or misaligned pointers, this
	/// method will always dereference the pointer in order to create the
	/// proxy. As such, you must ensure the following conditions are met:
	///
	/// - the pointer must be dereferenceable as defined in the standard
	///   library documentation
	/// - the pointer must point to an initialized instance of `T`
	/// - you must ensure that no other pointer will race to modify the
	///   referent location while this call is reading from memory to produce
	///   the proxy
	/// - you must ensure that no other `bitvec` handle targets the referent
	///   bit
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = 0u8;
	/// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
	/// let mut val = unsafe { ptr.as_mut() }.unwrap();
	/// assert!(!*val);
	/// *val = true;
	/// assert!(*val);
	/// ```
	///
	/// [`.commit()`]: crate::ptr::BitRef::commit
	#[inline]
	pub unsafe fn as_mut<'a>(self) -> Option<BitRef<'a, Mut, T, O>> {
		Some(BitRef::from_bitptr(self))
	}

	/// Copies `count` bits from the region starting at `src` to the region
	/// starting at `self`.
	///
	/// The regions are free to overlap; the implementation will detect
	/// overlap and correctly avoid it.
	///
	/// Note: this has the *opposite* argument order from [`ptr::copy`]:
	/// `self` is the destination, not the source.
	///
	/// ## Original
	///
	/// [`pointer::copy_from`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_from)
	///
	/// ## Safety
	///
	/// See [`ptr::copy`].
	///
	/// [`ptr::copy`]: crate::ptr::copy
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub unsafe fn copy_from<T2, O2>(
		self,
		src: BitPtr<Const, T2, O2>,
		count: usize,
	) where
		T2: BitStore,
		O2: BitOrder,
	{
		src.copy_to(self, count);
	}

	/// Copies `count` bits from the region starting at `src` to the region
	/// starting at `self`.
	///
	/// Unlike [`.copy_from()`], the two regions may *not* overlap; this
	/// method does not attempt to detect overlap and thus may have a slight
	/// performance boost over the overlap-handling `.copy_from()`.
	///
	/// Note: this has the *opposite* argument order from
	/// [`ptr::copy_nonoverlapping`]: `self` is the destination, not the
	/// source.
	///
	/// ## Original
	///
	/// [`pointer::copy_from_nonoverlapping`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_from_nonoverlapping)
	///
	/// ## Safety
	///
	/// See [`ptr::copy_nonoverlapping`].
	///
	/// [`.copy_from()`]: Self::copy_from
	/// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub unsafe fn copy_from_nonoverlapping<T2, O2>(
		self,
		src: BitPtr<Const, T2, O2>,
		count: usize,
	) where
		T2: BitStore,
		O2: BitOrder,
	{
		src.copy_to_nonoverlapping(self, count);
	}

	/// Runs the destructor of the referent value.
	///
	/// `bool` has no destructor; this function does nothing.
	///
	/// ## Original
	///
	/// [`pointer::drop_in_place`](https://doc.rust-lang.org/std/primitive.pointer.html#method.drop_in_place)
	///
	/// ## Safety
	///
	/// See [`ptr::drop_in_place`].
	///
	/// [`ptr::drop_in_place`]: crate::ptr::drop_in_place
	#[inline]
	#[deprecated = "this has no effect, and should not be called"]
	pub fn drop_in_place(self) {}

	/// Writes a new bit into the given location.
	///
	/// ## Original
	///
	/// [`pointer::write`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write)
	///
	/// ## Safety
	///
	/// See [`ptr::write`].
	///
	/// [`ptr::write`]: crate::ptr::write
	#[inline]
	pub unsafe fn write(self, value: bool) {
		self.replace(value);
	}

	/// Writes a new bit using volatile I/O operations.
	///
	/// Because processors do not generally have single-bit read or write
	/// instructions, this must perform a volatile read of the entire memory
	/// location, perform the write locally, then perform another volatile
	/// write to the entire location. These three steps are guaranteed to be
	/// sequential with respect to each other, but are not guaranteed to be
	/// atomic.
	///
	/// Volatile operations are intended to act on I/O memory, and are *only*
	/// guaranteed not to be elided or reördered by the compiler across other
	/// I/O operations.
	///
	/// You should not use `bitvec` to act on volatile memory. You should use
	/// a crate specialized for volatile I/O work, such as [`voladdress`], and
	/// use it to explicitly manage the I/O and ask it to perform `bitvec`
	/// work only on the local snapshot of a volatile location.
	///
	/// ## Original
	///
	/// [`pointer::write_volatile`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_volatile)
	///
	/// ## Safety
	///
	/// See [`ptr::write_volatile`].
	///
	/// [`ptr::write_volatile`]: crate::ptr::write_volatile
	/// [`voladdress`]: https://docs.rs/voladdress/latest/voladdress
	#[inline]
	#[allow(clippy::needless_borrow)] // Clippy is wrong.
	pub unsafe fn write_volatile(self, value: bool) {
		let ptr = self.ptr.to_mut();
		let mut tmp = ptr.read_volatile();
		Self::new_unchecked((&mut tmp).into(), self.bit).write(value);
		ptr.write_volatile(tmp);
	}

	/// Writes a bit into memory, tolerating unaligned addresses.
	///
	/// `BitPtr` does not have unaligned addresses. `BitPtr` itself is capable
	/// of operating on misaligned addresses, but elects to disallow use of
	/// them in keeping with the rest of `bitvec`’s requirements.
	///
	/// ## Original
	///
	/// [`pointer::write_unaligned`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_unaligned)
	///
	/// ## Safety
	///
	/// See [`ptr::write_unaligned`].
	///
	/// [`ptr::write_unaligned`]: crate::ptr::write_unaligned
	#[inline]
	#[allow(clippy::needless_borrow)] // Clippy is wrong.
	#[deprecated = "`BitPtr` does not have unaligned addresses"]
	pub unsafe fn write_unaligned(self, value: bool) {
		let ptr = self.ptr.to_mut();
		let mut tmp = ptr.read_unaligned();
		Self::new_unchecked((&mut tmp).into(), self.bit).write(value);
		ptr.write_unaligned(tmp);
	}

	/// Replaces the bit at `*self` with a new value, returning the previous
	/// value.
	///
	/// ## Original
	///
	/// [`pointer::replace`](https://doc.rust-lang.org/std/primitive.pointer.html#method.replace)
	///
	/// ## Safety
	///
	/// See [`ptr::replace`].
	///
	/// [`ptr::replace`]: crate::ptr::replace
	#[inline]
	pub unsafe fn replace(self, value: bool) -> bool {
		self.freeze().frozen_write_bit(value)
	}

	/// Swaps the bits at two mutable locations.
	///
	/// ## Original
	///
	/// [`pointer::swap`](https://doc.rust-lang.org/std/primitive.pointer.html#method.swap)
	///
	/// ## Safety
	///
	/// See [`ptr::swap`].
	///
	/// [`ptr::swap`]: crate::ptr::swap
	#[inline]
	pub unsafe fn swap<T2, O2>(self, with: BitPtr<Mut, T2, O2>)
	where
		T2: BitStore,
		O2: BitOrder,
	{
		self.write(with.replace(self.read()));
	}
}
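//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: it exercises the read-modify-write primitives above through
//  the public prelude.
#[cfg(test)]
mod replace_swap_sketch {
	use crate::prelude::*;

	#[test]
	fn replace_then_swap() {
		let mut a = 1u8;
		let mut b = 0u8;

		//  `replace` writes the new bit and returns the old one.
		let pa = BitPtr::<_, _, Lsb0>::from_mut(&mut a);
		assert!(unsafe { pa.replace(false) });
		assert_eq!(a, 0);

		//  `swap` exchanges two referent bits, even across elements.
		a = 1;
		let pa = BitPtr::<_, _, Lsb0>::from_mut(&mut a);
		let pb = BitPtr::<_, _, Lsb0>::from_mut(&mut b);
		unsafe {
			pa.swap(pb);
		}
		assert_eq!((a, b), (0, 1));
	}
}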
impl<M, T, O> BitPtr<Frozen<M>, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Writes through a bit-pointer that has had its mutability permission
	/// removed.
	///
	/// This is used to allow `BitPtr<Frozen<Mut>>` pointers, which are not
	/// `Mut` but may still modify memory, to do so.
	pub(crate) unsafe fn frozen_write_bit(self, value: bool) -> bool {
		(*self.ptr.cast::<T::Access>().to_const())
			.write_bit::<O>(self.bit, value)
	}
}

#[cfg(not(tarpaulin_include))]
impl<M, T, O> Clone for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn clone(&self) -> Self {
		Self {
			ptr: self.get_addr(),
			..*self
		}
	}
}

impl<M, T, O> Eq for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
}

impl<M, T, O> Ord for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn cmp(&self, other: &Self) -> cmp::Ordering {
		self.partial_cmp(other).expect(
			"BitPtr has a total ordering when type parameters are identical",
		)
	}
}

impl<M1, M2, T1, T2, O> PartialEq<BitPtr<M2, T2, O>> for BitPtr<M1, T1, O>
where
	M1: Mutability,
	M2: Mutability,
	T1: BitStore,
	T2: BitStore,
	O: BitOrder,
{
	#[inline]
	fn eq(&self, other: &BitPtr<M2, T2, O>) -> bool {
		if !dvl::match_store::<T1::Mem, T2::Mem>() {
			return false;
		}
		self.get_addr().to_const() as usize
			== other.get_addr().to_const() as usize
			&& self.bit.into_inner() == other.bit.into_inner()
	}
}

impl<M1, M2, T1, T2, O> PartialOrd<BitPtr<M2, T2, O>> for BitPtr<M1, T1, O>
where
	M1: Mutability,
	M2: Mutability,
	T1: BitStore,
	T2: BitStore,
	O: BitOrder,
{
	#[inline]
	fn partial_cmp(&self, other: &BitPtr<M2, T2, O>) -> Option<cmp::Ordering> {
		if !dvl::match_store::<T1::Mem, T2::Mem>() {
			return None;
		}
		match (self.get_addr().to_const() as usize)
			.cmp(&(other.get_addr().to_const() as usize))
		{
			cmp::Ordering::Equal => {
				self.bit.into_inner().partial_cmp(&other.bit.into_inner())
			},
			ord => Some(ord),
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O> From<&T> for BitPtr<Const, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn from(elem: &T) -> Self {
		Self::from_ref(elem)
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O> From<&mut T> for BitPtr<Mut, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn from(elem: &mut T) -> Self {
		Self::from_mut(elem)
	}
}

impl<T, O> TryFrom<*const T> for BitPtr<Const, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	type Error = BitPtrError<T>;

	#[inline]
	fn try_from(elem: *const T) -> Result<Self, Self::Error> {
		elem.try_conv::<Address<Const, T>>()?
			.pipe(|ptr| Self::new(ptr, BitIdx::MIN))?
			.pipe(Ok)
	}
}

impl<T, O> TryFrom<*mut T> for BitPtr<Mut, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	type Error = BitPtrError<T>;

	#[inline]
	fn try_from(elem: *mut T) -> Result<Self, Self::Error> {
		elem.try_conv::<Address<Mut, T>>()?
			.pipe(|ptr| Self::new(ptr, BitIdx::MIN))?
			.pipe(Ok)
	}
}

impl<M, T, O> Debug for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(
			fmt,
			"{} Bit<{}, {}>",
			M::RENDER,
			any::type_name::<T>(),
			any::type_name::<O>(),
		)?;
		Pointer::fmt(self, fmt)
	}
}

impl<M, T, O> Pointer for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		fmt.debug_tuple("")
			.field(&self.get_addr().fmt_pointer())
			.field(&self.bit.fmt_binary())
			.finish()
	}
}

#[cfg(not(tarpaulin_include))]
impl<M, T, O> Hash for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn hash<H>(&self, state: &mut H)
	where H: Hasher {
		self.get_addr().hash(state);
		self.bit.hash(state);
	}
}

impl<M, T, O> Copy for BitPtr<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
}
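//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: it demonstrates the comparison rules implemented above.
//  Equality and ordering require matching `::Mem` types, and refuse to
//  answer otherwise.
#[cfg(test)]
mod compare_sketch {
	use core::cell::Cell;

	use crate::prelude::*;

	#[test]
	fn comparisons_require_matching_memory() {
		let data = 0u16;
		let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
		//  `u16` and `Cell<u16>` share a `::Mem` type, so they compare equal…
		assert_eq!(ptr, ptr.cast::<Cell<u16>>());
		//  …but a differing memory width cannot be ordered at all.
		assert!(ptr.partial_cmp(&ptr.cast::<u8>()).is_none());
	}
}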
/// Errors produced by invalid bit-pointer components.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum BitPtrError<T>
where T: BitStore
{
	/// Attempted to construct a bit-pointer with the null element address.
	Null(NullPtrError),
	/// Attempted to construct a bit-pointer with an address not aligned for
	/// the element type.
	Misaligned(MisalignError<T>),
}

#[cfg(not(tarpaulin_include))]
impl<T> From<MisalignError<T>> for BitPtrError<T>
where T: BitStore
{
	#[inline]
	fn from(err: MisalignError<T>) -> Self {
		Self::Misaligned(err)
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> From<NullPtrError> for BitPtrError<T>
where T: BitStore
{
	#[inline]
	fn from(err: NullPtrError) -> Self {
		Self::Null(err)
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> Display for BitPtrError<T>
where T: BitStore
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		match self {
			Self::Null(err) => Display::fmt(err, fmt),
			Self::Misaligned(err) => Display::fmt(err, fmt),
		}
	}
}

#[cfg(feature = "std")]
impl<T> std::error::Error for BitPtrError<T> where T: BitStore {}
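//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: it triggers both error conditions above through the public
//  `TryFrom` conversions.
#[cfg(test)]
mod error_sketch {
	use core::{
		convert::TryFrom,
		ptr,
	};

	use crate::{
		prelude::*,
		ptr::BitPtrError,
	};

	#[test]
	fn reject_null_and_misaligned() {
		let null = ptr::null::<u16>();
		assert!(matches!(
			BitPtr::<_, _, Lsb0>::try_from(null),
			Err(BitPtrError::Null(_))
		));

		let data = [0u16; 2];
		let misaligned =
			(data.as_ptr() as *const u8).wrapping_add(1) as *const u16;
		assert!(matches!(
			BitPtr::<_, _, Lsb0>::try_from(misaligned),
			Err(BitPtrError::Misaligned(_))
		));
	}
}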
bitvec-1.0.1/src/ptr/span.rs000064400000000000000000000553611046102023000140130ustar 00000000000000
#![doc = include_str!("../../doc/ptr/span.md")]

use core::{
	any,
	fmt::{
		self,
		Binary,
		Debug,
		Display,
		Formatter,
		Pointer,
	},
	marker::PhantomData,
	mem,
	ptr::{
		self,
		NonNull,
	},
};

use tap::Pipe;
use wyz::{
	comu::{
		Address,
		Const,
		Mut,
		Mutability,
		NullPtrError,
		Reference,
		Referential,
	},
	fmt::FmtForward,
};

use super::{
	BitPtr,
	BitPtrError,
	BitPtrRange,
	MisalignError,
};
use crate::{
	index::{
		BitEnd,
		BitIdx,
	},
	mem::{
		bits_of,
		BitRegister,
	},
	order::{
		BitOrder,
		Lsb0,
	},
	slice::BitSlice,
	store::BitStore,
};

#[doc = include_str!("../../doc/ptr/BitSpan.md")]
pub(crate) struct BitSpan<M = Const, T = usize, O = Lsb0>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// The element address in which the base bit lives. This must be typed
	/// as `()` because it cannot be directly dereferenced, and will not have
	/// valid values for `NonNull<T>`.
	ptr: NonNull<()>,
	/// The length of the span, in bits.
	len: usize,
	/// The bit-ordering within elements used to translate indices to real
	/// bits.
	_or: PhantomData<O>,
	/// This is functionally an element-slice pointer.
	_ty: PhantomData<Address<M, [T]>>,
}

impl<M, T, O> BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// The canonical empty span. This always uses the dangling address for
	/// `T`.
	pub(crate) const EMPTY: Self = Self {
		ptr: NonNull::<T>::dangling().cast::<()>(),
		len: 0,
		_or: PhantomData,
		_ty: PhantomData,
	};
	/// The number of least-significant bits in `.len` needed to hold the low
	/// bits of the head `BitIdx` cursor.
	///
	/// This is always 3 until Rust adds a target architecture whose bytes are
	/// not 8 bits.
	pub(crate) const LEN_HEAD_BITS: usize = 3;
	/// Marks the bits of `.len` that store some of the `.head()` logical
	/// field.
	pub(crate) const LEN_HEAD_MASK: usize = 0b111;
	/// Marks the bits of `.ptr` that store the `.addr()` logical field.
	pub(crate) const PTR_ADDR_MASK: usize = !0 << Self::PTR_HEAD_BITS;
	/// The number of least-significant bits in `.ptr` needed to hold the high
	/// bits of the head `BitIdx` cursor.
	pub(crate) const PTR_HEAD_BITS: usize =
		<T::Mem as BitRegister>::INDX as usize - Self::LEN_HEAD_BITS;
	/// Marks the bits of `.ptr` that store some of the `.head()` logical
	/// field.
	pub(crate) const PTR_HEAD_MASK: usize = !Self::PTR_ADDR_MASK;
	/// The inclusive-maximum number of bits that a `BitSpan` can cover. This
	/// value is therefore one higher than the maximum *index* that can be
	/// used to select a bit within a span.
	pub(crate) const REGION_MAX_BITS: usize = !0 >> Self::LEN_HEAD_BITS;
	/// The inclusive-maximum number of memory elements that a bit-span can
	/// cover.
	///
	/// This is the number of elements required to store `REGION_MAX_BITS`
	/// bits, plus one because a region could begin away from the zeroth bit
	/// and thus continue into the next element at the end.
	///
	/// Since the region is ⅛th the domain of a `usize` counter already, this
	/// number is guaranteed to be well below the limits of both arithmetic
	/// and Rust’s own ceiling constraints on memory region descriptors.
	pub(crate) const REGION_MAX_ELTS: usize =
		crate::mem::elts::<T>(Self::REGION_MAX_BITS) + 1;
}

/// Constructors.
impl<M, T, O> BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Constructs an empty `BitSpan` at an allocated address.
	///
	/// This is used when the region has no contents, but the pointer
	/// information must be retained and cannot be canonicalized.
	///
	/// ## Parameters
	///
	/// - `addr`: Some address of a `T` allocation. It must be valid in the
	///   caller’s memory regime.
	///
	/// ## Returns
	///
	/// A zero-length `BitSpan` based at `addr`.
	#[cfg(feature = "alloc")]
	pub(crate) fn uninhabited(addr: Address<M, T>) -> Self {
		Self {
			ptr: addr.into_inner().cast::<()>(),
			..Self::EMPTY
		}
	}

	/// Creates a new bit-span from its logical components.
	///
	/// ## Parameters
	///
	/// - `addr`: The base address of the memory region in which the bit-span
	///   resides.
	/// - `head`: The index of the initial bit within `*addr`.
	/// - `bits`: The number of bits contained in the bit-span.
	///
	/// ## Returns
	///
	/// This fails in the following conditions:
	///
	/// - `bits` is greater than `REGION_MAX_BITS`
	/// - `addr` is not aligned to `T`.
	/// - `addr + elts(bits)` wraps around the address space
	///
	/// The `Address` type already enforces the non-null requirement.
	pub(crate) fn new(
		addr: Address<M, T>,
		head: BitIdx<T::Mem>,
		bits: usize,
	) -> Result<Self, BitSpanError<T>> {
		if bits > Self::REGION_MAX_BITS {
			return Err(BitSpanError::TooLong(bits));
		}
		let base = BitPtr::<M, T, O>::new(addr, head)?;
		let last = base.wrapping_add(bits);
		if last < base {
			return Err(BitSpanError::TooHigh(addr.to_const()));
		}

		Ok(unsafe { Self::new_unchecked(addr, head, bits) })
	}

	/// Creates a new bit-span from its components, without any validity
	/// checks.
	///
	/// ## Safety
	///
	/// The caller must ensure that the arguments satisfy all the requirements
	/// outlined in [`::new()`]. The easiest way to ensure this is to only use
	/// this function to construct bit-spans from values extracted from
	/// bit-spans previously constructed through `::new()`.
	///
	/// This function **only** performs the value encoding. Invalid lengths
	/// will truncate, and invalid addresses may cause memory unsafety.
	///
	/// [`::new()`]: Self::new
	pub(crate) unsafe fn new_unchecked(
		addr: Address<M, T>,
		head: BitIdx<T::Mem>,
		bits: usize,
	) -> Self {
		let addr = addr.to_const().cast::<u8>();
		let head = head.into_inner() as usize;

		let ptr_data = addr as usize & Self::PTR_ADDR_MASK;
		let ptr_head = head >> Self::LEN_HEAD_BITS;

		let len_head = head & Self::LEN_HEAD_MASK;
		let len_bits = bits << Self::LEN_HEAD_BITS;

		/* See . This attempts to retain inbound provenance information and
		 * may help Miri better understand pointer operations this module
		 * performs.
		 *
		 * This performs `a + (p - a)` in `addr`’s provenance zone, which is
		 * numerically equivalent to `p` but does not require conjuring a
		 * new, uninformed, pointer value.
		 */
		let ptr_raw = ptr_data | ptr_head;
		let ptr = addr.wrapping_add(ptr_raw.wrapping_sub(addr as usize));

		Self {
			ptr: NonNull::new_unchecked(ptr.cast::<()>() as *mut ()),
			len: len_bits | len_head,
			..Self::EMPTY
		}
	}
}
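//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: it round-trips the logical (address, head, bits) triple
//  through the two-word packing performed by the constructors above.
#[cfg(test)]
mod encoding_sketch {
	use wyz::comu::Const;

	use crate::{
		index::BitIdx,
		order::Lsb0,
		ptr::{
			AddressExt,
			BitSpan,
		},
	};

	#[test]
	fn encode_decode_round_trip() {
		let elem = 0u16;
		let addr = unsafe { (&elem).into_address() };
		let head = BitIdx::<u16>::new(5).unwrap();
		let span = BitSpan::<Const, u16, Lsb0>::new(addr, head, 20).unwrap();
		//  The logical fields survive the packing into `(ptr, len)`.
		assert_eq!(span.address().to_const(), &elem as *const u16);
		assert_eq!(span.head(), head);
		assert_eq!(span.len(), 20);
	}
}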
/// Encoded fields.
impl<M, T, O> BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Gets the base element address of the referent region.
	///
	/// # Parameters
	///
	/// - `&self`
	///
	/// # Returns
	///
	/// The address of the starting element of the memory region. This address
	/// is weakly typed so that it can be cast by call sites to the most
	/// useful access type.
	pub(crate) fn address(&self) -> Address<M, T> {
		Address::new(unsafe {
			NonNull::new_unchecked(
				(self.ptr.as_ptr() as usize & Self::PTR_ADDR_MASK) as *mut T,
			)
		})
	}

	/// Overwrites the data pointer with a new address. This method does not
	/// perform safety checks on the new pointer.
	///
	/// # Parameters
	///
	/// - `&mut self`
	/// - `addr`: The new address of the `BitSpan`’s domain.
	///
	/// # Safety
	///
	/// None. The invariants of [`::new`] must be checked at the caller.
	///
	/// [`::new`]: Self::new
	#[cfg(feature = "alloc")]
	pub(crate) unsafe fn set_address(&mut self, addr: Address<M, T>) {
		let mut addr_value = addr.to_const() as usize;
		addr_value &= Self::PTR_ADDR_MASK;
		addr_value |= self.ptr.as_ptr() as usize & Self::PTR_HEAD_MASK;
		self.ptr = NonNull::new_unchecked(addr_value as *mut ())
	}

	/// Gets the starting bit index of the referent region.
	///
	/// # Parameters
	///
	/// - `&self`
	///
	/// # Returns
	///
	/// A [`BitIdx`] of the first live bit in the element at the
	/// [`self.address()`] address.
	///
	/// [`BitIdx`]: crate::index::BitIdx
	/// [`self.address()`]: Self::address
	pub(crate) fn head(&self) -> BitIdx<T::Mem> {
		let ptr = self.ptr.as_ptr() as usize;
		let ptr_head = (ptr & Self::PTR_HEAD_MASK) << Self::LEN_HEAD_BITS;
		let len_head = self.len & Self::LEN_HEAD_MASK;
		unsafe { BitIdx::new_unchecked((ptr_head | len_head) as u8) }
	}

	/// Writes a new `head` value into the pointer, with no other effects.
	///
	/// # Parameters
	///
	/// - `&mut self`
	/// - `head`: A new starting index.
	///
	/// # Effects
	///
	/// `head` is written into the `.head` logical field, without affecting
	/// `.addr` or `.bits`.
	#[cfg(feature = "alloc")]
	pub(crate) unsafe fn set_head(&mut self, head: BitIdx<T::Mem>) {
		let head = head.into_inner() as usize;
		let mut ptr = self.ptr.as_ptr() as usize;

		ptr &= Self::PTR_ADDR_MASK;
		ptr |= head >> Self::LEN_HEAD_BITS;
		self.ptr = NonNull::new_unchecked(ptr as *mut ());

		self.len &= !Self::LEN_HEAD_MASK;
		self.len |= head & Self::LEN_HEAD_MASK;
	}

	/// Gets the number of live bits in the described region.
	///
	/// # Parameters
	///
	/// - `&self`
	///
	/// # Returns
	///
	/// A count of how many live bits the region pointer describes.
	pub(crate) fn len(&self) -> usize {
		self.len >> Self::LEN_HEAD_BITS
	}

	/// Sets the `.bits` logical member to a new value.
	///
	/// # Parameters
	///
	/// - `&mut self`
	/// - `new_len`: A new bit length. This must not be greater than
	///   [`REGION_MAX_BITS`].
	///
	/// # Effects
	///
	/// The `new_len` value is written directly into the `.bits` logical
	/// field.
	///
	/// [`REGION_MAX_BITS`]: Self::REGION_MAX_BITS
	pub(crate) unsafe fn set_len(&mut self, new_len: usize) {
		if cfg!(debug_assertions) {
			*self = Self::new(self.address(), self.head(), new_len).unwrap();
		}
		else {
			self.len &= Self::LEN_HEAD_MASK;
			self.len |= new_len << Self::LEN_HEAD_BITS;
		}
	}

	/// Gets the three logical components of the pointer.
	///
	/// The encoding is not public API, and direct field access is never
	/// supported.
	///
	/// # Parameters
	///
	/// - `&self`
	///
	/// # Returns
	///
	/// - `.0`: The base address of the referent memory region.
	/// - `.1`: The index of the first live bit in the first element of the
	///   region.
	/// - `.2`: The number of live bits in the region.
	pub(crate) fn raw_parts(&self) -> (Address<M, T>, BitIdx<T::Mem>, usize) {
		(self.address(), self.head(), self.len())
	}
}
/// Virtual fields.
impl<M, T, O> BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Computes the number of elements, starting at [`self.address()`], that
	/// the region touches.
	///
	/// # Parameters
	///
	/// - `&self`
	///
	/// # Returns
	///
	/// The count of all elements, starting at [`self.address()`], that
	/// contain live bits included in the referent region.
	///
	/// [`self.address()`]: Self::address
	pub(crate) fn elements(&self) -> usize {
		crate::mem::elts::<T>(self.len() + self.head().into_inner() as usize)
	}

	/// Computes the tail index for the first dead bit after the live bits.
	///
	/// # Parameters
	///
	/// - `&self`
	///
	/// # Returns
	///
	/// A `BitEnd` that is the index of the first dead bit after the last live
	/// bit in the last element. This will almost always be in the range
	/// `1 ..= T::Mem::BITS`.
	///
	/// It will be zero only when `self` is empty.
	pub(crate) fn tail(&self) -> BitEnd<T::Mem> {
		let (head, len) = (self.head(), self.len());
		let (_, tail) = head.span(len);
		tail
	}
}
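//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: a 16-bit span starting at bit 3 of a byte array touches three
//  elements and leaves its final element at tail index 3.
#[cfg(test)]
mod virtual_fields_sketch {
	use crate::{
		prelude::*,
		ptr::BitSpan,
	};

	#[test]
	fn elements_and_tail() {
		let data = [0u8; 4];
		let bits = &data.view_bits::<Lsb0>()[3 .. 19];
		let span = BitSpan::from(bits);
		assert_eq!(span.elements(), 3);
		assert_eq!(span.tail().into_inner(), 3);
	}
}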
/// Conversions.
impl<M, T, O> BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Casts the span to another element type.
	///
	/// This does not alter the encoded value of the pointer! It only
	/// reinterprets the element type, and the encoded value may shift
	/// significantly in the result type. Use with caution.
	pub(crate) fn cast<U>(self) -> BitSpan<M, U, O>
	where U: BitStore {
		let Self { ptr, len, .. } = self;
		BitSpan {
			ptr,
			len,
			..BitSpan::EMPTY
		}
	}

	/// Reäligns a bit-span to a different base memory type.
	///
	/// ## Original
	///
	/// [`slice::align_to`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to)
	///
	/// ## Safety
	///
	/// `U` must have the same type family as `T`. It is illegal to use this
	/// method to cast away alias safeties such as an atomic or `Cell`
	/// wrapper.
	pub(crate) unsafe fn align_to<U>(self) -> (Self, BitSpan<M, U, O>, Self)
	where U: BitStore {
		/* This function body implements the algorithm locally, rather than
		 * delegating to the standard library’s `<[T]>::align_to::<U>`
		 * function, because that requires use of memory references, and
		 * `BitSpan` does not require that its values be valid for
		 * dereference.
		 */
		let this = self.to_bitptr();
		//  Counter for how many bits remain in the span.
		let mut rem = self.len();
		//  The *byte* alignment of `U`.
		let align = mem::align_of::<U>();
		//  1. Get the number of bits between `self.head()` and the start of
		//     a `[U]` region.
		let step = this.align_offset(align);

		//  If this count is more than the available bits, quit.
		if step > rem {
			return (self, BitSpan::EMPTY, Self::EMPTY);
		}
		let left = this.span_unchecked(step);
		rem -= step;

		let mid_base = this.add(step).address().cast::<U>().pipe(|addr| {
			BitPtr::<M, U, O>::new_unchecked(addr, BitIdx::MIN)
		});
		let mid_elts = rem >> <U::Mem as BitRegister>::INDX;
		let excess = rem & <U::Mem as BitRegister>::MASK as usize;
		let step = rem - excess;
		let mid = mid_base.span_unchecked(step);

		let right_base =
			mid_base.address().add(mid_elts).cast::<T>().pipe(|addr| {
				BitPtr::<M, T, O>::new_unchecked(addr, BitIdx::MIN)
			});
		let right = right_base.span_unchecked(excess);

		(left, mid, right)
	}

	/// Casts a mutable bit-slice pointer into its structural representation.
	pub(crate) fn from_bitslice_ptr_mut(raw: *mut BitSlice<T, O>) -> Self {
		let BitSpan { ptr, len, .. } =
			BitSpan::from_bitslice_ptr(raw as *const BitSlice<T, O>);
		Self {
			ptr,
			len,
			..Self::EMPTY
		}
	}

	/// Converts the span descriptor into a raw `BitSlice` pointer.
	///
	/// This is a noöp.
	pub(crate) fn into_bitslice_ptr(self) -> *const BitSlice<T, O> {
		let Self { ptr, len, .. } = self;
		ptr::slice_from_raw_parts(ptr.as_ptr(), len) as *const BitSlice<T, O>
	}

	/// Converts the span descriptor into a shared `BitSlice` reference.
	///
	/// This is a noöp.
	///
	/// ## Safety
	///
	/// The span must describe memory that is safe to dereference, and to
	/// which no `&mut BitSlice` references exist.
	pub(crate) unsafe fn into_bitslice_ref<'a>(self) -> &'a BitSlice<T, O> {
		&*self.into_bitslice_ptr()
	}

	/// Produces a bit-pointer to the start of the span.
	///
	/// This is **not** a noöp: the base address and starting bit index are
	/// decoded into the bit-pointer structure.
	pub(crate) fn to_bitptr(self) -> BitPtr<M, T, O> {
		unsafe { BitPtr::new_unchecked(self.address(), self.head()) }
	}

	/// Produces a bit-pointer range to either end of the span.
	///
	/// This is **not** a noöp: all three logical fields are decoded in order
	/// to construct the range.
	pub(crate) fn to_bitptr_range(self) -> BitPtrRange<M, T, O> {
		let start = self.to_bitptr();
		let end = unsafe { start.add(self.len()) };
		BitPtrRange { start, end }
	}

	/// Converts the span descriptor into an `Address<>` generic pointer.
	///
	/// This is a noöp.
	pub(crate) fn to_bitslice_addr(self) -> Address<M, BitSlice<T, O>> {
		(self.into_bitslice_ptr() as *mut BitSlice<T, O>)
			.pipe(|ptr| unsafe { NonNull::new_unchecked(ptr) })
			.pipe(Address::new)
	}

	/// Converts the span descriptor into a `Reference<>` generic handle.
	///
	/// This is a noöp.
	pub(crate) fn to_bitslice<'a>(self) -> Reference<'a, M, BitSlice<T, O>>
	where Address<M, BitSlice<T, O>>: Referential<'a> {
		unsafe { self.to_bitslice_addr().to_ref() }
	}
}

/// Conversions.
impl<T, O> BitSpan<Const, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	/// Creates a `Const` span descriptor from a `const` bit-slice pointer.
	pub(crate) fn from_bitslice_ptr(raw: *const BitSlice<T, O>) -> Self {
		let slice_nn = match NonNull::new(raw as *const [()] as *mut [()]) {
			Some(nn) => nn,
			None => return Self::EMPTY,
		};
		let ptr = slice_nn.cast::<()>();
		let len = unsafe { slice_nn.as_ref() }.len();
		Self {
			ptr,
			len,
			..Self::EMPTY
		}
	}
}

/// Conversions.
impl<T, O> BitSpan<Mut, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	/// Converts the span descriptor into a raw mutable `BitSlice` pointer.
	///
	/// This is a noöp.
	pub(crate) fn into_bitslice_ptr_mut(self) -> *mut BitSlice<T, O> {
		self.into_bitslice_ptr() as *mut BitSlice<T, O>
	}

	/// Converts the span descriptor into an exclusive `BitSlice` reference.
	///
	/// This is a noöp.
	///
	/// ## Safety
	///
	/// The span must describe memory that is safe to dereference. In
	/// addition, no other `BitSlice` reference of any kind (`&` or `&mut`)
	/// may exist.
	pub(crate) unsafe fn into_bitslice_mut<'a>(self) -> &'a mut BitSlice<T, O> {
		&mut *self.into_bitslice_ptr_mut()
	}
}
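//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: the noöp conversions above decode a span built from a
//  bit-slice reference back into an identical bit-slice.
#[cfg(test)]
mod conversion_sketch {
	use crate::{
		prelude::*,
		ptr::BitSpan,
	};

	#[test]
	fn bitslice_round_trip() {
		let data = 0b0110_1001u8;
		let bits = data.view_bits::<Lsb0>();
		let span = BitSpan::from(bits);
		assert_eq!(span.len(), 8);
		assert_eq!(unsafe { span.into_bitslice_ref() }, bits);
	}
}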
/// Utilities.
impl<M, T, O> BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	/// Checks if a requested length can be encoded into the `BitSpan`.
	///
	/// This is `len <= Self::REGION_MAX_BITS`.
	#[cfg(feature = "alloc")]
	pub(crate) fn len_encodable(len: usize) -> bool {
		len <= Self::REGION_MAX_BITS
	}

	/// Renders the pointer structure into a formatter for use during
	/// higher-level type [`Debug`] implementations.
	///
	/// # Parameters
	///
	/// - `&self`
	/// - `fmt`: The formatter into which the pointer is rendered.
	/// - `name`: The suffix of the structure rendering its pointer. The `Bit`
	///   prefix is applied to the object type name in this format.
	/// - `fields`: Any additional fields in the object’s debug info to be
	///   rendered.
	///
	/// # Returns
	///
	/// The result of formatting the pointer into the receiver.
	///
	/// # Behavior
	///
	/// This function writes `Bit{name}<{type}, {ord}> {{ {fields} }}` into
	/// the `fmt` formatter, where `{fields}` includes the address, head
	/// index, and bit length of the pointer, as well as any additional fields
	/// provided by the caller.
	///
	/// Higher types in the crate should use this function to drive their
	/// [`Debug`] implementations, and then use [`BitSlice`]’s list formatters
	/// to display their buffer contents.
	///
	/// [`BitSlice`]: crate::slice::BitSlice
	/// [`Debug`]: core::fmt::Debug
	pub(crate) fn render<'a>(
		&'a self,
		fmt: &'a mut Formatter,
		name: &'a str,
		fields: impl IntoIterator<Item = &'a (&'a str, &'a dyn Debug)>,
	) -> fmt::Result {
		write!(
			fmt,
			"Bit{}<{}, {}>",
			name,
			any::type_name::<T>(),
			any::type_name::<O>(),
		)?;
		let mut builder = fmt.debug_struct("");
		builder
			.field("addr", &self.address().fmt_pointer())
			.field("head", &self.head().fmt_binary())
			.field("bits", &self.len());
		for (name, value) in fields {
			builder.field(name, value);
		}
		builder.finish()
	}
}

#[cfg(not(tarpaulin_include))]
impl<M, T, O> Clone for BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn clone(&self) -> Self {
		*self
	}
}

impl<M1, M2, O, T1, T2> PartialEq<BitSpan<M2, T2, O>> for BitSpan<M1, T1, O>
where
	M1: Mutability,
	M2: Mutability,
	O: BitOrder,
	T1: BitStore,
	T2: BitStore,
{
	#[inline]
	fn eq(&self, other: &BitSpan<M2, T2, O>) -> bool {
		let (addr_a, head_a, bits_a) = self.raw_parts();
		let (addr_b, head_b, bits_b) = other.raw_parts();
		bits_of::<T1::Mem>() == bits_of::<T2::Mem>()
			&& addr_a.to_const() as usize == addr_b.to_const() as usize
			&& head_a.into_inner() == head_b.into_inner()
			&& bits_a == bits_b
	}
}

impl<T, O> From<&BitSlice<T, O>> for BitSpan<Const, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn from(bits: &BitSlice<T, O>) -> Self {
		Self::from_bitslice_ptr(bits)
	}
}

impl<T, O> From<&mut BitSlice<T, O>> for BitSpan<Mut, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn from(bits: &mut BitSlice<T, O>) -> Self {
		Self::from_bitslice_ptr_mut(bits)
	}
}

#[cfg(not(tarpaulin_include))]
impl<M, T, O> Default for BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn default() -> Self {
		Self::EMPTY
	}
}

impl<M, T, O> Debug for BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		self.render(fmt, "Span", None)
	}
}

impl<M, T, O> Pointer for BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		Pointer::fmt(&self.address(), fmt)?;
		fmt.write_str("(")?;
		Binary::fmt(&self.head(), fmt)?;
		fmt.write_str(")[")?;
		Display::fmt(&self.len(), fmt)?;
		fmt.write_str("]")
	}
}

impl<M, T, O> Copy for BitSpan<M, T, O>
where
	M: Mutability,
	T: BitStore,
	O: BitOrder,
{
}

/// An error produced when creating `BitSpan` encoded references.
#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum BitSpanError<T>
where T: BitStore
{
	/// A null pointer was provided.
	Null(NullPtrError),
	/// The base element pointer is not aligned.
	Misaligned(MisalignError<T>),
	/// The requested length exceeds the `BitSpan` length ceiling.
	TooLong(usize),
	/// The requested address is too high, and wraps to zero.
	TooHigh(*const T),
}

#[cfg(not(tarpaulin_include))]
impl<T> From<BitPtrError<T>> for BitSpanError<T>
where T: BitStore
{
	#[inline]
	fn from(err: BitPtrError<T>) -> Self {
		match err {
			BitPtrError::Null(err) => Self::Null(err),
			BitPtrError::Misaligned(err) => Self::Misaligned(err),
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> From<MisalignError<T>> for BitSpanError<T>
where T: BitStore
{
	#[inline]
	fn from(err: MisalignError<T>) -> Self {
		Self::Misaligned(err)
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> Debug for BitSpanError<T>
where T: BitStore
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(fmt, "BitSpanError<{}>::", any::type_name::<T>())?;
		match self {
			Self::Null(err) => fmt.debug_tuple("Null").field(&err).finish(),
			Self::Misaligned(err) => {
				fmt.debug_tuple("Misaligned").field(&err).finish()
			},
			Self::TooLong(len) => fmt.debug_tuple("TooLong").field(len).finish(),
			Self::TooHigh(addr) => {
				fmt.debug_tuple("TooHigh").field(addr).finish()
			},
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<T> Display for BitSpanError<T>
where T: BitStore
{
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		match self {
			Self::Null(err) => Display::fmt(err, fmt),
			Self::Misaligned(err) => Display::fmt(err, fmt),
			Self::TooLong(len) => write!(
				fmt,
				"Length {} is too long to encode in a bit-slice, which can \
				 only accept {} bits",
				len,
				BitSpan::<Const, T, Lsb0>::REGION_MAX_BITS,
			),
			Self::TooHigh(addr) => write!(
				fmt,
				"Address {:p} is too high, and produces a span that wraps \
				 around to the zero address.",
				addr,
			),
		}
	}
}

unsafe impl<T> Send for BitSpanError<T> where T: BitStore {}

unsafe impl<T> Sync for BitSpanError<T> where T: BitStore {}

#[cfg(feature = "std")]
impl<T> std::error::Error for BitSpanError<T> where T: BitStore {}
bitvec-1.0.1/src/ptr/tests.rs000064400000000000000000000074151046102023000142100ustar 00000000000000
//! Unit tests for bit-pointers.
#![cfg(test)]

use core::cmp;

use crate::{
	index::BitIdx,
	prelude::*,
	ptr::{
		self as bv_ptr,
		AddressExt,
		BitSpan,
		BitSpanError,
		Mut,
	},
};

#[test]
fn free_functions() {
	let mut a = [0u8, !0];
	let mut b = 255u16;

	let one = BitPtr::<Mut, u8, Lsb0>::from_slice_mut(&mut a[..]);
	let two = one.wrapping_add(8);
	let three = BitPtr::<Mut, u16, Msb0>::from_mut(&mut b);
	let four = three.wrapping_add(8);

	unsafe {
		bv_ptr::copy(two.to_const(), one, 8);
	}
	assert_eq!(a[0], !0);
	unsafe {
		bv_ptr::copy(three.to_const(), one, 8);
	}
	assert_eq!(a[0], 0);

	assert!(!bv_ptr::eq(two.to_const(), one.to_const()));

	unsafe {
		bv_ptr::swap_nonoverlapping(two, three, 8);
	}
	assert_eq!(a[1], 0);
	assert_eq!(b, !0);

	unsafe {
		bv_ptr::write_bits(four, false, 8);
	}
	assert_eq!(b, 0xFF00);
}

#[test]
fn alignment() {
	let data = 0u16;
	let a = unsafe { (&data).into_address() };
	let b = a.cast::<u8>().wrapping_add(1).cast::<u16>();

	assert!(bv_ptr::check_alignment(a).is_ok());
	assert!(bv_ptr::check_alignment(b).is_err());
}

#[test]
fn proxy() {
	let mut data = 0u8;
	{
		let bits = data.view_bits_mut::<Lsb0>();
		let (mut a, rest) = bits.split_first_mut().unwrap();
		let (mut b, _) = rest.split_first_mut().unwrap();

		assert!(!a.replace(true));
		a.swap(&mut b);
		assert!(*b);
		a.set(true);
	}
	assert_eq!(data, 3);
}

#[test]
fn range() {
	let data = 0u8;
	let mut bpr = data.view_bits::<Lsb0>().as_bitptr_range();
	let range = bpr.clone().into_range();
	let bpr2 = range.into();
	assert_eq!(bpr, bpr2);

	assert!(bpr.nth_back(9).is_none());
}

#[test]
#[allow(deprecated)]
fn single() {
	let mut data = 1u16;
	let bp = data.view_bits_mut::<Lsb0>().as_mut_bitptr();
	assert!(!bp.is_null());

	let bp2 = bp.wrapping_add(9);
	assert_ne!(bp2.pointer().cast::<u8>(), bp2.cast::<u8>().pointer());

	assert!(unsafe { bp.read_volatile() });
	assert!(unsafe { bp.read_unaligned() });

	assert_eq!(bp.align_offset(2), 0);
	assert_eq!(bp2.align_offset(2), 7);

	unsafe {
		bp.write_volatile(false);
		bp.swap(bp2);
		bp2.write_unaligned(true);
	}

	assert_eq!(bp.cmp(&bp2), cmp::Ordering::Less);
	assert_ne!(bp, bp.cast::<u8>());
	assert!(bp.partial_cmp(&bp.cast::<u8>()).is_none());
}

#[test]
fn span() {
	let mut data = [0u32; 2];
	let addr = unsafe { data.as_mut_ptr().into_address() };
	let too_long = BitSpan::<Mut, u32, Lsb0>::REGION_MAX_BITS + 1;
	assert!(matches!(
		BitSpan::<_, _, Lsb0>::new(addr, BitIdx::MIN, too_long),
		Err(BitSpanError::TooLong(ct)) if ct == too_long));

	let bp = data.view_bits_mut::<Lsb0>().as_mut_bitptr();
	let bs = bp.cast::<u8>().wrapping_add(8).span(32).unwrap();
	let (l, c, r) = unsafe { bs.align_to::<u16>() };
	assert_eq!(l.len(), 8);
	assert_eq!(c.len(), 16);
	assert_eq!(r.len(), 8);

	let bs2 = bp.cast::<u8>().wrapping_add(3).span(3).unwrap();
	assert_eq!(
		unsafe { bs2.align_to::<u16>() },
		(bs2, BitSpan::EMPTY, BitSpan::EMPTY)
	);
}

#[test]
#[cfg(feature = "alloc")]
fn format() {
	#[cfg(not(feature = "std"))]
	use alloc::format;
	use core::any;

	let data = 1u8;
	let bits = data.view_bits::<Lsb0>();
	let bit = bits.first().unwrap();

	let render = format!("{:?}", bit);
	assert!(render.starts_with("BitRef<"));

	let bitspan = bits[2 .. 5].as_bitspan();
	let render = format!("{:?}", bitspan);
	let expected = format!(
		"BitSpan<{}, {}> {{ addr: {:p}, head: 010, bits: 3 }}",
		any::type_name::<u8>(),
		any::type_name::<Lsb0>(),
		bitspan.address(),
	);
	assert_eq!(render, expected);

	let render = format!("{:p}", bitspan);
	let expected = format!("{:p}(010)[3]", bitspan.address());
	assert_eq!(render, expected);
}
bitvec-1.0.1/src/ptr.rs000064400000000000000000000147761046102023000130560ustar 00000000000000
#![doc = include_str!("../doc/ptr.md")]

use core::hash::{
	Hash,
	Hasher,
};

use wyz::bidi::BidiIterator;

use crate::{
	devel as dvl,
	order::BitOrder,
	slice::BitSlice,
	store::BitStore,
};

mod addr;
mod proxy;
mod range;
mod single;
mod span;
mod tests;

pub use wyz::comu::{
	Const,
	Mut,
	Mutability,
};
pub(crate) use self::{
	addr::AddressExt,
	span::BitSpan,
};
pub use self::{
	addr::{
		check_alignment,
		MisalignError,
	},
	proxy::BitRef,
	range::BitPtrRange,
	single::{
		BitPtr,
		BitPtrError,
	},
	span::BitSpanError,
};

#[inline]
#[doc = include_str!("../doc/ptr/copy.md")]
pub unsafe fn copy<T1, T2, O1, O2>(
	src: BitPtr<Const, T1, O1>,
	dst: BitPtr<Mut, T2, O2>,
	count: usize,
) where
	O1: BitOrder,
	O2: BitOrder,
	T1: BitStore,
	T2: BitStore,
{
	//  Overlap is only defined if the orderings are identical.
	if dvl::match_order::<O1, O2>() {
		let (addr, head) = dst.raw_parts();
		let dst = BitPtr::<Mut, T2, O1>::new_unchecked(addr, head);
		let src_pair = src.range(count);

		let rev = src_pair.contains(&dst);
		for (from, to) in src_pair.zip(dst.range(count)).bidi(rev) {
			to.write(from.read());
		}
	}
	else {
		copy_nonoverlapping(src, dst, count);
	}
}

#[inline]
#[doc = include_str!("../doc/ptr/copy_nonoverlapping.md")]
pub unsafe fn copy_nonoverlapping<T1, T2, O1, O2>(
	src: BitPtr<Const, T1, O1>,
	dst: BitPtr<Mut, T2, O2>,
	count: usize,
) where
	O1: BitOrder,
	O2: BitOrder,
	T1: BitStore,
	T2: BitStore,
{
	for (from, to) in src.range(count).zip(dst.range(count)) {
		to.write(from.read());
	}
}

#[inline]
#[doc = include_str!("../doc/ptr/drop_in_place.md")]
#[deprecated = "this has no effect, and should not be called"]
pub unsafe fn drop_in_place<T, O>(_: BitPtr<Mut, T, O>)
where
	T: BitStore,
	O: BitOrder,
{
}

#[doc = include_str!("../doc/ptr/eq.md")]
#[inline]
pub fn eq<T1, T2, O>(
	this: BitPtr<Const, T1, O>,
	that: BitPtr<Const, T2, O>,
) -> bool
where
	T1: BitStore,
	T2: BitStore,
	O: BitOrder,
{
	this == that
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/hash.md")]
pub fn hash<T, O, S>(ptr: BitPtr<Const, T, O>, into: &mut S)
where
	T: BitStore,
	O: BitOrder,
	S: Hasher,
{
	ptr.hash(into);
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/null.md")]
pub fn null<T, O>() -> BitPtr<Const, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	BitPtr::DANGLING
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/null_mut.md")]
pub fn null_mut<T, O>() -> BitPtr<Mut, T, O>
where
	T: BitStore,
	O: BitOrder,
{
	BitPtr::DANGLING
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/read.md")]
pub unsafe fn read<T, O>(src: BitPtr<Const, T, O>) -> bool
where
	T: BitStore,
	O: BitOrder,
{
	src.read()
}

#[inline]
#[allow(deprecated)]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/read_unaligned.md")]
#[deprecated = "`BitPtr` does not have unaligned addresses"]
pub unsafe fn read_unaligned<T, O>(src: BitPtr<Const, T, O>) -> bool
where
	T: BitStore,
	O: BitOrder,
{
	src.read_unaligned()
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/read_volatile.md")]
pub unsafe fn read_volatile<T, O>(src: BitPtr<Const, T, O>) -> bool
where
	T: BitStore,
	O: BitOrder,
{
	src.read_volatile()
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/replace.md")]
pub unsafe fn replace<T, O>(dst: BitPtr<Mut, T, O>, src: bool) -> bool
where
	T: BitStore,
	O: BitOrder,
{
	dst.replace(src)
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/slice_from_raw_parts.md")]
pub fn slice_from_raw_parts<T, O>(
	ptr: BitPtr<Const, T, O>,
	len: usize,
) -> *const BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	bitslice_from_raw_parts(ptr, len)
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/ptr/slice_from_raw_parts_mut.md")]
pub fn slice_from_raw_parts_mut<T, O>(
	ptr: BitPtr<Mut, T, O>,
	len: usize,
) -> *mut BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	bitslice_from_raw_parts_mut(ptr, len)
}

#[inline]
#[doc = include_str!("../doc/ptr/swap.md")]
pub unsafe fn swap<T1, T2, O1, O2>(
	one: BitPtr<Mut, T1, O1>,
	two: BitPtr<Mut, T2, O2>,
) where
	T1: BitStore,
	T2: BitStore,
	O1: BitOrder,
	O2: BitOrder,
{
	one.write(two.replace(one.read()));
}
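//  An illustrative sketch, added editorially and not part of the upstream
//  test suite: when the two orderings differ, `copy` walks each pointer in
//  its own ordering, which here reverses a byte.
#[cfg(test)]
mod copy_order_sketch {
	use crate::prelude::*;

	#[test]
	fn cross_order_copy_reverses_bits() {
		let src = 0b1000_0000u8;
		let mut dst = 0u8;
		let from = BitPtr::<_, _, Msb0>::from_ref(&src);
		let to = BitPtr::<_, _, Lsb0>::from_mut(&mut dst);
		//  `Msb0` reads the high bit first; `Lsb0` writes the low bit first.
		unsafe {
			super::copy(from, to, 8);
		}
		assert_eq!(dst, 0b0000_0001);
	}
}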
include_str!("../doc/ptr/swap_nonoverlapping.md")] pub unsafe fn swap_nonoverlapping( mut one: BitPtr, mut two: BitPtr, count: usize, ) where O1: BitOrder, O2: BitOrder, T1: BitStore, T2: BitStore, { // Note: compare codegen with `one.range(count).zip(two.range(count))`. for _ in 0 .. count { swap(one, two); one = one.add(1); two = two.add(1); } } #[inline] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/ptr/write.md")] pub unsafe fn write(dst: BitPtr, value: bool) where T: BitStore, O: BitOrder, { dst.write(value); } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `write_bits()` instead"] #[doc = include_str!("../doc/ptr/write_bytes.md")] pub unsafe fn write_bytes( dst: BitPtr, value: bool, count: usize, ) where T: BitStore, O: BitOrder, { write_bits(dst, value, count) } #[inline] #[allow(deprecated)] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/ptr/write_unaligned.md")] #[deprecated = "`BitPtr` does not have unaligned addresses"] pub unsafe fn write_unaligned(dst: BitPtr, value: bool) where T: BitStore, O: BitOrder, { dst.write_unaligned(value); } #[inline] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/ptr/write_volatile.md")] pub unsafe fn write_volatile(dst: BitPtr, value: bool) where T: BitStore, O: BitOrder, { dst.write_volatile(value); } // Renamed variants. #[inline] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/ptr/bitslice_from_raw_parts.md")] pub fn bitslice_from_raw_parts( ptr: BitPtr, len: usize, ) -> *const BitSlice where T: BitStore, O: BitOrder, { ptr.span(len).unwrap().into_bitslice_ptr() } #[inline] #[cfg(not(tarpaulin_include))] #[doc = include_str!("../doc/ptr/bitslice_from_raw_parts_mut.md")] pub fn bitslice_from_raw_parts_mut( ptr: BitPtr, len: usize, ) -> *mut BitSlice where T: BitStore, O: BitOrder, { ptr.span(len).unwrap().into_bitslice_ptr_mut() } #[inline] #[doc = include_str!("../doc/ptr/write_bits.md")] pub unsafe fn write_bits(dst: BitPtr, value: bool, count: usize) where T: BitStore, O: BitOrder, { for bit in dst.range(count) { bit.write(value); } } bitvec-1.0.1/src/serdes/array.rs000064400000000000000000000242331046102023000146430ustar 00000000000000#![doc=include_str!("../../doc/serdes/array.md")] use core::{ any, fmt::{ self, Formatter, }, }; use serde::{ de::{ Deserialize, Deserializer, Error, MapAccess, SeqAccess, Unexpected, Visitor, }, ser::{ Serialize, SerializeStruct, Serializer, }, }; use super::{ utils::{ Array, TypeName, }, Field, FIELDS, }; use crate::{ array::BitArray, index::BitIdx, mem::bits_of, order::BitOrder, store::BitStore, }; impl Serialize for BitArray where T: BitStore, O: BitOrder, T::Mem: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { let mut state = serializer.serialize_struct("BitArr", FIELDS.len())?; state.serialize_field("order", &any::type_name::())?; state.serialize_field("head", &BitIdx::::MIN)?; state.serialize_field("bits", &(self.len() as u64))?; state.serialize_field( "data", Array::from_ref(core::array::from_ref(&self.data)), )?; state.end() } } impl Serialize for BitArray<[T; N], O> where T: BitStore, O: BitOrder, T::Mem: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { let mut state = serializer.serialize_struct("BitArr", FIELDS.len())?; state.serialize_field("order", &any::type_name::())?; state.serialize_field("head", &BitIdx::::MIN)?; state.serialize_field("bits", &(self.len() as u64))?; state.serialize_field("data", Array::from_ref(&self.data))?; 
state.end() } } impl<'de, T, O> Deserialize<'de> for BitArray where T: BitStore, O: BitOrder, T::Mem: Deserialize<'de>, { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer .deserialize_struct("BitArr", FIELDS, BitArrVisitor::::THIS) .map(|BitArray { data: [elem], .. }| BitArray::new(elem)) } } impl<'de, T, O, const N: usize> Deserialize<'de> for BitArray<[T; N], O> where T: BitStore, O: BitOrder, T::Mem: Deserialize<'de>, { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_struct( "BitArr", FIELDS, BitArrVisitor::::THIS, ) } } /// Assists in deserialization of a static `BitArr`. struct BitArrVisitor where T: BitStore, O: BitOrder, { /// The deserialized bit-ordering string. order: Option>, /// The deserialized head-bit index. This must be zero; it is used for /// consistency with `BitSeq` and to carry `T::Mem` information. head: Option>, /// The deserialized bit-count. It must be `bits_of::<[T::Mem; N]>()`. bits: Option, /// The deserialized data buffer. data: Option>, } impl<'de, T, O, const N: usize> BitArrVisitor where T: BitStore, O: BitOrder, Array: Deserialize<'de>, { /// A new visitor in its ready condition. const THIS: Self = Self { order: None, head: None, bits: None, data: None, }; /// Attempts to assemble deserialized components into an output value. #[inline] fn assemble(mut self) -> Result, E> where E: Error { self.order.take().ok_or_else(|| E::missing_field("order"))?; let head = self.head.take().ok_or_else(|| E::missing_field("head"))?; let bits = self.bits.take().ok_or_else(|| E::missing_field("bits"))?; let data = self.data.take().ok_or_else(|| E::missing_field("data"))?; if head != BitIdx::MIN { return Err(E::invalid_value( Unexpected::Unsigned(head.into_inner() as u64), &"`BitArray` must have a head-bit of `0`", )); } let bits = bits as usize; if bits != bits_of::<[T; N]>() { return Err(E::invalid_length(bits, &self)); } Ok(BitArray::new(data.inner)) } } impl<'de, T, O, const N: usize> Visitor<'de> for BitArrVisitor where T: BitStore, O: BitOrder, Array: Deserialize<'de>, { type Value = BitArray<[T; N], O>; #[inline] fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { write!( fmt, "a `BitArray<[u{}; {}], {}>`", bits_of::(), N, any::type_name::(), ) } #[inline] fn visit_seq(mut self, mut seq: V) -> Result where V: SeqAccess<'de> { self.order = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(0, &self))?, ); self.head = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(1, &self))?, ); self.bits = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(2, &self))?, ); self.data = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(3, &self))?, ); self.assemble() } #[inline] fn visit_map(mut self, mut map: V) -> Result where V: MapAccess<'de> { while let Some(key) = map.next_key()? 
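
//  [Editor's illustrative sketch -- not part of the crate source.]
//  `BitArrVisitor::assemble` above rejects a non-zero head-bit and a
//  bit-count that differs from `bits_of::<[T; N]>()`. Tampering with a
//  serialized value demonstrates both checks; names are assumptions.
#[cfg(test)]
mod validation_sketch {
	use bitvec::prelude::*;

	#[test]
	fn rejects_inconsistent_metadata() {
		type BA = BitArr!(for 16, in u8, Msb0);
		let array = [0x3Cu8, 0xA5].into_bitarray::<Msb0>();
		let mut value = serde_json::to_value(&array).unwrap();
		//  Claim 7 live bits instead of 16: `invalid_length`.
		value["bits"] = serde_json::json!(7);
		assert!(serde_json::from_value::<BA>(value.clone()).is_err());
		//  Claim a head-bit of 1: `invalid_value`.
		value["bits"] = serde_json::json!(16);
		value["head"]["index"] = serde_json::json!(1);
		assert!(serde_json::from_value::<BA>(value).is_err());
	}
}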
{ match key { Field::Order => { if self.order.replace(map.next_value()?).is_some() { return Err(::duplicate_field("order")); } }, Field::Head => { if self.head.replace(map.next_value()?).is_some() { return Err(::duplicate_field("head")); } }, Field::Bits => { if self.bits.replace(map.next_value()?).is_some() { return Err(::duplicate_field("bits")); } }, Field::Data => { if self.data.replace(map.next_value()?).is_some() { return Err(::duplicate_field("data")); } }, } } self.assemble() } } #[cfg(test)] mod tests { #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::format; use core::any; use serde_test::{ assert_de_tokens, assert_de_tokens_error, assert_ser_tokens, Token, }; use crate::prelude::*; #[test] #[cfg(feature = "std")] fn roundtrip() -> Result<(), Box> { type BA = BitArr!(for 16, in u8, Msb0); let array = [0x3Cu8, 0xA5].into_bitarray::(); let bytes = bincode::serialize(&array)?; let array2 = bincode::deserialize::(&bytes)?; assert_eq!(array, array2); let json = serde_json::to_string(&array)?; let array3 = serde_json::from_str::(&json)?; assert_eq!(array, array3); let json_value = serde_json::to_value(&array)?; let array4 = serde_json::from_value::(json_value)?; assert_eq!(array, array4); type BA2 = BitArray; let array = BA2::new(44203); let bytes = bincode::serialize(&array)?; let array2 = bincode::deserialize::(&bytes)?; assert_eq!(array, array2); let json = serde_json::to_string(&array)?; let array3 = serde_json::from_str::(&json)?; assert_eq!(array, array3); let json_value = serde_json::to_value(&array)?; let array4 = serde_json::from_value::(json_value)?; assert_eq!(array, array4); Ok(()) } #[test] fn tokens() { let array = [0x3Cu8, 0xA5].into_bitarray::(); let tokens = &mut [ Token::Struct { name: "BitArr", len: 4, }, Token::Str("order"), Token::Str(any::type_name::()), Token::Str("head"), Token::Struct { name: "BitIdx", len: 2, }, Token::Str("width"), Token::U8(8), Token::Str("index"), Token::U8(0), Token::StructEnd, Token::Str("bits"), Token::U64(16), Token::Str("data"), Token::Tuple { len: 2 }, Token::U8(0x3C), Token::U8(0xA5), Token::TupleEnd, Token::StructEnd, ]; assert_ser_tokens(&array, tokens); tokens[1 .. 
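
//  [Editor's illustrative sketch -- not part of the crate source.]
//  The duplicate-field guard in `visit_map` above relies on
//  `Option::replace`, which hands back whatever value was previously in
//  the slot. The same idiom in isolation:
#[cfg(test)]
mod duplicate_guard_sketch {
	#[test]
	fn replace_reports_prior_value() {
		let mut slot: Option<u32> = None;
		//  First assignment: nothing was displaced.
		assert!(slot.replace(1).is_none());
		//  Second assignment: the prior value comes back, which the
		//  visitors above translate into `Error::duplicate_field`.
		assert!(slot.replace(2).is_some());
	}
}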
4].copy_from_slice(&[ Token::BorrowedStr("order"), Token::BorrowedStr(any::type_name::()), Token::BorrowedStr("head"), ]); tokens[5] = Token::BorrowedStr("width"); tokens[7] = Token::BorrowedStr("index"); tokens[10] = Token::BorrowedStr("bits"); tokens[12] = Token::BorrowedStr("data"); assert_de_tokens(&array, tokens); } #[test] #[cfg(feature = "alloc")] fn errors() { type BA = BitArr!(for 8, in u8, Msb0); let mut tokens = vec![ Token::Seq { len: Some(4) }, Token::BorrowedStr(any::type_name::()), ]; assert_de_tokens_error::( &tokens, &format!( "invalid value: string \"{}\", expected the string \"{}\"", any::type_name::(), any::type_name::(), ), ); tokens.extend([ Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, Token::U64(8), Token::Tuple { len: 1 }, Token::U8(0), Token::TupleEnd, Token::SeqEnd, ]); tokens[6] = Token::U64(7); assert_de_tokens_error::( &tokens, "invalid length 7, expected a `BitArray<[u8; 1], \ bitvec::order::Msb0>`", ); tokens[4] = Token::U8(1); assert_de_tokens_error::( &tokens, "invalid value: integer `1`, expected `BitArray` must have a \ head-bit of `0`", ); assert_de_tokens_error::( &[ Token::Struct { name: "BitArr", len: 2, }, Token::BorrowedStr("placeholder"), ], &format!( "unknown field `placeholder`, expected one of `{}`", super::FIELDS.join("`, `"), ), ); assert_de_tokens_error::( &[ Token::Struct { name: "BitArr", len: 2, }, Token::BorrowedStr("order"), Token::BorrowedStr(any::type_name::()), Token::BorrowedStr("order"), Token::BorrowedStr(any::type_name::()), Token::StructEnd, ], "duplicate field `order`", ); assert_de_tokens_error::( &[ Token::Struct { name: "BitArr", len: 2, }, Token::BorrowedStr("head"), Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, Token::BorrowedStr("head"), Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, Token::StructEnd, ], "duplicate field `head`", ); assert_de_tokens_error::( &[ Token::Struct { name: "BitArr", len: 2, }, Token::BorrowedStr("bits"), Token::U64(8), Token::BorrowedStr("bits"), Token::U64(8), Token::StructEnd, ], "duplicate field `bits`", ); assert_de_tokens_error::( &[ Token::Struct { name: "BitArr", len: 2, }, Token::BorrowedStr("data"), Token::Tuple { len: 1 }, Token::U8(0), Token::TupleEnd, Token::BorrowedStr("data"), Token::Tuple { len: 1 }, Token::U8(1), Token::TupleEnd, Token::StructEnd, ], "duplicate field `data`", ); } } bitvec-1.0.1/src/serdes/slice.rs000064400000000000000000000234701046102023000146260ustar 00000000000000#![doc=include_str!("../../doc/serdes/slice.md")] #[cfg(feature = "alloc")] use alloc::vec::Vec; use core::{ any, fmt::{ self, Formatter, }, marker::PhantomData, }; use serde::{ de::{ Deserialize, Deserializer, Error, MapAccess, SeqAccess, Visitor, }, ser::{ Serialize, SerializeStruct, Serializer, }, }; use wyz::comu::Const; use super::{ utils::TypeName, Field, FIELDS, }; #[cfg(feature = "alloc")] use crate::{ boxed::BitBox, vec::BitVec, }; use crate::{ index::BitIdx, mem::bits_of, order::BitOrder, ptr::{ AddressExt, BitSpan, BitSpanError, }, slice::BitSlice, store::BitStore, }; impl Serialize for BitSlice where T: BitStore, O: BitOrder, T::Mem: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { let head = self.as_bitspan().head(); let mut state = serializer.serialize_struct("BitSeq", FIELDS.len())?; state.serialize_field("order", &any::type_name::())?; state.serialize_field("head", &head)?; state.serialize_field("bits", &(self.len() as u64))?; state.serialize_field("data", 
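
//  [Editor's illustrative sketch -- not part of the crate source.]
//  Unlike `BitArray`, a `&BitSlice` may begin partway into its first
//  storage element, so the `Serialize` impl below records the real
//  head-bit while still shipping the whole occupied element in "data".
//  Rendered with `serde_json` (a dev-dependency):
#[cfg(test)]
mod head_offset_sketch {
	use bitvec::prelude::*;

	#[test]
	fn shifted_slice_records_head() {
		let bits = bits![u8, Lsb0; 0, 1, 0, 0, 1];
		let value = serde_json::to_value(&bits[1 ..]).unwrap();
		//  One bit into the element: head-index 1, four live bits.
		assert_eq!(value["head"]["index"], 1);
		assert_eq!(value["bits"], 4);
	}
}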
&self.domain())?; state.end() } } #[cfg(feature = "alloc")] impl Serialize for BitBox where T: BitStore, O: BitOrder, BitSlice: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { self.as_bitslice().serialize(serializer) } } #[cfg(feature = "alloc")] impl Serialize for BitVec where T: BitStore, O: BitOrder, BitSlice: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { self.as_bitslice().serialize(serializer) } } impl<'de, O> Deserialize<'de> for &'de BitSlice where O: BitOrder { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_struct( "BitSeq", FIELDS, BitSeqVisitor::::new( |data, head, bits| unsafe { BitSpan::new(data.as_ptr().into_address(), head, bits) .map(|span| BitSpan::into_bitslice_ref(span)) }, ), ) } } #[cfg(feature = "alloc")] impl<'de, T, O> Deserialize<'de> for BitBox where T: BitStore, O: BitOrder, Vec: Deserialize<'de>, { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { as Deserialize<'de>>::deserialize(deserializer) .map(BitVec::into_boxed_bitslice) } } #[cfg(feature = "alloc")] impl<'de, T, O> Deserialize<'de> for BitVec where T: BitStore, O: BitOrder, Vec: Deserialize<'de>, { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_struct( "BitSeq", FIELDS, BitSeqVisitor::, Self, _>::new( |vec, head, bits| unsafe { let addr = vec.as_ptr().into_address(); let mut bv = BitVec::try_from_vec(vec).map_err(|_| { BitSpan::::new(addr, head, bits) .unwrap_err() })?; bv.set_head(head); bv.set_len(bits); Ok(bv) }, ), ) } } /// Assists in deserialization of a dynamic `BitSeq`. struct BitSeqVisitor where T: BitStore, O: BitOrder, Func: FnOnce(In, BitIdx, usize) -> Result>, { /// As well as a final output value. out: PhantomData>>, /// The deserialized bit-ordering string. order: Option>, /// The deserialized head-bit index. head: Option>, /// The deserialized bit-count. bits: Option, /// The deserialized data buffer. data: Option, /// A functor responsible for final transformation of the deserialized /// components into the output value. func: Func, } impl<'de, T, O, In, Out, Func> BitSeqVisitor where T: 'de + BitStore, O: BitOrder, In: Deserialize<'de>, Func: FnOnce(In, BitIdx, usize) -> Result>, { /// Creates a new visitor with a given transform functor. #[inline] fn new(func: Func) -> Self { Self { out: PhantomData, order: None, head: None, bits: None, data: None, func, } } /// Attempts to assemble deserialized components into an output value. #[inline] fn assemble(mut self) -> Result where E: Error { self.order.take().ok_or_else(|| E::missing_field("order"))?; let head = self.head.take().ok_or_else(|| E::missing_field("head"))?; let bits = self.bits.take().ok_or_else(|| E::missing_field("bits"))?; let data = self.data.take().ok_or_else(|| E::missing_field("data"))?; (self.func)(data, head, bits as usize).map_err(|_| todo!()) } } impl<'de, T, O, In, Out, Func> Visitor<'de> for BitSeqVisitor where T: 'de + BitStore, O: BitOrder, In: Deserialize<'de>, Func: FnOnce(In, BitIdx, usize) -> Result>, { type Value = Out; #[inline] fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { write!( fmt, "a `BitSlice`", bits_of::(), any::type_name::(), ) } #[inline] fn visit_seq(mut self, mut seq: V) -> Result where V: SeqAccess<'de> { self.order = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(0, &self))?, ); self.head = Some( seq.next_element()? 
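
//  [Editor's illustrative sketch -- not part of the crate source.]
//  `&BitSlice<u8, O>` deserializes zero-copy: the `u8` buffer is
//  borrowed straight out of the serialized bytes, so those bytes must
//  outlive the produced slice. `bincode` (a dev-dependency) supports
//  borrowed data:
#[cfg(test)]
mod zero_copy_sketch {
	use bitvec::prelude::*;

	#[test]
	fn borrowed_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
		let bits = bits![u8, Msb0; 1, 0, 1, 1, 0];
		let bytes = bincode::serialize(&bits)?;
		//  The deserialized slice borrows from `bytes`.
		let view: &BitSlice<u8, Msb0> = bincode::deserialize(&bytes)?;
		assert_eq!(view, bits);
		Ok(())
	}
}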
.ok_or_else(|| ::invalid_length(1, &self))?, ); self.bits = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(2, &self))?, ); self.data = Some( seq.next_element()? .ok_or_else(|| ::invalid_length(3, &self))?, ); self.assemble() } #[inline] fn visit_map(mut self, mut map: V) -> Result where V: MapAccess<'de> { while let Some(key) = map.next_key()? { match key { Field::Order => { if self.order.replace(map.next_value()?).is_some() { return Err(::duplicate_field("order")); } }, Field::Head => { if self.head.replace(map.next_value()?).is_some() { return Err(::duplicate_field("head")); } }, Field::Bits => { if self.bits.replace(map.next_value()?).is_some() { return Err(::duplicate_field("bits")); } }, Field::Data => { if self.data.replace(map.next_value()?).is_some() { return Err(::duplicate_field("data")); } }, } } self.assemble() } } #[cfg(test)] mod tests { #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::format; use core::any; use serde_test::{ assert_de_tokens, assert_de_tokens_error, assert_ser_tokens, Token, }; use crate::prelude::*; #[test] #[cfg(feature = "alloc")] fn roundtrip() -> Result<(), alloc::boxed::Box> { let bits = bits![u8, Msb0; 1, 0, 1, 1, 0]; let encoded = bincode::serialize(&bits)?; let bits2 = bincode::deserialize::<&BitSlice>(&encoded)?; assert_eq!(bits, bits2); Ok(()) } #[test] fn tokens() { let slice = bits![u8, Lsb0; 0, 1, 0, 0, 1]; let tokens = &mut [ Token::Struct { name: "BitSeq", len: 4, }, Token::Str("order"), Token::Str(any::type_name::()), Token::Str("head"), Token::Struct { name: "BitIdx", len: 2, }, Token::Str("width"), Token::U8(8), Token::Str("index"), Token::U8(0), Token::StructEnd, Token::Str("bits"), Token::U64(5), Token::Str("data"), Token::Seq { len: Some(1) }, Token::U8(18), Token::SeqEnd, Token::StructEnd, ]; assert_ser_tokens(&slice, tokens); tokens[8] = Token::U8(1); tokens[11] = Token::U64(4); assert_ser_tokens(&&slice[1 ..], tokens); let tokens = &[ Token::Seq { len: Some(4) }, Token::BorrowedStr(any::type_name::()), Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, Token::U64(5), Token::BorrowedBytes(&[18]), Token::SeqEnd, ]; assert_de_tokens(&slice, tokens); } #[test] #[cfg(feature = "alloc")] fn errors() { assert_de_tokens_error::<&BitSlice>( &[ Token::Seq { len: Some(4) }, Token::BorrowedStr(any::type_name::()), ], &format!( "invalid value: string \"{}\", expected the string \"{}\"", any::type_name::(), any::type_name::(), ), ); assert_de_tokens_error::<&BitSlice>( &[ Token::Struct { name: "BitSeq", len: 1, }, Token::BorrowedStr("unknown"), ], &format!( "unknown field `unknown`, expected one of `{}`", super::FIELDS.join("`, `"), ), ); assert_de_tokens_error::<&BitSlice>( &[ Token::Struct { name: "BitSeq", len: 2, }, Token::BorrowedStr("order"), Token::BorrowedStr(any::type_name::()), Token::BorrowedStr("order"), Token::BorrowedStr(any::type_name::()), Token::StructEnd, ], "duplicate field `order`", ); assert_de_tokens_error::<&BitSlice>( &[ Token::Struct { name: "BitSeq", len: 2, }, Token::BorrowedStr("head"), Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, Token::BorrowedStr("head"), Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, Token::StructEnd, ], "duplicate field `head`", ); assert_de_tokens_error::<&BitSlice>( &[ Token::Struct { name: "BitSeq", len: 2, }, Token::BorrowedStr("bits"), Token::U64(10), Token::BorrowedStr("bits"), Token::U64(10), Token::StructEnd, ], "duplicate field `bits`", ); assert_de_tokens_error::<&BitSlice>( &[ Token::Struct 
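
//  [Editor's illustrative sketch -- not part of the crate source.]
//  The "order" field is matched against `any::type_name::<O>()` during
//  deserialization (see `TypeName` in `utils.rs` below), so a buffer
//  serialized with one bit-ordering refuses to load as another:
#[cfg(test)]
mod order_guard_sketch {
	use bitvec::prelude::*;

	#[test]
	fn ordering_mismatch_is_an_error() {
		let msb = bitvec![u8, Msb0; 1, 0, 1];
		let json = serde_json::to_string(&msb).unwrap();
		//  Same storage type, different ordering: rejected.
		assert!(serde_json::from_str::<BitVec<u8, Lsb0>>(&json).is_err());
		//  Matching ordering: accepted.
		assert!(serde_json::from_str::<BitVec<u8, Msb0>>(&json).is_ok());
	}
}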
{ name: "BitSeq", len: 2, }, Token::BorrowedStr("data"), Token::BorrowedBytes(&[0x3C, 0xA5]), Token::BorrowedStr("data"), Token::BorrowedBytes(&[0x3C, 0xA5]), Token::StructEnd, ], "duplicate field `data`", ); } } bitvec-1.0.1/src/serdes/utils.rs000064400000000000000000000256461046102023000146760ustar 00000000000000#![doc=include_str!("../../doc/serdes/utils.md")] use core::{ any, fmt::{ self, Formatter, }, marker::PhantomData, mem::MaybeUninit, }; use serde::{ de::{ Deserialize, Deserializer, Error, MapAccess, SeqAccess, Unexpected, Visitor, }, ser::{ Serialize, SerializeSeq, SerializeStruct, SerializeTuple, Serializer, }, }; use wyz::comu::Const; use crate::{ domain::Domain, index::BitIdx, mem::{ bits_of, BitRegister, }, order::BitOrder, store::BitStore, view::BitViewSized, }; /// A zero-sized type that deserializes from any string as long as it is equal /// to `any::type_name::()`. pub(super) struct TypeName(PhantomData); impl TypeName { /// Creates a type-name ghost for any type. fn new() -> Self { TypeName(PhantomData) } } impl<'de, T> Deserialize<'de> for TypeName { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_str(Self::new()) } } impl<'de, T> Visitor<'de> for TypeName { type Value = Self; fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "the string {:?}", any::type_name::()) } fn visit_str(self, value: &str) -> Result where E: serde::de::Error { if value == any::type_name::() { Ok(self) } else { Err(serde::de::Error::invalid_value( Unexpected::Str(value), &self, )) } } } /// Fields used in the `BitIdx` transport format. static FIELDS: &[&str] = &["width", "index"]; /// The components of a bit-idx in wire format. enum Field { /// Denotes the maximum allowable value of the bit-idx. Width, /// Denotes the value of the bit-idx. Index, } /// Visits field tokens of a bit-idx wire format. struct FieldVisitor; impl<'de> Deserialize<'de> for Field { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_identifier(FieldVisitor) } } impl<'de> Visitor<'de> for FieldVisitor { type Value = Field; fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { fmt.write_str("field identifier") } fn visit_str(self, value: &str) -> Result where E: serde::de::Error { match value { "width" => Ok(Field::Width), "index" => Ok(Field::Index), _ => Err(serde::de::Error::unknown_field(value, FIELDS)), } } } impl Serialize for BitIdx where R: BitRegister { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { let mut state = serializer.serialize_struct("BitIdx", FIELDS.len())?; // Emit the bit-width of the `R` type. state.serialize_field(FIELDS[0], &(bits_of::() as u8))?; // Emit the actual head-bit index. state.serialize_field(FIELDS[1], &self.into_inner())?; state.end() } } impl<'de, R> Deserialize<'de> for BitIdx where R: BitRegister { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_struct( "BitIdx", FIELDS, BitIdxVisitor::::THIS, ) } } impl Serialize for Domain<'_, Const, T, O> where T: BitStore, O: BitOrder, T::Mem: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { // Domain is functionally equivalent to `[T::Mem]`. let mut state = serializer.serialize_seq(Some(self.len()))?; for elem in *self { state.serialize_element(&elem)?; } state.end() } } /** `serde` only provides implementations for `[T; 0 ..= 32]`. 
This wrapper provides the same de/ser logic, but allows it to be used on arrays of any size. **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub(super) struct Array where T: BitStore { /// The data buffer being transported. pub(super) inner: [T; N], } impl Array where T: BitStore { /// Constructs a `&Array` reference from an `&[T; N]` reference. /// /// ## Safety /// /// `Array` is `#[repr(transparent)]`, so this address transformation is /// always sound. pub(super) fn from_ref(arr: &[T; N]) -> &Self { unsafe { &*(arr as *const [T; N] as *const Self) } } } impl Serialize for Array where T: BitStore, T::Mem: Serialize, { #[inline] fn serialize(&self, serializer: S) -> super::Result where S: Serializer { // `serde` serializes arrays as a tuple, so that transport formats can // safely choose to keep or discard the length counter. let mut state = serializer.serialize_tuple(N)?; for elem in self.inner.as_raw_slice().iter().map(BitStore::load_value) { state.serialize_element(&elem)? } state.end() } } impl<'de, T, const N: usize> Deserialize<'de> for Array where T: BitStore, T::Mem: Deserialize<'de>, { #[inline] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_tuple(N, ArrayVisitor::::THIS) } } /// Assists in deserialization of a static `[T; N]` for any `N`. struct ArrayVisitor where T: BitStore { /// This produces an array during its work. inner: PhantomData<[T; N]>, } impl ArrayVisitor where T: BitStore { /// A blank visitor in its ready state. const THIS: Self = Self { inner: PhantomData }; } impl<'de, T, const N: usize> Visitor<'de> for ArrayVisitor where T: BitStore, T::Mem: Deserialize<'de>, { type Value = Array; #[inline] fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "a [{}; {}]", any::type_name::(), N) } #[inline] fn visit_seq(self, mut seq: V) -> Result where V: SeqAccess<'de> { let mut uninit = [MaybeUninit::::uninit(); N]; for (idx, slot) in uninit.iter_mut().enumerate() { slot.write( seq.next_element::()? .ok_or_else(|| ::invalid_length(idx, &self))?, ); } Ok(Array { inner: uninit .map(|elem| unsafe { MaybeUninit::assume_init(elem) }) .map(BitStore::new), }) } } /// Assists in deserialization of a `BitIdx` value. struct BitIdxVisitor where R: BitRegister { /// This requires carrying the register type information. inner: PhantomData, } impl BitIdxVisitor where R: BitRegister { /// A blank visitor in its ready state. const THIS: Self = Self { inner: PhantomData }; /// Attempts to assemble deserialized components into an output value. #[inline] fn assemble(self, width: u8, index: u8) -> Result, E> where E: Error { // Fail if the transported type width does not match the destination. if width != bits_of::() as u8 { return Err(E::invalid_type( Unexpected::Unsigned(width as u64), &self, )); } // Capture an invalid index value and route it to the error handler. BitIdx::::new(index).map_err(|_| { E::invalid_value(Unexpected::Unsigned(index as u64), &self) }) } } impl<'de, R> Visitor<'de> for BitIdxVisitor where R: BitRegister { type Value = BitIdx; #[inline] fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "a valid `BitIdx`", bits_of::()) } #[inline] fn visit_seq(self, mut seq: V) -> Result where V: SeqAccess<'de> { let width = seq .next_element::()? .ok_or_else(|| ::invalid_length(0, &self))?; let index = seq .next_element::()? 
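
//  [Editor's illustrative sketch -- not part of the crate source.]
//  `ArrayVisitor::visit_seq` above fills a `[MaybeUninit<T>; N]` slot by
//  slot and then converts with `array::map`, which is how a `[T; N]` of
//  arbitrary `N` is collected without requiring `Default`. The same
//  pattern in isolation (for `Copy` elements, so a short read leaks
//  nothing):
#[cfg(test)]
mod uninit_array_sketch {
	use core::mem::MaybeUninit;

	fn collect_array<T: Copy, const N: usize>(
		mut src: impl Iterator<Item = T>,
	) -> Option<[T; N]> {
		let mut buf = [MaybeUninit::<T>::uninit(); N];
		for slot in buf.iter_mut() {
			slot.write(src.next()?);
		}
		//  SAFETY: every slot was initialized by the loop above.
		Some(buf.map(|slot| unsafe { slot.assume_init() }))
	}

	#[test]
	fn collects_exactly_n() {
		assert_eq!(collect_array::<u8, 3>(0 ..), Some([0, 1, 2]));
		//  Too few source items: the partial buffer is discarded.
		assert_eq!(collect_array::<u8, 4>(0 .. 2), None);
	}
}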
.ok_or_else(|| ::invalid_length(1, &self))?; self.assemble(width, index) } #[inline] fn visit_map(self, mut map: V) -> Result where V: MapAccess<'de> { let mut width = None; let mut index = None; while let Some(key) = map.next_key()? { match key { Field::Width => { if width.replace(map.next_value::()?).is_some() { return Err(::duplicate_field("width")); } }, Field::Index => { if index.replace(map.next_value::()?).is_some() { return Err(::duplicate_field("index")); } }, } } let width = width.ok_or_else(|| ::missing_field("width"))?; let index = index.ok_or_else(|| ::missing_field("index"))?; self.assemble(width, index) } } #[cfg(test)] mod tests { use serde_test::{ assert_de_tokens, assert_de_tokens_error, assert_ser_tokens, Token, }; use super::*; #[test] fn array_wrapper() { let array = Array { inner: [0u8; 40] }; #[rustfmt::skip] let tokens = &[ Token::Tuple { len: 40 }, Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::U8(0), Token::TupleEnd, ]; assert_ser_tokens(&array, tokens); assert_de_tokens(&array, tokens); let tokens = &[Token::Tuple { len: 1 }, Token::U32(0), Token::TupleEnd]; assert_de_tokens_error::>( tokens, "invalid length 1, expected a [u32; 2]", ); } #[test] fn bit_idx() { let idx = BitIdx::::new(20).unwrap(); let tokens = &mut [ Token::Struct { name: "BitIdx", len: 2, }, Token::Str("width"), Token::U8(32), Token::Str("index"), Token::U8(20), Token::StructEnd, ]; assert_ser_tokens(&idx, tokens); tokens[1] = Token::BorrowedStr("width"); tokens[3] = Token::BorrowedStr("index"); assert_de_tokens(&idx, tokens); let idx = BitIdx::::new(10).unwrap(); let tokens = &[ Token::Seq { len: Some(2) }, Token::U8(16), Token::U8(10), Token::SeqEnd, ]; assert_de_tokens(&idx, tokens); assert_de_tokens_error::>( &[ Token::Seq { len: Some(2) }, Token::U8(8), Token::U8(0), Token::SeqEnd, ], "invalid type: integer `8`, expected a valid `BitIdx`", ); assert_de_tokens_error::>( &[ Token::Seq { len: Some(2) }, Token::U8(16), Token::U8(16), Token::SeqEnd, ], "invalid value: integer `16`, expected a valid `BitIdx`", ); assert_de_tokens_error::>( &[ Token::Struct { name: "BitIdx", len: 1, }, Token::BorrowedStr("unknown"), ], "unknown field `unknown`, expected `width` or `index`", ); assert_de_tokens_error::>( &[ Token::Struct { name: "BitIdx", len: 2, }, Token::BorrowedStr("width"), Token::U8(8), Token::BorrowedStr("width"), Token::U8(8), Token::StructEnd, ], "duplicate field `width`", ); assert_de_tokens_error::>( &[ Token::Struct { name: "BitIdx", len: 2, }, Token::BorrowedStr("index"), Token::U8(7), Token::BorrowedStr("index"), Token::U8(7), Token::StructEnd, ], "duplicate field `index`", ); } } bitvec-1.0.1/src/serdes.rs000064400000000000000000000073131046102023000135250ustar 00000000000000#![cfg(feature = "serde")] #![doc = include_str!("../doc/serdes.md")] mod array; mod slice; mod utils; use core::fmt::{ self, Formatter, }; use serde::de::{ Deserialize, Deserializer, Visitor, }; /// A result of serialization. 
type Result = core::result::Result< ::Ok, ::Error, >; /// A list of fields in the `BitSeq` and `BitArr` transport format. static FIELDS: &[&str] = &["order", "head", "bits", "data"]; /// The components of a bit-slice in wire format. enum Field { /// Denotes the `` type parameter. Order, /// Denotes the head-bit index in the first `Data` element. Head, /// Denotes the count of all live bits in the `Data` sequence. Bits, /// Denotes the raw storage sequence. Data, } /// Visits field tokens without attempting to deserialize into real data. struct FieldVisitor; impl<'de> Deserialize<'de> for Field { fn deserialize(deserializer: D) -> core::result::Result where D: Deserializer<'de> { deserializer.deserialize_identifier(FieldVisitor) } } impl<'de> Visitor<'de> for FieldVisitor { type Value = Field; fn expecting(&self, fmt: &mut Formatter) -> fmt::Result { fmt.write_str("field_identifier") } fn visit_str(self, value: &str) -> core::result::Result where E: serde::de::Error { match value { "order" => Ok(Field::Order), "head" => Ok(Field::Head), "bits" => Ok(Field::Bits), "data" => Ok(Field::Data), _ => Err(serde::de::Error::unknown_field(value, FIELDS)), } } } #[cfg(test)] mod tests { use serde::{ Deserialize, Serialize, }; use static_assertions::*; use crate::prelude::*; #[test] fn trait_impls() { use core::{ cell::Cell, sync::atomic::*, }; use radium::types::*; macro_rules! check_impl { ($($ord:ident @ $($sto:ty),+);+ $(;)?) => {{ $( $( assert_impl_all!(BitSlice<$sto, $ord>: Serialize); assert_impl_all!(BitArray<$sto, $ord>: Serialize, Deserialize<'static>); assert_impl_all!(BitArray<[$sto; 32], $ord>: Serialize, Deserialize<'static>); #[cfg(feature = "alloc")] { assert_impl_all!(BitBox<$sto, $ord>: Serialize, Deserialize<'static>); assert_impl_all!(BitVec<$sto, $ord>: Serialize, Deserialize<'static>); } )+ )+ }}; } assert_impl_all!(&BitSlice: Deserialize<'static>); assert_impl_all!(&BitSlice: Deserialize<'static>); assert_impl_all!(&BitSlice: Deserialize<'static>); check_impl! { Lsb0 @ u8, u16, u32, usize; Msb0 @ u8, u16, u32, usize; LocalBits @ u8, u16, u32, usize; Lsb0 @ Cell, Cell, Cell, Cell; Msb0 @ Cell, Cell, Cell, Cell; LocalBits @ Cell, Cell, Cell, Cell; Lsb0 @ RadiumU8, RadiumU16, RadiumU32, RadiumUsize; Msb0 @ RadiumU8, RadiumU16, RadiumU32, RadiumUsize; LocalBits @ RadiumU8, RadiumU16, RadiumU32, RadiumUsize; } radium::if_atomic! { if atomic(8) { check_impl! { Lsb0 @ AtomicU8; Msb0 @ AtomicU8; LocalBits @ AtomicU8; } } if atomic(16) { check_impl! { Lsb0 @ AtomicU16; Msb0 @ AtomicU16; LocalBits @ AtomicU16; } } if atomic(32) { check_impl! { Lsb0 @ AtomicU32; Msb0 @ AtomicU32; LocalBits @ AtomicU32; } } if atomic(ptr) { check_impl! { Lsb0 @ AtomicUsize; Msb0 @ AtomicUsize; LocalBits @ AtomicUsize; } } } #[cfg(target_pointer_width = "64")] check_impl! { Lsb0 @ u64, RadiumU64; Msb0 @ u64, RadiumU64; LocalBits @ u64, RadiumU64; } #[cfg(target_pointer_width = "64")] radium::if_atomic!(if atomic(64) { check_impl! 
{ Lsb0 @ AtomicU64; Msb0 @ AtomicU64; LocalBits @ AtomicU64; } }); } } bitvec-1.0.1/src/slice/api.rs000064400000000000000000002364731046102023000141230ustar 00000000000000#![doc = include_str!("../../doc/slice/api.md")] use core::{ cmp, ops::{ Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }, }; use wyz::{ comu::{ Const, Mut, }, range::RangeExt, }; use super::{ BitSlice, Chunks, ChunksExact, ChunksExactMut, ChunksMut, Iter, IterMut, RChunks, RChunksExact, RChunksExactMut, RChunksMut, RSplit, RSplitMut, RSplitN, RSplitNMut, Split, SplitInclusive, SplitInclusiveMut, SplitMut, SplitN, SplitNMut, Windows, }; #[cfg(feature = "alloc")] use crate::vec::BitVec; use crate::{ array::BitArray, domain::Domain, mem::{ self, BitRegister, }, order::BitOrder, ptr::{ BitPtr, BitRef, BitSpan, BitSpanError, }, store::BitStore, }; /// Port of the `[T]` inherent API. impl BitSlice where T: BitStore, O: BitOrder, { /// Gets the number of bits in the bit-slice. /// /// ## Original /// /// [`slice::len`](https://doc.rust-lang.org/std/primitive.slice.html#method.len) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert_eq!(bits![].len(), 0); /// assert_eq!(bits![0; 10].len(), 10); /// ``` #[inline] pub fn len(&self) -> usize { self.as_bitspan().len() } /// Tests if the bit-slice is empty (length zero). /// /// ## Original /// /// [`slice::is_empty`](https://doc.rust-lang.org/std/primitive.slice.html#method.is_empty) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(bits![].is_empty()); /// assert!(!bits![0; 10].is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Gets a reference to the first bit of the bit-slice, or `None` if it is /// empty. /// /// ## Original /// /// [`slice::first`](https://doc.rust-lang.org/std/primitive.slice.html#method.first) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![1, 0, 0]; /// assert_eq!(bits.first().as_deref(), Some(&true)); /// /// assert!(bits![].first().is_none()); /// ``` #[inline] pub fn first(&self) -> Option> { self.get(0) } /// Gets a mutable reference to the first bit of the bit-slice, or `None` if /// it is empty. /// /// ## Original /// /// [`slice::first_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.first_mut) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. This must be bound as `mut` in order to write /// through it. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 3]; /// if let Some(mut first) = bits.first_mut() { /// *first = true; /// } /// assert_eq!(bits, bits![1, 0, 0]); /// /// assert!(bits![mut].first_mut().is_none()); /// ``` #[inline] pub fn first_mut(&mut self) -> Option> { self.get_mut(0) } /// Splits the bit-slice into a reference to its first bit, and the rest of /// the bit-slice. Returns `None` when empty. /// /// ## Original /// /// [`slice::split_first`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_first) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![1, 0, 0]; /// let (first, rest) = bits.split_first().unwrap(); /// assert_eq!(first, &true); /// assert_eq!(rest, bits![0; 2]); /// ``` #[inline] pub fn split_first(&self) -> Option<(BitRef, &Self)> { match self.len() { 0 => None, _ => unsafe { let (head, rest) = self.split_at_unchecked(1); Some((head.get_unchecked(0), rest)) }, } } /// Splits the bit-slice into mutable references of its first bit, and the /// rest of the bit-slice. Returns `None` when empty. /// /// ## Original /// /// [`slice::split_first_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_first_mut) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. This must be bound as `mut` in order to write /// through it. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 3]; /// if let Some((mut first, rest)) = bits.split_first_mut() { /// *first = true; /// assert_eq!(rest, bits![0; 2]); /// } /// assert_eq!(bits, bits![1, 0, 0]); /// ``` #[inline] pub fn split_first_mut( &mut self, ) -> Option<(BitRef, &mut BitSlice)> { match self.len() { 0 => None, _ => unsafe { let (head, rest) = self.split_at_unchecked_mut(1); Some((head.get_unchecked_mut(0), rest)) }, } } /// Splits the bit-slice into a reference to its last bit, and the rest of /// the bit-slice. Returns `None` when empty. /// /// ## Original /// /// [`slice::split_last`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_last) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1]; /// let (last, rest) = bits.split_last().unwrap(); /// assert_eq!(last, &true); /// assert_eq!(rest, bits![0; 2]); /// ``` #[inline] pub fn split_last(&self) -> Option<(BitRef, &Self)> { match self.len() { 0 => None, n => unsafe { let (rest, tail) = self.split_at_unchecked(n - 1); Some((tail.get_unchecked(0), rest)) }, } } /// Splits the bit-slice into mutable references to its last bit, and the /// rest of the bit-slice. Returns `None` when empty. /// /// ## Original /// /// [`slice::split_last_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_last_mut) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. This must be bound as `mut` in order to write /// through it. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 3]; /// if let Some((mut last, rest)) = bits.split_last_mut() { /// *last = true; /// assert_eq!(rest, bits![0; 2]); /// } /// assert_eq!(bits, bits![0, 0, 1]); /// ``` #[inline] pub fn split_last_mut( &mut self, ) -> Option<(BitRef, &mut BitSlice)> { match self.len() { 0 => None, n => unsafe { let (rest, tail) = self.split_at_unchecked_mut(n - 1); Some((tail.get_unchecked_mut(0), rest)) }, } } /// Gets a reference to the last bit of the bit-slice, or `None` if it is /// empty. /// /// ## Original /// /// [`slice::last`](https://doc.rust-lang.org/std/primitive.slice.html#method.last) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1]; /// assert_eq!(bits.last().as_deref(), Some(&true)); /// /// assert!(bits![].last().is_none()); /// ``` #[inline] pub fn last(&self) -> Option> { match self.len() { 0 => None, n => Some(unsafe { self.get_unchecked(n - 1) }), } } /// Gets a mutable reference to the last bit of the bit-slice, or `None` if /// it is empty. /// /// ## Original /// /// [`slice::last_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.last_mut) /// /// ## API Differences /// /// `bitvec` uses a custom structure for both read-only and mutable /// references to `bool`. This must be bound as `mut` in order to write /// through it. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 3]; /// if let Some(mut last) = bits.last_mut() { /// *last = true; /// } /// assert_eq!(bits, bits![0, 0, 1]); /// /// assert!(bits![mut].last_mut().is_none()); /// ``` #[inline] pub fn last_mut(&mut self) -> Option> { match self.len() { 0 => None, n => Some(unsafe { self.get_unchecked_mut(n - 1) }), } } /// Gets a reference to a single bit or a subsection of the bit-slice, /// depending on the type of `index`. /// /// - If given a `usize`, this produces a reference structure to the `bool` /// at the position. /// - If given any form of range, this produces a smaller bit-slice. /// /// This returns `None` if the `index` departs the bounds of `self`. /// /// ## Original /// /// [`slice::get`](https://doc.rust-lang.org/std/primitive.slice.html#method.get) /// /// ## API Differences /// /// `BitSliceIndex` uses discrete types for immutable and mutable /// references, rather than a single referent type. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0]; /// assert_eq!(bits.get(1).as_deref(), Some(&true)); /// assert_eq!(bits.get(0 .. 2), Some(bits![0, 1])); /// assert!(bits.get(3).is_none()); /// assert!(bits.get(0 .. 4).is_none()); /// ``` #[inline] pub fn get<'a, I>(&'a self, index: I) -> Option where I: BitSliceIndex<'a, T, O> { index.get(self) } /// Gets a mutable reference to a single bit or a subsection of the /// bit-slice, depending on the type of `index`. /// /// - If given a `usize`, this produces a reference structure to the `bool` /// at the position. /// - If given any form of range, this produces a smaller bit-slice. /// /// This returns `None` if the `index` departs the bounds of `self`. /// /// ## Original /// /// [`slice::get_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.get_mut) /// /// ## API Differences /// /// `BitSliceIndex` uses discrete types for immutable and mutable /// references, rather than a single referent type. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 3]; /// /// *bits.get_mut(0).unwrap() = true; /// bits.get_mut(1 ..).unwrap().fill(true); /// assert_eq!(bits, bits![1; 3]); /// ``` #[inline] pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option where I: BitSliceIndex<'a, T, O> { index.get_mut(self) } /// Gets a reference to a single bit or to a subsection of the bit-slice, /// without bounds checking. /// /// This has the same arguments and behavior as [`.get()`], except that it /// does not check that `index` is in bounds. 
/// /// ## Original /// /// [`slice::get_unchecked`](https://doc.rust-lang.org/std/primitive.slice.html#method.get_unchecked) /// /// ## Safety /// /// You must ensure that `index` is within bounds (within the range `0 .. /// self.len()`), or this method will introduce memory safety and/or /// undefined behavior. /// /// It is library-level undefined behavior to index beyond the length of any /// bit-slice, even if you **know** that the offset remains within an /// allocation as measured by Rust or LLVM. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let data = 0b0001_0010u8; /// let bits = &data.view_bits::()[.. 3]; /// /// unsafe { /// assert!(bits.get_unchecked(1)); /// assert!(bits.get_unchecked(4)); /// } /// ``` /// /// [`.get()`]: Self::get #[inline] pub unsafe fn get_unchecked<'a, I>(&'a self, index: I) -> I::Immut where I: BitSliceIndex<'a, T, O> { index.get_unchecked(self) } /// Gets a mutable reference to a single bit or a subsection of the /// bit-slice, depending on the type of `index`. /// /// This has the same arguments and behavior as [`.get_mut()`], except that /// it does not check that `index` is in bounds. /// /// ## Original /// /// [`slice::get_unchecked_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.get_unchecked_mut) /// /// ## Safety /// /// You must ensure that `index` is within bounds (within the range `0 .. /// self.len()`), or this method will introduce memory safety and/or /// undefined behavior. /// /// It is library-level undefined behavior to index beyond the length of any /// bit-slice, even if you **know** that the offset remains within an /// allocation as measured by Rust or LLVM. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut data = 0u8; /// let bits = &mut data.view_bits_mut::()[.. 3]; /// /// unsafe { /// bits.get_unchecked_mut(1).commit(true); /// bits.get_unchecked_mut(4 .. 6).fill(true); /// } /// assert_eq!(data, 0b0011_0010); /// ``` /// /// [`.get_mut()`]: Self::get_mut #[inline] pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::Mut where I: BitSliceIndex<'a, T, O> { index.get_unchecked_mut(self) } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitptr()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_ptr(&self) -> BitPtr { self.as_bitptr() } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_mut_bitptr()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_mut_ptr(&mut self) -> BitPtr { self.as_mut_bitptr() } /// Produces a range of bit-pointers to each bit in the bit-slice. /// /// This is a standard-library range, which has no real functionality for /// pointer types. You should prefer [`.as_bitptr_range()`] instead, as it /// produces a custom structure that provides expected ranging /// functionality. /// /// ## Original /// /// [`slice::as_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr_range) /// /// [`.as_bitptr_range()`]: Self::as_bitptr_range #[inline] #[cfg(not(tarpaulin_include))] pub fn as_ptr_range(&self) -> Range> { self.as_bitptr_range().into_range() } /// Produces a range of mutable bit-pointers to each bit in the bit-slice. /// /// This is a standard-library range, which has no real functionality for /// pointer types. You should prefer [`.as_mut_bitptr_range()`] instead, as /// it produces a custom structure that provides expected ranging /// functionality. 
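
//  [Editor's illustrative sketch -- not part of the crate source.]
//  The usual pattern for `get_unchecked`: validate the bounds once up
//  front, then index without per-access checks inside the hot loop. A
//  safe wrapper written under that assumption:
#[cfg(test)]
mod unchecked_sketch {
	use bitvec::prelude::*;

	/// Counts set bits in `bits[start .. start + len]`, checking bounds
	/// exactly once.
	fn count_ones_in(bits: &BitSlice<u8, Msb0>, start: usize, len: usize) -> usize {
		assert!(start.checked_add(len).map_or(false, |end| end <= bits.len()));
		(start .. start + len)
			//  SAFETY: the assertion above keeps every index in bounds.
			.filter(|&idx| unsafe { *bits.get_unchecked(idx) })
			.count()
	}

	#[test]
	fn counts() {
		let bits = bits![u8, Msb0; 1, 0, 1, 1, 0, 1];
		assert_eq!(count_ones_in(bits, 1, 4), 2);
	}
}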
/// /// ## Original /// /// [`slice::as_mut_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr_range) /// /// [`.as_mut_bitptr_range()`]: Self::as_mut_bitptr_range #[inline] #[cfg(not(tarpaulin_include))] pub fn as_mut_ptr_range(&mut self) -> Range> { self.as_mut_bitptr_range().into_range() } /// Exchanges the bit values at two indices. /// /// ## Original /// /// [`slice::swap`](https://doc.rust-lang.org/std/primitive.slice.html#method.swap) /// /// ## Panics /// /// This panics if either `a` or `b` are out of bounds. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 1]; /// bits.swap(0, 1); /// assert_eq!(bits, bits![1, 0]); /// ``` #[inline] pub fn swap(&mut self, a: usize, b: usize) { let bounds = 0 .. self.len(); self.assert_in_bounds(a, bounds.clone()); self.assert_in_bounds(b, bounds); unsafe { self.swap_unchecked(a, b); } } /// Reverses the order of bits in a bit-slice. /// /// ## Original /// /// [`slice::reverse`](https://doc.rust-lang.org/std/primitive.slice.html#method.reverse) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 0, 1, 0, 1, 1, 0, 0, 1]; /// bits.reverse(); /// assert_eq!(bits, bits![1, 0, 0, 1, 1, 0, 1, 0, 0]); /// ``` #[inline] pub fn reverse(&mut self) { let mut iter = self.as_mut_bitptr_range(); while let (Some(a), Some(b)) = (iter.next(), iter.next_back()) { unsafe { crate::ptr::swap(a, b); } } } /// Produces an iterator over each bit in the bit-slice. /// /// ## Original /// /// [`slice::iter`](https://doc.rust-lang.org/std/primitive.slice.html#method.iter) /// /// ## API Differences /// /// This iterator yields proxy-reference structures, not `&bool`. It can be /// adapted to yield `&bool` with the [`.by_refs()`] method, or `bool` with /// [`.by_vals()`]. /// /// This iterator, and its adapters, are fast. Do not try to be more clever /// than them by abusing `.as_bitptr_range()`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 1]; /// let mut iter = bits.iter(); /// /// assert!(!iter.next().unwrap()); /// assert!( iter.next().unwrap()); /// assert!( iter.next_back().unwrap()); /// assert!(!iter.next_back().unwrap()); /// assert!( iter.next().is_none()); /// ``` /// /// [`.by_refs()`]: crate::slice::Iter::by_refs /// [`.by_vals()`]: crate::slice::Iter::by_vals #[inline] pub fn iter(&self) -> Iter { Iter::new(self) } /// Produces a mutable iterator over each bit in the bit-slice. /// /// ## Original /// /// [`slice::iter_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.iter_mut) /// /// ## API Differences /// /// This iterator yields proxy-reference structures, not `&mut bool`. In /// addition, it marks each proxy as alias-tainted. /// /// If you are using this in an ordinary loop and **not** keeping multiple /// yielded proxy-references alive at the same scope, you may use the /// [`.remove_alias()`] adapter to undo the alias marking. /// /// This iterator is fast. Do not try to be more clever than it by abusing /// `.as_mut_bitptr_range()`. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 4]; /// let mut iter = bits.iter_mut(); /// /// iter.nth(1).unwrap().commit(true); // index 1 /// iter.next_back().unwrap().commit(true); // index 3 /// /// assert!(iter.next().is_some()); // index 2 /// assert!(iter.next().is_none()); // complete /// assert_eq!(bits, bits![0, 1, 0, 1]); /// ``` /// /// [`.remove_alias()`]: crate::slice::IterMut::remove_alias #[inline] pub fn iter_mut(&mut self) -> IterMut { IterMut::new(self) } /// Iterates over consecutive windowing subslices in a bit-slice. /// /// Windows are overlapping views of the bit-slice. Each window advances one /// bit from the previous, so in a bit-slice `[A, B, C, D, E]`, calling /// `.windows(3)` will yield `[A, B, C]`, `[B, C, D]`, and `[C, D, E]`. /// /// ## Original /// /// [`slice::windows`](https://doc.rust-lang.org/std/primitive.slice.html#method.windows) /// /// ## Panics /// /// This panics if `size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1]; /// let mut iter = bits.windows(3); /// /// assert_eq!(iter.next(), Some(bits![0, 1, 0])); /// assert_eq!(iter.next(), Some(bits![1, 0, 0])); /// assert_eq!(iter.next(), Some(bits![0, 0, 1])); /// assert!(iter.next().is_none()); /// ``` #[inline] pub fn windows(&self, size: usize) -> Windows { Windows::new(self, size) } /// Iterates over non-overlapping subslices of a bit-slice. /// /// Unlike `.windows()`, the subslices this yields do not overlap with each /// other. If `self.len()` is not an even multiple of `chunk_size`, then the /// last chunk yielded will be shorter. /// /// ## Original /// /// [`slice::chunks`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks) /// /// ## Sibling Methods /// /// - [`.chunks_mut()`] has the same division logic, but each yielded /// bit-slice is mutable. /// - [`.chunks_exact()`] does not yield the final chunk if it is shorter /// than `chunk_size`. /// - [`.rchunks()`] iterates from the back of the bit-slice to the front, /// with the final, possibly-shorter, segment at the front edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1]; /// let mut iter = bits.chunks(2); /// /// assert_eq!(iter.next(), Some(bits![0, 1])); /// assert_eq!(iter.next(), Some(bits![0, 0])); /// assert_eq!(iter.next(), Some(bits![1])); /// assert!(iter.next().is_none()); /// ``` /// /// [`.chunks_exact()`]: Self::chunks_exact /// [`.chunks_mut()`]: Self::chunks_mut /// [`.rchunks()`]: Self::rchunks #[inline] pub fn chunks(&self, chunk_size: usize) -> Chunks { Chunks::new(self, chunk_size) } /// Iterates over non-overlapping mutable subslices of a bit-slice. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. If you are using this in a loop that does not collect /// multiple yielded subslices for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. /// /// ## Original /// /// [`slice::chunks_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks_mut) /// /// ## Sibling Methods /// /// - [`.chunks()`] has the same division logic, but each yielded bit-slice /// is immutable. 
/// - [`.chunks_exact_mut()`] does not yield the final chunk if it is /// shorter than `chunk_size`. /// - [`.rchunks_mut()`] iterates from the back of the bit-slice to the /// front, with the final, possibly-shorter, segment at the front edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut u8, Msb0; 0; 5]; /// /// for (idx, chunk) in unsafe { /// bits.chunks_mut(2).remove_alias() /// }.enumerate() { /// chunk.store(idx + 1); /// } /// assert_eq!(bits, bits![0, 1, 1, 0, 1]); /// // ^^^^ ^^^^ ^ /// ``` /// /// [`.chunks()`]: Self::chunks /// [`.chunks_exact_mut()`]: Self::chunks_exact_mut /// [`.rchunks_mut()`]: Self::rchunks_mut /// [`.remove_alias()`]: crate::slice::ChunksMut::remove_alias #[inline] pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut { ChunksMut::new(self, chunk_size) } /// Iterates over non-overlapping subslices of a bit-slice. /// /// If `self.len()` is not an even multiple of `chunk_size`, then the last /// few bits are not yielded by the iterator at all. They can be accessed /// with the [`.remainder()`] method if the iterator is bound to a name. /// /// ## Original /// /// [`slice::chunks_exact`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks_exact) /// /// ## Sibling Methods /// /// - [`.chunks()`] yields any leftover bits at the end as a shorter chunk /// during iteration. /// - [`.chunks_exact_mut()`] has the same division logic, but each yielded /// bit-slice is mutable. /// - [`.rchunks_exact()`] iterates from the back of the bit-slice to the /// front, with the unyielded remainder segment at the front edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1]; /// let mut iter = bits.chunks_exact(2); /// /// assert_eq!(iter.next(), Some(bits![0, 1])); /// assert_eq!(iter.next(), Some(bits![0, 0])); /// assert!(iter.next().is_none()); /// assert_eq!(iter.remainder(), bits![1]); /// ``` /// /// [`.chunks()`]: Self::chunks /// [`.chunks_exact_mut()`]: Self::chunks_exact_mut /// [`.rchunks_exact()`]: Self::rchunks_exact /// [`.remainder()`]: crate::slice::ChunksExact::remainder #[inline] pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact { ChunksExact::new(self, chunk_size) } /// Iterates over non-overlapping mutable subslices of a bit-slice. /// /// If `self.len()` is not an even multiple of `chunk_size`, then the last /// few bits are not yielded by the iterator at all. They can be accessed /// with the [`.into_remainder()`] method if the iterator is bound to a /// name. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. If you are using this in a loop that does not collect /// multiple yielded subslices for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. /// /// ## Original /// /// [`slice::chunks_exact_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.chunks_exact_mut) /// /// ## Sibling Methods /// /// - [`.chunks_mut()`] yields any leftover bits at the end as a shorter /// chunk during iteration. /// - [`.chunks_exact()`] has the same division logic, but each yielded /// bit-slice is immutable. 
/// - [`.rchunks_exact_mut()`] iterates from the back of the bit-slice /// forwards, with the unyielded remainder segment at the front edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut u8, Msb0; 0; 5]; /// let mut iter = bits.chunks_exact_mut(2); /// /// for (idx, chunk) in iter.by_ref().enumerate() { /// chunk.store(idx + 1); /// } /// iter.into_remainder().store(1u8); /// /// assert_eq!(bits, bits![0, 1, 1, 0, 1]); /// // remainder ^ /// ``` /// /// [`.chunks_exact()`]: Self::chunks_exact /// [`.chunks_mut()`]: Self::chunks_mut /// [`.into_remainder()`]: crate::slice::ChunksExactMut::into_remainder /// [`.rchunks_exact_mut()`]: Self::rchunks_exact_mut /// [`.remove_alias()`]: crate::slice::ChunksExactMut::remove_alias #[inline] pub fn chunks_exact_mut( &mut self, chunk_size: usize, ) -> ChunksExactMut { ChunksExactMut::new(self, chunk_size) } /// Iterates over non-overlapping subslices of a bit-slice, from the back /// edge. /// /// Unlike `.chunks()`, this aligns its chunks to the back edge of `self`. /// If `self.len()` is not an even multiple of `chunk_size`, then the /// leftover partial chunk is `self[0 .. len % chunk_size]`. /// /// ## Original /// /// [`slice::rchunks`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks) /// /// ## Sibling Methods /// /// - [`.rchunks_mut()`] has the same division logic, but each yielded /// bit-slice is mutable. /// - [`.rchunks_exact()`] does not yield the final chunk if it is shorter /// than `chunk_size`. /// - [`.chunks()`] iterates from the front of the bit-slice to the back, /// with the final, possibly-shorter, segment at the back edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1]; /// let mut iter = bits.rchunks(2); /// /// assert_eq!(iter.next(), Some(bits![0, 1])); /// assert_eq!(iter.next(), Some(bits![1, 0])); /// assert_eq!(iter.next(), Some(bits![0])); /// assert!(iter.next().is_none()); /// ``` /// /// [`.chunks()`]: Self::chunks /// [`.rchunks_exact()`]: Self::rchunks_exact /// [`.rchunks_mut()`]: Self::rchunks_mut #[inline] pub fn rchunks(&self, chunk_size: usize) -> RChunks { RChunks::new(self, chunk_size) } /// Iterates over non-overlapping mutable subslices of a bit-slice, from the /// back edge. /// /// Unlike `.chunks_mut()`, this aligns its chunks to the back edge of /// `self`. If `self.len()` is not an even multiple of `chunk_size`, then /// the leftover partial chunk is `self[0 .. len % chunk_size]`. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. If you are using this in a loop that does not collect /// multiple yielded values for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. /// /// ## Original /// /// [`slice::rchunks_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks_mut) /// /// ## Sibling Methods /// /// - [`.rchunks()`] has the same division logic, but each yielded bit-slice /// is immutable. /// - [`.rchunks_exact_mut()`] does not yield the final chunk if it is /// shorter than `chunk_size`. /// - [`.chunks_mut()`] iterates from the front of the bit-slice to the /// back, with the final, possibly-shorter, segment at the back edge. 
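
//  [Editor's illustrative sketch -- not part of the crate source.]
//  The chunking iterators pair naturally with the `BitField`
//  `load`/`store` API (available through the prelude): fixed-width
//  fields can be packed into, and unpacked from, a bit-slice one chunk
//  at a time. The nibble width here is an arbitrary assumption.
#[cfg(test)]
mod chunked_fields_sketch {
	use bitvec::prelude::*;

	#[test]
	fn pack_and_unpack_nibbles() {
		let bits = bits![mut u8, Msb0; 0; 12];
		//  Store three 4-bit fields, one per chunk.
		for (chunk, value) in bits.chunks_exact_mut(4).zip([0xAu8, 0x3, 0x5]) {
			chunk.store_be(value);
		}
		let unpacked: Vec<u8> = bits
			.chunks_exact(4)
			.map(|chunk| chunk.load_be::<u8>())
			.collect();
		assert_eq!(unpacked, [0xA, 0x3, 0x5]);
	}
}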
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut u8, Msb0; 0; 5]; /// for (idx, chunk) in unsafe { /// bits.rchunks_mut(2).remove_alias() /// }.enumerate() { /// chunk.store(idx + 1); /// } /// assert_eq!(bits, bits![1, 1, 0, 0, 1]); /// // remainder ^ ^^^^ ^^^^ /// ``` /// /// [`.chunks_mut()`]: Self::chunks_mut /// [`.rchunks()`]: Self::rchunks /// [`.rchunks_exact_mut()`]: Self::rchunks_exact_mut /// [`.remove_alias()`]: crate::slice::RChunksMut::remove_alias #[inline] pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut { RChunksMut::new(self, chunk_size) } /// Iterates over non-overlapping subslices of a bit-slice, from the back /// edge. /// /// If `self.len()` is not an even multiple of `chunk_size`, then the first /// few bits are not yielded by the iterator at all. They can be accessed /// with the [`.remainder()`] method if the iterator is bound to a name. /// /// ## Original /// /// [`slice::rchunks_exact`](https://doc.rust-lang.org/std/primitive.slice.html#method.rchunks_exact) /// /// ## Sibling Methods /// /// - [`.rchunks()`] yields any leftover bits at the front as a shorter /// chunk during iteration. /// - [`.rchunks_exact_mut()`] has the same division logic, but each yielded /// bit-slice is mutable. /// - [`.chunks_exact()`] iterates from the front of the bit-slice to the /// back, with the unyielded remainder segment at the back edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1]; /// let mut iter = bits.rchunks_exact(2); /// /// assert_eq!(iter.next(), Some(bits![0, 1])); /// assert_eq!(iter.next(), Some(bits![1, 0])); /// assert!(iter.next().is_none()); /// assert_eq!(iter.remainder(), bits![0]); /// ``` /// /// [`.chunks_exact()`]: Self::chunks_exact /// [`.rchunks()`]: Self::rchunks /// [`.rchunks_exact_mut()`]: Self::rchunks_exact_mut /// [`.remainder()`]: crate::slice::RChunksExact::remainder #[inline] pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact { RChunksExact::new(self, chunk_size) } /// Iterates over non-overlapping mutable subslices of a bit-slice, from the /// back edge. /// /// If `self.len()` is not an even multiple of `chunk_size`, then the first /// few bits are not yielded by the iterator at all. They can be accessed /// with the [`.into_remainder()`] method if the iterator is bound to a /// name. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. If you are using this in a loop that does not collect /// multiple yielded subslices for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. /// /// ## Sibling Methods /// /// - [`.rchunks_mut()`] yields any leftover bits at the front as a shorter /// chunk during iteration. /// - [`.rchunks_exact()`] has the same division logic, but each yielded /// bit-slice is immutable. /// - [`.chunks_exact_mut()`] iterates from the front of the bit-slice /// backwards, with the unyielded remainder segment at the back edge. /// /// ## Panics /// /// This panics if `chunk_size` is `0`. 
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut u8, Msb0; 0; 5];
/// let mut iter = bits.rchunks_exact_mut(2);
///
/// for (idx, chunk) in iter.by_ref().enumerate() {
///   chunk.store(idx + 1);
/// }
/// iter.into_remainder().store(1u8);
///
/// assert_eq!(bits, bits![1, 1, 0, 0, 1]);
/// //           remainder ^
/// ```
///
/// [`.chunks_exact_mut()`]: Self::chunks_exact_mut
/// [`.into_remainder()`]: crate::slice::RChunksExactMut::into_remainder
/// [`.rchunks_exact()`]: Self::rchunks_exact
/// [`.rchunks_mut()`]: Self::rchunks_mut
/// [`.remove_alias()`]: crate::slice::RChunksExactMut::remove_alias
#[inline]
pub fn rchunks_exact_mut(
	&mut self,
	chunk_size: usize,
) -> RChunksExactMut<T, O> {
	RChunksExactMut::new(self.alias_mut(), chunk_size)
}

/// Splits a bit-slice into two parts at an index.
///
/// The returned bit-slices are `self[.. mid]` and `self[mid ..]`. `mid` is
/// included in the right bit-slice, not the left.
///
/// If `mid` is `0` then the left bit-slice is empty; if it is `self.len()`
/// then the right bit-slice is empty.
///
/// This method guarantees that even when either partition is empty, the
/// encoded bit-pointer values of the bit-slice references are `&self[0]`
/// and `&self[mid]`.
///
/// ## Original
///
/// [`slice::split_at`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_at)
///
/// ## Panics
///
/// This panics if `mid` is greater than `self.len()`. It is allowed to be
/// equal to the length, in which case the right bit-slice is simply empty.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 0, 0, 1, 1, 1];
/// let base = bits.as_bitptr();
///
/// let (a, b) = bits.split_at(0);
/// assert_eq!(unsafe { a.as_bitptr().offset_from(base) }, 0);
/// assert_eq!(unsafe { b.as_bitptr().offset_from(base) }, 0);
///
/// let (a, b) = bits.split_at(6);
/// assert_eq!(unsafe { b.as_bitptr().offset_from(base) }, 6);
///
/// let (a, b) = bits.split_at(3);
/// assert_eq!(a, bits![0; 3]);
/// assert_eq!(b, bits![1; 3]);
/// ```
#[inline]
pub fn split_at(&self, mid: usize) -> (&Self, &Self) {
	self.assert_in_bounds(mid, 0 ..= self.len());
	unsafe { self.split_at_unchecked(mid) }
}

/// Splits a mutable bit-slice into two parts at an index.
///
/// The returned bit-slices are `self[.. mid]` and `self[mid ..]`. `mid` is
/// included in the right bit-slice, not the left.
///
/// If `mid` is `0` then the left bit-slice is empty; if it is `self.len()`
/// then the right bit-slice is empty.
///
/// This method guarantees that even when either partition is empty, the
/// encoded bit-pointer values of the bit-slice references are `&self[0]`
/// and `&self[mid]`.
///
/// ## Original
///
/// [`slice::split_at_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_at_mut)
///
/// ## API Differences
///
/// The end bits of the left half and the start bits of the right half might
/// be stored in the same memory element. In order to avoid breaking
/// `bitvec`’s memory-safety guarantees, both bit-slices are marked as
/// `T::Alias`. This marking allows them to be used without interfering with
/// each other when they interact with memory.
///
/// ## Panics
///
/// This panics if `mid` is greater than `self.len()`. It is allowed to be
/// equal to the length, in which case the right bit-slice is simply empty.
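///
/// An illustrative sketch (editorial; not part of the upstream docs): the
/// `T::Alias` marking lets both halves be written independently, even when
/// they share a memory element.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0; 8];
/// let (left, right) = bits.split_at_mut(4);
/// left.fill(true);
/// right.set(0, true);
/// assert_eq!(bits, bits![1, 1, 1, 1, 1, 0, 0, 0]);
/// ```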
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut u8, Msb0; 0; 6]; /// let base = bits.as_mut_bitptr(); /// /// let (a, b) = bits.split_at_mut(0); /// assert_eq!(unsafe { a.as_mut_bitptr().offset_from(base) }, 0); /// assert_eq!(unsafe { b.as_mut_bitptr().offset_from(base) }, 0); /// /// let (a, b) = bits.split_at_mut(6); /// assert_eq!(unsafe { b.as_mut_bitptr().offset_from(base) }, 6); /// /// let (a, b) = bits.split_at_mut(3); /// a.store(3); /// b.store(5); /// /// assert_eq!(bits, bits![0, 1, 1, 1, 0, 1]); /// ``` #[inline] pub fn split_at_mut( &mut self, mid: usize, ) -> (&mut BitSlice, &mut BitSlice) { self.assert_in_bounds(mid, 0 ..= self.len()); unsafe { self.split_at_unchecked_mut(mid) } } /// Iterates over subslices separated by bits that match a predicate. The /// matched bit is *not* contained in the yielded bit-slices. /// /// ## Original /// /// [`slice::split`](https://doc.rust-lang.org/std/primitive.slice.html#method.split) /// /// ## API Differences /// /// The predicate function receives the index being tested as well as the /// bit value at that index. This allows the predicate to have more than one /// bit of information about the bit-slice being traversed. /// /// ## Sibling Methods /// /// - [`.split_mut()`] has the same splitting logic, but each yielded /// bit-slice is mutable. /// - [`.split_inclusive()`] includes the matched bit in the yielded /// bit-slice. /// - [`.rsplit()`] iterates from the back of the bit-slice instead of the /// front. /// - [`.splitn()`] times out after `n` yields. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 1, 0]; /// // ^ /// let mut iter = bits.split(|pos, _bit| pos % 3 == 2); /// /// assert_eq!(iter.next().unwrap(), bits![0, 1]); /// assert_eq!(iter.next().unwrap(), bits![0]); /// assert!(iter.next().is_none()); /// ``` /// /// If the first bit is matched, then an empty bit-slice will be the first /// item yielded by the iterator. Similarly, if the last bit in the /// bit-slice matches, then an empty bit-slice will be the last item /// yielded. /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1]; /// // ^ /// let mut iter = bits.split(|_pos, bit| *bit); /// /// assert_eq!(iter.next().unwrap(), bits![0; 2]); /// assert!(iter.next().unwrap().is_empty()); /// assert!(iter.next().is_none()); /// ``` /// /// If two matched bits are directly adjacent, then an empty bit-slice will /// be yielded between them: /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![1, 0, 0, 1]; /// // ^ ^ /// let mut iter = bits.split(|_pos, bit| !*bit); /// /// assert_eq!(iter.next().unwrap(), bits![1]); /// assert!(iter.next().unwrap().is_empty()); /// assert_eq!(iter.next().unwrap(), bits![1]); /// assert!(iter.next().is_none()); /// ``` /// /// [`.rsplit()`]: Self::rsplit /// [`.splitn()`]: Self::splitn /// [`.split_inclusive()`]: Self::split_inclusive /// [`.split_mut()`]: Self::split_mut #[inline] pub fn split(&self, pred: F) -> Split where F: FnMut(usize, &bool) -> bool { Split::new(self, pred) } /// Iterates over mutable subslices separated by bits that match a /// predicate. The matched bit is *not* contained in the yielded bit-slices. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. 
If you are using this in a loop that does not collect /// multiple yielded subslices for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. /// /// ## Original /// /// [`slice::split_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_mut) /// /// ## API Differences /// /// The predicate function receives the index being tested as well as the /// bit value at that index. This allows the predicate to have more than one /// bit of information about the bit-slice being traversed. /// /// ## Sibling Methods /// /// - [`.split()`] has the same splitting logic, but each yielded bit-slice /// is immutable. /// - [`.split_inclusive_mut()`] includes the matched bit in the yielded /// bit-slice. /// - [`.rsplit_mut()`] iterates from the back of the bit-slice instead of /// the front. /// - [`.splitn_mut()`] times out after `n` yields. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 0, 1, 0, 1, 0]; /// // ^ ^ /// for group in bits.split_mut(|_pos, bit| *bit) { /// group.set(0, true); /// } /// assert_eq!(bits, bits![1, 0, 1, 1, 1, 1]); /// ``` /// /// [`.remove_alias()`]: crate::slice::SplitMut::remove_alias /// [`.rsplit_mut()`]: Self::rsplit_mut /// [`.split()`]: Self::split /// [`.split_inclusive_mut()`]: Self::split_inclusive_mut /// [`.splitn_mut()`]: Self::splitn_mut #[inline] pub fn split_mut(&mut self, pred: F) -> SplitMut where F: FnMut(usize, &bool) -> bool { SplitMut::new(self.alias_mut(), pred) } /// Iterates over subslices separated by bits that match a predicate. Unlike /// `.split()`, this *does* include the matching bit as the last bit in the /// yielded bit-slice. /// /// ## Original /// /// [`slice::split_inclusive`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_inclusive) /// /// ## API Differences /// /// The predicate function receives the index being tested as well as the /// bit value at that index. This allows the predicate to have more than one /// bit of information about the bit-slice being traversed. /// /// ## Sibling Methods /// /// - [`.split_inclusive_mut()`] has the same splitting logic, but each /// yielded bit-slice is mutable. /// - [`.split()`] does not include the matched bit in the yielded /// bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1, 0, 1]; /// // ^ ^ /// let mut iter = bits.split_inclusive(|_pos, bit| *bit); /// /// assert_eq!(iter.next().unwrap(), bits![0, 0, 1]); /// assert_eq!(iter.next().unwrap(), bits![0, 1]); /// assert!(iter.next().is_none()); /// ``` /// /// [`.split()`]: Self::split /// [`.split_inclusive_mut()`]: Self::split_inclusive_mut #[inline] pub fn split_inclusive(&self, pred: F) -> SplitInclusive where F: FnMut(usize, &bool) -> bool { SplitInclusive::new(self, pred) } /// Iterates over mutable subslices separated by bits that match a /// predicate. Unlike `.split_mut()`, this *does* include the matching bit /// as the last bit in the bit-slice. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. If you are using this in a loop that does not collect /// multiple yielded subslices for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. 
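///
/// A hedged sketch (editorial; not part of the upstream docs): the same
/// `.remove_alias()` pattern shown for `.split_mut()` applies here, since
/// each yielded group is dropped before the next is produced.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0, 1, 0, 0, 1, 0];
/// for group in unsafe {
///   bits.split_inclusive_mut(|_pos, bit| *bit).remove_alias()
/// } {
///   group.set(0, true);
/// }
/// assert_eq!(bits, bits![1, 1, 1, 0, 1, 1]);
/// ```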
/// /// ## Original /// /// [`slice::split_inclusive_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.split_inclusive_mut) /// /// ## API Differences /// /// The predicate function receives the index being tested as well as the /// bit value at that index. This allows the predicate to have more than one /// bit of information about the bit-slice being traversed. /// /// ## Sibling Methods /// /// - [`.split_inclusive()`] has the same splitting logic, but each yielded /// bit-slice is immutable. /// - [`.split_mut()`] does not include the matched bit in the yielded /// bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 0, 0, 0, 0]; /// // ^ /// for group in bits.split_inclusive_mut(|pos, _bit| pos % 3 == 2) { /// group.set(0, true); /// } /// assert_eq!(bits, bits![1, 0, 0, 1, 0]); /// ``` /// /// [`.remove_alias()`]: crate::slice::SplitInclusiveMut::remove_alias /// [`.split_inclusive()`]: Self::split_inclusive /// [`.split_mut()`]: Self::split_mut #[inline] pub fn split_inclusive_mut( &mut self, pred: F, ) -> SplitInclusiveMut where F: FnMut(usize, &bool) -> bool, { SplitInclusiveMut::new(self.alias_mut(), pred) } /// Iterates over subslices separated by bits that match a predicate, from /// the back edge. The matched bit is *not* contained in the yielded /// bit-slices. /// /// ## Original /// /// [`slice::rsplit`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplit) /// /// ## API Differences /// /// The predicate function receives the index being tested as well as the /// bit value at that index. This allows the predicate to have more than one /// bit of information about the bit-slice being traversed. /// /// ## Sibling Methods /// /// - [`.rsplit_mut()`] has the same splitting logic, but each yielded /// bit-slice is mutable. /// - [`.split()`] iterates from the front of the bit-slice instead of the /// back. /// - [`.rsplitn()`] times out after `n` yields. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 1, 0]; /// // ^ /// let mut iter = bits.rsplit(|pos, _bit| pos % 3 == 2); /// /// assert_eq!(iter.next().unwrap(), bits![0]); /// assert_eq!(iter.next().unwrap(), bits![0, 1]); /// assert!(iter.next().is_none()); /// ``` /// /// If the last bit is matched, then an empty bit-slice will be the first /// item yielded by the iterator. Similarly, if the first bit in the /// bit-slice matches, then an empty bit-slice will be the last item /// yielded. 
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 0, 1];
/// //                     ^
/// let mut iter = bits.rsplit(|_pos, bit| *bit);
///
/// assert!(iter.next().unwrap().is_empty());
/// assert_eq!(iter.next().unwrap(), bits![0; 2]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched bits are directly adjacent, then an empty bit-slice will
/// be yielded between them:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![1, 0, 0, 1];
/// //                  ^  ^
/// let mut iter = bits.rsplit(|_pos, bit| !*bit);
///
/// assert_eq!(iter.next().unwrap(), bits![1]);
/// assert!(iter.next().unwrap().is_empty());
/// assert_eq!(iter.next().unwrap(), bits![1]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`.rsplitn()`]: Self::rsplitn
/// [`.rsplit_mut()`]: Self::rsplit_mut
/// [`.split()`]: Self::split
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<T, O, F>
where F: FnMut(usize, &bool) -> bool {
	RSplit::new(self, pred)
}

/// Iterates over mutable subslices separated by bits that match a
/// predicate, from the back. The matched bit is *not* contained in the
/// yielded bit-slices.
///
/// Iterators do not require that each yielded item is destroyed before the
/// next is produced. This means that each bit-slice yielded must be marked
/// as aliased. If you are using this in a loop that does not collect
/// multiple yielded subslices for the same scope, then you can remove the
/// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
/// the iterator.
///
/// ## Original
///
/// [`slice::rsplit_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplit_mut)
///
/// ## API Differences
///
/// The predicate function receives the index being tested as well as the
/// bit value at that index. This allows the predicate to have more than one
/// bit of information about the bit-slice being traversed.
///
/// ## Sibling Methods
///
/// - [`.rsplit()`] has the same splitting logic, but each yielded
///   bit-slice is immutable.
/// - [`.split_mut()`] iterates from the front of the bit-slice instead of
///   the back.
/// - [`.rsplitn_mut()`] times out after `n` yields.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0, 0, 1, 0, 1, 0];
/// //                            ^     ^
/// for group in bits.rsplit_mut(|_pos, bit| *bit) {
///   group.set(0, true);
/// }
/// assert_eq!(bits, bits![1, 0, 1, 1, 1, 1]);
/// ```
///
/// [`.remove_alias()`]: crate::slice::RSplitMut::remove_alias
/// [`.rsplit()`]: Self::rsplit
/// [`.rsplitn_mut()`]: Self::rsplitn_mut
/// [`.split_mut()`]: Self::split_mut
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, O, F>
where F: FnMut(usize, &bool) -> bool {
	RSplitMut::new(self.alias_mut(), pred)
}

/// Iterates over subslices separated by bits that match a predicate, giving
/// up after yielding `n` times. The `n`th yield contains the rest of the
/// bit-slice. As with `.split()`, the yielded bit-slices do not contain the
/// matched bit.
///
/// ## Original
///
/// [`slice::splitn`](https://doc.rust-lang.org/std/primitive.slice.html#method.splitn)
///
/// ## API Differences
///
/// The predicate function receives the index being tested as well as the
/// bit value at that index. This allows the predicate to have more than one
/// bit of information about the bit-slice being traversed.
///
/// ## Sibling Methods
///
/// - [`.splitn_mut()`] has the same splitting logic, but each yielded
///   bit-slice is mutable.
/// - [`.rsplitn()`] iterates from the back of the bit-slice instead of the
///   front.
/// - [`.split()`] has the same splitting logic, but never times out.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 0, 1, 0, 1, 0];
/// let mut iter = bits.splitn(2, |_pos, bit| *bit);
///
/// assert_eq!(iter.next().unwrap(), bits![0, 0]);
/// assert_eq!(iter.next().unwrap(), bits![0, 1, 0]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`.rsplitn()`]: Self::rsplitn
/// [`.split()`]: Self::split
/// [`.splitn_mut()`]: Self::splitn_mut
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<T, O, F>
where F: FnMut(usize, &bool) -> bool {
	SplitN::new(self, pred, n)
}

/// Iterates over mutable subslices separated by bits that match a
/// predicate, giving up after yielding `n` times. The `n`th yield contains
/// the rest of the bit-slice. As with `.split_mut()`, the yielded
/// bit-slices do not contain the matched bit.
///
/// Iterators do not require that each yielded item is destroyed before the
/// next is produced. This means that each bit-slice yielded must be marked
/// as aliased. If you are using this in a loop that does not collect
/// multiple yielded subslices for the same scope, then you can remove the
/// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on
/// the iterator.
///
/// ## Original
///
/// [`slice::splitn_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.splitn_mut)
///
/// ## API Differences
///
/// The predicate function receives the index being tested as well as the
/// bit value at that index. This allows the predicate to have more than one
/// bit of information about the bit-slice being traversed.
///
/// ## Sibling Methods
///
/// - [`.splitn()`] has the same splitting logic, but each yielded
///   bit-slice is immutable.
/// - [`.rsplitn_mut()`] iterates from the back of the bit-slice instead of
///   the front.
/// - [`.split_mut()`] has the same splitting logic, but never times out.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0, 0, 1, 0, 1, 0];
/// for group in bits.splitn_mut(2, |_pos, bit| *bit) {
///   group.set(0, true);
/// }
/// assert_eq!(bits, bits![1, 0, 1, 1, 1, 0]);
/// ```
///
/// [`.remove_alias()`]: crate::slice::SplitNMut::remove_alias
/// [`.rsplitn_mut()`]: Self::rsplitn_mut
/// [`.split_mut()`]: Self::split_mut
/// [`.splitn()`]: Self::splitn
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<T, O, F>
where F: FnMut(usize, &bool) -> bool {
	SplitNMut::new(self.alias_mut(), pred, n)
}

/// Iterates over subslices separated by bits that match a predicate from
/// the back edge, giving up after yielding `n` times. The `n`th yield
/// contains the rest of the bit-slice. As with `.split()`, the yielded
/// bit-slices do not contain the matched bit.
///
/// ## Original
///
/// [`slice::rsplitn`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplitn)
///
/// ## API Differences
///
/// The predicate function receives the index being tested as well as the
/// bit value at that index. This allows the predicate to have more than one
/// bit of information about the bit-slice being traversed.
///
/// ## Sibling Methods
///
/// - [`.rsplitn_mut()`] has the same splitting logic, but each yielded
///   bit-slice is mutable.
/// - [`.splitn()`] iterates from the front of the bit-slice instead of the
///   back.
/// - [`.rsplit()`] has the same splitting logic, but never times out.
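///
/// A contrast sketch (editorial; not part of the upstream docs): `splitn`
/// and `rsplitn` apply the same division logic, but count their `n` yields
/// from opposite ends of the bit-slice.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 0, 1, 0];
/// let mut front = bits.splitn(2, |_pos, bit| *bit);
/// assert_eq!(front.next().unwrap(), bits![0, 0]);
/// assert_eq!(front.next().unwrap(), bits![0]);
///
/// let mut back = bits.rsplitn(2, |_pos, bit| *bit);
/// assert_eq!(back.next().unwrap(), bits![0]);
/// assert_eq!(back.next().unwrap(), bits![0, 0]);
/// ```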
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1, 1, 0]; /// // ^ /// let mut iter = bits.rsplitn(2, |_pos, bit| *bit); /// /// assert_eq!(iter.next().unwrap(), bits![0]); /// assert_eq!(iter.next().unwrap(), bits![0, 0, 1]); /// assert!(iter.next().is_none()); /// ``` /// /// [`.rsplit()`]: Self::rsplit /// [`.rsplitn_mut()`]: Self::rsplitn_mut /// [`.splitn()`]: Self::splitn #[inline] pub fn rsplitn(&self, n: usize, pred: F) -> RSplitN where F: FnMut(usize, &bool) -> bool { RSplitN::new(self, pred, n) } /// Iterates over mutable subslices separated by bits that match a /// predicate from the back edge, giving up after yielding `n` times. The /// `n`th yield contains the rest of the bit-slice. As with `.split_mut()`, /// the yielded bit-slices do not contain the matched bit. /// /// Iterators do not require that each yielded item is destroyed before the /// next is produced. This means that each bit-slice yielded must be marked /// as aliased. If you are using this in a loop that does not collect /// multiple yielded subslices for the same scope, then you can remove the /// alias marking by calling the (`unsafe`) method [`.remove_alias()`] on /// the iterator. /// /// ## Original /// /// [`slice::rsplitn_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.rsplitn_mut) /// /// ## API Differences /// /// The predicate function receives the index being tested as well as the /// bit value at that index. This allows the predicate to have more than one /// bit of information about the bit-slice being traversed. /// /// ## Sibling Methods /// /// - [`.rsplitn()`] has the same splitting logic, but each yielded /// bit-slice is immutable. /// - [`.splitn_mut()`] iterates from the front of the bit-slice instead of /// the back. /// - [`.rsplit_mut()`] has the same splitting logic, but never times out. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 0, 1, 0, 0, 1, 0, 0, 0]; /// for group in bits.rsplitn_mut(2, |_idx, bit| *bit) { /// group.set(0, true); /// } /// assert_eq!(bits, bits![1, 0, 1, 0, 0, 1, 1, 0, 0]); /// // ^ group 2 ^ group 1 /// ``` /// /// [`.remove_alias()`]: crate::slice::RSplitNMut::remove_alias /// [`.rsplitn()`]: Self::rsplitn /// [`.rsplit_mut()`]: Self::rsplit_mut /// [`.splitn_mut()`]: Self::splitn_mut #[inline] pub fn rsplitn_mut(&mut self, n: usize, pred: F) -> RSplitNMut where F: FnMut(usize, &bool) -> bool { RSplitNMut::new(self.alias_mut(), pred, n) } /// Tests if the bit-slice contains the given sequence anywhere within it. /// /// This scans over `self.windows(other.len())` until one of the windows /// matches. The search key does not need to share type parameters with the /// bit-slice being tested, as the comparison is bit-wise. However, sharing /// type parameters will accelerate the comparison. /// /// ## Original /// /// [`slice::contains`](https://doc.rust-lang.org/std/primitive.slice.html#method.contains) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1, 0, 1, 1, 0, 0]; /// assert!( bits.contains(bits![0, 1, 1, 0])); /// assert!(!bits.contains(bits![1, 0, 0, 1])); /// ``` #[inline] pub fn contains(&self, other: &BitSlice) -> bool where T2: BitStore, O2: BitOrder, { self.len() >= other.len() && self.windows(other.len()).any(|window| window == other) } /// Tests if the bit-slice begins with the given sequence. 
///
/// The search key does not need to share type parameters with the
/// bit-slice being tested, as the comparison is bit-wise. However, sharing
/// type parameters will accelerate the comparison.
///
/// ## Original
///
/// [`slice::starts_with`](https://doc.rust-lang.org/std/primitive.slice.html#method.starts_with)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 1, 0];
/// assert!( bits.starts_with(bits![0, 1]));
/// assert!(!bits.starts_with(bits![1, 0]));
/// ```
///
/// This always returns `true` if the needle is empty:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 0];
/// let empty = bits![];
/// assert!(bits.starts_with(empty));
/// assert!(empty.starts_with(empty));
/// ```
#[inline]
pub fn starts_with<T2, O2>(&self, needle: &BitSlice<T2, O2>) -> bool
where
	T2: BitStore,
	O2: BitOrder,
{
	self.get(.. needle.len())
		.map(|slice| slice == needle)
		.unwrap_or(false)
}

/// Tests if the bit-slice ends with the given sequence.
///
/// The search key does not need to share type parameters with the
/// bit-slice being tested, as the comparison is bit-wise. However, sharing
/// type parameters will accelerate the comparison.
///
/// ## Original
///
/// [`slice::ends_with`](https://doc.rust-lang.org/std/primitive.slice.html#method.ends_with)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 1, 0];
/// assert!( bits.ends_with(bits![1, 0]));
/// assert!(!bits.ends_with(bits![0, 1]));
/// ```
///
/// This always returns `true` if the needle is empty:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 0];
/// let empty = bits![];
/// assert!(bits.ends_with(empty));
/// assert!(empty.ends_with(empty));
/// ```
#[inline]
pub fn ends_with<T2, O2>(&self, needle: &BitSlice<T2, O2>) -> bool
where
	T2: BitStore,
	O2: BitOrder,
{
	//  Guard the subtraction so that a needle longer than `self` reports
	//  `false` instead of underflowing.
	self.len()
		.checked_sub(needle.len())
		.and_then(|start| self.get(start ..))
		.map(|slice| slice == needle)
		.unwrap_or(false)
}

/// Removes a prefix bit-slice, if present.
///
/// Like [`.starts_with()`], the search key does not need to share type
/// parameters with the bit-slice being stripped. If
/// `self.starts_with(prefix)`, then this returns `Some(&self[prefix.len()
/// ..])`, otherwise it returns `None`.
///
/// ## Original
///
/// [`slice::strip_prefix`](https://doc.rust-lang.org/std/primitive.slice.html#method.strip_prefix)
///
/// ## API Differences
///
/// `BitSlice` does not support pattern searches; instead, it permits `self`
/// and `prefix` to differ in type parameters.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 0, 0, 1, 0, 1, 1, 0];
/// assert_eq!(bits.strip_prefix(bits![0, 1]).unwrap(), bits[2 ..]);
/// assert_eq!(bits.strip_prefix(bits![0, 1, 0, 0]).unwrap(), bits[4 ..]);
/// assert!(bits.strip_prefix(bits![1, 0]).is_none());
/// ```
///
/// [`.starts_with()`]: Self::starts_with
#[inline]
pub fn strip_prefix<T2, O2>(
	&self,
	prefix: &BitSlice<T2, O2>,
) -> Option<&Self>
where
	T2: BitStore,
	O2: BitOrder,
{
	if self.starts_with(prefix) {
		self.get(prefix.len() ..)
	}
	else {
		None
	}
}

/// Removes a suffix bit-slice, if present.
///
/// Like [`.ends_with()`], the search key does not need to share type
/// parameters with the bit-slice being stripped. If
/// `self.ends_with(suffix)`, then this returns `Some(&self[.. self.len() -
/// suffix.len()])`, otherwise it returns `None`.
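///
/// A combined sketch (editorial; not part of the upstream docs): prefix
/// and suffix stripping chain naturally through `Option` combinators.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 1, 0, 1];
/// let trimmed = bits
///   .strip_prefix(bits![0, 1])
///   .and_then(|rest| rest.strip_suffix(bits![1]))
///   .unwrap();
/// assert_eq!(trimmed, bits![1, 0]);
/// ```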
///
/// ## Original
///
/// [`slice::strip_suffix`](https://doc.rust-lang.org/std/primitive.slice.html#method.strip_suffix)
///
/// ## API Differences
///
/// `BitSlice` does not support pattern searches; instead, it permits `self`
/// and `suffix` to differ in type parameters.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 0, 0, 1, 0, 1, 1, 0];
/// assert_eq!(bits.strip_suffix(bits![1, 0]).unwrap(), bits[.. 7]);
/// assert_eq!(bits.strip_suffix(bits![0, 1, 1, 0]).unwrap(), bits[.. 5]);
/// assert!(bits.strip_suffix(bits![0, 1]).is_none());
/// ```
///
/// [`.ends_with()`]: Self::ends_with
#[inline]
pub fn strip_suffix<T2, O2>(
	&self,
	suffix: &BitSlice<T2, O2>,
) -> Option<&Self>
where
	T2: BitStore,
	O2: BitOrder,
{
	if self.ends_with(suffix) {
		self.get(.. self.len() - suffix.len())
	}
	else {
		None
	}
}

/// Rotates the contents of a bit-slice to the left (towards the zero
/// index).
///
/// This essentially splits the bit-slice at `by`, then exchanges the two
/// pieces. `self[by ..]` becomes the first section, and is then followed by
/// `self[.. by]`.
///
/// The implementation is batch-accelerated where possible. It should have a
/// runtime complexity much lower than `O(by)`.
///
/// ## Original
///
/// [`slice::rotate_left`](https://doc.rust-lang.org/std/primitive.slice.html#method.rotate_left)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0, 0, 1, 0, 1, 0];
/// //       split occurs here ^
/// bits.rotate_left(2);
/// assert_eq!(bits, bits![1, 0, 1, 0, 0, 0]);
/// ```
#[inline]
pub fn rotate_left(&mut self, mut by: usize) {
	let len = self.len();
	assert!(
		by <= len,
		"bit-slices cannot be rotated by more than their length",
	);
	if by == 0 || by == len {
		return;
	}
	let mut tmp = BitArray::<T::Mem, O>::ZERO;
	while by > 0 {
		let shamt = cmp::min(mem::bits_of::<T::Mem>(), by);
		unsafe {
			let tmp_bits = tmp.get_unchecked_mut(.. shamt);
			tmp_bits.clone_from_bitslice(self.get_unchecked(.. shamt));
			self.copy_within_unchecked(shamt .., 0);
			self.get_unchecked_mut(len - shamt ..)
				.clone_from_bitslice(tmp_bits);
		}
		by -= shamt;
	}
}

/// Rotates the contents of a bit-slice to the right (away from the zero
/// index).
///
/// This essentially splits the bit-slice at `self.len() - by`, then
/// exchanges the two pieces. `self[len - by ..]` becomes the first section,
/// and is then followed by `self[.. len - by]`.
///
/// The implementation is batch-accelerated where possible. It should have a
/// runtime complexity much lower than `O(by)`.
///
/// ## Original
///
/// [`slice::rotate_right`](https://doc.rust-lang.org/std/primitive.slice.html#method.rotate_right)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0, 0, 1, 1, 1, 0];
/// //             split occurs here ^
/// bits.rotate_right(2);
/// assert_eq!(bits, bits![1, 0, 0, 0, 1, 1]);
/// ```
#[inline]
pub fn rotate_right(&mut self, mut by: usize) {
	let len = self.len();
	assert!(
		by <= len,
		"bit-slices cannot be rotated by more than their length",
	);
	if by == 0 || by == len {
		return;
	}
	let mut tmp = BitArray::<T::Mem, O>::ZERO;
	while by > 0 {
		let shamt = cmp::min(mem::bits_of::<T::Mem>(), by);
		let mid = len - shamt;
		unsafe {
			let tmp_bits = tmp.get_unchecked_mut(.. shamt);
			tmp_bits.clone_from_bitslice(self.get_unchecked(mid ..));
			self.copy_within_unchecked(.. mid, shamt);
			self.get_unchecked_mut(.. shamt)
				.clone_from_bitslice(tmp_bits);
		}
		by -= shamt;
	}
}

/// Fills the bit-slice with a given bit.
///
/// This is a recent stabilization in the standard library.
/// `bitvec` previously offered this behavior as the novel API
/// `.set_all()`. That method name is now removed in favor of this
/// standard-library analogue.
///
/// ## Original
///
/// [`slice::fill`](https://doc.rust-lang.org/std/primitive.slice.html#method.fill)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0; 5];
/// bits.fill(true);
/// assert_eq!(bits, bits![1; 5]);
/// ```
#[inline]
pub fn fill(&mut self, value: bool) {
	let fill = if value { T::Mem::ALL } else { T::Mem::ZERO };
	match self.domain_mut() {
		Domain::Enclave(mut elem) => {
			elem.store_value(fill);
		},
		Domain::Region { head, body, tail } => {
			if let Some(mut elem) = head {
				elem.store_value(fill);
			}
			for elem in body {
				elem.store_value(fill);
			}
			if let Some(mut elem) = tail {
				elem.store_value(fill);
			}
		},
	}
}

/// Fills the bit-slice with bits produced by a generator function.
///
/// ## Original
///
/// [`slice::fill_with`](https://doc.rust-lang.org/std/primitive.slice.html#method.fill_with)
///
/// ## API Differences
///
/// The generator function receives the index of the bit being initialized
/// as an argument.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0; 5];
/// bits.fill_with(|idx| idx % 2 == 0);
/// assert_eq!(bits, bits![1, 0, 1, 0, 1]);
/// ```
#[inline]
pub fn fill_with<F>(&mut self, mut func: F)
where F: FnMut(usize) -> bool {
	for (idx, ptr) in self.as_mut_bitptr_range().enumerate() {
		unsafe {
			ptr.write(func(idx));
		}
	}
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "use `.clone_from_bitslice()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn clone_from_slice<T2, O2>(&mut self, src: &BitSlice<T2, O2>)
where
	T2: BitStore,
	O2: BitOrder,
{
	self.clone_from_bitslice(src);
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "use `.copy_from_bitslice()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn copy_from_slice(&mut self, src: &Self) {
	self.copy_from_bitslice(src)
}

/// Copies a span of bits to another location in the bit-slice.
///
/// `src` is the range of bit-indices in the bit-slice to copy, and `dest`
/// is the starting index of the destination range. `src` and `dest .. dest
/// + src.len()` are permitted to overlap; the copy will automatically
/// detect and manage this. However, both `src` and `dest .. dest +
/// src.len()` **must** fall within the bounds of `self`.
///
/// ## Original
///
/// [`slice::copy_within`](https://doc.rust-lang.org/std/primitive.slice.html#method.copy_within)
///
/// ## Panics
///
/// This panics if either the source or destination range exceed
/// `self.len()`.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0];
/// bits.copy_within(1 .. 5, 8);
/// //                        v  v  v  v
/// assert_eq!(bits, bits![1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0]);
/// //                                              ^  ^  ^  ^
/// ```
#[inline]
pub fn copy_within<R>(&mut self, src: R, dest: usize)
where R: RangeExt<usize> {
	let len = self.len();
	let src = src.normalize(0, len);
	self.assert_in_bounds(src.start, 0 .. len);
	self.assert_in_bounds(src.end, 0 ..= len);
	self.assert_in_bounds(dest, 0 ..
len); self.assert_in_bounds(dest + src.len(), 0 ..= len); unsafe { self.copy_within_unchecked(src, dest); } } #[inline] #[deprecated = "use `.swap_with_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn swap_with_slice(&mut self, other: &mut BitSlice) where T2: BitStore, O2: BitOrder, { self.swap_with_bitslice(other); } /// Produces bit-slice view(s) with different underlying storage types. /// /// This may have unexpected effects, and you cannot assume that /// `before[idx] == after[idx]`! Consult the [tables in the manual][layout] /// for information about memory layouts. /// /// ## Original /// /// [`slice::align_to`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to) /// /// ## Notes /// /// Unlike the standard library documentation, this explicitly guarantees /// that the middle bit-slice will have maximal size. You may rely on this /// property. /// /// ## Safety /// /// You may not use this to cast away alias protections. Rust does not have /// support for higher-kinded types, so this cannot express the relation /// `Outer -> Outer where Outer: BitStoreContainer`, but memory safety /// does require that you respect this rule. Reälign integers to integers, /// `Cell`s to `Cell`s, and atomics to atomics, but do not cross these /// boundaries. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7]; /// let bits = bytes.view_bits::(); /// let (pfx, mid, sfx) = unsafe { /// bits.align_to::() /// }; /// assert!(pfx.len() <= 8); /// assert_eq!(mid.len(), 48); /// assert!(sfx.len() <= 8); /// ``` /// /// [layout]: https://bitvecto-rs.github.io/bitvec/memory-layout.html #[inline] pub unsafe fn align_to(&self) -> (&Self, &BitSlice, &Self) where U: BitStore { let (l, c, r) = self.as_bitspan().align_to::(); ( l.into_bitslice_ref(), c.into_bitslice_ref(), r.into_bitslice_ref(), ) } /// Produces bit-slice view(s) with different underlying storage types. /// /// This may have unexpected effects, and you cannot assume that /// `before[idx] == after[idx]`! Consult the [tables in the manual][layout] /// for information about memory layouts. /// /// ## Original /// /// [`slice::align_to_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to_mut) /// /// ## Notes /// /// Unlike the standard library documentation, this explicitly guarantees /// that the middle bit-slice will have maximal size. You may rely on this /// property. /// /// ## Safety /// /// You may not use this to cast away alias protections. Rust does not have /// support for higher-kinded types, so this cannot express the relation /// `Outer -> Outer where Outer: BitStoreContainer`, but memory safety /// does require that you respect this rule. Reälign integers to integers, /// `Cell`s to `Cell`s, and atomics to atomics, but do not cross these /// boundaries. 
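///
/// An invariant sketch (editorial; not part of the upstream docs): wherever
/// the partition points fall, the three returned bit-slices always cover
/// the original bit-slice exactly.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut raw = [0u8; 6];
/// let bits = raw.view_bits_mut::<Lsb0>();
/// let total = bits.len();
/// let (pfx, mid, sfx) = unsafe { bits.align_to_mut::<u32>() };
/// assert_eq!(pfx.len() + mid.len() + sfx.len(), total);
/// ```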
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7]; /// let bits = bytes.view_bits_mut::(); /// let (pfx, mid, sfx) = unsafe { /// bits.align_to_mut::() /// }; /// assert!(pfx.len() <= 8); /// assert_eq!(mid.len(), 48); /// assert!(sfx.len() <= 8); /// ``` /// /// [layout]: https://bitvecto-rs.github.io/bitvec/memory-layout.html #[inline] pub unsafe fn align_to_mut( &mut self, ) -> (&mut Self, &mut BitSlice, &mut Self) where U: BitStore { let (l, c, r) = self.as_mut_bitspan().align_to::(); ( l.into_bitslice_mut(), c.into_bitslice_mut(), r.into_bitslice_mut(), ) } } #[cfg(feature = "alloc")] impl BitSlice where T: BitStore, O: BitOrder, { #[inline] #[deprecated = "use `.to_bitvec()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn to_vec(&self) -> BitVec { self.to_bitvec() } /// Creates a bit-vector by repeating a bit-slice `n` times. /// /// ## Original /// /// [`slice::repeat`](https://doc.rust-lang.org/std/primitive.slice.html#method.repeat) /// /// ## Panics /// /// This method panics if `self.len() * n` exceeds the `BitVec` capacity. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert_eq!(bits![0, 1].repeat(3), bitvec![0, 1, 0, 1, 0, 1]); /// ``` /// /// This panics by exceeding bit-vector maximum capacity: /// /// ```rust,should_panic /// use bitvec::prelude::*; /// /// bits![0, 1].repeat(BitSlice::::MAX_BITS); /// ``` #[inline] pub fn repeat(&self, n: usize) -> BitVec { let len = self.len(); let total = len.checked_mul(n).expect("capacity overflow"); let mut out = BitVec::repeat(false, total); let iter = unsafe { out.chunks_exact_mut(len).remove_alias() }; for chunk in iter { chunk.clone_from_bitslice(self); } out } /* As of 1.56, the `concat` and `join` methods use still-unstable traits * to govern the collection of multiple subslices into one vector. These * are possible to copy over and redefine locally, but unless a user asks * for it, doing so is considered a low priority. */ } #[inline] #[allow(missing_docs, clippy::missing_docs_in_private_items)] #[deprecated = "use `BitSlice::from_element()` instead"] pub fn from_ref(elem: &T) -> &BitSlice where T: BitStore, O: BitOrder, { BitSlice::from_element(elem) } #[inline] #[allow(missing_docs, clippy::missing_docs_in_private_items)] #[deprecated = "use `BitSlice::from_element_mut()` instead"] pub fn from_mut(elem: &mut T) -> &mut BitSlice where T: BitStore, O: BitOrder, { BitSlice::from_element_mut(elem) } #[inline] #[doc = include_str!("../../doc/slice/from_raw_parts.md")] pub unsafe fn from_raw_parts<'a, T, O>( data: BitPtr, len: usize, ) -> Result<&'a BitSlice, BitSpanError> where O: BitOrder, T: 'a + BitStore, { data.span(len).map(|bp| bp.into_bitslice_ref()) } #[inline] #[doc = include_str!("../../doc/slice/from_raw_parts_mut.md")] pub unsafe fn from_raw_parts_mut<'a, T, O>( data: BitPtr, len: usize, ) -> Result<&'a mut BitSlice, BitSpanError> where O: BitOrder, T: 'a + BitStore, { data.span(len).map(|bp| bp.into_bitslice_mut()) } #[doc = include_str!("../../doc/slice/BitSliceIndex.md")] pub trait BitSliceIndex<'a, T, O> where T: BitStore, O: BitOrder, { /// The output type of immutable access. type Immut; /// The output type of mutable access. type Mut; /// Immutably indexes into a bit-slice, returning `None` if `self` is out of /// bounds. 
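///
/// A usage sketch (editorial; not part of the upstream docs): this trait
/// backs `BitSlice::get`, so `usize` indices yield proxy bit-references
/// while ranges yield subslices.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 1, 0];
/// assert!(*bits.get(1).unwrap());
/// assert_eq!(bits.get(1 ..).unwrap(), bits![1, 0]);
/// assert!(bits.get(5).is_none());
/// ```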
///
/// ## Original
///
/// [`SliceIndex::get`](core::slice::SliceIndex::get)
fn get(self, bits: &'a BitSlice<T, O>) -> Option<Self::Immut>;

/// Mutably indexes into a bit-slice, returning `None` if `self` is out of
/// bounds.
///
/// ## Original
///
/// [`SliceIndex::get_mut`](core::slice::SliceIndex::get_mut)
fn get_mut(self, bits: &'a mut BitSlice<T, O>) -> Option<Self::Mut>;

/// Immutably indexes into a bit-slice without doing any bounds checking.
///
/// ## Original
///
/// [`SliceIndex::get_unchecked`](core::slice::SliceIndex::get_unchecked)
///
/// ## Safety
///
/// If `self` is not in bounds, then memory accesses through it are illegal
/// and the program becomes undefined. You must ensure that `self` is
/// appropriately within `0 .. bits.len()` at the call site.
unsafe fn get_unchecked(self, bits: &'a BitSlice<T, O>) -> Self::Immut;

/// Mutably indexes into a bit-slice without doing any bounds checking.
///
/// ## Original
///
/// [`SliceIndex::get_unchecked_mut`][0]
///
/// ## Safety
///
/// If `self` is not in bounds, then memory accesses through it are illegal
/// and the program becomes undefined. You must ensure that `self` is
/// appropriately within `0 .. bits.len()` at the call site.
///
/// [0]: core::slice::SliceIndex::get_unchecked_mut
unsafe fn get_unchecked_mut(self, bits: &'a mut BitSlice<T, O>) -> Self::Mut;

/// Immutably indexes into a bit-slice, panicking if `self` is out of
/// bounds.
///
/// ## Original
///
/// [`SliceIndex::index`](core::slice::SliceIndex::index)
///
/// ## Panics
///
/// Implementations are required to panic if `self` exceeds `bits.len()` in
/// any way.
fn index(self, bits: &'a BitSlice<T, O>) -> Self::Immut;

/// Mutably indexes into a bit-slice, panicking if `self` is out of bounds.
///
/// ## Original
///
/// [`SliceIndex::index_mut`](core::slice::SliceIndex::index_mut)
///
/// ## Panics
///
/// Implementations are required to panic if `self` exceeds `bits.len()` in
/// any way.
fn index_mut(self, bits: &'a mut BitSlice<T, O>) -> Self::Mut;
}

impl<'a, T, O> BitSliceIndex<'a, T, O> for usize
where
	T: BitStore,
	O: BitOrder,
{
	type Immut = BitRef<'a, Const, T, O>;
	type Mut = BitRef<'a, Mut, T, O>;

	#[inline]
	fn get(self, bits: &'a BitSlice<T, O>) -> Option<Self::Immut> {
		if self < bits.len() {
			Some(unsafe { self.get_unchecked(bits) })
		}
		else {
			None
		}
	}

	#[inline]
	fn get_mut(self, bits: &'a mut BitSlice<T, O>) -> Option<Self::Mut> {
		if self < bits.len() {
			Some(unsafe { self.get_unchecked_mut(bits) })
		}
		else {
			None
		}
	}

	#[inline]
	unsafe fn get_unchecked(self, bits: &'a BitSlice<T, O>) -> Self::Immut {
		bits.as_bitptr().add(self).as_ref().unwrap()
	}

	#[inline]
	unsafe fn get_unchecked_mut(
		self,
		bits: &'a mut BitSlice<T, O>,
	) -> Self::Mut {
		bits.as_mut_bitptr().add(self).as_mut().unwrap()
	}

	#[inline]
	fn index(self, bits: &'a BitSlice<T, O>) -> Self::Immut {
		self.get(bits).unwrap_or_else(|| {
			panic!("index {} out of bounds: {}", self, bits.len())
		})
	}

	#[inline]
	fn index_mut(self, bits: &'a mut BitSlice<T, O>) -> Self::Mut {
		let len = bits.len();
		self.get_mut(bits)
			.unwrap_or_else(|| panic!("index {} out of bounds: {}", self, len))
	}
}

/// Implements indexing on bit-slices by various range types.
macro_rules!
range_impl { ($r:ty { check $check:expr; select $select:expr; }) => { #[allow(clippy::redundant_closure_call)] impl<'a, T, O> BitSliceIndex<'a, T, O> for $r where O: BitOrder, T: BitStore, { type Immut = &'a BitSlice; type Mut = &'a mut BitSlice; #[inline] #[allow( clippy::blocks_in_if_conditions, clippy::redundant_closure_call )] fn get(self, bits: Self::Immut) -> Option { if ($check)(self.clone(), bits.as_bitspan()) { Some(unsafe { self.get_unchecked(bits) }) } else { None } } #[inline] #[allow( clippy::blocks_in_if_conditions, clippy::redundant_closure_call )] fn get_mut(self, bits: Self::Mut) -> Option { if ($check)(self.clone(), bits.as_bitspan()) { Some(unsafe { self.get_unchecked_mut(bits) }) } else { None } } #[inline] #[allow(clippy::redundant_closure_call)] unsafe fn get_unchecked(self, bits: Self::Immut) -> Self::Immut { ($select)(self, bits.as_bitspan()).into_bitslice_ref() } #[inline] #[allow(clippy::redundant_closure_call)] unsafe fn get_unchecked_mut(self, bits: Self::Mut) -> Self::Mut { ($select)(self, bits.as_mut_bitspan()).into_bitslice_mut() } #[inline] #[track_caller] fn index(self, bits: Self::Immut) -> Self::Immut { let r = self.clone(); let l = bits.len(); self.get(bits).unwrap_or_else(|| { panic!("range {:?} out of bounds: {}", r, l) }) } #[inline] #[track_caller] fn index_mut(self, bits: Self::Mut) -> Self::Mut { let r = self.clone(); let l = bits.len(); self.get_mut(bits).unwrap_or_else(|| { panic!("range {:?} out of bounds: {}", r, l) }) } } }; } range_impl!(Range { check |Range { start, end }, span: BitSpan<_, _, _>| { let len = span.len(); start <= len && end <= len && start <= end }; select |Range { start, end }, span: BitSpan<_, _, _>| { span.to_bitptr().add(start).span_unchecked(end - start) }; }); range_impl!(RangeFrom { check |RangeFrom { start }, span: BitSpan<_, _, _>| { start <= span.len() }; select |RangeFrom { start }, span: BitSpan<_, _, _>| { span.to_bitptr().add(start).span_unchecked(span.len() - start) }; }); range_impl!(RangeTo { check |RangeTo { end }, span: BitSpan<_, _, _>| { end <= span.len() }; select |RangeTo { end }, mut span: BitSpan<_, _, _>| { span.set_len(end); span }; }); range_impl!(RangeInclusive { check |range: Self, span: BitSpan<_, _, _>| { let len = span.len(); let start = *range.start(); let end = *range.end(); start < len && end < len && start <= end }; select |range: Self, span: BitSpan<_, _, _>| { let start = *range.start(); let end = *range.end(); span.to_bitptr().add(start).span_unchecked(end + 1 - start) }; }); range_impl!(RangeToInclusive { check |RangeToInclusive { end }, span: BitSpan<_, _, _>| { end < span.len() }; select |RangeToInclusive { end }, mut span: BitSpan<_, _, _>| { span.set_len(end + 1); span }; }); #[cfg(not(tarpaulin_include))] impl<'a, T, O> BitSliceIndex<'a, T, O> for RangeFull where T: BitStore, O: BitOrder, { type Immut = &'a BitSlice; type Mut = &'a mut BitSlice; #[inline] fn get(self, bits: Self::Immut) -> Option { Some(bits) } #[inline] fn get_mut(self, bits: Self::Mut) -> Option { Some(bits) } #[inline] unsafe fn get_unchecked(self, bits: Self::Immut) -> Self::Immut { bits } #[inline] unsafe fn get_unchecked_mut(self, bits: Self::Mut) -> Self::Mut { bits } #[inline] fn index(self, bits: Self::Immut) -> Self::Immut { bits } #[inline] fn index_mut(self, bits: Self::Mut) -> Self::Mut { bits } } bitvec-1.0.1/src/slice/iter.rs000064400000000000000000001715111046102023000143040ustar 00000000000000#![doc = include_str!("../../doc/slice/iter.md")] use core::{ cmp, fmt::{ self, Debug, Formatter, }, 
iter::{ FusedIterator, Map, }, marker::PhantomData, mem, }; use wyz::comu::{ Const, Mut, }; use super::{ BitSlice, BitSliceIndex, }; use crate::{ order::{ BitOrder, Lsb0, Msb0, }, ptr::{ BitPtrRange, BitRef, }, store::BitStore, }; /// [Original](https://doc.rust-lang.org/core/iter/trait.IntoIterator.html#impl-IntoIterator-1) #[cfg(not(tarpaulin_include))] impl<'a, T, O> IntoIterator for &'a BitSlice where T: 'a + BitStore, O: BitOrder, { type IntoIter = Iter<'a, T, O>; type Item = ::Item; #[inline] fn into_iter(self) -> Self::IntoIter { Iter::new(self) } } /// [Original](https://doc.rust-lang.org/core/iter/trait.IntoIterator.html#impl-IntoIterator-3) #[cfg(not(tarpaulin_include))] impl<'a, T, O> IntoIterator for &'a mut BitSlice where T: 'a + BitStore, O: BitOrder, { type IntoIter = IterMut<'a, T, O>; type Item = ::Item; #[inline] fn into_iter(self) -> Self::IntoIter { IterMut::new(self) } } #[repr(transparent)] #[doc = include_str!("../../doc/slice/iter/Iter.md")] pub struct Iter<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// A dual-pointer range of the bit-slice undergoing iteration. /// /// This structure stores two fully-decode pointers to the first live and /// first dead bits, trading increased size (three words instead of two) for /// faster performance when iterating. range: BitPtrRange, /// `Iter` is semantically equivalent to a `&BitSlice`. _ref: PhantomData<&'a BitSlice>, } impl<'a, T, O> Iter<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a BitSlice) -> Self { Self { range: slice.as_bitptr_range(), _ref: PhantomData, } } /// Views the currently unyielded bit-slice. /// /// Because the iterator is a shared view, the returned bit-slice does not /// cause a lifetime conflict, and the iterator can continue to be used /// while it exists. /// /// ## Original /// /// [`Iter::as_slice`](core::slice::Iter::as_slice) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 0, 1, 1]; /// let mut iter = bits.iter(); /// /// assert_eq!(iter.as_bitslice(), bits![0, 0, 1, 1]); /// assert!(!*iter.nth(1).unwrap()); /// assert_eq!(iter.as_bitslice(), bits![1, 1]); /// ``` #[inline] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_bitslice(&self) -> &'a BitSlice { unsafe { self.range.clone().into_bitspan().into_bitslice_ref() } } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_slice(&self) -> &'a BitSlice { self.as_bitslice() } /// Adapts the iterator to yield regular `&bool` references rather than the /// [proxy reference][0]. /// /// This allows the iterator to be used in APIs that expect ordinary /// references. It reads from the proxy and provides an equivalent /// `&'static bool`. The address value of the yielded reference is not /// related to the addresses covered by the `BitSlice` buffer in any way. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1]; /// let mut iter = bits.iter().by_refs(); /// assert_eq!(iter.next(), Some(&false)); /// assert_eq!(iter.next(), Some(&true)); /// assert!(iter.next().is_none()); /// ``` /// /// [0]: crate::ptr::BitRef #[inline] pub fn by_refs(self) -> BitRefIter<'a, T, O> { self.by_vals().map(|bit| match bit { true => &true, false => &false, }) } /// Adapts the iterator to yield `bool` values rather than the /// [proxy reference][0]. 
/// /// This allows the iterator to be used in APIs that expect direct values. /// It dereferences the proxy and yields the referent `bool` directly. It /// replaces `Iterator::copied`, which is not available on this type. /// /// ## Original /// /// [`Iterator::copied`](core::iter::Iterator::copied) /// /// ## Performance /// /// This bypasses the construction of a `BitRef` for each yielded bit. Do /// not use `bits.as_bitptr_range().map(|bp| unsafe { bp.read() })` in a /// misguided attempt to eke out some additional performance in your code. /// /// This iterator is already the fastest possible walk across a bit-slice. /// You do not need to beat it. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1]; /// let mut iter = bits.iter().by_vals(); /// assert_eq!(iter.next(), Some(false)); /// assert_eq!(iter.next(), Some(true)); /// assert!(iter.next().is_none()); /// ``` /// /// [0]: crate::ptr::BitRef #[inline] pub fn by_vals(self) -> BitValIter<'a, T, O> { BitValIter { range: self.range, _life: PhantomData, } } /// Yields `bool` values directly, rather than [proxy references][0]. /// /// The original slice iterator yields true `&bool`, and as such allows /// [`Iterator::copied`] to exist. This iterator does not satisfy the bounds /// for that method, so `.copied()` is provided as an inherent in order to /// maintain source compatibility. Prefer [`.by_vals()`] instead, which /// avoids the name collision while still making clear that it yields `bool` /// values. /// /// [`Iterator::copied`]: core::iter::Iterator::copied /// [`.by_vals()`]: Self::by_vals /// [0]: crate::ptr::BitRef #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "`Iterator::copied` does not exist on this type. Use \ `.by_vals()` instead"] pub fn copied(self) -> BitValIter<'a, T, O> { self.by_vals() } } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Clone) #[cfg(not(tarpaulin_include))] impl Clone for Iter<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { Self { range: self.range.clone(), ..*self } } } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-AsRef%3C%5BT%5D%3E) #[cfg(not(tarpaulin_include))] impl AsRef> for Iter<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Debug) #[cfg(not(tarpaulin_include))] impl Debug for Iter<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_tuple("Iter").field(&self.as_bitslice()).finish() } } #[repr(transparent)] #[doc = include_str!("../../doc/slice/iter/IterMut.md")] pub struct IterMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// A dual-pointer range of the bit-slice undergoing iteration. /// /// This structure stores two fully-decode pointers to the first live and /// first dead bits, trading increased size (three words instead of two) for /// faster performance when iterating. range: BitPtrRange, /// `IterMut` is semantically equivalent to an aliased `&mut BitSlice`. _ref: PhantomData<&'a mut BitSlice>, } impl<'a, T, O> IterMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a mut BitSlice) -> Self { Self { range: slice.alias_mut().as_mut_bitptr_range(), _ref: PhantomData, } } /// Views the underlying bit-slice as a subslice of the original data. 
/// /// This consumes the iterator in order to avoid creating aliasing /// references between the returned subslice (which has the original /// lifetime, and is not borrowed from the iterator) and the proxies the /// iterator produces. /// /// ## Original /// /// [`IterMut::into_slice`](core::slice::IterMut::into_slice) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 0, 1, 1]; /// let mut iter = bits.iter_mut(); /// /// *iter.next().unwrap() = true; /// assert_eq!(iter.into_bitslice(), bits![0, 1, 1]); /// assert!(bits[0]); /// ``` #[inline] #[cfg(not(tarpaulin_include))] pub fn into_bitslice(self) -> &'a mut BitSlice { unsafe { self.range.into_bitspan().into_bitslice_mut() } } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.into_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn into_slice(self) -> &'a mut BitSlice { self.into_bitslice() } /// Views the remaining bit-slice that has not yet been iterated. /// /// This borrows the iterator’s own lifetime, preventing it from being used /// while the bit-slice view exists and thus ensuring that no aliasing /// references are created. Bits that the iterator has already yielded are /// not included in the produced bit-slice. /// /// ## Original /// /// [`IterMut::as_slice`](core::slice::IterMut::as_slice) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 4]; /// let mut iter = bits.iter_mut(); /// /// *iter.next().unwrap() = true; /// assert_eq!(iter.as_bitslice(), bits![0; 3]); /// *iter.next().unwrap() = true; /// assert_eq!(iter.as_bitslice(), bits![0; 2]); /// /// assert_eq!(bits, bits![1, 1, 0, 0]); /// ``` #[inline] #[cfg(not(tarpaulin_include))] pub fn as_bitslice(&self) -> &BitSlice { unsafe { self.range.clone().into_bitspan().into_bitslice_ref() } } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_slice(&self) -> &BitSlice { self.as_bitslice() } } /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-AsRef%3C%5BT%5D%3E) #[cfg(not(tarpaulin_include))] impl AsRef> for IterMut<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Debug) #[cfg(not(tarpaulin_include))] impl Debug for IterMut<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_tuple("IterMut") .field(&self.as_bitslice()) .finish() } } /// `Iter` and `IterMut` have very nearly the same implementation text. macro_rules! iter { ($($iter:ident => $item:ty);+ $(;)?) 
=> { $( /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Iterator) and /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Iterator) impl<'a, T, O> Iterator for $iter<'a, T, O> where T: 'a + BitStore, O: BitOrder, { type Item = $item; #[inline] fn next(&mut self) -> Option { self.range.next().map(|bp| unsafe { BitRef::from_bitptr(bp) }) } #[inline] fn nth(&mut self, n: usize) -> Option { self.range.nth(n).map(|bp| unsafe { BitRef::from_bitptr(bp) }) } easy_iter!(); } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-DoubleEndedIterator) and /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-DoubleEndedIterator) impl<'a, T, O> DoubleEndedIterator for $iter<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { self.range .next_back() .map(|bp| unsafe { BitRef::from_bitptr(bp) }) } #[inline] fn nth_back(&mut self, n: usize) -> Option { self.range .nth_back(n) .map(|bp| unsafe { BitRef::from_bitptr(bp) }) } } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-ExactSizeIterator) and /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-ExactSizeIterator) impl ExactSizeIterator for $iter<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { self.range.len() } } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-FusedIterator) and /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-FusedIterator) impl FusedIterator for $iter<'_, T, O> where T: BitStore, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Send) and /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Send) // #[allow(clippy::non_send_fields_in_send_ty)] unsafe impl<'a, T, O> Send for $iter<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice: Send, { } /// [Original](https://doc.rust-lang.org/core/slice/struct.Iter.html#impl-Sync) and /// [Original](https://doc.rust-lang.org/core/slice/struct.IterMut.html#impl-Sync) unsafe impl Sync for $iter<'_, T, O> where T: BitStore, O: BitOrder, BitSlice: Sync, { } )+ }; } iter! { Iter => >::Immut; IterMut => >::Mut; } /// Builds an iterator implementation for grouping iterators. macro_rules! group { // The iterator and its yielded type. ($iter:ident => $item:ty { // The eponymous functions from the iterator traits. $next:item $nth:item $next_back:item $nth_back:item $len:item }) => { impl<'a, T, O> Iterator for $iter<'a, T, O> where T: 'a + BitStore, O: BitOrder, { type Item = $item; #[inline] $next #[inline] $nth easy_iter!(); } impl DoubleEndedIterator for $iter<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] $next_back #[inline] $nth_back } impl ExactSizeIterator for $iter<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] $len } impl FusedIterator for $iter<'_, T, O> where T: BitStore, O: BitOrder, { } }; } /// An iterator over `BitSlice` that yields `&bool` directly. pub type BitRefIter<'a, T, O> = Map, fn(bool) -> &'a bool>; /// An iterator over `BitSlice` that yields `bool` directly. pub struct BitValIter<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The start and end bit-pointers in the iteration region. range: BitPtrRange, /// Hold the lifetime of the source region, so that this does not cause UAF. 
_life: PhantomData<&'a BitSlice>, } group!(BitValIter => bool { fn next(&mut self) -> Option { self.range.next().map(|bp| unsafe { bp.read() }) } fn nth(&mut self, n: usize) -> Option { self.range.nth(n).map(|bp| unsafe { bp.read() }) } fn next_back(&mut self) -> Option { self.range.next_back().map(|bp| unsafe { bp.read() }) } fn nth_back(&mut self, n: usize) -> Option { self.range.nth_back(n).map(|bp| unsafe { bp.read() }) } fn len(&self) -> usize { self.range.len() } }); #[derive(Clone, Debug)] #[doc = include_str!("../../doc/slice/iter/Windows.md")] pub struct Windows<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice. slice: &'a BitSlice, /// The width of the produced windows. width: usize, } group!(Windows => &'a BitSlice { fn next(&mut self) -> Option { if self.width > self.slice.len() { self.slice = Default::default(); return None; } unsafe { let out = self.slice.get_unchecked(.. self.width); self.slice = self.slice.get_unchecked(1 ..); Some(out) } } fn nth(&mut self, n: usize) -> Option { let (end, ovf) = self.width.overflowing_add(n); if end > self.slice.len() || ovf { self.slice = Default::default(); return None; } unsafe { let out = self.slice.get_unchecked(n .. end); self.slice = self.slice.get_unchecked(n + 1 ..); Some(out) } } fn next_back(&mut self) -> Option { let len = self.slice.len(); if self.width > len { self.slice = Default::default(); return None; } unsafe { let out = self.slice.get_unchecked(len - self.width ..); self.slice = self.slice.get_unchecked(.. len - 1); Some(out) } } fn nth_back(&mut self, n: usize) -> Option { let (end, ovf) = self.slice.len().overflowing_sub(n); if end < self.width || ovf { self.slice = Default::default(); return None; } unsafe { let out = self.slice.get_unchecked(end - self.width .. end); self.slice = self.slice.get_unchecked(.. end - 1); Some(out) } } fn len(&self) -> usize { let len = self.slice.len(); if self.width > len { 0 } else { len - self.width + 1 } } }); #[derive(Clone, Debug)] #[doc = include_str!("../../doc/slice/iter/Chunks.md")] pub struct Chunks<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice. slice: &'a BitSlice, /// The width of the produced chunks. width: usize, } group!(Chunks => &'a BitSlice { fn next(&mut self) -> Option { let len = self.slice.len(); if len == 0 { return None; } let mid = cmp::min(len, self.width); let (out, rest) = unsafe { self.slice.split_at_unchecked(mid) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let len = self.slice.len(); let (start, ovf) = n.overflowing_mul(self.width); if start >= len || ovf { self.slice = Default::default(); return None; } let split = start.checked_add(self.width) .map(|mid| cmp::min(mid, len)) .unwrap_or(len); unsafe { let (head, rest) = self.slice.split_at_unchecked(split); self.slice = rest; Some(head.get_unchecked(start ..)) } } fn next_back(&mut self) -> Option { match self.slice.len() { 0 => None, len => { // Determine if the back chunk is a remnant or a whole chunk. let rem = len % self.width; let size = if rem == 0 { self.width } else { rem }; let (rest, out) = unsafe { self.slice.split_at_unchecked(len - size) }; self.slice = rest; Some(out) }, } } fn nth_back(&mut self, n: usize) -> Option { let len = self.len(); if n >= len { self.slice = Default::default(); return None; } let start = (len - 1 - n) * self.width; let width = cmp::min(start + self.width, self.slice.len()); let (rest, out) = unsafe { self.slice .get_unchecked(.. 
start + width) .split_at_unchecked(start) }; self.slice = rest; Some(out) } fn len(&self) -> usize { match self.slice.len() { 0 => 0, len => { let (n, r) = (len / self.width, len % self.width); n + (r > 0) as usize }, } } }); #[derive(Debug)] #[doc = include_str!("../../doc/slice/iter/ChunksMut.md")] pub struct ChunksMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice, marked with the alias tainting. slice: &'a mut BitSlice, /// The width of the produced chunks. width: usize, } group!(ChunksMut => &'a mut BitSlice { fn next(&mut self) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); if len == 0 { return None; } let mid = cmp::min(len, self.width); let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(mid) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); let (start, ovf) = n.overflowing_mul(self.width); if start >= len || ovf { return None; } let (out, rest) = unsafe { slice .get_unchecked_mut(start ..) .split_at_unchecked_mut_noalias(cmp::min(len - start, self.width)) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { let slice = mem::take(&mut self.slice); match slice.len() { 0 => None, len => { let rem = len % self.width; let size = if rem == 0 { self.width } else { rem }; let mid = len - size; let (rest, out) = unsafe { slice.split_at_unchecked_mut_noalias(mid) }; self.slice = rest; Some(out) }, } } fn nth_back(&mut self, n: usize) -> Option { let len = self.len(); let slice = mem::take(&mut self.slice); if n >= len { return None; } let start = (len - 1 - n) * self.width; let width = cmp::min(start + self.width, slice.len()); let (rest, out) = unsafe { slice .get_unchecked_mut(.. start + width) .split_at_unchecked_mut_noalias(start) }; self.slice = rest; Some(out) } fn len(&self) -> usize { match self.slice.len() { 0 => 0, len => { let (n, r) = (len / self.width, len % self.width); n + (r > 0) as usize }, } } }); #[derive(Clone, Debug)] #[doc = include_str!("../../doc/slice/iter/ChunksExact.md")] pub struct ChunksExact<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice. slice: &'a BitSlice, /// Any remnant of the source bit-slice that will not be yielded as a chunk. extra: &'a BitSlice, /// The width of the produced chunks. width: usize, } impl<'a, T, O> ChunksExact<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a BitSlice, width: usize) -> Self { assert_ne!(width, 0, "Chunk width cannot be 0"); let len = slice.len(); let rem = len % width; let (slice, extra) = unsafe { slice.split_at_unchecked(len - rem) }; Self { slice, extra, width, } } /// Gets the remnant bit-slice that the iterator will not yield. /// /// ## Original /// /// [`ChunksExact::remainder`](core::slice::ChunksExact::remainder) #[inline] #[cfg(not(tarpaulin_include))] pub fn remainder(&self) -> &'a BitSlice { self.extra } } group!(ChunksExact => &'a BitSlice { fn next(&mut self) -> Option { if self.slice.len() < self.width { return None; } let (out, rest) = unsafe { self.slice.split_at_unchecked(self.width) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let (start, ovf) = n.overflowing_mul(self.width); if start >= self.slice.len() || ovf { self.slice = Default::default(); return None; } let (out, rest) = unsafe { self.slice .get_unchecked(start ..) 
.split_at_unchecked(self.width) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { let len = self.slice.len(); if len < self.width { return None; } let (rest, out) = unsafe { self.slice.split_at_unchecked(len - self.width) }; self.slice = rest; Some(out) } fn nth_back(&mut self, n: usize) -> Option { let len = self.len(); if n >= len { self.slice = Default::default(); return None; } let end = (len - n) * self.width; let (rest, out) = unsafe { self.slice .get_unchecked(.. end) .split_at_unchecked(end - self.width) }; self.slice = rest; Some(out) } fn len(&self) -> usize { self.slice.len() / self.width } }); #[derive(Debug)] #[doc = include_str!("../../doc/slice/iter/ChunksExactMut.md")] pub struct ChunksExactMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice, marked with the alias tainting. slice: &'a mut BitSlice, /// Any remnant of the source bit-slice that will not be yielded as a chunk. extra: &'a mut BitSlice, /// The width of the produced chunks. width: usize, } impl<'a, T, O> ChunksExactMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a mut BitSlice, width: usize) -> Self { assert_ne!(width, 0, "Chunk width cannot be 0"); let len = slice.len(); let rem = len % width; let (slice, extra) = unsafe { slice.split_at_unchecked_mut(len - rem) }; Self { slice, extra, width, } } /// Consumes the iterator, returning the remnant bit-slice that it will not /// yield. /// /// ## Original /// /// [`ChunksExactMut::into_remainder`][0] /// /// [0]: core::slice::ChunksExactMut::into_remainder #[inline] #[cfg(not(tarpaulin_include))] pub fn into_remainder(self) -> &'a mut BitSlice { self.extra } /// Takes the remnant bit-slice out of the iterator. /// /// The first time this is called, it will produce the remnant; on each /// subsequent call, it will produce an empty bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 5]; /// let mut chunks = bits.chunks_exact_mut(3); /// /// assert_eq!(chunks.take_remainder(), bits![0; 2]); /// assert!(chunks.take_remainder().is_empty()); /// ``` #[inline] pub fn take_remainder(&mut self) -> &'a mut BitSlice { mem::take(&mut self.extra) } } group!(ChunksExactMut => &'a mut BitSlice { fn next(&mut self) -> Option { let slice = mem::take(&mut self.slice); if slice.len() < self.width { return None; } let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(self.width) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let slice = mem::take(&mut self.slice); let (start, ovf) = n.overflowing_mul(self.width); if start + self.width >= slice.len() || ovf { return None; } let (out, rest) = unsafe { slice.get_unchecked_mut(start ..) .split_at_unchecked_mut_noalias(self.width) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); if len < self.width { return None; } let (rest, out) = unsafe { slice.split_at_unchecked_mut_noalias(len - self.width) }; self.slice = rest; Some(out) } fn nth_back(&mut self, n: usize) -> Option { let len = self.len(); let slice = mem::take(&mut self.slice); if n >= len { return None; } let end = (len - n) * self.width; let (rest, out) = unsafe { slice.get_unchecked_mut(.. 
end) .split_at_unchecked_mut_noalias(end - self.width) }; self.slice = rest; Some(out) } fn len(&self) -> usize { self.slice.len() / self.width } }); #[derive(Clone, Debug)] #[doc = include_str!("../../doc/slice/iter/RChunks.md")] pub struct RChunks<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice. slice: &'a BitSlice, /// The width of the produced chunks. width: usize, } group!(RChunks => &'a BitSlice { fn next(&mut self) -> Option { let len = self.slice.len(); if len == 0 { return None; } let mid = len - cmp::min(len, self.width); let (rest, out) = unsafe { self.slice.split_at_unchecked(mid) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let len = self.slice.len(); let (num, ovf) = n.overflowing_mul(self.width); if num >= len || ovf { self.slice = Default::default(); return None; } let end = len - num; let mid = end.saturating_sub(self.width); let (rest, out) = unsafe { self.slice .get_unchecked(.. end) .split_at_unchecked(mid) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { match self.slice.len() { 0 => None, n => { let rem = n % self.width; let len = if rem == 0 { self.width } else { rem }; let (out, rest) = unsafe { self.slice.split_at_unchecked(len) }; self.slice = rest; Some(out) }, } } fn nth_back(&mut self, n: usize) -> Option { let len = self.len(); if n >= len { self.slice = Default::default(); return None; } /* Taking from the back of a reverse iterator means taking from the front of the slice. `len` gives us the total number of subslices remaining. In order to find the partition point, we need to subtract `n - 1` full subslices from that count (because the back slice of the iteration might not be full), compute their bit width, and offset *that* from the end of the memory region. This gives us the zero-based index of the partition point between what is returned and what is retained. The `part ..` section of the slice is retained, and the very end of the `.. part` section is returned. The head section is split at no less than `self.width` bits below the end marker (this could be the partial section, so a wrapping subtraction cannot be used), and `.. start` is discarded. Source: https://doc.rust-lang.org/1.43.0/src/core/slice/mod.rs.html#5141-5156 */ let from_end = (len - 1 - n) * self.width; let end = self.slice.len() - from_end; let start = end.saturating_sub(self.width); let (out, rest) = unsafe { self.slice.split_at_unchecked(end) }; self.slice = rest; Some(unsafe { out.get_unchecked(start ..) }) } fn len(&self) -> usize { match self.slice.len() { 0 => 0, len => { let (n, r) = (len / self.width, len % self.width); n + (r > 0) as usize }, } } }); #[derive(Debug)] #[doc = include_str!("../../doc/slice/iter/RChunksMut.md")] pub struct RChunksMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice, marked with the alias tainting. slice: &'a mut BitSlice, /// The width of the produced chunks. 
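// Editorial sketch of the reverse-chunking behavior, assuming
// `bitvec::prelude::*`: chunks are taken from the back of the bit-slice,
// and the final, possibly-short chunk sits at the front.
//
//     let bits = bits![0, 0, 1, 1, 0];
//     let mut iter = bits.rchunks(2);
//     assert_eq!(iter.next().unwrap(), bits![1, 0]); // bits[3 ..]
//     assert_eq!(iter.next().unwrap(), bits![0, 1]); // bits[1 .. 3]
//     assert_eq!(iter.next().unwrap(), bits![0]);    // bits[.. 1], short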
width: usize, } group!(RChunksMut => &'a mut BitSlice { fn next(&mut self) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); if len == 0 { return None; } let mid = len - cmp::min(len, self.width); let (rest, out) = unsafe { slice.split_at_unchecked_mut_noalias(mid) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); let (num, ovf) = n.overflowing_mul(self.width); if num >= len || ovf { return None; } let end = len - num; let mid = end.saturating_sub(self.width); let (rest, out) = unsafe { slice.get_unchecked_mut(.. end) .split_at_unchecked_mut_noalias(mid) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { let slice = mem::take(&mut self.slice); match slice.len() { 0 => None, n => { let rem = n % self.width; let len = if rem == 0 { self.width } else { rem }; let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(len) }; self.slice = rest; Some(out) }, } } fn nth_back(&mut self, n: usize) -> Option { let len = self.len(); let slice = mem::take(&mut self.slice); if n >= len { return None; } let from_end = (len - 1 - n) * self.width; let end = slice.len() - from_end; let start = end.saturating_sub(self.width); let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(end) }; self.slice = rest; Some(unsafe { out.get_unchecked_mut(start ..) }) } fn len(&self) -> usize { match self.slice.len() { 0 => 0, len => { let (n, r) = (len / self.width, len % self.width); n + (r > 0) as usize }, } } }); #[derive(Clone, Debug)] #[doc = include_str!("../../doc/slice/iter/RChunksExact.md")] pub struct RChunksExact<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice. slice: &'a BitSlice, /// Any remnant of the source bit-slice that will not be yielded as a chunk. extra: &'a BitSlice, /// The width of the produced chunks. width: usize, } impl<'a, T, O> RChunksExact<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a BitSlice, width: usize) -> Self { assert_ne!(width, 0, "Chunk width cannot be 0"); let (extra, slice) = unsafe { slice.split_at_unchecked(slice.len() % width) }; Self { slice, extra, width, } } /// Gets the remnant bit-slice that the iterator will not yield. /// /// ## Original /// /// [`RChunksExact::remainder`](core::slice::RChunksExact::remainder) #[inline] #[cfg(not(tarpaulin_include))] pub fn remainder(&self) -> &'a BitSlice { self.extra } } group!(RChunksExact => &'a BitSlice { fn next(&mut self) -> Option { let len = self.slice.len(); if len < self.width { return None; } let (rest, out) = unsafe { self.slice.split_at_unchecked(len - self.width) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let len = self.slice.len(); let (split, ovf) = n.overflowing_mul(self.width); if split >= len || ovf { self.slice = Default::default(); return None; } let end = len - split; let (rest, out) = unsafe { self.slice .get_unchecked(.. 
end) .split_at_unchecked(end - self.width) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { if self.slice.len() < self.width { return None; } let (out, rest) = unsafe { self.slice.split_at_unchecked(self.width) }; self.slice = rest; Some(out) } fn nth_back(&mut self, n: usize) -> Option { let len = self.slice.len(); let (start, ovf) = n.overflowing_mul(self.width); if start >= len || ovf { self.slice = Default::default(); return None; } // At this point, `start` is at least `self.width` less than `len`. let (out, rest) = unsafe { self.slice.get_unchecked(start ..).split_at_unchecked(self.width) }; self.slice = rest; Some(out) } fn len(&self) -> usize { self.slice.len() / self.width } }); #[derive(Debug)] #[doc = include_str!("../../doc/slice/iter/RChunksExactMut.md")] pub struct RChunksExactMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The source bit-slice, marked with the alias tainting. slice: &'a mut BitSlice, /// Any remnant of the source bit-slice that will not be yielded as a chunk. extra: &'a mut BitSlice, /// The width of the produced chunks. width: usize, } impl<'a, T, O> RChunksExactMut<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a mut BitSlice, width: usize) -> Self { assert_ne!(width, 0, "Chunk width cannot be 0"); let (extra, slice) = unsafe { slice.split_at_unchecked_mut(slice.len() % width) }; Self { slice, extra, width, } } /// Consumes the iterator, returning the remnant bit-slice that it will not /// yield. /// /// ## Original /// /// [`RChunksExactMut::into_remainder`][0] /// /// [0]: core::slice::RChunksExactMut::into_remainder #[inline] #[cfg(not(tarpaulin_include))] pub fn into_remainder(self) -> &'a mut BitSlice { self.extra } /// Takes the remnant bit-slice out of the iterator. /// /// The first time this is called, it will produce the remnant; on each /// subsequent call, it will produce an empty bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0; 5]; /// let mut chunks = bits.rchunks_exact_mut(3); /// /// assert_eq!(chunks.take_remainder(), bits![0; 2]); /// assert!(chunks.take_remainder().is_empty()); /// ``` #[inline] pub fn take_remainder(&mut self) -> &'a mut BitSlice { mem::take(&mut self.extra) } } group!(RChunksExactMut => &'a mut BitSlice { fn next(&mut self) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); if len < self.width { return None; } let (rest, out) = unsafe { slice.split_at_unchecked_mut_noalias(len - self.width) }; self.slice = rest; Some(out) } fn nth(&mut self, n: usize) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); let (split, ovf) = n.overflowing_mul(self.width); if split >= len || ovf { return None; } let end = len - split; let (rest, out) = unsafe { slice.get_unchecked_mut(.. end) .split_at_unchecked_mut_noalias(end - self.width) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { let slice = mem::take(&mut self.slice); if slice.len() < self.width { return None; } let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(self.width) }; self.slice = rest; Some(out) } fn nth_back(&mut self, n: usize) -> Option { let slice = mem::take(&mut self.slice); let len = slice.len(); let (start, ovf) = n.overflowing_mul(self.width); if start >= len || ovf { return None; } // At this point, `start` is at least `self.width` less than `len`. 
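// Editorial sketch for orientation, assuming `bitvec::prelude::*`: the
// exact reverse-chunkers never yield the front partial chunk; it is held
// back as the remainder.
//
//     let bits = bits![0, 1, 0, 1, 1];
//     let mut iter = bits.rchunks_exact(2);
//     assert_eq!(iter.next().unwrap(), bits![1, 1]);
//     assert_eq!(iter.remainder(), bits![0]);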
let (out, rest) = unsafe { slice.get_unchecked_mut(start ..) .split_at_unchecked_mut_noalias(self.width) }; self.slice = rest; Some(out) } fn len(&self) -> usize { self.slice.len() / self.width } }); /// Creates the `new` function for the easy grouping iterators. macro_rules! new_group { ($($t:ident $($m:ident)? $(.$a:ident())?),+ $(,)?) => { $( impl<'a, T, O> $t<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[inline] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new( slice: &'a $($m)? BitSlice, width: usize, ) -> Self { assert_ne!(width, 0, "view width cannot be 0"); let slice = slice$(.$a())?; Self { slice, width } } } )+ }; } new_group! { Windows, Chunks, ChunksMut mut .alias_mut(), RChunks, RChunksMut mut .alias_mut(), } /// Creates splitting iterators. macro_rules! split { ( $iter:ident => $item:ty $(where $alias:ident)? { $next:item $next_back:item } ) => { impl<'a, T, O, P> $iter<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { pub(super) fn new(slice: $item, pred: P) -> Self { Self { slice, pred, done: false, } } } impl Debug for $iter<'_, T, O, P> where T: BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_struct(stringify!($iter)) .field("slice", &self.slice) .field("done", &self.done) .finish() } } impl<'a, T, O, P> Iterator for $iter<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { type Item = $item; #[inline] $next #[inline] fn size_hint(&self) -> (usize, Option) { if self.done { (0, Some(0)) } else { (1, Some(self.slice.len() + 1)) } } } impl<'a, T, O, P> DoubleEndedIterator for $iter<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { #[inline] $next_back } impl<'a, T, O, P> FusedIterator for $iter<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { } impl<'a, T, O, P> SplitIter for $iter<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { #[inline] fn finish(&mut self) -> Option { if self.done { None } else { self.done = true; Some(mem::take(&mut self.slice)) } } } }; } #[derive(Clone)] #[doc = include_str!("../../doc/slice/iter/Split.md")] pub struct Split<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The [`BitSlice`] being split. /// /// [`BitSlice`]: crate::slice::BitSlice slice: &'a BitSlice, /// The function used to test whether a split should occur. pred: P, /// Whether the split is finished. done: bool, } split!(Split => &'a BitSlice { fn next(&mut self) -> Option { if self.done { return None; } match self.slice .iter() .by_refs() .enumerate() .position(|(idx, bit)| (self.pred)(idx, bit)) { None => self.finish(), Some(idx) => unsafe { let out = self.slice.get_unchecked(.. idx); self.slice = self.slice.get_unchecked(idx + 1 ..); Some(out) }, } } fn next_back(&mut self) -> Option { if self.done { return None; } match self.slice .iter() .by_refs() .enumerate() .rposition(|(idx, bit)| (self.pred)(idx, bit)) { None => self.finish(), Some(idx) => unsafe { let out = self.slice.get_unchecked(idx + 1 ..); self.slice = self.slice.get_unchecked(.. idx); Some(out) }, } } }); #[doc = include_str!("../../doc/slice/iter/SplitMut.md")] pub struct SplitMut<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The source bit-slice, marked with the alias tainting. 
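// Editorial sketch of the splitting behavior, assuming
// `bitvec::prelude::*`: bits that match the predicate are consumed as
// separators and are not yielded.
//
//     let bits = bits![0, 1, 0, 0, 1, 0];
//     let mut iter = bits.split(|_idx, bit| *bit);
//     assert_eq!(iter.next().unwrap(), bits![0]);    // before the first `1`
//     assert_eq!(iter.next().unwrap(), bits![0, 0]); // between the `1`s
//     assert_eq!(iter.next().unwrap(), bits![0]);    // after the last `1`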
slice: &'a mut BitSlice, /// The function that tests each bit for whether it is a split point. pred: P, /// Marks whether iteration has concluded, without emptying the `slice`. done: bool, } split!(SplitMut => &'a mut BitSlice { fn next(&mut self) -> Option { if self.done { return None; } let idx_opt = { let pred = &mut self.pred; self.slice .iter() .by_refs() .enumerate() .position(|(idx, bit)| (pred)(idx, bit)) }; match idx_opt { None => self.finish(), Some(idx) => unsafe { let slice = mem::take(&mut self.slice); let (out, rest) = slice.split_at_unchecked_mut_noalias(idx); self.slice = rest.get_unchecked_mut(1 ..); Some(out) }, } } fn next_back(&mut self) -> Option { if self.done { return None; } let idx_opt = { let pred = &mut self.pred; self.slice .iter() .by_refs() .enumerate() .rposition(|(idx, bit)| (pred)(idx, bit)) }; match idx_opt { None => self.finish(), Some(idx) => unsafe { let slice = mem::take(&mut self.slice); let (rest, out) = slice.split_at_unchecked_mut_noalias(idx); self.slice = rest; Some(out.get_unchecked_mut(1 ..)) }, } } }); #[derive(Clone)] #[doc = include_str!("../../doc/slice/iter/SplitInclusive.md")] pub struct SplitInclusive<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The source bit-slice. slice: &'a BitSlice, /// The function that tests each bit for whether it is a split point. pred: P, /// Marks whether iteration has concluded, without emptying the `slice`. done: bool, } split!(SplitInclusive => &'a BitSlice { fn next(&mut self) -> Option { if self.done { return None; } let len = self.slice.len(); let idx = self.slice.iter() .by_refs() .enumerate() .position(|(idx, bit)| (self.pred)(idx, bit)) .map(|idx| idx + 1) .unwrap_or(len); if idx == len { self.done = true; } let (out, rest) = unsafe { self.slice.split_at_unchecked(idx) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { if self.done { return None; } let idx = if self.slice.is_empty() { 0 } else { unsafe { self.slice.get_unchecked(.. self.slice.len() - 1) } .iter() .by_refs() .enumerate() .rposition(|(idx, bit)| (self.pred)(idx, bit)) .map(|idx| idx + 1) .unwrap_or(0) }; if idx == 0 { self.done = true; } let (rest, out) = unsafe { self.slice.split_at_unchecked(idx) }; self.slice = rest; Some(out) } }); #[doc = include_str!("../../doc/slice/iter/SplitInclusiveMut.md")] pub struct SplitInclusiveMut<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The source bit-slice, marked with the alias tainting. slice: &'a mut BitSlice, /// The function that tests each bit for whether it is a split point. pred: P, /// Marks whether iteration has concluded, without emptying the `slice`. done: bool, } split!(SplitInclusiveMut => &'a mut BitSlice { fn next(&mut self) -> Option { if self.done { return None; } let pred = &mut self.pred; let len = self.slice.len(); let idx = self.slice.iter() .by_refs() .enumerate() .position(|(idx, bit)| (pred)(idx, bit)) .map(|idx| idx + 1) .unwrap_or(len); if idx == len { self.done = true; } let (out, rest) = unsafe { mem::take(&mut self.slice) .split_at_unchecked_mut_noalias(idx) }; self.slice = rest; Some(out) } fn next_back(&mut self) -> Option { if self.done { return None; } let pred = &mut self.pred; let idx = if self.slice.is_empty() { 0 } else { unsafe { self.slice.get_unchecked(.. 
self.slice.len() - 1) } .iter() .by_refs() .enumerate() .rposition(|(idx, bit)| (pred)(idx, bit)) .map(|idx| idx + 1) .unwrap_or(0) }; if idx == 0 { self.done = true; } let (rest, out) = unsafe { mem::take(&mut self.slice) .split_at_unchecked_mut_noalias(idx) }; self.slice = rest; Some(out) } }); #[derive(Clone)] #[doc = include_str!("../../doc/slice/iter/RSplit.md")] pub struct RSplit<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The source bit-slice. slice: &'a BitSlice, /// The function that tests each bit for whether it is a split point. pred: P, /// Marks whether iteration has concluded, without emptying the `slice`. done: bool, } split!(RSplit => &'a BitSlice { fn next(&mut self) -> Option { let mut split = Split::<'a, T, O, &mut P> { slice: mem::take(&mut self.slice), pred: &mut self.pred, done: self.done, }; let out = split.next_back(); let Split { slice, done, .. } = split; self.slice = slice; self.done = done; out } fn next_back(&mut self) -> Option { let mut split = Split::<'a, T, O, &mut P> { slice: mem::take(&mut self.slice), pred: &mut self.pred, done: self.done, }; let out = split.next(); let Split { slice, done, .. } = split; self.slice = slice; self.done = done; out } }); #[doc = include_str!("../../doc/slice/iter/RSplitMut.md")] pub struct RSplitMut<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The source bit-slice, marked with the alias tainting. slice: &'a mut BitSlice, /// The function that tests each bit for whether it is a split point. pred: P, /// Marks whether iteration has concluded, without emptying the `slice`. done: bool, } split!(RSplitMut => &'a mut BitSlice { fn next(&mut self) -> Option { let mut split = SplitMut::<'a, T, O, &mut P> { slice: mem::take(&mut self.slice), pred: &mut self.pred, done: self.done, }; let out = split.next_back(); let SplitMut { slice, done, .. } = split; self.slice = slice; self.done = done; out } fn next_back(&mut self) -> Option { let mut split = SplitMut::<'a, T, O, &mut P> { slice: mem::take(&mut self.slice), pred: &mut self.pred, done: self.done, }; let out = split.next(); let SplitMut { slice, done, .. } = split; self.slice = slice; self.done = done; out } }); /// [Original](https://github.com/rust-lang/rust/blob/95750ae/library/core/src/slice/iter.rs#L318-L325) trait SplitIter: DoubleEndedIterator { /// Marks the underlying iterator as complete, and extracts the remaining /// portion of the bit-slice. fn finish(&mut self) -> Option; } #[derive(Clone)] #[doc = include_str!("../../doc/slice/iter/SplitN.md")] pub struct SplitN<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The interior splitter. inner: Split<'a, T, O, P>, /// The number of permissible splits remaining. count: usize, } #[doc = include_str!("../../doc/slice/iter/SplitNMut.md")] pub struct SplitNMut<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The interior splitter. inner: SplitMut<'a, T, O, P>, /// The number of permissible splits remaining. count: usize, } #[derive(Clone)] #[doc = include_str!("../../doc/slice/iter/RSplitN.md")] pub struct RSplitN<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The interior splitter. inner: RSplit<'a, T, O, P>, /// The number of permissible splits remaining. 
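// Editorial sketch of the `N`-bounded splitters, assuming
// `bitvec::prelude::*`: after `n - 1` splits, the rest is yielded unsplit.
//
//     let bits = bits![0, 1, 0, 1, 0];
//     let mut iter = bits.splitn(2, |_idx, bit| *bit);
//     assert_eq!(iter.next().unwrap(), bits![0]);
//     assert_eq!(iter.next().unwrap(), bits![0, 1, 0]);
//     assert!(iter.next().is_none());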
count: usize, } #[doc = include_str!("../../doc/slice/iter/RSplitNMut.md")] pub struct RSplitNMut<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { /// The interior splitter. inner: RSplitMut<'a, T, O, P>, /// The number of permissible splits remaining. count: usize, } /// Creates a splitting iterator with a maximum number of attempts. macro_rules! split_n { ($( $outer:ident => $inner:ident => $item:ty $(where $alias:ident)? );+ $(;)?) => { $( impl<'a, T, O, P> $outer<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, { #[inline] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new( slice: $item, pred: P, count: usize, ) -> Self { Self { inner: <$inner<'a, T, O, P>>::new(slice, pred), count, } } } impl Debug for $outer<'_, T, O, P> where T: BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_struct(stringify!($outer)) .field("slice", &self.inner.slice) .field("count", &self.count) .finish() } } impl<'a, T, O, P> Iterator for $outer<'a, T, O, P> where T: 'a + BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, $( T::$alias: radium::Radium<<::Alias as BitStore>::Mem>, )? { type Item = <$inner <'a, T, O, P> as Iterator>::Item; #[inline] fn next(&mut self) -> Option { match self.count { 0 => None, 1 => { self.count -= 1; self.inner.finish() }, _ => { self.count -= 1; self.inner.next() }, } } #[inline] fn size_hint(&self) -> (usize, Option) { let (low, hi) = self.inner.size_hint(); (low, hi.map(|h| cmp::min(h, self.count)).or(Some(self.count))) } } impl FusedIterator for $outer<'_, T, O, P> where T: BitStore, O: BitOrder, P: FnMut(usize, &bool) -> bool, $( T::$alias: radium::Radium<<::Alias as BitStore>::Mem>, )? { } )+ }; } split_n! { SplitN => Split => &'a BitSlice; SplitNMut => SplitMut => &'a mut BitSlice; RSplitN => RSplit => &'a BitSlice; RSplitNMut => RSplitMut => &'a mut BitSlice; } #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] #[doc = include_str!("../../doc/slice/iter/IterOnes.md")] pub struct IterOnes<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The remaining bit-slice whose `1` bits are to be found. inner: &'a BitSlice, /// The offset from the front of the original bit-slice to the current /// `.inner`. front: usize, } impl<'a, T, O> IterOnes<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[inline] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a BitSlice) -> Self { Self { inner: slice, front: 0, } } } impl Default for IterOnes<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { Self { inner: Default::default(), front: 0, } } } impl Iterator for IterOnes<'_, T, O> where T: BitStore, O: BitOrder, { type Item = usize; easy_iter!(); #[inline] fn next(&mut self) -> Option { let pos = if let Some(bits) = self.inner.coerce::() { bits.sp_first_one() } else if let Some(bits) = self.inner.coerce::() { bits.sp_first_one() } else { self.inner.iter().by_vals().position(|b| b) }; match pos { Some(n) => { // Split at the index *past* the discovered bit. This is always // safe, as `split_at(len)` produces `(self, [])`. let (_, rest) = unsafe { self.inner.split_at_unchecked(n + 1) }; self.inner = rest; let out = self.front + n; // Search resumes from the next index after the found position. 
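// Editorial sketch of the overall contract, assuming `bitvec::prelude::*`
// and an allocator for `Vec`:
//
//     let bits = bits![0, 1, 0, 0, 1];
//     let ones: Vec<usize> = bits.iter_ones().collect();
//     assert_eq!(ones, [1, 4]);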
self.front = out + 1; Some(out) }, None => { *self = Default::default(); None }, } } } impl DoubleEndedIterator for IterOnes<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { let pos = if let Some(bits) = self.inner.coerce::() { bits.sp_last_one() } else if let Some(bits) = self.inner.coerce::() { bits.sp_last_one() } else { self.inner.iter().by_vals().rposition(|b| b) }; match pos { Some(n) => { let (rest, _) = unsafe { self.inner.split_at_unchecked(n) }; self.inner = rest; Some(self.front + n) }, None => { *self = Default::default(); None }, } } } impl ExactSizeIterator for IterOnes<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { self.inner.count_ones() } } impl FusedIterator for IterOnes<'_, T, O> where T: BitStore, O: BitOrder, { } #[doc = include_str!("../../doc/slice/iter/IterZeros.md")] #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct IterZeros<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// The remaining bit-slice whose `0` bits are to be found. inner: &'a BitSlice, /// The offset from the front of the original bit-slice to the current /// `.inner`. front: usize, } impl<'a, T, O> IterZeros<'a, T, O> where T: 'a + BitStore, O: BitOrder, { #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub(super) fn new(slice: &'a BitSlice) -> Self { Self { inner: slice, front: 0, } } } impl Default for IterZeros<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { Self { inner: Default::default(), front: 0, } } } impl Iterator for IterZeros<'_, T, O> where T: BitStore, O: BitOrder, { type Item = usize; easy_iter!(); #[inline] fn next(&mut self) -> Option { let pos = if let Some(bits) = self.inner.coerce::() { bits.sp_first_zero() } else if let Some(bits) = self.inner.coerce::() { bits.sp_first_zero() } else { self.inner.iter().by_vals().position(|b| !b) }; match pos { Some(n) => { let (_, rest) = unsafe { self.inner.split_at_unchecked(n + 1) }; self.inner = rest; let out = self.front + n; self.front = out + 1; Some(out) }, None => { *self = Default::default(); None }, } } } impl DoubleEndedIterator for IterZeros<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { let pos = if let Some(bits) = self.inner.coerce::() { bits.sp_last_zero() } else if let Some(bits) = self.inner.coerce::() { bits.sp_last_zero() } else { self.inner.iter().by_vals().rposition(|b| !b) }; match pos { Some(n) => { let (rest, _) = unsafe { self.inner.split_at_unchecked(n) }; self.inner = rest; Some(self.front + n) }, None => { *self = Default::default(); None }, } } } impl ExactSizeIterator for IterZeros<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { self.inner.count_zeros() } } impl FusedIterator for IterZeros<'_, T, O> where T: BitStore, O: BitOrder, { } /* This macro has some very obnoxious call syntax that is necessary to handle the different iteration protocols used above. The `Split` iterators are not `DoubleEndedIterator` or `ExactSizeIterator`, and must be excluded from those implementations. However, bounding on `DEI` causes `.next_back()` and `.nth_back()` to return opaque associated types, rather than the return type from the directly-resolved signatures. As such, the item type of the source iterator must also be provided so that methods on it can be named. */ /// Creates wrappers that unsafely remove one layer of `::Alias` tainting. macro_rules! noalias { ($( $from:ident $(($p:ident))? 
=> $alias:ty => $to:ident => $item:ty => $map:path; )+) => { $( #[repr(transparent)] #[doc = include_str!("../../doc/slice/iter/NoAlias.md")] pub struct $to<'a, T, O$(, $p)?> where T: 'a + BitStore, O: BitOrder, $($p: FnMut(usize, &bool) -> bool,)? { /// The actual iterator that this wraps. inner: $from<'a, T, O$(, $p)?>, } impl<'a, T, O$(, $p)?> $from<'a, T, O$(, $p)?> where T: 'a + BitStore, O: BitOrder, $($p: FnMut(usize, &bool) -> bool,)? { /// Removes a layer of `::Alias` tainting from the yielded item. /// /// ## Safety /// /// You *must* consume the adapted iterator in a loop that does not /// allow multiple yielded items to exist in the same scope. Each /// yielded item must have a completely non-overlapping lifetime /// from all the others. /// /// The items yielded by this iterator will not have an additional /// alias marker applied to them, so their use in an iteration /// sequence will not be penalized when the surrounding code ensures /// that each item yielded by the iterator is destroyed before the /// next is produced. /// /// This adapter does **not** convert the iterator to use the /// [`T::Mem`] raw underlying type, as it can be applied to an /// iterator over an already-aliased bit-slice and must preserve the /// initial condition. Its *only* effect is to remove the additional /// [`T::Alias`] marker imposed by the mutable iterators. /// /// Violating this requirement causes memory-unsafety and breaks /// Rust’s data-race guarantees. /// /// [`T::Alias`]: crate::store::BitStore::Alias /// [`T::Mem`]: crate::store::BitStore::Mem #[inline] #[must_use = "You must consume this object, preferably immediately \ upon creation"] pub unsafe fn remove_alias(self) -> $to<'a, T, O$(, $p)?> { $to { inner: self } } } impl<'a, T, O$(, $p)?> Iterator for $to<'a, T, O$(, $p)?> where T: 'a + BitStore, O: BitOrder, $($p: FnMut(usize, &bool) -> bool,)? { type Item = $item; #[inline] fn next(&mut self) -> Option { self.inner.next().map(|item| unsafe { $map(item) }) } #[inline] fn nth(&mut self, n: usize) -> Option { self.inner.nth(n).map(|item| unsafe { $map(item) }) } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } #[inline] fn count(self) -> usize { self.inner.count() } #[inline] fn last(self) -> Option { self.inner.last().map(|item| unsafe { $map(item) }) } } impl<'a, T, O$(, $p)?> DoubleEndedIterator for $to<'a, T, O$(, $p)?> where T: 'a + BitStore, O: BitOrder, $($p: FnMut(usize, &bool) -> bool,)? $from<'a, T, O$(, $p)?>: DoubleEndedIterator, { #[inline] fn next_back(&mut self) -> Option { self.inner.next_back().map(|item| unsafe { $map(item) }) } #[inline] fn nth_back(&mut self, n: usize) -> Option { self.inner.nth_back(n).map(|item| unsafe { $map(item) }) } } impl<'a, T, O$(, $p)?> ExactSizeIterator for $to<'a, T, O$(, $p)?> where T: 'a + BitStore, O: BitOrder, $($p: FnMut(usize, &bool) -> bool,)? $from<'a, T, O$(, $p)?>: ExactSizeIterator, { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, T, O$(, $p)?> FusedIterator for $to<'a, T, O$(, $p)?> where T: 'a + BitStore, O: BitOrder, $($p: FnMut(usize, &bool) -> bool,)? $from<'a, T, O$(, $p)?>: FusedIterator, { } )+ }; } noalias! 
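// Editorial sketch of the intended `remove_alias` usage pattern, assuming
// `bitvec::prelude::*`: each yielded chunk is dropped before the next is
// produced, so no two unaliased references ever coexist.
//
//     let bits = bits![mut 0; 8];
//     for chunk in unsafe { bits.chunks_mut(2).remove_alias() } {
//         chunk.fill(true);
//     }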
{ IterMut => <usize as BitSliceIndex<'a, T::Alias, O>>::Mut => IterMutNoAlias => <usize as BitSliceIndex<'a, T, O>>::Mut => BitRef::remove_alias; ChunksMut => &'a mut BitSlice<T::Alias, O> => ChunksMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; ChunksExactMut => &'a mut BitSlice<T::Alias, O> => ChunksExactMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; RChunksMut => &'a mut BitSlice<T::Alias, O> => RChunksMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; RChunksExactMut => &'a mut BitSlice<T::Alias, O> => RChunksExactMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; SplitMut (P) => &'a mut BitSlice<T::Alias, O> => SplitMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; SplitInclusiveMut (P) => &'a mut BitSlice<T::Alias, O> => SplitInclusiveMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; RSplitMut (P) => &'a mut BitSlice<T::Alias, O> => RSplitMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; SplitNMut (P) => &'a mut BitSlice<T::Alias, O> => SplitNMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; RSplitNMut (P) => &'a mut BitSlice<T::Alias, O> => RSplitNMutNoAlias => &'a mut BitSlice<T, O> => BitSlice::unalias_mut; } impl<'a, T, O> ChunksExactMutNoAlias<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// See [`ChunksExactMut::into_remainder()`][0]. /// /// [0]: crate::slice::ChunksExactMut::into_remainder #[inline] pub fn into_remainder(self) -> &'a mut BitSlice<T, O> { unsafe { BitSlice::unalias_mut(self.inner.into_remainder()) } } /// See [`ChunksExactMut::take_remainder()`][0]. /// /// [0]: crate::slice::ChunksExactMut::take_remainder #[inline] pub fn take_remainder(&mut self) -> &'a mut BitSlice<T, O> { unsafe { BitSlice::unalias_mut(self.inner.take_remainder()) } } } impl<'a, T, O> RChunksExactMutNoAlias<'a, T, O> where T: 'a + BitStore, O: BitOrder, { /// See [`RChunksExactMut::into_remainder()`][0]. /// /// [0]: crate::slice::RChunksExactMut::into_remainder #[inline] pub fn into_remainder(self) -> &'a mut BitSlice<T, O> { unsafe { BitSlice::unalias_mut(self.inner.into_remainder()) } } /// See [`RChunksExactMut::take_remainder()`][0]. /// /// [0]: crate::slice::RChunksExactMut::take_remainder #[inline] pub fn take_remainder(&mut self) -> &'a mut BitSlice<T, O> { unsafe { BitSlice::unalias_mut(self.inner.take_remainder()) } } }
bitvec-1.0.1/src/slice/ops.rs
#![doc = include_str!("../../doc/slice/ops.md")] use core::ops::{ BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Index, IndexMut, Not, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }; use super::{ BitSlice, BitSliceIndex, }; use crate::{ domain::Domain, order::{ BitOrder, Lsb0, Msb0, }, store::BitStore, }; impl<T1, T2, O1, O2> BitAndAssign<&BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] #[doc = include_str!("../../doc/slice/bitop_assign.md")] fn bitand_assign(&mut self, rhs: &BitSlice<T2, O2>) { if let (Some(this), Some(that)) = (self.coerce_mut::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>()) { return this.sp_bitop_assign(that, BitAnd::bitand, BitAnd::bitand); } if let (Some(this), Some(that)) = (self.coerce_mut::<T1, Msb0>(), rhs.coerce::<T1, Msb0>()) { return this.sp_bitop_assign(that, BitAnd::bitand, BitAnd::bitand); } for (this, that) in self.as_mut_bitptr_range().zip(rhs.as_bitptr_range()) { unsafe { this.write(this.read() & that.read()); } } if let Some(rem) = self.get_mut(rhs.len() ..)
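// `&=` zero-extends a shorter `rhs`: every bit of `self` past `rhs.len()`
// is ANDed with `false`, which the bulk `fill` below performs. Editorial
// sketch, assuming `bitvec::prelude::*`:
//
//     let a = bits![mut 1, 1, 1, 1];
//     *a &= bits![1, 0];
//     assert_eq!(a, bits![1, 0, 0, 0]);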
{ rem.fill(false); } } } impl<T1, T2, O1, O2> BitOrAssign<&BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] #[doc = include_str!("../../doc/slice/bitop_assign.md")] fn bitor_assign(&mut self, rhs: &BitSlice<T2, O2>) { if let (Some(this), Some(that)) = (self.coerce_mut::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>()) { return this.sp_bitop_assign(that, BitOr::bitor, BitOr::bitor); } if let (Some(this), Some(that)) = (self.coerce_mut::<T1, Msb0>(), rhs.coerce::<T1, Msb0>()) { return this.sp_bitop_assign(that, BitOr::bitor, BitOr::bitor); } for (this, that) in self.as_mut_bitptr_range().zip(rhs.as_bitptr_range()) { unsafe { this.write(this.read() | that.read()); } } } } impl<T1, T2, O1, O2> BitXorAssign<&BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] #[doc = include_str!("../../doc/slice/bitop_assign.md")] fn bitxor_assign(&mut self, rhs: &BitSlice<T2, O2>) { if let (Some(this), Some(that)) = (self.coerce_mut::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>()) { return this.sp_bitop_assign(that, BitXor::bitxor, BitXor::bitxor); } if let (Some(this), Some(that)) = (self.coerce_mut::<T1, Msb0>(), rhs.coerce::<T1, Msb0>()) { return this.sp_bitop_assign(that, BitXor::bitxor, BitXor::bitxor); } for (this, that) in self.as_mut_bitptr_range().zip(rhs.as_bitptr_range()) { unsafe { this.write(this.read() ^ that.read()); } } } } impl<T, O> Index<usize> for BitSlice<T, O> where T: BitStore, O: BitOrder, { type Output = bool; /// Looks up a single bit by its semantic index. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![u8, Msb0; 0, 1, 0]; /// assert!(!bits[0]); // -----^ | | /// assert!( bits[1]); // --------^ | /// assert!(!bits[2]); // -----------^ /// ``` /// /// If the index is greater than or equal to the length, indexing will /// panic. /// /// The below test will panic when accessing index 1, as only index 0 is /// valid. /// /// ```rust,should_panic /// use bitvec::prelude::*; /// /// let bits = bits![0, ]; /// bits[1]; // --------^ /// ``` #[inline] fn index(&self, index: usize) -> &Self::Output { match *index.index(self) { true => &true, false => &false, } } } /// Implements `Index` and `IndexMut` with the given type. macro_rules! index { ($($t:ty),+ $(,)?) => { $( impl<T, O> Index<$t> for BitSlice<T, O> where O: BitOrder, T: BitStore, { type Output = Self; #[inline] #[track_caller] fn index(&self, index: $t) -> &Self::Output { index.index(self) } } impl<T, O> IndexMut<$t> for BitSlice<T, O> where O: BitOrder, T: BitStore, { #[inline] #[track_caller] fn index_mut(&mut self, index: $t) -> &mut Self::Output { index.index_mut(self) } } )+ }; } index! { Range<usize>, RangeFrom<usize>, RangeFull, RangeInclusive<usize>, RangeTo<usize>, RangeToInclusive<usize>, } /** Inverts each bit in the bit-slice. Unlike the `&`, `|`, and `^` operators, this implementation is guaranteed to update each memory element only once, and is not required to traverse every live bit in the underlying region. **/ impl<'a, T, O> Not for &'a mut BitSlice<T, O> where T: BitStore, O: BitOrder, { type Output = Self; #[inline] fn not(self) -> Self::Output { match self.domain_mut() { Domain::Enclave(mut elem) => { elem.invert(); }, Domain::Region { head, body, tail } => { if let Some(mut elem) = head { elem.invert(); } for elem in body { elem.store_value(!elem.load_value()); } if let Some(mut elem) = tail { elem.invert(); } }, } self } }
bitvec-1.0.1/src/slice/specialization/lsb0.rs
//! Specializations for `BitSlice<_, Lsb0>`.
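// The `sp_*` routines below rely on the `Lsb0` ordering so that `load_le`
// and `store_le` batch transfers preserve bit order, letting whole `usize`
// words be processed with single integer instructions. Editorial sketch of
// the user-visible effect, assuming `bitvec::prelude::*` (the fast path is
// transparent to callers):
//
//     let a = bits![u8, Lsb0; 0, 1, 0, 1];
//     let b = bits![u8, Lsb0; 0, 1, 0, 1];
//     assert!(a == b); // comparison can run word-at-a-time through `sp_eq`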
use core::iter; use funty::Integral; use wyz::{ bidi::BidiIterator, range::RangeExt, }; use super::{ has_one, has_zero, WORD_BITS, }; use crate::{ domain::Domain, field::BitField, mem::bits_of, order::Lsb0, slice::BitSlice, store::BitStore, }; impl<T> BitSlice<T, Lsb0> where T: BitStore { /// Accelerates Boolean arithmetic. /// /// This applies a Boolean-arithmetic function across all the bits in a /// pair. The secondary bit-slice is zero-extended if it expires before /// `self` does. /// /// Because the two bit-slices share the same types, this is able to /// batch-load `usize` chunks from each, apply the arithmetic to them, and /// write the result back into `self`. Any leftover bits are handled /// individually. pub(crate) fn sp_bitop_assign( &mut self, rhs: &Self, word_op: fn(usize, usize) -> usize, bool_op: fn(bool, bool) -> bool, ) { let (mut this, mut that) = (self, rhs); while this.len() >= WORD_BITS && that.len() >= WORD_BITS { unsafe { let (l, left) = this.split_at_unchecked_mut_noalias(WORD_BITS); let (r, right) = that.split_at_unchecked(WORD_BITS); this = left; that = right; let (a, b) = (l.load_le::<usize>(), r.load_le::<usize>()); l.store_le(word_op(a, b)); } } // Note: it might actually be possible to do a partial-word load/store // to exhaust the shorter bit-slice. Investigate further. for (l, r) in this .as_mut_bitptr_range() .zip(that.iter().by_vals().chain(iter::repeat(false))) { unsafe { l.write(bool_op(l.read(), r)); } } } /// Accelerates copies between disjoint bit-slices with batch loads. pub(crate) fn sp_copy_from_bitslice(&mut self, src: &Self) { assert_eq!( self.len(), src.len(), "copying between bit-slices requires equal lengths", ); for (to, from) in unsafe { self.chunks_mut(WORD_BITS).remove_alias() } .zip(src.chunks(WORD_BITS)) { to.store_le::<usize>(from.load_le::<usize>()); } } /// Accelerates possibly-overlapping copies within a single bit-slice with /// batch loads. pub(crate) unsafe fn sp_copy_within_unchecked( &mut self, src: impl RangeExt<usize>, dest: usize, ) { let source = src.normalize(None, self.len()); let rev = source.contains(&dest); let dest = dest .. dest + source.len(); let this = self.as_accessor(); let from = this .get_unchecked(source) .chunks(WORD_BITS) .map(|bits| bits as *const BitSlice<T::Access, Lsb0>); let to = this.get_unchecked(dest).chunks(WORD_BITS).map(|bits| { bits as *const BitSlice<T::Access, Lsb0> as *mut BitSlice<T::Access, Lsb0> }); for (from, to) in from.zip(to).bidi(rev) { let value = (*from).load_le::<usize>(); (*to).store_le::<usize>(value); } } /// Accelerates equality checking with batch loads. pub(crate) fn sp_eq(&self, other: &Self) -> bool { self.len() == other.len() && self .chunks(WORD_BITS) .zip(other.chunks(WORD_BITS)) .all(|(a, b)| a.load_le::<usize>() == b.load_le::<usize>()) } /// Seeks the index of the first `1` bit in the bit-slice.
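// In `Lsb0` ordering, bit index 0 is the least significant bit of an
// element, so `trailing_zeros` on a loaded value counts the positions
// before the first `1`. Hypothetical mini-example of the arithmetic:
//
//     let val: u8 = 0b0000_1000; // Lsb0 bit-slice [0, 0, 0, 1, ...]
//     assert_eq!(val.trailing_zeros(), 3); // first `1` at semantic index 3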
pub(crate) fn sp_first_one(&self) -> Option { let mut accum = 0; match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value(); if has_one(val, elem.mask().into_inner()) { accum += val.trailing_zeros() as usize - elem.head().into_inner() as usize; return Some(accum); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = head { let val = elem.load_value(); accum += val.trailing_zeros() as usize - elem.head().into_inner() as usize; if has_one(val, elem.mask().into_inner()) { return Some(accum); } } for val in body.iter().map(BitStore::load_value) { accum += val.trailing_zeros() as usize; if has_one(val, !::ZERO) { return Some(accum); } } if let Some(elem) = tail { let val = elem.load_value(); if has_one(val, elem.mask().into_inner()) { accum += val.trailing_zeros() as usize; return Some(accum); } } None }, } } /// Seeks the index of the last `1` bit in the bit-slice. pub(crate) fn sp_last_one(&self) -> Option { let mut out = self.len(); match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value(); let dead_bits = bits_of::() - elem.tail().into_inner() as usize; if has_one(val, elem.mask().into_inner()) { out -= val.leading_zeros() as usize - dead_bits as usize; return Some(out - 1); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = tail { let val = elem.load_value(); let dead_bits = bits_of::() - elem.tail().into_inner() as usize; out -= val.leading_zeros() as usize - dead_bits; if has_one(val, elem.mask().into_inner()) { return Some(out - 1); } } for val in body.iter().map(BitStore::load_value).rev() { out -= val.leading_zeros() as usize; if has_one(val, !::ZERO) { return Some(out - 1); } } if let Some(elem) = head { let val = elem.load_value(); if has_one(val, elem.mask().into_inner()) { out -= val.leading_zeros() as usize; return Some(out - 1); } } None }, } } /// Seeks the index of the first `0` bit in the bit-slice. pub(crate) fn sp_first_zero(&self) -> Option { let mut accum = 0; match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value() | !elem.mask().into_inner(); accum += val.trailing_ones() as usize - elem.head().into_inner() as usize; if has_zero(val, elem.mask().into_inner()) { return Some(accum); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = head { let val = elem.load_value() | !elem.mask().into_inner(); accum += val.trailing_ones() as usize - elem.head().into_inner() as usize; if has_zero(val, elem.mask().into_inner()) { return Some(accum); } } for val in body.iter().map(BitStore::load_value) { accum += val.trailing_ones() as usize; if has_zero(val, !::ZERO) { return Some(accum); } } if let Some(elem) = tail { let val = elem.load_value() | !elem.mask().into_inner(); accum += val.trailing_ones() as usize; if has_zero(val, elem.mask().into_inner()) { return Some(accum); } } None }, } } /// Seeks the index of the last `0` bit in the bit-slice. 
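// The zero-searches first OR the loaded value with the inverse of the
// live-bit mask, so dead bits read as `1` and cannot produce false
// positives; the forward and backward searches then count trailing or
// leading runs of `1`s respectively. Hypothetical mini-example:
//
//     let val: u8 = 0b1111_0111;
//     assert_eq!(val.trailing_ones(), 3); // first `0` at Lsb0 index 3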
pub(crate) fn sp_last_zero(&self) -> Option<usize> { let mut out = self.len(); match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value() | !elem.mask().into_inner(); let dead_bits = bits_of::<T::Mem>() - elem.tail().into_inner() as usize; if has_zero(val, elem.mask().into_inner()) { out -= val.leading_ones() as usize - dead_bits as usize; return Some(out - 1); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = tail { let val = elem.load_value() | !elem.mask().into_inner(); let dead_bits = bits_of::<T::Mem>() - elem.tail().into_inner() as usize; out -= val.leading_ones() as usize - dead_bits; if has_zero(val, elem.mask().into_inner()) { return Some(out - 1); } } for val in body.iter().map(BitStore::load_value).rev() { out -= val.leading_ones() as usize; if has_zero(val, !<T::Mem as Integral>::ZERO) { return Some(out - 1); } } if let Some(elem) = head { let val = elem.load_value() | !elem.mask().into_inner(); if has_zero(val, elem.mask().into_inner()) { out -= val.leading_ones() as usize; return Some(out - 1); } } None }, } } /// Accelerates swapping memory. pub(crate) fn sp_swap_with_bitslice(&mut self, other: &mut Self) { for (this, that) in unsafe { self.chunks_mut(WORD_BITS) .remove_alias() .zip(other.chunks_mut(WORD_BITS).remove_alias()) } { let (a, b) = (this.load_le::<usize>(), that.load_le::<usize>()); this.store_le(b); that.store_le(a); } } }
bitvec-1.0.1/src/slice/specialization/msb0.rs
//! Specializations for `BitSlice<_, Msb0>`. use core::iter; use funty::Integral; use wyz::{ bidi::BidiIterator, range::RangeExt, }; use super::{ has_one, has_zero, WORD_BITS, }; use crate::{ domain::Domain, field::BitField, mem::bits_of, order::Msb0, slice::BitSlice, store::BitStore, }; impl<T> BitSlice<T, Msb0> where T: BitStore { /// Accelerates Boolean arithmetic. /// /// This applies a Boolean-arithmetic function across all the bits in a /// pair. The secondary bit-slice is zero-extended if it expires before /// `self` does. /// /// Because the two bit-slices share the same types, this is able to /// batch-load `usize` chunks from each, apply the arithmetic to them, and /// write the result back into `self`. Any leftover bits are handled /// individually. pub(crate) fn sp_bitop_assign( &mut self, rhs: &Self, word_op: fn(usize, usize) -> usize, bool_op: fn(bool, bool) -> bool, ) { let (mut this, mut that) = (self, rhs); while this.len() >= WORD_BITS && that.len() >= WORD_BITS { unsafe { let (l, left) = this.split_at_unchecked_mut_noalias(WORD_BITS); let (r, right) = that.split_at_unchecked(WORD_BITS); this = left; that = right; let (a, b) = (l.load_be::<usize>(), r.load_be::<usize>()); l.store_be(word_op(a, b)); } } for (l, r) in this .as_mut_bitptr_range() .zip(that.iter().by_vals().chain(iter::repeat(false))) { unsafe { l.write(bool_op(l.read(), r)); } } } /// Accelerates copies between disjoint bit-slices with batch loads. pub(crate) fn sp_copy_from_bitslice(&mut self, src: &Self) { assert_eq!( self.len(), src.len(), "copying between bit-slices requires equal lengths", ); for (to, from) in unsafe { self.chunks_mut(WORD_BITS).remove_alias() } .zip(src.chunks(WORD_BITS)) { to.store_be::<usize>(from.load_be::<usize>()); } } /// Accelerates possibly-overlapping copies within a single bit-slice with /// batch loads. pub(crate) unsafe fn sp_copy_within_unchecked( &mut self, src: impl RangeExt<usize>, dest: usize, ) { let source = src.normalize(None, self.len()); let rev = source.contains(&dest); let dest = dest ..
dest + source.len(); let this = self.as_accessor(); let from = this .get_unchecked(source) .chunks(WORD_BITS) .map(|bits| bits as *const BitSlice); let to = this.get_unchecked(dest).chunks(WORD_BITS).map(|bits| { bits as *const BitSlice as *mut BitSlice }); for (from, to) in from.zip(to).bidi(rev) { let value = (*from).load_be::(); (*to).store_be::(value); } } /// Accelerates equality checking with batch loads. pub(crate) fn sp_eq(&self, other: &Self) -> bool { self.len() == other.len() && self .chunks(WORD_BITS) .zip(other.chunks(WORD_BITS)) .all(|(a, b)| a.load_be::() == b.load_be::()) } /// Seeks the index of the first `1` bit in the bit-slice. pub(crate) fn sp_first_one(&self) -> Option { let mut accum = 0; match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value(); accum += val.leading_zeros() as usize - elem.head().into_inner() as usize; if has_one(val, elem.mask().into_inner()) { return Some(accum); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = head { let val = elem.load_value(); accum += val.leading_zeros() as usize - elem.head().into_inner() as usize; if has_one(val, elem.mask().into_inner()) { return Some(accum); } } for val in body.iter().map(BitStore::load_value) { accum += val.leading_zeros() as usize; if has_one(val, !::ZERO) { return Some(accum); } } if let Some(elem) = tail { let val = elem.load_value(); accum += val.leading_zeros() as usize; if has_one(val, elem.mask().into_inner()) { return Some(accum); } } None }, } } /// Seeks the index of the last `1` bit in the bit-slice. pub(crate) fn sp_last_one(&self) -> Option { let mut out = self.len().checked_sub(1)?; match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value(); let dead_bits = bits_of::() - elem.tail().into_inner() as usize; if has_one(val, elem.mask().into_inner()) { out -= val.trailing_zeros() as usize - dead_bits as usize; return Some(out); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = tail { let val = elem.load_value(); let dead_bits = bits_of::() - elem.tail().into_inner() as usize; out -= val.trailing_zeros() as usize - dead_bits; if has_one(val, elem.mask().into_inner()) { return Some(out); } } for val in body.iter().map(BitStore::load_value).rev() { out -= val.trailing_zeros() as usize; if has_one(val, !::ZERO) { return Some(out); } } if let Some(elem) = head { let val = elem.load_value(); if has_one(val, elem.mask().into_inner()) { out -= val.trailing_zeros() as usize; return Some(out); } } None }, } } /// Seeks the index of the first `0` bit in the bit-slice. 
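// `Msb0` mirrors the `Lsb0` search logic: bit index 0 is the *most*
// significant bit of an element, so these routines use the leading-bit
// counts instead of the trailing-bit counts. Hypothetical mini-example:
//
//     let val: u8 = 0b0001_0000; // Msb0 bit-slice [0, 0, 0, 1, ...]
//     assert_eq!(val.leading_zeros(), 3); // first `1` at semantic index 3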
pub(crate) fn sp_first_zero(&self) -> Option<usize> { let mut accum = 0; match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value() | !elem.mask().into_inner(); accum += val.leading_ones() as usize - elem.head().into_inner() as usize; if has_zero(val, elem.mask().into_inner()) { return Some(accum); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = head { let val = elem.load_value() | !elem.mask().into_inner(); accum += val.leading_ones() as usize - elem.head().into_inner() as usize; if has_zero(val, elem.mask().into_inner()) { return Some(accum); } } for val in body.iter().map(BitStore::load_value) { accum += val.leading_ones() as usize; if has_zero(val, !<T::Mem as Integral>::ZERO) { return Some(accum); } } if let Some(elem) = tail { let val = elem.load_value() | !elem.mask().into_inner(); accum += val.leading_ones() as usize; if has_zero(val, elem.mask().into_inner()) { return Some(accum); } } None }, } } /// Seeks the index of the last `0` bit in the bit-slice. pub(crate) fn sp_last_zero(&self) -> Option<usize> { let mut out = self.len().checked_sub(1)?; match self.domain() { Domain::Enclave(elem) => { let val = elem.load_value() | !elem.mask().into_inner(); let dead_bits = bits_of::<T::Mem>() - elem.tail().into_inner() as usize; if has_zero(val, elem.mask().into_inner()) { out -= val.trailing_ones() as usize - dead_bits; return Some(out); } None }, Domain::Region { head, body, tail } => { if let Some(elem) = tail { let val = elem.load_value() | !elem.mask().into_inner(); let dead_bits = bits_of::<T::Mem>() - elem.tail().into_inner() as usize; out -= val.trailing_ones() as usize - dead_bits; if has_zero(val, elem.mask().into_inner()) { return Some(out); } } for val in body.iter().map(BitStore::load_value).rev() { out -= val.trailing_ones() as usize; if has_zero(val, !<T::Mem as Integral>::ZERO) { return Some(out); } } if let Some(elem) = head { let val = elem.load_value() | !elem.mask().into_inner(); if has_zero(val, elem.mask().into_inner()) { out -= val.trailing_ones() as usize; return Some(out); } } None }, } } /// Accelerates swapping memory. pub(crate) fn sp_swap_with_bitslice(&mut self, other: &mut Self) { for (this, that) in unsafe { self.chunks_mut(WORD_BITS) .remove_alias() .zip(other.chunks_mut(WORD_BITS).remove_alias()) } { let (a, b) = (this.load_be::<usize>(), that.load_be::<usize>()); this.store_be(b); that.store_be(a); } } }
bitvec-1.0.1/src/slice/specialization.rs000064400000000000000000000036731046102023000163600ustar 00000000000000#![doc = include_str!("../../doc/slice/specialization.md")] use funty::Integral; use super::BitSlice; use crate::{ devel as dvl, mem, order::BitOrder, store::BitStore, }; mod lsb0; mod msb0; /// Processor width, used for chunking. const WORD_BITS: usize = mem::bits_of::<usize>(); /// Tests whether the masked portion of an integer has a `0` bit in it.
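///
/// Worked example (added commentary, not in the original source), with the
/// same formula inlined for `u8`:
///
/// ```rust
/// fn has_zero(val: u8, mask: u8) -> bool {
/// 	val | !mask != !0u8
/// }
/// assert!(has_zero(0b1101_1111, 0b0011_1100)); // bit 5, inside the mask, is 0
/// assert!(!has_zero(0b1111_1111, 0b0011_1100)); // every masked bit is 1
/// ```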
fn has_zero<T>(val: T, mask: T) -> bool where T: Integral { val | !mask != !T::ZERO } /// Tests whether the masked portion of an integer has a `1` bit in it. fn has_one<T>(val: T, mask: T) -> bool where T: Integral { val & mask != T::ZERO } impl<T, O> BitSlice<T, O> where T: BitStore, O: BitOrder, { /// Forces the storage type parameter to be its accessor type. /// /// Functions must use this when working with maybe-overlapping regions /// within a single bit-slice, as the accessor is always tolerant of /// aliasing. #[inline] fn as_accessor(&mut self) -> &BitSlice<T::Access, O> { unsafe { &*(self as *const Self as *const BitSlice<T::Access, O>) } } /// Attempts to change a bit-slice reference to caller-supplied type /// parameters. /// /// If `<T2, O2>` is identical to `<T, O>`, this returns `Some` with the /// bit-slice reference unchanged in value but changed in type. If the types /// differ, it returns `None`. This is useful for creating statically-known /// bit-slice types within generic contexts. pub(crate) fn coerce<T2, O2>(&self) -> Option<&BitSlice<T2, O2>> where T2: BitStore, O2: BitOrder, { if dvl::match_types::<T, O, T2, O2>() { Some(unsafe { &*(self as *const Self as *const BitSlice<T2, O2>) }) } else { None } } /// See [`.coerce()`]. /// /// [`.coerce()`]: Self::coerce pub(crate) fn coerce_mut<T2, O2>(&mut self) -> Option<&mut BitSlice<T2, O2>> where T2: BitStore, O2: BitOrder, { if dvl::match_types::<T, O, T2, O2>() { Some(unsafe { &mut *(self as *mut Self as *mut BitSlice<T2, O2>) }) } else { None } } }
bitvec-1.0.1/src/slice/tests/api.rs000064400000000000000000000060611046102023000152510ustar 00000000000000#![cfg(test)] use crate::prelude::*; #[test] fn properties() { let empty = bits![]; assert_eq!(empty.len(), 0); assert!(empty.is_empty()); let bits = bits![0, 1, 0, 0, 1]; assert_eq!(bits.len(), 5); assert!(!bits.is_empty()); } #[test] fn getters() { let empty = bits![mut]; let bits = bits![mut 0, 1, 0, 0, 1]; assert!(empty.first().is_none()); assert!(empty.first_mut().is_none()); assert!(empty.last().is_none()); assert!(empty.last_mut().is_none()); assert!(empty.split_first().is_none()); assert!(empty.split_first_mut().is_none()); assert!(empty.split_last().is_none()); assert!(empty.split_last_mut().is_none()); assert!(!bits.first().unwrap()); assert!(bits.last().unwrap()); *bits.first_mut().unwrap() = true; *bits.last_mut().unwrap() = false; let (first, rest) = bits.split_first().unwrap(); assert!(*first); assert_eq!(rest, bits![1, 0, 0, 0]); let (last, rest) = bits.split_last().unwrap(); assert!(!*last); assert_eq!(rest, bits![1, 1, 0, 0]); drop(first); drop(last); let (first, _) = bits.split_first_mut().unwrap(); first.commit(false); let (last, _) = bits.split_last_mut().unwrap(); last.commit(true); *bits.get_mut(2).unwrap() = true; unsafe { assert!(*bits.get_unchecked(2)); bits.get_unchecked_mut(2).commit(false); } bits.swap(0, 4); bits[1 .. 4].reverse(); assert_eq!(bits, bits![1, 0, 0, 1, 0]); }
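// Illustrative addition, not part of the upstream suite: `coerce` (from the
// specialization module) only succeeds when the requested type parameters
// exactly match the originals.
#[test]
fn coercion() {
	let bits = bits![u8, Msb0; 0, 1];
	assert!(bits.coerce::<u8, Msb0>().is_some());
	assert!(bits.coerce::<u8, Lsb0>().is_none());
	assert!(bits.coerce::<u16, Msb0>().is_none());
}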
#[test] fn splitters() { type Bsl<T> = BitSlice<T, Lsb0>; let mut data = 0xF0u8; let bits = data.view_bits_mut::<Lsb0>(); let (l, r): (&Bsl<u8>, &Bsl<u8>) = bits.split_at(4); assert_eq!(l, bits![0; 4]); assert_eq!(r, bits![1; 4]); let (l, r): ( &mut Bsl<<u8 as BitStore>::Alias>, &mut Bsl<<u8 as BitStore>::Alias>, ) = bits.split_at_mut(4); l.fill(true); r.fill(false); assert_eq!(data, 0x0Fu8); let bits = bits![0, 1, 0, 0, 1]; assert!(bits.strip_prefix(bits![1, 0]).is_none()); assert_eq!(bits.strip_prefix(bits![0, 1]), Some(bits![0, 0, 1])); assert!(bits.strip_suffix(bits![1, 0]).is_none()); assert_eq!(bits.strip_suffix(bits![0, 1]), Some(bits![0, 1, 0])); } #[test] fn rotators() { let bits = bits![mut 0, 1, 0, 0, 1]; bits.rotate_left(2); assert_eq!(bits, bits![0, 0, 1, 0, 1]); bits.rotate_right(2); assert_eq!(bits, bits![0, 1, 0, 0, 1]); bits.rotate_left(0); bits.rotate_right(0); bits.rotate_left(5); bits.rotate_right(5); } #[test] #[should_panic] fn rotate_too_far_left() { bits![mut 0, 1].rotate_left(3); } #[test] #[should_panic] fn rotate_too_far_right() { bits![mut 0, 1].rotate_right(3); } #[test] fn fillers() { let bits = bits![mut 0; 5]; bits.fill(true); assert_eq!(bits, bits![1; 5]); bits.fill_with(|idx| idx % 2 == 0); assert_eq!(bits, bits![1, 0, 1, 0, 1]); bits.copy_within(1 .., 0); assert_eq!(bits, bits![0, 1, 0, 1, 1]); } #[test] fn inspectors() { let bits = bits![0, 1, 0, 0, 1, 0, 1, 1, 0, 1]; assert!(bits.contains(bits![0, 1, 0, 1])); assert!(!bits.contains(bits![0; 4])); assert!(bits.starts_with(bits![0, 1, 0, 0])); assert!(!bits.starts_with(bits![0, 1, 1])); assert!(bits.ends_with(bits![1, 0, 1])); assert!(!bits.ends_with(bits![0, 0, 1])); }
bitvec-1.0.1/src/slice/tests/iter.rs000064400000000000000000000511201046102023000154370ustar 00000000000000#![cfg(test)] use crate::prelude::*; #[test] fn iter() { let bits = bits![0, 1, 0, 1, 0, 1]; let mut iter = bits.iter().by_refs(); assert!(!*iter.next().unwrap()); assert!(!*iter.nth(1).unwrap()); assert!(*iter.next_back().unwrap()); assert!(*iter.nth_back(1).unwrap()); assert_eq!(iter.len(), 0); assert!(iter.next().is_none()); assert!(iter.next_back().is_none()); assert!(iter.nth(1).is_none()); assert!(iter.nth_back(1).is_none()); } #[test] fn iter_mut() { let bits = bits![mut 0, 1, 0, 0, 1]; let mut iter = bits.iter_mut(); while let Some(mut bit) = iter.nth(1) { *bit = !*bit; } assert_eq!(bits, bits![0, 0, 0, 1, 1]); } #[test] fn windows() { let bits = bits![0, 1, 0, 1, 1, 0, 0, 1, 1, 1]; let base = bits.as_bitptr(); let mut windows = bits.windows(4); assert_eq!(windows.len(), 7); let next = windows.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 1, 0, 1]); let next_back = windows.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 6); assert_eq!(next_back, bits![0, 1, 1, 1]); let nth = windows.nth(2).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 3); assert_eq!(nth, bits![1, 1, 0, 0]); let nth_back = windows.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 4); assert_eq!(nth_back, bits![1, 0, 0, 1]); assert_eq!(windows.len(), 0); assert!(windows.next().is_none()); assert!(windows.next_back().is_none()); assert!(windows.nth(1).is_none()); assert!(windows.nth_back(1).is_none()); } #[test] fn chunks() { let bits = bits![0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0]; // ^^^^^ ^^^^^ ^^^^^ ^ let base = bits.as_bitptr(); let mut chunks = bits.chunks(2); assert_eq!(chunks.len(), 6); let next =
chunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0]); let next_back = chunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 10); assert_eq!(next_back, bits![0]); let nth = chunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 4); assert_eq!(nth, bits![0, 1]); let nth_back = chunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 6); assert_eq!(nth_back, bits![1, 0]); assert_eq!(chunks.len(), 0); assert!(chunks.next().is_none()); assert!(chunks.next_back().is_none()); assert!(chunks.nth(1).is_none()); assert!(chunks.nth_back(1).is_none()); assert_eq!(bits![0; 2].chunks(3).next().unwrap().len(), 2); assert_eq!(bits![0; 5].chunks(3).next().unwrap().len(), 3); assert_eq!(bits![0; 5].chunks(3).nth(1).unwrap().len(), 2); assert_eq!(bits![0; 8].chunks(3).nth(1).unwrap().len(), 3); assert_eq!(bits![0; 5].chunks(3).next_back().unwrap().len(), 2); assert_eq!(bits![0; 6].chunks(3).next_back().unwrap().len(), 3); assert_eq!(bits![0; 5].chunks(3).nth_back(1).unwrap().len(), 3); } #[test] fn chunks_mut() { let bits = bits![mut 1; 11]; let base = bits.as_bitptr(); let mut chunks = bits.chunks_mut(2); assert_eq!(chunks.len(), 6); let next = chunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); next.fill(false); let next_back = chunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 10); next_back.fill(false); let nth = chunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 4); nth.set(0, false); let nth_back = chunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 6); nth_back.set(1, false); assert_eq!(chunks.len(), 0); assert!(chunks.next().is_none()); assert!(chunks.next_back().is_none()); assert!(chunks.nth(1).is_none()); assert!(chunks.nth_back(1).is_none()); assert_eq!(bits, bits![0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0]); assert_eq!(bits![mut 0; 2].chunks_mut(3).next().unwrap().len(), 2); assert_eq!(bits![mut 0; 5].chunks_mut(3).next().unwrap().len(), 3); assert_eq!(bits![mut 0; 5].chunks_mut(3).nth(1).unwrap().len(), 2); assert_eq!(bits![mut 0; 8].chunks_mut(3).nth(1).unwrap().len(), 3); assert_eq!(bits![mut 0; 5].chunks_mut(3).next_back().unwrap().len(), 2); assert_eq!(bits![mut 0; 6].chunks_mut(3).next_back().unwrap().len(), 3); assert_eq!(bits![mut 0; 5].chunks_mut(3).nth_back(1).unwrap().len(), 3); } #[test] fn chunks_exact() { let bits = bits![ 0, 0, 0, 1, 1, 1, 0, 0, 1, // next and nth(1) 1, 0, 0, 1, 1, 1, 0, 1, 0, // next_back and nth_back(1) 1, 1, // remainder ]; let base = bits.as_bitptr(); let mut chunks = bits.chunks_exact(3); assert_eq!(chunks.len(), 6); let next = chunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0, 0]); let nth = chunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 6); assert_eq!(nth, bits![0, 0, 1]); let next_back = chunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 15); assert_eq!(next_back, bits![0, 1, 0]); let nth_back = chunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 9); assert_eq!(nth_back, bits![1, 0, 0]); let remainder = chunks.remainder(); assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 18); assert_eq!(remainder, bits![1, 1]); assert_eq!(chunks.len(), 0); assert!(chunks.next().is_none()); 
assert!(chunks.next_back().is_none()); assert!(chunks.nth(1).is_none()); assert!(chunks.nth_back(1).is_none()); } #[test] fn chunks_exact_mut() { let bits = bits![mut 0; 20]; let base = bits.as_bitptr(); let mut chunks = bits.chunks_exact_mut(3); let next = chunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); next.fill(true); let next_back = chunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 15); next_back.fill(true); let nth = chunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 6); nth.set(2, true); let nth_back = chunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 9); nth_back.set(0, true); assert_eq!(chunks.len(), 0); assert!(chunks.next().is_none()); assert!(chunks.next_back().is_none()); assert!(chunks.nth(1).is_none()); assert!(chunks.nth_back(1).is_none()); assert_eq!(bits, bits![ 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, ]); bits.fill(false); let mut chunks = bits.chunks_exact_mut(3); let remainder = chunks.take_remainder(); assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 18); remainder.fill(true); assert!(chunks.take_remainder().is_empty()); assert!(chunks.into_remainder().is_empty()); assert!(bits.ends_with(bits![0, 0, 1, 1])); } #[test] fn rchunks() { let bits = bits![1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0]; // ^^ ^^^^^ ^^^^^ ^^^^ let base = bits.as_bitptr(); let mut rchunks = bits.rchunks(2); let next = rchunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 9); assert_eq!(next, bits![0, 0]); let next_back = rchunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0); assert_eq!(next_back, bits![1]); let nth = rchunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 5); assert_eq!(nth, bits![0, 1]); let nth_back = rchunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 3); assert_eq!(nth_back, bits![1, 0]); assert_eq!(rchunks.len(), 0); assert!(rchunks.next().is_none()); assert!(rchunks.next_back().is_none()); assert!(rchunks.nth(1).is_none()); assert!(rchunks.nth_back(1).is_none()); assert_eq!(bits![0; 5].rchunks(3).next().unwrap().len(), 3); assert_eq!(bits![0; 5].rchunks(3).nth(1).unwrap().len(), 2); assert_eq!(bits![0; 5].rchunks(3).next_back().unwrap().len(), 2); assert_eq!(bits![0; 5].rchunks(3).nth_back(1).unwrap().len(), 3); } #[test] fn rchunks_mut() { let bits = bits![mut 0; 11]; let base = bits.as_bitptr(); let mut rchunks = bits.rchunks_mut(2); assert_eq!(rchunks.len(), 6); let next = rchunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 9); next.fill(true); let next_back = rchunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0); next_back.fill(true); let nth = rchunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 5); nth.set(0, true); let nth_back = rchunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 3); nth_back.set(1, true); assert_eq!(rchunks.len(), 0); assert!(rchunks.next().is_none()); assert!(rchunks.next_back().is_none()); assert!(rchunks.nth(1).is_none()); assert!(rchunks.nth_back(1).is_none()); assert_eq!(bits, bits![1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1]); assert_eq!(bits![mut 0; 5].rchunks_mut(3).next().unwrap().len(), 3); assert_eq!(bits![mut 0; 5].rchunks_mut(3).nth(1).unwrap().len(), 2); assert_eq!(bits![mut 0; 
5].rchunks_mut(3).next_back().unwrap().len(), 2); assert_eq!(bits![mut 0; 5].rchunks_mut(3).nth_back(1).unwrap().len(), 3); } #[test] fn rchunks_exact() { let bits = bits![ 1, 1, // remainder 0, 1, 0, 1, 1, 1, 0, 0, 1, // nth_back(1) and next 1, 0, 0, 1, 1, 1, 0, 0, 0, // nth(1) and next ]; let base = bits.as_bitptr(); let mut rchunks = bits.rchunks_exact(3); assert_eq!(rchunks.len(), 6); let next = rchunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 17); assert_eq!(next, bits![0, 0, 0]); let nth = rchunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 11); assert_eq!(nth, bits![1, 0, 0]); let next_back = rchunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 2); assert_eq!(next_back, bits![0, 1, 0]); let nth_back = rchunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 8); assert_eq!(nth_back, bits![0, 0, 1]); let remainder = rchunks.remainder(); assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 0); assert_eq!(remainder, bits![1, 1]); assert_eq!(rchunks.len(), 0); assert!(rchunks.next().is_none()); assert!(rchunks.next_back().is_none()); assert!(rchunks.nth(1).is_none()); assert!(rchunks.nth_back(1).is_none()); } #[test] fn rchunks_exact_mut() { let bits = bits![mut 0; 20]; let base = bits.as_bitptr(); let mut rchunks = bits.rchunks_exact_mut(3); let next = rchunks.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 17); next.fill(true); let next_back = rchunks.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 2); next_back.fill(true); let nth = rchunks.nth(1).unwrap(); assert_eq!(unsafe { nth.as_bitptr().offset_from(base) }, 11); nth.set(2, true); let nth_back = rchunks.nth_back(1).unwrap(); assert_eq!(unsafe { nth_back.as_bitptr().offset_from(base) }, 8); nth_back.set(0, true); assert_eq!(rchunks.len(), 0); assert!(rchunks.next().is_none()); assert!(rchunks.next_back().is_none()); assert!(rchunks.nth(1).is_none()); assert!(rchunks.nth_back(1).is_none()); assert_eq!(bits, bits![ 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1 ]); bits.fill(false); let mut chunks = bits.rchunks_exact_mut(3); let remainder = chunks.take_remainder(); assert_eq!(unsafe { remainder.as_bitptr().offset_from(base) }, 0); remainder.fill(true); assert!(chunks.take_remainder().is_empty()); assert!(chunks.into_remainder().is_empty()); assert!(bits.starts_with(bits![1, 1, 0, 0])); } #[test] fn split() { let bits = bits![0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut split = bits.split(|_, &bit| bit); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0]); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3); assert!(next.is_empty()); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 8); assert!(next_back.is_empty()); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7); assert!(next_back.is_empty()); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4); assert_eq!(next_back, bits![0, 0]); assert!(split.next().is_none()); assert!(split.next_back().is_none()); } #[test] fn split_mut() { let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut split = bits.split_mut(|_, &bit| bit); let next 
= split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0]); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3); assert!(next.is_empty()); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 8); assert!(next_back.is_empty()); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7); assert!(next_back.is_empty()); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4); assert_eq!(next_back, bits![0, 0]); assert!(split.next().is_none()); assert!(split.next_back().is_none()); let bits = bits![mut 0]; let mut split = bits.split_mut(|_, &bit| bit); assert_eq!(split.next().unwrap(), bits![0]); assert!(split.next().is_none()); } #[test] fn split_inclusive() { let bits = bits![0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut split = bits.split_inclusive(|_, &bit| bit); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0, 1]); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3); assert_eq!(next, bits![1]); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7); assert_eq!(next_back, bits![1]); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4); assert_eq!(next_back, bits![0, 0, 1]); assert!(split.next().is_none()); assert!(split.next_back().is_none()); let bits = bits![0, 1]; let mut split = bits.split_inclusive(|_, &bit| bit); assert_eq!(split.next(), Some(bits![0, 1])); assert!(split.next().is_none()); let mut split = bits.split_inclusive(|_, &bit| bit); assert_eq!(split.next_back(), Some(bits![0, 1])); assert!(split.next_back().is_none()); assert_eq!( bits![].split_inclusive(|_, &bit| bit).next_back(), Some(bits![]), ); } #[test] fn split_inclusive_mut() { let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut split = bits.split_inclusive_mut(|_, &bit| bit); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0, 1]); let next = split.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3); assert_eq!(next, bits![1]); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 7); assert_eq!(next_back, bits![1]); let next_back = split.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4); assert_eq!(next_back, bits![0, 0, 1]); assert!(split.next().is_none()); assert!(split.next_back().is_none()); let bits = bits![mut 0, 1]; let mut split = bits.split_inclusive_mut(|_, &bit| bit); assert_eq!(split.next().unwrap(), bits![0, 1]); assert!(split.next().is_none()); let mut split = bits.split_inclusive_mut(|_, &bit| bit); assert_eq!(split.next_back().unwrap(), bits![0, 1]); assert!(split.next_back().is_none()); assert_eq!( bits![mut] .split_inclusive_mut(|_, &bit| bit) .next_back() .unwrap(), bits![], ); } #[test] fn rsplit() { let bits = bits![0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut rsplit = bits.rsplit(|_, &bit| bit); let next = rsplit.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8); assert!(next.is_empty()); let next = rsplit.next().unwrap(); 
assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 7); assert!(next.is_empty()); let next_back = rsplit.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0); assert_eq!(next_back, bits![0, 0]); let next_back = rsplit.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 3); assert!(next_back.is_empty()); let next_back = rsplit.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4); assert_eq!(next_back, bits![0, 0]); assert!(rsplit.next().is_none()); assert!(rsplit.next_back().is_none()); } #[test] fn rsplit_mut() { let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut rsplit = bits.rsplit_mut(|_, &bit| bit); let next = rsplit.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8); assert!(next.is_empty()); let next = rsplit.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 7); assert!(next.is_empty()); let next_back = rsplit.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 0); assert_eq!(next_back, bits![0, 0]); let next_back = rsplit.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 3); assert!(next_back.is_empty()); let next_back = rsplit.next_back().unwrap(); assert_eq!(unsafe { next_back.as_bitptr().offset_from(base) }, 4); assert_eq!(next_back, bits![0, 0]); assert!(rsplit.next().is_none()); assert!(rsplit.next_back().is_none()); let bits = bits![mut 0]; let mut rsplit = bits.rsplit_mut(|_, &bit| bit); assert_eq!(rsplit.next().unwrap(), bits![0]); assert!(rsplit.next().is_none()); } #[test] fn splitn() { let bits = bits![0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut splitn = bits.splitn(2, |_, &bit| bit); let next = splitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0]); let next = splitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3); assert_eq!(next, bits[3 ..]); assert!(splitn.next().is_none()); } #[test] fn splitn_mut() { let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut splitn = bits.splitn_mut(2, |_, &bit| bit); let next = splitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0]); let next = splitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 3); assert_eq!(next, bits![1, 0, 0, 1, 1]); assert!(splitn.next().is_none()); } #[test] fn rsplitn() { let bits = bits![0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut rsplitn = bits.rsplitn(2, |_, &bit| bit); let next = rsplitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8); assert!(next.is_empty()); let next = rsplitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0, 1, 1, 0, 0, 1]); } #[test] fn rsplitn_mut() { let bits = bits![mut 0, 0, 1, 1, 0, 0, 1, 1]; let base = bits.as_bitptr(); let mut rsplitn = bits.rsplitn_mut(2, |_, &bit| bit); let next = rsplitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 8); assert!(next.is_empty()); let next = rsplitn.next().unwrap(); assert_eq!(unsafe { next.as_bitptr().offset_from(base) }, 0); assert_eq!(next, bits![0, 0, 1, 1, 0, 0, 1]); assert!(rsplitn.next().is_none()); } #[test] fn iter_ones() { use crate::order::HiLo; let bits = 0b0100_1001u8.view_bits::<HiLo>(); // ordering: 3210 7654 let mut ones = bits.iter_ones(); assert_eq!(ones.len(), 3); assert_eq!(ones.next(), Some(2)); assert_eq!(ones.next_back(), Some(7)); assert_eq!(ones.next(), Some(4)); assert!(ones.next().is_none()); } #[test] fn iter_zeros() { use crate::order::HiLo; let bits = 0b1011_0110u8.view_bits::<HiLo>(); // ordering: 3210 7654 let mut zeros = bits.iter_zeros(); assert_eq!(zeros.len(), 3); assert_eq!(zeros.next(), Some(2)); assert_eq!(zeros.next_back(), Some(7)); assert_eq!(zeros.next(), Some(4)); assert!(zeros.next().is_none()); } #[test] fn trait_impls() { use core::iter::FusedIterator; use static_assertions::*; use crate::slice::iter::{ BitRefIter, BitValIter, }; assert_impl_all!( BitRefIter<'static, usize, Lsb0>: Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator ); assert_impl_all!( BitValIter<'static, usize, Lsb0>: Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator ); }
bitvec-1.0.1/src/slice/tests/ops.rs000064400000000000000000000050671046102023000153020ustar 00000000000000use rand::random; use crate::{ prelude::*, slice::BitSliceIndex, }; #[test] fn bitand() { let a = random::<[u32; 3]>(); let b = random::<[u32; 3]>(); let c = [a[0] & b[0], a[1] & b[1], a[2] & b[2]]; let mut d = a; *d.view_bits_mut::<Lsb0>() &= b.view_bits::<Lsb0>(); assert_eq!(c, d); d = a; *d.view_bits_mut::<Msb0>() &= b.view_bits::<Msb0>(); assert_eq!(c, d); let d = random::<[u8; 6]>(); let e = random::<[u16; 3]>(); let mut f = d; *f.view_bits_mut::<Lsb0>() &= e.view_bits::<Msb0>(); for ((d, e), f) in (d .view_bits::<Lsb0>() .iter() .by_vals() .zip(e.view_bits::<Msb0>().iter().by_vals())) .zip(f.view_bits::<Lsb0>()) { assert_eq!(d & e, f); } } #[test] fn bitor() { let a = random::<[u32; 3]>(); let b = random::<[u32; 3]>(); let c = [a[0] | b[0], a[1] | b[1], a[2] | b[2]]; let mut d = a; *d.view_bits_mut::<Lsb0>() |= b.view_bits::<Lsb0>(); assert_eq!(c, d); d = a; *d.view_bits_mut::<Msb0>() |= b.view_bits::<Msb0>(); assert_eq!(c, d); let d = random::<[u8; 6]>(); let e = random::<[u16; 3]>(); let mut f = d; *f.view_bits_mut::<Lsb0>() |= e.view_bits::<Msb0>(); for ((d, e), f) in (d .view_bits::<Lsb0>() .iter() .by_vals() .zip(e.view_bits::<Msb0>().iter().by_vals())) .zip(f.view_bits::<Lsb0>()) { assert_eq!(d | e, f); } } #[test] fn bitxor() { let a = random::<[u32; 3]>(); let b = random::<[u32; 3]>(); let c = [a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2]]; let mut d = a; *d.view_bits_mut::<Lsb0>() ^= b.view_bits::<Lsb0>(); assert_eq!(c, d); d = a; *d.view_bits_mut::<Msb0>() ^= b.view_bits::<Msb0>(); assert_eq!(c, d); let d = random::<[u8; 6]>(); let e = random::<[u16; 3]>(); let mut f = d; *f.view_bits_mut::<Lsb0>() ^= e.view_bits::<Msb0>(); for ((d, e), f) in (d .view_bits::<Lsb0>() .iter() .by_vals() .zip(e.view_bits::<Msb0>().iter().by_vals())) .zip(f.view_bits::<Lsb0>()) { assert_eq!(d ^ e, f); } } #[test] fn bit_not() { let a = random::<[u32; 3]>(); let mut b = a; let _ = !b.view_bits_mut::<Lsb0>(); assert_eq!([!a[0], !a[1], !a[2]], b); let mut c = [0u32; 3]; let d = !&mut c.view_bits_mut::<Lsb0>()[16 .. 80]; let _ = !&mut d[24 .. 40]; assert_eq!(c, [0xFF_FF_00_00, 0xFF_00_00_FF, 0x00_00_FF_FF]); }
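// Illustrative addition, not part of the upstream suite: the Boolean
// operators pair bits by *index*, so operands with different orderings
// still line up semantically even though their raw bytes differ.
#[test]
fn bitand_mixed_order() {
	let mut a = 0b1111_0000u8;
	let b = 0b0000_1111u8;
	// `a` viewed Msb0 yields 1111 0000, and `b` viewed Lsb0 also yields
	// 1111 0000, so the AND leaves `a` unchanged.
	*a.view_bits_mut::<Msb0>() &= b.view_bits::<Lsb0>();
	assert_eq!(a, 0b1111_0000);
}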
#[test] fn indexing() { let bits = bits![mut 0, 1, 0, 0, 1]; assert!(bits[1]); assert!(!bits[2]); assert_eq!(bits[1 ..= 2], bits![1, 0]); assert!((10 .. 12).get(bits).is_none()); assert!((10 .. 12).get_mut(bits).is_none()); } #[test] #[should_panic = "index 10 out of bounds: 5"] fn index_mut_usize() { let bits = bits![mut 0, 1, 0, 0, 1]; 10.index_mut(bits); }
bitvec-1.0.1/src/slice/tests/traits.rs000064400000000000000000000131371046102023000160060ustar 00000000000000use core::{ cmp, convert::TryFrom, }; use static_assertions::*; use crate::prelude::*; #[test] fn core_impls() { use core::{ cmp::{ Eq, Ord, }, fmt::Debug, hash::Hash, ops::{ Index, Range, }, panic::{ RefUnwindSafe, UnwindSafe, }, }; assert_impl_all!(BitSlice<usize, Lsb0>: AsRef<BitSlice<usize, Lsb0>>, AsMut<BitSlice<usize, Lsb0>>, Debug, Eq, Hash, Index<usize>, Index<Range<usize>>, Ord, PartialEq<BitSlice<usize, Lsb0>>, PartialOrd<BitSlice<usize, Lsb0>>, Send, Sync, Unpin, UnwindSafe, RefUnwindSafe, ); assert_impl_all!(&BitSlice<usize, Lsb0>: Default, IntoIterator, ); assert_impl_all!(&mut BitSlice<usize, Lsb0>: Default, IntoIterator, ); } #[test] #[cfg(feature = "alloc")] fn alloc_impls() { use alloc::borrow::ToOwned; assert_impl_all!(BitSlice<usize, Lsb0>: ToOwned, ); } #[test] #[allow(deprecated)] #[cfg(feature = "std")] fn std_impls() { use std::{ ascii::AsciiExt, io::{ BufRead, Read, Write, }, }; assert_impl_all!(&BitSlice<u8, Lsb0>: Read, ); assert_impl_all!(&mut BitSlice<u8, Lsb0>: Write, ); assert_not_impl_any!(BitSlice<u8, Lsb0>: AsciiExt, BufRead, ); } #[test] fn cmp() { let a = bits![0, 1]; let b = bits![1, 0]; let c = bits![u8, Msb0; 1, 0]; let d = bits![u8, Msb0; 1, 1]; assert_eq!(a.cmp(b), cmp::Ordering::Less); assert_ne!(a, b); assert_eq!(b, c); assert_ne!(c, d); } #[test] fn conv() -> Result<(), ()> { let mut a = [0u8, 1, 2, 3]; let _ = <&BitSlice<_, Lsb0>>::try_from(&a[..]).map_err(drop)?; let _ = <&mut BitSlice<_, Lsb0>>::try_from(&mut a[..]).map_err(drop)?; Ok(()) } #[cfg(feature = "alloc")] mod format { #[cfg(not(feature = "std"))] use alloc::format; use crate::prelude::*; #[test] fn binary() { let data = [0u8, 0x0F, !0]; let bits = data.view_bits::<Msb0>(); assert_eq!(format!("{:b}", &bits[.. 0]), "[]"); assert_eq!(format!("{:#b}", &bits[.. 0]), "[]"); assert_eq!(format!("{:b}", &bits[9 .. 15]), "[000111]"); assert_eq!( format!("{:#b}", &bits[9 .. 15]), "[ 0b000111, ]" ); assert_eq!(format!("{:b}", &bits[4 .. 20]), "[0000, 00001111, 1111]"); assert_eq!( format!("{:#b}", &bits[4 .. 20]), "[ 0b0000, 0b00001111, 0b1111, ]" ); assert_eq!(format!("{:b}", &bits[4 ..]), "[0000, 00001111, 11111111]"); assert_eq!( format!("{:#b}", &bits[4 ..]), "[ 0b0000, 0b00001111, 0b11111111, ]" ); assert_eq!(format!("{:b}", &bits[.. 20]), "[00000000, 00001111, 1111]"); assert_eq!( format!("{:#b}", &bits[.. 20]), "[ 0b00000000, 0b00001111, 0b1111, ]" ); assert_eq!(format!("{:b}", bits), "[00000000, 00001111, 11111111]"); assert_eq!( format!("{:#b}", bits), "[ 0b00000000, 0b00001111, 0b11111111, ]" ); } #[test] fn octal() { let data = [0u8, 0x0F, !0]; let bits = data.view_bits::<Msb0>(); assert_eq!(format!("{:o}", &bits[.. 0]), "[]"); assert_eq!(format!("{:#o}", &bits[.. 0]), "[]"); assert_eq!(format!("{:o}", &bits[9 .. 15]), "[07]"); assert_eq!( format!("{:#o}", &bits[9 .. 15]), "[ 0o07, ]" ); // …0_000 00_001_111 1_111… assert_eq!(format!("{:o}", &bits[4 .. 20]), "[00, 017, 17]"); assert_eq!( format!("{:#o}", &bits[4 .. 20]), "[ 0o00, 0o017, 0o17, ]" ); assert_eq!(format!("{:o}", &bits[4 ..]), "[00, 017, 377]"); assert_eq!( format!("{:#o}", &bits[4 ..]), "[ 0o00, 0o017, 0o377, ]" ); assert_eq!(format!("{:o}", &bits[.. 20]), "[000, 017, 17]"); assert_eq!( format!("{:#o}", &bits[.. 20]), "[ 0o000, 0o017, 0o17, ]" ); assert_eq!(format!("{:o}", bits), "[000, 017, 377]"); assert_eq!( format!("{:#o}", bits), "[ 0o000, 0o017, 0o377, ]" ); } #[test] fn hex_lower() { let data = [0u8, 0x0F, !0]; let bits = data.view_bits::<Msb0>(); assert_eq!(format!("{:x}", &bits[.. 0]), "[]"); assert_eq!(format!("{:#x}", &bits[.. 0]), "[]"); // …00_0111 … assert_eq!(format!("{:x}", &bits[9 .. 15]), "[07]"); assert_eq!( format!("{:#x}", &bits[9 .. 15]), "[ 0x07, ]" ); // …0000 00001111 1111… assert_eq!(format!("{:x}", &bits[4 .. 20]), "[0, 0f, f]"); assert_eq!( format!("{:#x}", &bits[4 .. 20]), "[ 0x0, 0x0f, 0xf, ]" ); assert_eq!(format!("{:x}", &bits[4 ..]), "[0, 0f, ff]"); assert_eq!( format!("{:#x}", &bits[4 ..]), "[ 0x0, 0x0f, 0xff, ]" ); assert_eq!(format!("{:x}", &bits[.. 20]), "[00, 0f, f]"); assert_eq!( format!("{:#x}", &bits[.. 20]), "[ 0x00, 0x0f, 0xf, ]" ); assert_eq!(format!("{:x}", bits), "[00, 0f, ff]"); assert_eq!( format!("{:#x}", bits), "[ 0x00, 0x0f, 0xff, ]" ); } #[test] fn hex_upper() { let data = [0u8, 0x0F, !0]; let bits = data.view_bits::<Msb0>(); assert_eq!(format!("{:X}", &bits[.. 0]), "[]"); assert_eq!(format!("{:#X}", &bits[.. 0]), "[]"); assert_eq!(format!("{:X}", &bits[9 .. 15]), "[07]"); assert_eq!( format!("{:#X}", &bits[9 .. 15]), "[ 0x07, ]" ); assert_eq!(format!("{:X}", &bits[4 .. 20]), "[0, 0F, F]"); assert_eq!( format!("{:#X}", &bits[4 .. 20]), "[ 0x0, 0x0F, 0xF, ]" ); assert_eq!(format!("{:X}", &bits[4 ..]), "[0, 0F, FF]"); assert_eq!( format!("{:#X}", &bits[4 ..]), "[ 0x0, 0x0F, 0xFF, ]" ); assert_eq!(format!("{:X}", &bits[.. 20]), "[00, 0F, F]"); assert_eq!( format!("{:#X}", &bits[.. 20]), "[ 0x00, 0x0F, 0xF, ]" ); assert_eq!(format!("{:X}", bits), "[00, 0F, FF]"); assert_eq!( format!("{:#X}", bits), "[ 0x00, 0x0F, 0xFF, ]" ); } }
bitvec-1.0.1/src/slice/tests.rs000064400000000000000000000202621046102023000144770ustar 00000000000000//! Unit tests for bit-slices. #![cfg(test)] use core::cell::Cell; use rand::random; use crate::{ order::HiLo, prelude::*, }; mod api; mod iter; mod ops; mod traits;
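// Illustrative addition, not part of the upstream suite: cloning between
// bit-slices is also index-wise, so a value survives a round trip across
// orderings even though the underlying bytes do not match.
#[test]
fn copying_across_orderings() {
	let src = bits![u8, Msb0; 1, 0, 1, 1];
	let dst = bits![mut u8, Lsb0; 0; 4];
	dst.clone_from_bitslice(src);
	assert_eq!(dst, src);
}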
#[test] #[allow(clippy::many_single_char_names)] fn copying() { let a = bits![mut u8, Lsb0; 0; 4]; let b = bits![u16, Msb0; 0, 1, 0, 1]; a.clone_from_bitslice(b); assert_eq!(a, b); let mut a = random::<[u32; 3]>(); let b = random::<[u32; 3]>(); a.view_bits_mut::<Lsb0>()[4 .. 92] .copy_from_bitslice(&b.view_bits::<Lsb0>()[4 .. 92]); assert_eq!([a[0] & 0xFF_FF_FF_F0, a[1], a[2] & 0x0F_FF_FF_FF], [ b[0] & 0xFF_FF_FF_F0, b[1], b[2] & 0x0F_FF_FF_FF ],); let mut c = random::<u32>(); let d = random::<u32>(); c.view_bits_mut::<Lsb0>()[4 .. 28] .copy_from_bitslice(&d.view_bits::<Lsb0>()[4 .. 28]); assert_eq!(c & 0x0F_FF_FF_F0, d & 0x0F_FF_FF_F0); let mut e = 0x01_23_45_67u32; let f = 0x89_AB_CD_EFu32; e.view_bits_mut::<HiLo>()[.. 28] .copy_from_bitslice(&f.view_bits::<HiLo>()[4 ..]); assert_eq!(e, 0x91_B8_DA_FC); // 28 .. 32 ^ let mut g = random::<[u32; 3]>(); let mut h = random::<[u32; 3]>(); let i = g; let j = h; g.view_bits_mut::<Lsb0>() .swap_with_bitslice(h.view_bits_mut::<Lsb0>()); assert_eq!((g, h), (j, i)); g.view_bits_mut::<Msb0>() .swap_with_bitslice(h.view_bits_mut::<Msb0>()); assert_eq!((g, h), (i, j)); g.view_bits_mut::<Lsb0>() .swap_with_bitslice(h.view_bits_mut::<Msb0>()); assert_eq!(g.view_bits::<Lsb0>(), j.view_bits::<Msb0>()); assert_eq!(h.view_bits::<Msb0>(), i.view_bits::<Lsb0>()); let mut k = random::<[u32; 3]>(); let j = k; unsafe { k.view_bits_mut::<Lsb0>().copy_within_unchecked(32 .., 0); assert_eq!(k, [j[1], j[2], j[2]]); k.view_bits_mut::<Msb0>().copy_within_unchecked(.. 64, 32); assert_eq!(k, [j[1], j[1], j[2]]); k.view_bits_mut::<HiLo>().copy_within_unchecked(32 .., 0); assert_eq!(k, [j[1], j[2], j[2]]); } } #[test] fn writing() { let bits = bits![mut 0; 2]; bits.set(0, true); unsafe { bits.set_unchecked(1, true); } assert_eq!(bits, bits![1;2]); assert!(bits.replace(0, false)); assert!(unsafe { bits.replace_unchecked(1, false) }); assert_eq!(bits, bits![0;2]); } #[test] fn bit_counting() { let data = [0x12u8, 0xFE, 0x34, 0xDC]; let lsb0 = data.view_bits::<Lsb0>(); let msb0 = data.view_bits::<Msb0>(); assert_eq!(lsb0[2 .. 6].count_ones(), 1); assert_eq!(lsb0[2 .. 6].count_zeros(), 3); assert_eq!(msb0[2 .. 30].count_ones(), 17); assert_eq!(msb0[2 .. 30].count_zeros(), 11); assert!(!bits![].any()); assert!(!bits![0, 0].any()); assert!(bits![0, 1].any()); assert!(bits![].all()); assert!(!bits![0, 1].all()); assert!(bits![1, 1].all()); assert!(bits![].not_any()); assert!(bits![0, 0].not_any()); assert!(!bits![0, 1].not_any()); assert!(!bits![].not_all()); assert!(bits![0, 1].not_all()); assert!(!bits![1, 1].not_all()); assert!(!bits![0; 2].some()); assert!(bits![0, 1].some()); assert!(!bits![1; 2].some()); assert!(bits![usize, Lsb0;].first_one().is_none()); assert!(bits![usize, Msb0;].first_one().is_none()); assert!(bits![usize, Lsb0;].last_one().is_none()); assert!(bits![usize, Msb0;].last_one().is_none()); assert!(bits![usize, Lsb0;].first_zero().is_none()); assert!(bits![usize, Msb0;].first_zero().is_none()); assert!(bits![usize, Lsb0;].last_zero().is_none()); assert!(bits![usize, Msb0;].last_zero().is_none()); assert!([0u8; 1].view_bits::<Lsb0>()[1 .. 7].first_one().is_none()); assert!([0u8; 3].view_bits::<Lsb0>()[1 .. 23].first_one().is_none()); assert!([0u8; 1].view_bits::<Msb0>()[1 .. 7].first_one().is_none()); assert!([0u8; 3].view_bits::<Msb0>()[1 .. 23].first_one().is_none()); assert!([0u8; 1].view_bits::<Lsb0>()[1 .. 7].last_one().is_none()); assert!([0u8; 3].view_bits::<Lsb0>()[1 .. 23].last_one().is_none()); assert!([0u8; 1].view_bits::<Msb0>()[1 .. 7].last_one().is_none()); assert!([0u8; 3].view_bits::<Msb0>()[1 .. 23].last_one().is_none()); assert!([!0u8; 1].view_bits::<Lsb0>()[1 .. 7].first_zero().is_none()); assert!( [!0u8; 3].view_bits::<Lsb0>()[1 .. 23] .first_zero() .is_none() ); assert!([!0u8; 1].view_bits::<Msb0>()[1 .. 7].first_zero().is_none()); assert!( [!0u8; 3].view_bits::<Msb0>()[1 .. 23] .first_zero() .is_none() ); assert!([!0u8; 1].view_bits::<Lsb0>()[1 .. 7].last_zero().is_none()); assert!([!0u8; 3].view_bits::<Lsb0>()[1 .. 23].last_zero().is_none()); assert!([!0u8; 1].view_bits::<Msb0>()[1 .. 7].last_zero().is_none()); assert!([!0u8; 3].view_bits::<Msb0>()[1 .. 23].last_zero().is_none()); let data = 0b0100_0100u8; assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].first_one(), Some(1)); assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].last_one(), Some(5)); assert_eq!(data.view_bits::<Msb0>()[1 .. 7].first_one(), Some(0)); assert_eq!(data.view_bits::<Msb0>()[1 .. 7].last_one(), Some(4)); let data = 0b1011_1011u8; assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].first_zero(), Some(1)); assert_eq!(data.view_bits::<Lsb0>()[1 .. 7].last_zero(), Some(5)); assert_eq!(data.view_bits::<Msb0>()[1 .. 7].first_zero(), Some(0)); assert_eq!(data.view_bits::<Msb0>()[1 .. 7].last_zero(), Some(4)); let data = [0u8, 0b1001_0110, 0]; assert_eq!(data.view_bits::<Lsb0>()[12 ..].first_one(), Some(0)); assert_eq!(data.view_bits::<Lsb0>()[4 ..].first_one(), Some(5)); assert_eq!(data.view_bits::<Lsb0>()[.. 12].first_one(), Some(9)); assert_eq!(data.view_bits::<Msb0>()[12 ..].first_one(), Some(1)); assert_eq!(data.view_bits::<Msb0>()[4 ..].first_one(), Some(4)); assert_eq!(data.view_bits::<Msb0>()[.. 12].first_one(), Some(8)); assert_eq!(data.view_bits::<Lsb0>()[12 ..].last_one(), Some(3)); assert_eq!(data.view_bits::<Lsb0>()[4 ..].last_one(), Some(11)); assert_eq!(data.view_bits::<Lsb0>()[.. 12].last_one(), Some(10)); assert_eq!(data.view_bits::<Msb0>()[12 ..].last_one(), Some(2)); assert_eq!(data.view_bits::<Msb0>()[4 ..].last_one(), Some(10)); assert_eq!(data.view_bits::<Msb0>()[.. 12].last_one(), Some(11)); let data = [!0u8, 0b1001_0110, !0]; assert_eq!(data.view_bits::<Lsb0>()[12 ..].first_zero(), Some(1)); assert_eq!(data.view_bits::<Lsb0>()[4 ..].first_zero(), Some(4)); assert_eq!(data.view_bits::<Lsb0>()[.. 12].first_zero(), Some(8)); assert_eq!(data.view_bits::<Msb0>()[12 ..].first_zero(), Some(0)); assert_eq!(data.view_bits::<Msb0>()[4 ..].first_zero(), Some(5)); assert_eq!(data.view_bits::<Msb0>()[.. 12].first_zero(), Some(9)); assert_eq!(data.view_bits::<Lsb0>()[12 ..].last_zero(), Some(2)); assert_eq!(data.view_bits::<Lsb0>()[4 ..].last_zero(), Some(10)); assert_eq!(data.view_bits::<Lsb0>()[.. 12].last_zero(), Some(11)); assert_eq!(data.view_bits::<Msb0>()[12 ..].last_zero(), Some(3)); assert_eq!(data.view_bits::<Msb0>()[4 ..].last_zero(), Some(11)); assert_eq!(data.view_bits::<Msb0>()[.. 12].last_zero(), Some(10)); assert_eq!(15u8.view_bits::<Lsb0>().leading_ones(), 4); assert_eq!(15u8.view_bits::<Msb0>().leading_ones(), 0); assert_eq!(15u8.view_bits::<Lsb0>().leading_zeros(), 0); assert_eq!(15u8.view_bits::<Msb0>().leading_zeros(), 4); assert_eq!(15u8.view_bits::<Lsb0>().trailing_ones(), 0); assert_eq!(15u8.view_bits::<Msb0>().trailing_ones(), 4); assert_eq!(15u8.view_bits::<Lsb0>().trailing_zeros(), 4); assert_eq!(15u8.view_bits::<Msb0>().trailing_zeros(), 0); } #[test] fn shunting() { let bits = bits![mut 0, 1, 0, 0, 1]; bits.shift_left(0); bits.shift_right(0); assert_eq!(bits, bits![0, 1, 0, 0, 1]); let bits = bits![mut 1;5]; bits.shift_left(1); bits.shift_right(2); bits.shift_left(1); assert_eq!(bits, bits![0, 1, 1, 1, 0]); } #[test] fn aliasing() { let bits = bits![Cell<u8>, Lsb0; 0]; let (a, b) = (bits, bits); a.set_aliased(0, true); assert!(bits[0]); b.set_aliased(0, false); assert!(!bits[0]); } #[test] fn cooking() { use core::convert::TryFrom; use crate::{ ptr::BitPtr, slice, }; let mut data = [0usize; 80]; let len = crate::mem::bits_of::<usize>() * 80; let ref_ptr = data.as_ptr(); let mut_ptr = data.as_mut_ptr(); unsafe { assert_eq!( slice::from_raw_parts_unchecked( BitPtr::try_from(ref_ptr).unwrap(), len ) .as_bitspan(), data.view_bits::<Lsb0>().as_bitspan(), ); assert_eq!( slice::from_raw_parts_unchecked_mut( BitPtr::try_from(mut_ptr).unwrap(), len ) .as_bitspan(), data.view_bits_mut::<Lsb0>().as_bitspan(), ); } }
bitvec-1.0.1/src/slice/traits.rs000064400000000000000000000300741046102023000146450ustar 00000000000000#![doc = include_str!("../../doc/slice/traits.md")] #[cfg(feature = "alloc")] use alloc::borrow::ToOwned; use core::{ cmp, convert::TryFrom, fmt::{ self, Binary, Debug, Display, Formatter, LowerHex, Octal, Pointer, UpperHex, }, hash::{ Hash, Hasher, }, str, }; use wyz::fmt::FmtForward; use super::BitSlice; #[cfg(feature = "alloc")] use crate::vec::BitVec; use crate::{ domain::Domain, mem, order::{ BitOrder, Lsb0, Msb0, }, store::BitStore, view::BitView, }; /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-AsRef%3C%5BT%5D%3E) impl<T, O> AsRef<Self> for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &Self { self } } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-AsMut%3C%5BT%5D%3E) impl<T, O> AsMut<Self> for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn as_mut(&mut self) -> &mut Self { self } } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Eq)
impl<T, O> Eq for BitSlice<T, O> where T: BitStore, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Ord) impl<T, O> Ord for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn cmp(&self, rhs: &Self) -> cmp::Ordering { self.partial_cmp(rhs) .expect("BitSlice has a total ordering") } } /** Tests if two `BitSlice`s are semantically — not representationally — equal. It is valid to compare slices of different ordering or memory types. The equality condition requires that they have the same length and that at each index, the two slices have the same bit value. [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-PartialEq%3C%5BB%5D%3E) **/ impl<T1, T2, O1, O2> PartialEq<BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, rhs: &BitSlice<T2, O2>) -> bool { if let (Some(this), Some(that)) = (self.coerce::<T1, Lsb0>(), rhs.coerce::<T1, Lsb0>()) { this.sp_eq(that) } else if let (Some(this), Some(that)) = (self.coerce::<T1, Msb0>(), rhs.coerce::<T1, Msb0>()) { this.sp_eq(that) } else { self.len() == rhs.len() && self .iter() .by_vals() .zip(rhs.iter().by_vals()) .all(|(l, r)| l == r) } } } // ref-to-val equality impl<T1, T2, O1, O2> PartialEq<BitSlice<T2, O2>> for &BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, rhs: &BitSlice<T2, O2>) -> bool { **self == rhs } } impl<T1, T2, O1, O2> PartialEq<BitSlice<T2, O2>> for &mut BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, rhs: &BitSlice<T2, O2>) -> bool { **self == rhs } } // val-to-ref equality impl<T1, T2, O1, O2> PartialEq<&BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, rhs: &&BitSlice<T2, O2>) -> bool { *self == **rhs } } impl<T1, T2, O1, O2> PartialEq<&mut BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, rhs: &&mut BitSlice<T2, O2>) -> bool { *self == **rhs } } /** Compares two `BitSlice`s by semantic — not representational — ordering. The comparison sorts by testing at each index if one slice has a high bit where the other has a low. At the first index where the slices differ, the slice with the high bit is greater. If the slices are equal until at least one terminates, then they are compared by length.
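
An illustrative comparison (added example, not in the original documentation):

```rust
use bitvec::prelude::*;

assert!(bits![0, 1, 1] < bits![1, 0, 0]); // index 0 decides
assert!(bits![0, 1] < bits![0, 1, 0]); // equal prefix: length decides
```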
[Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-PartialOrd%3C%5BT%5D%3E) **/ impl<T1, T2, O1, O2> PartialOrd<BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &BitSlice<T2, O2>) -> Option<cmp::Ordering> { for (l, r) in self.iter().by_vals().zip(rhs.iter().by_vals()) { match (l, r) { (true, false) => return Some(cmp::Ordering::Greater), (false, true) => return Some(cmp::Ordering::Less), _ => continue, } } self.len().partial_cmp(&rhs.len()) } } // ref-to-val ordering impl<T1, T2, O1, O2> PartialOrd<BitSlice<T2, O2>> for &BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &BitSlice<T2, O2>) -> Option<cmp::Ordering> { (*self).partial_cmp(rhs) } } impl<T1, T2, O1, O2> PartialOrd<BitSlice<T2, O2>> for &mut BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &BitSlice<T2, O2>) -> Option<cmp::Ordering> { (**self).partial_cmp(rhs) } } // val-to-ref ordering impl<T1, T2, O1, O2> PartialOrd<&BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &&BitSlice<T2, O2>) -> Option<cmp::Ordering> { (*self).partial_cmp(&**rhs) } } impl<T1, T2, O1, O2> PartialOrd<&mut BitSlice<T2, O2>> for BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &&mut BitSlice<T2, O2>) -> Option<cmp::Ordering> { (*self).partial_cmp(&**rhs) } } // &mut-to-& ordering impl<T1, T2, O1, O2> PartialOrd<&mut BitSlice<T2, O2>> for &BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &&mut BitSlice<T2, O2>) -> Option<cmp::Ordering> { (**self).partial_cmp(&**rhs) } } impl<T1, T2, O1, O2> PartialOrd<&BitSlice<T2, O2>> for &mut BitSlice<T1, O1> where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, rhs: &&BitSlice<T2, O2>) -> Option<cmp::Ordering> { (**self).partial_cmp(&**rhs) } } /** Calls [`BitSlice::try_from_slice`], but returns the original Rust slice on error instead of the failure event. This only fails if `slice.len()` exceeds `BitSlice::MAX_ELTS`. [`BitSlice::try_from_slice`]: crate::slice::BitSlice::try_from_slice **/ impl<'a, T, O> TryFrom<&'a [T]> for &'a BitSlice<T, O> where T: BitStore, O: BitOrder, { type Error = &'a [T]; #[inline] fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> { BitSlice::try_from_slice(slice).map_err(|_| slice) } } /** Calls [`BitSlice::try_from_slice_mut`], but returns the original Rust slice on error instead of the failure event. This only fails if `slice.len()` exceeds `BitSlice::MAX_ELTS`.
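
An illustrative use (added example, not in the original documentation): the
conversion is fallible only for absurdly long slices, so ordinary uses just
unwrap it.

```rust
use bitvec::prelude::*;
use core::convert::TryFrom;

let mut raw = [0u8; 2];
let bits = <&mut BitSlice<u8, Msb0>>::try_from(&mut raw[..]).unwrap();
bits.set(0, true);
assert_eq!(raw[0], 0x80);
```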
[`BitSlice::try_from_slice_mut`]: crate::slice::BitSlice::try_from_slice_mut **/ impl<'a, T, O> TryFrom<&'a mut [T]> for &'a mut BitSlice<T, O> where T: BitStore, O: BitOrder, { type Error = &'a mut [T]; #[inline] fn try_from(slice: &'a mut [T]) -> Result<Self, Self::Error> { let slice_ptr = slice as *mut [T]; BitSlice::try_from_slice_mut(slice) .map_err(|_| unsafe { &mut *slice_ptr }) } } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Default-1) impl<T, O> Default for &BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { BitSlice::empty() } } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Default) impl<T, O> Default for &mut BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { BitSlice::empty_mut() } } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Debug) impl<T, O> Debug for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.as_bitspan().render(fmt, "Slice", None)?; fmt.write_str(" ")?; Display::fmt(self, fmt) } } impl<T, O> Display for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_list() .entries(self.iter().by_vals().map(|b| if b { 1 } else { 0 })) .finish() } } impl<T, O> Pointer for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Pointer::fmt(&self.as_bitspan(), fmt) } } /// Encodes a short bit-slice into an ASCII b36 value. #[inline(always)] fn bits_to_ascii<T, O>(bits: &BitSlice<T, O>, alpha: u8) -> u8 where T: BitStore, O: BitOrder, { let mut val = 0u8; for bit in bits.iter().by_vals() { val <<= 1; val |= bit as u8; } match val { v @ 0 ..= 9 => b'0' + v, v @ 10 ..= 35 => alpha - 10 + v, _ => unreachable!( "bit-slices wider than five bits cannot be rendered to ASCII b36" ), } } /** Encodes an arbitrary bit-slice into an ASCII b36 string. ## Parameters - `bits`: the bit-slice to encode. - `into`: a provided buffer into which the bit-slice is encoded. - `radix`: the bit width of each digit (log2 of its radix). - `skip`: the number of bytes to skip before beginning the write. - `alpha`: one of `b'a'` or `b'A'`. ## Returns A subset of `into` that is now initialized to the ASCII encoding. **/ #[inline(always)] fn encode_ascii<'a, T, O>( bits: &BitSlice<T, O>, into: &'a mut [u8], radix: usize, mut skip: usize, alpha: u8, ) -> &'a str where T: BitStore, O: BitOrder, { for (chunk, slot) in bits.rchunks(radix).rev().zip(into.iter_mut().skip(skip)) { *slot = bits_to_ascii(chunk, alpha); skip += 1; } unsafe { str::from_utf8_unchecked(&into[.. skip]) } }
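// Illustrative check, not in the original source: the b36 digit encoder used
// by the formatting implementations below maps five-bit-or-narrower chunks
// onto `0-9a-z`/`0-9A-Z`.
#[cfg(test)]
mod ascii_digits {
	use super::*;
	use crate::prelude::*;

	#[test]
	fn digits() {
		let bits = bits![u8, Msb0; 1, 0, 1, 0]; // 0b1010 == 10
		assert_eq!(bits_to_ascii(bits, b'a'), b'a');
		assert_eq!(bits_to_ascii(&bits[1 ..], b'a'), b'2'); // 0b010 == 2
	}
}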
/// Constructs the numeric formatting implementations. macro_rules! fmt { ($($trait:ident: $alpha:expr, $pfx:expr, $radix:expr;)+) => { $( #[doc = include_str!("../../doc/slice/format.md")] impl<T, O> $trait for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] #[allow(clippy::modulo_one)] // I know what I’m doing. // TODO(myrrlyn): See if Binary codegen ditches the loops. fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { const D: usize = mem::bits_of::<usize>() / $radix; const M: usize = mem::bits_of::<usize>() % $radix; const W: usize = D + (M != 0) as usize; let mut txt: [u8; W + 2] = [b'0'; W + 2]; txt[1] = $pfx; let start = if fmt.alternate() { 0 } else { 2 }; let mut seq = fmt.debug_list(); match self.domain() { Domain::Enclave(elem) => { seq.entry(&encode_ascii( elem.into_bitslice(), &mut txt[start ..], $radix, 2 - start, $alpha, ).fmt_display()); }, Domain::Region { head, body, tail } => { if let Some(elem) = head { seq.entry(&encode_ascii( elem.into_bitslice(), &mut txt[start ..], $radix, 2 - start, $alpha, ).fmt_display()); } for elem in body.iter().map(BitStore::load_value) { seq.entry(&encode_ascii( elem.view_bits::<O>(), &mut txt[start ..], $radix, 2 - start, $alpha, ).fmt_display()); } if let Some(elem) = tail { seq.entry(&encode_ascii( elem.into_bitslice(), &mut txt[start ..], $radix, 2 - start, $alpha, ).fmt_display()); } }, } seq.finish() } } )+ }; } fmt! { Binary: b'0', b'b', 1; Octal: b'0', b'o', 3; LowerHex: b'a', b'x', 4; UpperHex: b'A', b'x', 4; } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Hash) #[cfg(not(tarpaulin_include))] impl<T, O> Hash for BitSlice<T, O> where T: BitStore, O: BitOrder, { #[inline] fn hash<H>(&self, hasher: &mut H) where H: Hasher { self.iter().by_vals().for_each(|bit| bit.hash(hasher)); } } #[doc = include_str!("../../doc/slice/threadsafe.md")] unsafe impl<T, O> Send for BitSlice<T, O> where T: BitStore + Sync, O: BitOrder, { } #[doc = include_str!("../../doc/slice/threadsafe.md")] unsafe impl<T, O> Sync for BitSlice<T, O> where T: BitStore + Sync, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-Unpin) impl<T, O> Unpin for BitSlice<T, O> where T: BitStore, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/std/primitive.slice.html#impl-ToOwned) #[cfg(feature = "alloc")] #[cfg(not(tarpaulin_include))] impl<T, O> ToOwned for BitSlice<T, O> where T: BitStore, O: BitOrder, { type Owned = BitVec<T, O>; #[inline] fn to_owned(&self) -> Self::Owned { BitVec::from_bitslice(self) } }
bitvec-1.0.1/src/slice.rs000064400000000000000000001416351046102023000133430ustar 00000000000000#![doc = include_str!("../doc/slice.md")] #[cfg(feature = "alloc")] use alloc::vec::Vec; use core::{ marker::PhantomData, ops::RangeBounds, }; use funty::Integral; use tap::Pipe; #[cfg(feature = "alloc")] use tap::Tap; use wyz::{ bidi::BidiIterator, comu::{ Const, Mut, }, range::RangeExt, }; #[cfg(feature = "alloc")] use crate::vec::BitVec; use crate::{ domain::{ BitDomain, Domain, }, mem, order::{ BitOrder, Lsb0, Msb0, }, ptr::{ self as bv_ptr, BitPtr, BitPtrRange, BitSpan, BitSpanError, }, store::BitStore, }; mod api; mod iter; mod ops; mod specialization; mod tests; mod traits; pub use self::{ api::*, iter::*, }; #[repr(transparent)] #[doc = include_str!("../doc/slice/BitSlice.md")] pub struct BitSlice<T = usize, O = Lsb0> where T: BitStore, O: BitOrder, { /// The ordering of bits within a `T` register. _ord: PhantomData<O>, /// The register type used for storage. _typ: PhantomData<[T]>, /// Indicate that this is a newtype wrapper over a wholly-untyped slice. /// /// This is necessary in order for the Rust compiler to remove restrictions /// on the possible values of reference handles to this type. Any other /// slice type here (such as `[u8]` or `[T]`) would require that `&/mut /// BitSlice` handles have values that correctly describe the region, and /// the encoding *does not* do this. As such, reference handles to /// `BitSlice` must not be even implicitly dereferenceäble to real memory, /// and the slice must be a ZST. /// /// References to a ZST have no restrictions about what the values can be, /// as they are never able to dereference real memory and thus both /// addresses and lengths are meaningless to the memory inspector. /// /// See `ptr::span` for more information on the encoding scheme used in /// references to `BitSlice`.
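	///
	/// An illustrative consequence (added note, not in the original source):
	/// a `&BitSlice` handle is still an ordinary two-word slice reference,
	/// it just never dereferences as one.
	///
	/// ```rust
	/// use bitvec::prelude::*;
	/// use core::mem::size_of;
	///
	/// assert_eq!(
	/// 	size_of::<&BitSlice<u8, Msb0>>(),
	/// 	size_of::<&[u8]>(),
	/// );
	/// ```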
_mem: [()], } /// Constructors. impl<T, O> BitSlice<T, O> where T: BitStore, O: BitOrder, { /// Produces an empty bit-slice with an arbitrary lifetime. /// /// ## Original /// /// This is equivalent to the `&[]` literal. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(BitSlice::<usize, Lsb0>::empty().is_empty()); /// assert_eq!(bits![], BitSlice::<usize, Lsb0>::empty()); /// ``` #[inline] pub fn empty<'a>() -> &'a Self { unsafe { BitSpan::<Const, T, O>::EMPTY.into_bitslice_ref() } } /// Produces an empty bit-slice with an arbitrary lifetime. /// /// ## Original /// /// This is equivalent to the `&mut []` literal. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(BitSlice::<usize, Lsb0>::empty_mut().is_empty()); /// assert_eq!(bits![mut], BitSlice::<usize, Lsb0>::empty_mut()); /// ``` #[inline] pub fn empty_mut<'a>() -> &'a mut Self { unsafe { BitSpan::<Mut, T, O>::EMPTY.into_bitslice_mut() } } /// Constructs a shared `&BitSlice` reference over a shared element. /// /// The [`BitView`] trait, implemented on all [`BitStore`] implementors, /// provides a [`.view_bits::<O>()`] method which delegates to this function /// and may be more convenient for you to write. /// /// ## Parameters /// /// - `elem`: A shared reference to a memory element. /// /// ## Returns /// /// A shared `&BitSlice` over `elem`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let elem = 0u8; /// let bits = BitSlice::<_, Lsb0>::from_element(&elem); /// assert_eq!(bits.len(), 8); /// /// let bits = elem.view_bits::<Lsb0>(); /// ``` /// /// [`BitStore`]: crate::store::BitStore /// [`BitView`]: crate::view::BitView /// [`.view_bits::<O>()`]: crate::view::BitView::view_bits #[inline] pub fn from_element(elem: &T) -> &Self { unsafe { BitPtr::from_ref(elem) .span_unchecked(mem::bits_of::<T>()) .into_bitslice_ref() } } /// Constructs an exclusive `&mut BitSlice` reference over an element. /// /// The [`BitView`] trait, implemented on all [`BitStore`] implementors, /// provides a [`.view_bits_mut::<O>()`] method which delegates to this /// function and may be more convenient for you to write. /// /// ## Parameters /// /// - `elem`: An exclusive reference to a memory element. /// /// ## Returns /// /// An exclusive `&mut BitSlice` over `elem`. /// /// Note that the original `elem` reference will be inaccessible for the /// duration of the returned bit-slice handle’s lifetime. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut elem = 0u8; /// let bits = BitSlice::<_, Lsb0>::from_element_mut(&mut elem); /// bits.set(1, true); /// assert!(bits[1]); /// assert_eq!(elem, 2); /// /// let bits = elem.view_bits_mut::<Lsb0>(); /// ``` /// /// [`BitStore`]: crate::store::BitStore /// [`BitView`]: crate::view::BitView /// [`.view_bits_mut::<O>()`]: crate::view::BitView::view_bits_mut #[inline] pub fn from_element_mut(elem: &mut T) -> &mut Self { unsafe { BitPtr::from_mut(elem) .span_unchecked(mem::bits_of::<T>()) .into_bitslice_mut() } } /// Constructs a shared `&BitSlice` reference over a slice of elements.
/// /// The [`BitView`] trait, implemented on all `[T]` slices, provides a /// [`.view_bits::()`] method which delegates to this function and may be /// more convenient for you to write. /// /// ## Parameters /// /// - `slice`: A shared reference to a slice of memory elements. /// /// ## Returns /// /// A shared `BitSlice` reference over all of `slice`. /// /// ## Panics /// /// This will panic if `slice` is too long to encode as a bit-slice view. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let data = [0u16, 1]; /// let bits = BitSlice::<_, Lsb0>::from_slice(&data); /// assert!(bits[16]); /// /// let bits = data.view_bits::(); /// ``` /// /// [`BitView`]: crate::view::BitView /// [`.view_bits::()`]: crate::view::BitView::view_bits #[inline] pub fn from_slice(slice: &[T]) -> &Self { Self::try_from_slice(slice).unwrap() } /// Attempts to construct a shared `&BitSlice` reference over a slice of /// elements. /// /// The [`BitView`], implemented on all `[T]` slices, provides a /// [`.try_view_bits::()`] method which delegates to this function and /// may be more convenient for you to write. /// /// This is *very hard*, if not impossible, to cause to fail. Rust will not /// create excessive arrays on 64-bit architectures. /// /// ## Parameters /// /// - `slice`: A shared reference to a slice of memory elements. /// /// ## Returns /// /// A shared `&BitSlice` over `slice`. If `slice` is longer than can be /// encoded into a `&BitSlice` (see [`MAX_ELTS`]), this will fail and return /// the original `slice` as an error. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let data = [0u8, 1]; /// let bits = BitSlice::<_, Msb0>::try_from_slice(&data).unwrap(); /// assert!(bits[15]); /// /// let bits = data.try_view_bits::().unwrap(); /// ``` /// /// [`BitView`]: crate::view::BitView /// [`MAX_ELTS`]: Self::MAX_ELTS /// [`.try_view_bits::()`]: crate::view::BitView::try_view_bits #[inline] pub fn try_from_slice(slice: &[T]) -> Result<&Self, BitSpanError> { let elts = slice.len(); if elts >= Self::MAX_ELTS { elts.saturating_mul(mem::bits_of::()) .pipe(BitSpanError::TooLong) .pipe(Err) } else { Ok(unsafe { Self::from_slice_unchecked(slice) }) } } /// Constructs an exclusive `&mut BitSlice` reference over a slice of /// elements. /// /// The [`BitView`] trait, implemented on all `[T]` slices, provides a /// [`.view_bits_mut::()`] method which delegates to this function and /// may be more convenient for you to write. /// /// ## Parameters /// /// - `slice`: An exclusive reference to a slice of memory elements. /// /// ## Returns /// /// An exclusive `&mut BitSlice` over all of `slice`. /// /// ## Panics /// /// This panics if `slice` is too long to encode as a bit-slice view. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut data = [0u16; 2]; /// let bits = BitSlice::<_, Lsb0>::from_slice_mut(&mut data); /// bits.set(0, true); /// bits.set(17, true); /// assert_eq!(data, [1, 2]); /// /// let bits = data.view_bits_mut::(); /// ``` /// /// [`BitView`]: crate::view::BitView /// [`.view_bits_mut::()`]: crate::view::BitView::view_bits_mut #[inline] pub fn from_slice_mut(slice: &mut [T]) -> &mut Self { Self::try_from_slice_mut(slice).unwrap() } /// Attempts to construct an exclusive `&mut BitSlice` reference over a /// slice of elements. /// /// The [`BitView`] trait, implemented on all `[T]` slices, provides a /// [`.try_view_bits_mut::()`] method which delegates to this function /// and may be more convenient for you to write. 
/// /// ## Parameters /// /// - `slice`: An exclusive reference to a slice of memory elements. /// /// ## Returns /// /// An exclusive `&mut BitSlice` over `slice`. If `slice` is longer than can /// be encoded into a `&mut BitSlice` (see [`MAX_ELTS`]), this will fail and /// return the original `slice` as an error. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut data = [0u8; 2]; /// let bits = BitSlice::<_, Msb0>::try_from_slice_mut(&mut data).unwrap(); /// bits.set(7, true); /// bits.set(15, true); /// assert_eq!(data, [1; 2]); /// /// let bits = data.try_view_bits_mut::().unwrap(); /// ``` /// /// [`BitView`]: crate::view::BitView /// [`MAX_ELTS`]: Self::MAX_ELTS /// [`.try_view_bits_mut::()`]: crate::view::BitView::try_view_bits_mut #[inline] pub fn try_from_slice_mut( slice: &mut [T], ) -> Result<&mut Self, BitSpanError> { let elts = slice.len(); if elts >= Self::MAX_ELTS { elts.saturating_mul(mem::bits_of::()) .pipe(BitSpanError::TooLong) .pipe(Err) } else { Ok(unsafe { Self::from_slice_unchecked_mut(slice) }) } } /// Constructs a shared `&BitSlice` over an element slice, without checking /// its length. /// /// If `slice` is too long to encode into a `&BitSlice`, then the produced /// bit-slice’s length is unspecified. /// /// ## Safety /// /// You must ensure that `slice.len() < BitSlice::MAX_ELTS`. /// /// Calling this function with an over-long slice is **library-level** /// undefined behavior. You may not assume anything about its implementation /// or behavior, and must conservatively assume that over-long slices cause /// compiler UB. #[inline] pub unsafe fn from_slice_unchecked(slice: &[T]) -> &Self { let bits = slice.len().wrapping_mul(mem::bits_of::()); BitPtr::from_slice(slice) .span_unchecked(bits) .into_bitslice_ref() } /// Constructs an exclusive `&mut BitSlice` over an element slice, without /// checking its length. /// /// If `slice` is too long to encode into a `&mut BitSlice`, then the /// produced bit-slice’s length is unspecified. /// /// ## Safety /// /// You must ensure that `slice.len() < BitSlice::MAX_ELTS`. /// /// Calling this function with an over-long slice is **library-level** /// undefined behavior. You may not assume anything about its implementation /// or behavior, and must conservatively assume that over-long slices cause /// compiler UB. #[inline] pub unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self { let bits = slice.len().wrapping_mul(mem::bits_of::()); BitPtr::from_slice_mut(slice) .span_unchecked(bits) .into_bitslice_mut() } } /// Alternates of standard APIs. impl BitSlice where T: BitStore, O: BitOrder, { /// Gets a raw pointer to the zeroth bit of the bit-slice. /// /// ## Original /// /// [`slice::as_ptr`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr) /// /// ## API Differences /// /// This is renamed in order to indicate that it is returning a `bitvec` /// structure, not a raw pointer. #[inline] pub fn as_bitptr(&self) -> BitPtr { self.as_bitspan().to_bitptr() } /// Gets a raw, write-capable pointer to the zeroth bit of the bit-slice. /// /// ## Original /// /// [`slice::as_mut_ptr`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr) /// /// ## API Differences /// /// This is renamed in order to indicate that it is returning a `bitvec` /// structure, not a raw pointer. 
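///
/// ## Examples
///
/// A minimal sketch of writing through the returned pointer; `BitPtr::write`
/// is `unsafe`, and the caller must keep the pointer within the bit-slice.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![mut 0; 4];
/// let ptr = bits.as_mut_bitptr();
/// // Write through the raw bit-pointer, then observe the change.
/// unsafe { ptr.write(true); }
/// assert!(bits[0]);
/// ```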
#[inline] pub fn as_mut_bitptr(&mut self) -> BitPtr { self.as_mut_bitspan().to_bitptr() } /// Views the bit-slice as a half-open range of bit-pointers, to its first /// bit *in* the bit-slice and first bit *beyond* it. /// /// ## Original /// /// [`slice::as_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr_range) /// /// ## API Differences /// /// This is renamed to indicate that it returns a `bitvec` structure, rather /// than an ordinary `Range`. /// /// ## Notes /// /// `BitSlice` does define a [`.as_ptr_range()`], which returns a /// `Range`. `BitPtrRange` has additional capabilities that /// `Range<*const T>` and `Range` do not. /// /// [`.as_ptr_range()`]: Self::as_ptr_range #[inline] pub fn as_bitptr_range(&self) -> BitPtrRange { self.as_bitspan().to_bitptr_range() } /// Views the bit-slice as a half-open range of write-capable bit-pointers, /// to its first bit *in* the bit-slice and the first bit *beyond* it. /// /// ## Original /// /// [`slice::as_mut_ptr_range`](https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr_range) /// /// ## API Differences /// /// This is renamed to indicate that it returns a `bitvec` structure, rather /// than an ordinary `Range`. /// /// ## Notes /// /// `BitSlice` does define a [`.as_mut_ptr_range()`], which returns a /// `Range`. `BitPtrRange` has additional capabilities that /// `Range<*mut T>` and `Range` do not. #[inline] pub fn as_mut_bitptr_range(&mut self) -> BitPtrRange { self.as_mut_bitspan().to_bitptr_range() } /// Copies the bits from `src` into `self`. /// /// `self` and `src` must have the same length. /// /// ## Performance /// /// If `src` has the same type arguments as `self`, it will use the same /// implementation as [`.copy_from_bitslice()`]; if you know that this will /// always be the case, you should prefer to use that method directly. /// /// Only `.copy_from_bitslice()` is *able* to perform acceleration; this /// method is *always* required to perform a bit-by-bit crawl over both /// bit-slices. /// /// ## Original /// /// [`slice::clone_from_slice`](https://doc.rust-lang.org/std/primitive.slice.html#method.clone_from_slice) /// /// ## API Differences /// /// This is renamed to reflect that it copies from another bit-slice, not /// from an element slice. /// /// In order to support general usage, it allows `src` to have different /// type parameters than `self`, at the cost of performance optimizations. /// /// ## Panics /// /// This panics if the two bit-slices have different lengths. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// ``` /// /// [`.copy_from_bitslice()`]: Self::copy_from_bitslice #[inline] pub fn clone_from_bitslice(&mut self, src: &BitSlice) where T2: BitStore, O2: BitOrder, { assert_eq!( self.len(), src.len(), "cloning between bit-slices requires equal lengths", ); if let Some(that) = src.coerce::() { self.copy_from_bitslice(that); } // TODO(myrrlyn): Test if `` matches `` and // specialize cloning. else { for (to, bit) in self.as_mut_bitptr_range().zip(src.iter().by_vals()) { unsafe { to.write(bit); } } } } /// Copies all bits from `src` into `self`, using batched acceleration when /// possible. /// /// `self` and `src` must have the same length. /// /// ## Original /// /// [`slice::copy_from_slice`](https://doc.rust-lang.org/std/primitive.slice.html#method.copy_from_slice) /// /// ## Panics /// /// This panics if the two bit-slices have different lengths. 
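///
/// For example, a same-shaped copy (a minimal sketch; both sides are
/// `BitSlice<usize, Lsb0>`, so only the lengths are checked at run time):
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut dst = bitvec![0; 4];
/// let src = bits![1, 0, 1, 1];
/// dst.copy_from_bitslice(src);
/// assert_eq!(dst, src);
/// ```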
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// ``` #[inline] pub fn copy_from_bitslice(&mut self, src: &Self) { assert_eq!( self.len(), src.len(), "copying between bit-slices requires equal lengths", ); let (to_head, from_head) = (self.as_bitspan().head(), src.as_bitspan().head()); if to_head == from_head { match (self.domain_mut(), src.domain()) { (Domain::Enclave(mut to), Domain::Enclave(from)) => { to.store_value(from.load_value()); }, ( Domain::Region { head: to_head, body: to_body, tail: to_tail, }, Domain::Region { head: from_head, body: from_body, tail: from_tail, }, ) => { if let (Some(mut to), Some(from)) = (to_head, from_head) { to.store_value(from.load_value()); } for (to, from) in to_body.iter_mut().zip(from_body) { to.store_value(from.load_value()); } if let (Some(mut to), Some(from)) = (to_tail, from_tail) { to.store_value(from.load_value()); } }, _ => unreachable!( "bit-slices with equal type parameters, lengths, and heads \ will always have equal domains" ), } } if let (Some(this), Some(that)) = (self.coerce_mut::(), src.coerce::()) { return this.sp_copy_from_bitslice(that); } if let (Some(this), Some(that)) = (self.coerce_mut::(), src.coerce::()) { return this.sp_copy_from_bitslice(that); } for (to, bit) in self.as_mut_bitptr_range().zip(src.iter().by_vals()) { unsafe { to.write(bit); } } } /// Swaps the contents of two bit-slices. /// /// `self` and `other` must have the same length. /// /// ## Original /// /// [`slice::swap_with_slice`](https://doc.rust-lang.org/std/primitive.slice.html#method.swap_with_slice) /// /// ## API Differences /// /// This method is renamed, as it takes a bit-slice rather than an element /// slice. /// /// ## Panics /// /// This panics if the two bit-slices have different lengths. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut one = [0xA5u8, 0x69]; /// let mut two = 0x1234u16; /// let one_bits = one.view_bits_mut::(); /// let two_bits = two.view_bits_mut::(); /// /// one_bits.swap_with_bitslice(two_bits); /// /// assert_eq!(one, [0x2C, 0x48]); /// # if cfg!(target_endian = "little") { /// assert_eq!(two, 0x96A5); /// # } /// ``` #[inline] pub fn swap_with_bitslice(&mut self, other: &mut BitSlice) where T2: BitStore, O2: BitOrder, { assert_eq!( self.len(), other.len(), "swapping between bit-slices requires equal lengths", ); if let (Some(this), Some(that)) = (self.coerce_mut::(), other.coerce_mut::()) { return this.sp_swap_with_bitslice(that); } if let (Some(this), Some(that)) = (self.coerce_mut::(), other.coerce_mut::()) { return this.sp_swap_with_bitslice(that); } self.as_mut_bitptr_range() .zip(other.as_mut_bitptr_range()) .for_each(|(a, b)| unsafe { bv_ptr::swap(a, b); }); } } /// Extensions of standard APIs. impl BitSlice where T: BitStore, O: BitOrder, { /// Writes a new value into a single bit. /// /// This is the replacement for `*slice[index] = value;`, as `bitvec` is not /// able to express that under the current `IndexMut` API signature. /// /// ## Parameters /// /// - `&mut self` /// - `index`: The bit-index to set. It must be in `0 .. self.len()`. /// - `value`: The new bit-value to write into the bit at `index`. /// /// ## Panics /// /// This panics if `index` is out of bounds. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 1]; /// bits.set(0, true); /// bits.set(1, false); /// /// assert_eq!(bits, bits![1, 0]); /// ``` #[inline] pub fn set(&mut self, index: usize, value: bool) { self.replace(index, value); } /// Writes a new value into a single bit, without bounds checking. /// /// ## Parameters /// /// - `&mut self` /// - `index`: The bit-index to set. It must be in `0 .. self.len()`. /// - `value`: The new bit-value to write into the bit at `index`. /// /// ## Safety /// /// You must ensure that `index` is in the range `0 .. self.len()`. /// /// This performs bit-pointer offset arithmetic without doing any bounds /// checks. If `index` is out of bounds, then this will issue an /// out-of-bounds access and will trigger memory unsafety. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut data = 0u8; /// let bits = &mut data.view_bits_mut::()[.. 2]; /// assert_eq!(bits.len(), 2); /// unsafe { /// bits.set_unchecked(3, true); /// } /// assert_eq!(data, 8); /// ``` #[inline] pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) { self.replace_unchecked(index, value); } /// Writes a new value into a bit, and returns its previous value. /// /// ## Panics /// /// This panics if `index` is not less than `self.len()`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0]; /// assert!(!bits.replace(0, true)); /// assert!(bits[0]); /// ``` #[inline] pub fn replace(&mut self, index: usize, value: bool) -> bool { self.assert_in_bounds(index, 0 .. self.len()); unsafe { self.replace_unchecked(index, value) } } /// Writes a new value into a bit, returning the previous value, without /// bounds checking. /// /// ## Safety /// /// `index` must be less than `self.len()`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 0, 0]; /// let old = unsafe { /// let a = &mut bits[.. 1]; /// a.replace_unchecked(1, true) /// }; /// assert!(!old); /// assert!(bits[1]); /// ``` #[inline] pub unsafe fn replace_unchecked( &mut self, index: usize, value: bool, ) -> bool { self.as_mut_bitptr().add(index).replace(value) } /// Swaps two bits in a bit-slice, without bounds checking. /// /// See [`.swap()`] for documentation. /// /// ## Safety /// /// You must ensure that `a` and `b` are both in the range `0 .. /// self.len()`. /// /// This method performs bit-pointer offset arithmetic without doing any /// bounds checks. If `a` or `b` are out of bounds, then this will issue an /// out-of-bounds access and will trigger memory unsafety. /// /// [`.swap()`]: Self::swap #[inline] pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize) { let a = self.as_mut_bitptr().add(a); let b = self.as_mut_bitptr().add(b); bv_ptr::swap(a, b); } /// Splits a bit-slice at an index, without bounds checking. /// /// See [`.split_at()`] for documentation. /// /// ## Safety /// /// You must ensure that `mid` is in the range `0 ..= self.len()`. /// /// This method produces new bit-slice references. If `mid` is out of /// bounds, its behavior is **library-level** undefined. You must /// conservatively assume that an out-of-bounds split point produces /// compiler-level UB. 
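///
/// ## Examples
///
/// A minimal sketch with an in-bounds midpoint, so the safety condition
/// holds:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bits = bits![0, 0, 1, 1];
/// // `mid == 2` lies within `0 ..= bits.len()`, so this call is sound.
/// let (left, right) = unsafe { bits.split_at_unchecked(2) };
/// assert!(left.not_any());
/// assert!(right.all());
/// ```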
/// /// [`.split_at()`]: Self::split_at #[inline] pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&Self, &Self) { let len = self.len(); let left = self.as_bitptr(); let right = left.add(mid); let left = left.span_unchecked(mid); let right = right.span_unchecked(len - mid); let left = left.into_bitslice_ref(); let right = right.into_bitslice_ref(); (left, right) } /// Splits a mutable bit-slice at an index, without bounds checking. /// /// See [`.split_at_mut()`] for documentation. /// /// ## Safety /// /// You must ensure that `mid` is in the range `0 ..= self.len()`. /// /// This method produces new bit-slice references. If `mid` is out of /// bounds, its behavior is **library-level** undefined. You must /// conservatively assume that an out-of-bounds split point produces /// compiler-level UB. /// /// [`.split_at_mut()`]: Self::split_at_mut #[inline] pub unsafe fn split_at_unchecked_mut( &mut self, mid: usize, ) -> (&mut BitSlice, &mut BitSlice) { let len = self.len(); let left = self.alias_mut().as_mut_bitptr(); let right = left.add(mid); ( left.span_unchecked(mid).into_bitslice_mut(), right.span_unchecked(len - mid).into_bitslice_mut(), ) } /// Copies bits from one region of the bit-slice to another region of /// itself, without doing bounds checks. /// /// The regions are allowed to overlap. /// /// ## Parameters /// /// - `&mut self` /// - `src`: The range within `self` from which to copy. /// - `dst`: The starting index within `self` at which to paste. /// /// ## Effects /// /// `self[src]` is copied to `self[dest .. dest + src.len()]`. The bits of /// `self[src]` are in an unspecified, but initialized, state. /// /// ## Safety /// /// `src.end()` and `dest + src.len()` must be entirely within bounds. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut data = 0b1011_0000u8; /// let bits = data.view_bits_mut::(); /// /// unsafe { /// bits.copy_within_unchecked(.. 4, 2); /// } /// assert_eq!(data, 0b1010_1100); /// ``` #[inline] pub unsafe fn copy_within_unchecked(&mut self, src: R, dest: usize) where R: RangeExt { if let Some(this) = self.coerce_mut::() { return this.sp_copy_within_unchecked(src, dest); } if let Some(this) = self.coerce_mut::() { return this.sp_copy_within_unchecked(src, dest); } let source = src.normalize(0, self.len()); let source_len = source.len(); let rev = source.contains(&dest); let dest = dest .. dest + source_len; for (from, to) in self .get_unchecked(source) .as_bitptr_range() .zip(self.get_unchecked_mut(dest).as_mut_bitptr_range()) .bidi(rev) { to.write(from.read()); } } #[inline] #[doc(hidden)] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.iter_mut().enumerate()`"] pub fn for_each(&mut self, mut func: impl FnMut(usize, bool) -> bool) { for (idx, ptr) in self.as_mut_bitptr_range().enumerate() { unsafe { ptr.write(func(idx, ptr.read())); } } } } /// Views of underlying memory. impl BitSlice where T: BitStore, O: BitOrder, { /// Partitions a bit-slice into maybe-contended and known-uncontended parts. /// /// The documentation of `BitDomain` goes into this in more detail. In /// short, this produces a `&BitSlice` that is as large as possible without /// requiring alias protection, as well as any bits that were not able to be /// included in the unaliased bit-slice. #[inline] #[cfg(not(tarpaulin_include))] pub fn bit_domain(&self) -> BitDomain { self.domain().into_bit_domain() } /// Partitions a mutable bit-slice into maybe-contended and /// known-uncontended parts. 
/// /// The documentation of `BitDomain` goes into this in more detail. In /// short, this produces a `&mut BitSlice` that is as large as possible /// without requiring alias protection, as well as any bits that were not /// able to be included in the unaliased bit-slice. #[inline] #[cfg(not(tarpaulin_include))] pub fn bit_domain_mut(&mut self) -> BitDomain { self.domain_mut().into_bit_domain() } /// Views the underlying memory of a bit-slice, removing alias protections /// where possible. /// /// The documentation of `Domain` goes into this in more detail. In short, /// this produces a `&[T]` slice with alias protections removed, covering /// all elements that `self` completely fills. Partially-used elements on /// either the front or back edge of the slice are returned separately. #[inline] #[cfg(not(tarpaulin_include))] pub fn domain(&self) -> Domain { Domain::new(self) } /// Views the underlying memory of a bit-slice, removing alias protections /// where possible. /// /// The documentation of `Domain` goes into this in more detail. In short, /// this produces a `&mut [T]` slice with alias protections removed, /// covering all elements that `self` completely fills. Partially-used /// elements on the front or back edge of the slice are returned separately. #[inline] #[cfg(not(tarpaulin_include))] pub fn domain_mut(&mut self) -> Domain { Domain::new(self) } } /// Bit-value queries. impl BitSlice where T: BitStore, O: BitOrder, { /// Counts the number of bits set to `1` in the bit-slice contents. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![1, 1, 0, 0]; /// assert_eq!(bits[.. 2].count_ones(), 2); /// assert_eq!(bits[2 ..].count_ones(), 0); /// assert_eq!(bits![].count_ones(), 0); /// ``` #[inline] pub fn count_ones(&self) -> usize { match self.domain() { Domain::Enclave(elem) => elem.load_value().count_ones() as usize, Domain::Region { head, body, tail } => { head.map_or(0, |elem| elem.load_value().count_ones() as usize) + body .iter() .map(BitStore::load_value) .map(|elem| elem.count_ones() as usize) .sum::() + tail .map_or(0, |elem| elem.load_value().count_ones() as usize) }, } } /// Counts the number of bits cleared to `0` in the bit-slice contents. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![1, 1, 0, 0]; /// assert_eq!(bits[.. 2].count_zeros(), 0); /// assert_eq!(bits[2 ..].count_zeros(), 2); /// assert_eq!(bits![].count_zeros(), 0); /// ``` #[inline] pub fn count_zeros(&self) -> usize { match self.domain() { Domain::Enclave(elem) => (elem.load_value() | !elem.mask().into_inner()) .count_zeros() as usize, Domain::Region { head, body, tail } => { head.map_or(0, |elem| { (elem.load_value() | !elem.mask().into_inner()).count_zeros() as usize }) + body .iter() .map(BitStore::load_value) .map(|elem| elem.count_zeros() as usize) .sum::() + tail.map_or(0, |elem| { (elem.load_value() | !elem.mask().into_inner()).count_zeros() as usize }) }, } } /// Enumerates the index of each bit in a bit-slice set to `1`. /// /// This is a shorthand for a `.enumerate().filter_map()` iterator that /// selects the index of each `true` bit; however, its implementation is /// eligible for optimizations that the individual-bit iterator is not. /// /// Specializations for the `Lsb0` and `Msb0` orderings allow processors /// with instructions that seek particular bits within an element to operate /// on whole elements, rather than on each bit individually. 
/// /// ## Examples /// /// This example uses `.iter_ones()`, a `.filter_map()` that finds the index /// of each set bit, and the known indices, in order to show that they have /// equivalent behavior. /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1, 0, 0, 0, 1]; /// /// let iter_ones = bits.iter_ones(); /// let known_indices = [1, 4, 8].iter().copied(); /// let filter = bits.iter() /// .by_vals() /// .enumerate() /// .filter_map(|(idx, bit)| if bit { Some(idx) } else { None }); /// let all = iter_ones.zip(known_indices).zip(filter); /// /// for ((iter_one, known), filtered) in all { /// assert_eq!(iter_one, known); /// assert_eq!(known, filtered); /// } /// ``` #[inline] pub fn iter_ones(&self) -> IterOnes { IterOnes::new(self) } /// Enumerates the index of each bit in a bit-slice cleared to `0`. /// /// This is a shorthand for a `.enumerate().filter_map()` iterator that /// selects the index of each `false` bit; however, its implementation is /// eligible for optimizations that the individual-bit iterator is not. /// /// Specializations for the `Lsb0` and `Msb0` orderings allow processors /// with instructions that seek particular bits within an element to operate /// on whole elements, rather than on each bit individually. /// /// ## Examples /// /// This example uses `.iter_zeros()`, a `.filter_map()` that finds the /// index of each cleared bit, and the known indices, in order to show that /// they have equivalent behavior. /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![1, 0, 1, 1, 0, 1, 1, 1, 0]; /// /// let iter_zeros = bits.iter_zeros(); /// let known_indices = [1, 4, 8].iter().copied(); /// let filter = bits.iter() /// .by_vals() /// .enumerate() /// .filter_map(|(idx, bit)| if !bit { Some(idx) } else { None }); /// let all = iter_zeros.zip(known_indices).zip(filter); /// /// for ((iter_zero, known), filtered) in all { /// assert_eq!(iter_zero, known); /// assert_eq!(known, filtered); /// } /// ``` #[inline] pub fn iter_zeros(&self) -> IterZeros { IterZeros::new(self) } /// Finds the index of the first bit in the bit-slice set to `1`. /// /// Returns `None` if there is no `true` bit in the bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(bits![].first_one().is_none()); /// assert!(bits![0].first_one().is_none()); /// assert_eq!(bits![0, 1].first_one(), Some(1)); /// ``` #[inline] pub fn first_one(&self) -> Option { self.iter_ones().next() } /// Finds the index of the first bit in the bit-slice cleared to `0`. /// /// Returns `None` if there is no `false` bit in the bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(bits![].first_zero().is_none()); /// assert!(bits![1].first_zero().is_none()); /// assert_eq!(bits![1, 0].first_zero(), Some(1)); /// ``` #[inline] pub fn first_zero(&self) -> Option { self.iter_zeros().next() } /// Finds the index of the last bit in the bit-slice set to `1`. /// /// Returns `None` if there is no `true` bit in the bit-slice. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(bits![].last_one().is_none()); /// assert!(bits![0].last_one().is_none()); /// assert_eq!(bits![1, 0].last_one(), Some(0)); /// ``` #[inline] pub fn last_one(&self) -> Option { self.iter_ones().next_back() } /// Finds the index of the last bit in the bit-slice cleared to `0`. /// /// Returns `None` if there is no `false` bit in the bit-slice. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(bits![].last_zero().is_none()); /// assert!(bits![1].last_zero().is_none()); /// assert_eq!(bits![0, 1].last_zero(), Some(0)); /// ``` #[inline] pub fn last_zero(&self) -> Option { self.iter_zeros().next_back() } /// Counts the number of bits from the start of the bit-slice to the first /// bit set to `0`. /// /// This returns `0` if the bit-slice is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert_eq!(bits![].leading_ones(), 0); /// assert_eq!(bits![0].leading_ones(), 0); /// assert_eq!(bits![1, 0].leading_ones(), 1); /// ``` #[inline] pub fn leading_ones(&self) -> usize { self.first_zero().unwrap_or_else(|| self.len()) } /// Counts the number of bits from the start of the bit-slice to the first /// bit set to `1`. /// /// This returns `0` if the bit-slice is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert_eq!(bits![].leading_zeros(), 0); /// assert_eq!(bits![1].leading_zeros(), 0); /// assert_eq!(bits![0, 1].leading_zeros(), 1); /// ``` #[inline] pub fn leading_zeros(&self) -> usize { self.first_one().unwrap_or_else(|| self.len()) } /// Counts the number of bits from the end of the bit-slice to the last bit /// set to `0`. /// /// This returns `0` if the bit-slice is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert_eq!(bits![].trailing_ones(), 0); /// assert_eq!(bits![0].trailing_ones(), 0); /// assert_eq!(bits![0, 1].trailing_ones(), 1); /// ``` #[inline] pub fn trailing_ones(&self) -> usize { let len = self.len(); self.last_zero().map(|idx| len - 1 - idx).unwrap_or(len) } /// Counts the number of bits from the end of the bit-slice to the last bit /// set to `1`. /// /// This returns `0` if the bit-slice is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert_eq!(bits![].trailing_zeros(), 0); /// assert_eq!(bits![1].trailing_zeros(), 0); /// assert_eq!(bits![1, 0].trailing_zeros(), 1); /// ``` #[inline] pub fn trailing_zeros(&self) -> usize { let len = self.len(); self.last_one().map(|idx| len - 1 - idx).unwrap_or(len) } /// Tests if there is at least one bit set to `1` in the bit-slice. /// /// Returns `false` when `self` is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(!bits![].any()); /// assert!(!bits![0].any()); /// assert!(bits![0, 1].any()); /// ``` #[inline] pub fn any(&self) -> bool { self.count_ones() > 0 } /// Tests if every bit is set to `1` in the bit-slice. /// /// Returns `true` when `self` is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!( bits![].all()); /// assert!(!bits![0].all()); /// assert!( bits![1].all()); /// ``` #[inline] pub fn all(&self) -> bool { self.count_zeros() == 0 } /// Tests if every bit is cleared to `0` in the bit-slice. /// /// Returns `true` when `self` is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!( bits![].not_any()); /// assert!(!bits![1].not_any()); /// assert!( bits![0].not_any()); /// ``` #[inline] pub fn not_any(&self) -> bool { self.count_ones() == 0 } /// Tests if at least one bit is cleared to `0` in the bit-slice. /// /// Returns `false` when `self` is empty. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(!bits![].not_all()); /// assert!(!bits![1].not_all()); /// assert!( bits![0].not_all()); /// ``` #[inline] pub fn not_all(&self) -> bool { self.count_zeros() > 0 } /// Tests if at least one bit is set to `1`, and at least one bit is cleared /// to `0`, in the bit-slice. /// /// Returns `false` when `self` is empty. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// assert!(!bits![].some()); /// assert!(!bits![0].some()); /// assert!(!bits![1].some()); /// assert!( bits![0, 1].some()); /// ``` #[inline] pub fn some(&self) -> bool { self.any() && self.not_all() } } /// Buffer manipulation. impl BitSlice where T: BitStore, O: BitOrder, { /// Shifts the contents of a bit-slice “left” (towards the zero-index), /// clearing the “right” bits to `0`. /// /// This is a strictly-worse analogue to taking `bits = &bits[by ..]`: it /// has to modify the entire memory region that `bits` governs, and destroys /// contained information. Unless the actual memory layout and contents of /// your bit-slice matters to your program, you should *probably* prefer to /// munch your way forward through a bit-slice handle. /// /// Note also that the “left” here is semantic only, and **does not** /// necessarily correspond to a left-shift instruction applied to the /// underlying integer storage. /// /// This has no effect when `by` is `0`. When `by` is `self.len()`, the /// bit-slice is entirely cleared to `0`. /// /// ## Panics /// /// This panics if `by` is not less than `self.len()`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1]; /// // these bits are retained ^--------------------------^ /// bits.shift_left(2); /// assert_eq!(bits, bits![1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0]); /// // and move here ^--------------------------^ /// /// let bits = bits![mut 1; 2]; /// bits.shift_left(2); /// assert_eq!(bits, bits![0; 2]); /// ``` #[inline] pub fn shift_left(&mut self, by: usize) { if by == 0 { return; } let len = self.len(); if by == len { return self.fill(false); } assert!( by <= len, "shift must be less than the length of the bit-slice: {} >= {}", by, len, ); unsafe { self.copy_within_unchecked(by .., 0); self.get_unchecked_mut(len - by ..).fill(false); } } /// Shifts the contents of a bit-slice “right” (away from the zero-index), /// clearing the “left” bits to `0`. /// /// This is a strictly-worse analogue to taking `bits = &bits[.. bits.len() /// - by]`: it must modify the entire memory region that `bits` governs, and /// destroys contained information. Unless the actual memory layout and /// contents of your bit-slice matters to your program, you should /// *probably* prefer to munch your way backward through a bit-slice handle. /// /// Note also that the “right” here is semantic only, and **does not** /// necessarily correspond to a right-shift instruction applied to the /// underlying integer storage. /// /// This has no effect when `by` is `0`. When `by` is `self.len()`, the /// bit-slice is entirely cleared to `0`. /// /// ## Panics /// /// This panics if `by` is not less than `self.len()`. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![mut 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1]; /// // these bits stay ^--------------------------^ /// bits.shift_right(2); /// assert_eq!(bits, bits![0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1]); /// // and move here ^--------------------------^ /// /// let bits = bits![mut 1; 2]; /// bits.shift_right(2); /// assert_eq!(bits, bits![0; 2]); /// ``` #[inline] pub fn shift_right(&mut self, by: usize) { if by == 0 { return; } let len = self.len(); if by == len { return self.fill(false); } assert!( by <= len, "shift must be less than the length of the bit-slice: {} >= {}", by, len, ); unsafe { self.copy_within_unchecked(.. len - by, by); self.get_unchecked_mut(.. by).fill(false); } } } /// Crate internals. impl BitSlice where T: BitStore, O: BitOrder, { /// Gets the structural form of the encoded reference. pub(crate) fn as_bitspan(&self) -> BitSpan { BitSpan::from_bitslice_ptr(self) } /// Gets the structural form of the encoded reference. pub(crate) fn as_mut_bitspan(&mut self) -> BitSpan { BitSpan::from_bitslice_ptr_mut(self) } /// Asserts that `index` is within the given bounds. /// /// ## Parameters /// /// - `&self` /// - `index`: The bit index to test against the bit-slice. /// - `bounds`: The bounds to check. cannot exceed `0 ..= self.len()`. /// /// ## Panics /// /// This panics if `bounds` is outside `index`. pub(crate) fn assert_in_bounds(&self, index: usize, bounds: R) where R: RangeExt { let bounds = bounds.normalize(0, self.len()); assert!( bounds.contains(&index), "index {} out of range: {:?}", index, bounds.end_bound() ); } /// Marks an exclusive bit-slice as covering an aliased memory region. pub(crate) fn alias_mut(&mut self) -> &mut BitSlice { unsafe { self.as_mut_bitspan().cast::().into_bitslice_mut() } } /// Removes an aliasing marker from an exclusive bit-slice handle. /// /// ## Safety /// /// This may only be used when the bit-slice is either known to be /// unaliased, or this call is combined with an operation that adds an /// aliasing marker and the total number of aliasing markers remains /// unchanged. pub(crate) unsafe fn unalias_mut( this: &mut BitSlice, ) -> &mut Self { this.as_mut_bitspan().cast::().into_bitslice_mut() } /// Splits a mutable bit-slice at a midpoint, without either doing bounds /// checks or adding an alias marker to the returned sections. /// /// This method has the same behavior as [`.split_at_unchecked_mut()`], /// except that it does not apply an aliasing marker to the partitioned /// subslices. /// /// ## Safety /// /// See `split_at_unchecked_mut`. Additionally, this is only safe when `T` /// is alias-safe. /// /// [`.split_at_unchecked_mut()`]: Self::split_at_unchecked_mut pub(crate) unsafe fn split_at_unchecked_mut_noalias( &mut self, mid: usize, ) -> (&mut Self, &mut Self) { // Split the slice at the requested midpoint, adding an alias layer let (head, tail) = self.split_at_unchecked_mut(mid); // Remove the new alias layer. (Self::unalias_mut(head), Self::unalias_mut(tail)) } } /// Methods available only when `T` allows shared mutability. impl BitSlice where T: BitStore + radium::Radium, O: BitOrder, { /// Writes a new value into a single bit, using alias-safe operations. /// /// This is equivalent to [`.set()`], except that it does not require an /// `&mut` reference, and allows bit-slices with alias-safe storage to share /// write permissions. 
/// /// ## Parameters /// /// - `&self`: This method only exists on bit-slices with alias-safe /// storage, and so does not require exclusive access. /// - `index`: The bit index to set. It must be in `0 .. self.len()`. /// - `value`: The new bit-value to write into the bit at `index`. /// /// ## Panics /// /// This panics if `index` is out of bounds. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// use core::cell::Cell; /// /// let bits: &BitSlice<_, _> = bits![Cell, Lsb0; 0, 1]; /// bits.set_aliased(0, true); /// bits.set_aliased(1, false); /// /// assert_eq!(bits, bits![1, 0]); /// ``` /// /// [`.set()`]: Self::set #[inline] pub fn set_aliased(&self, index: usize, value: bool) { self.assert_in_bounds(index, 0 .. self.len()); unsafe { self.set_aliased_unchecked(index, value); } } /// Writes a new value into a single bit, using alias-safe operations and /// without bounds checking. /// /// This is equivalent to [`.set_unchecked()`], except that it does not /// require an `&mut` reference, and allows bit-slices with alias-safe /// storage to share write permissions. /// /// ## Parameters /// /// - `&self`: This method only exists on bit-slices with alias-safe /// storage, and so does not require exclusive access. /// - `index`: The bit index to set. It must be in `0 .. self.len()`. /// - `value`: The new bit-value to write into the bit at `index`. /// /// ## Safety /// /// The caller must ensure that `index` is not out of bounds. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// use core::cell::Cell; /// /// let data = Cell::new(0u8); /// let bits = &data.view_bits::()[.. 2]; /// unsafe { /// bits.set_aliased_unchecked(3, true); /// } /// assert_eq!(data.get(), 8); /// ``` /// /// [`.set_unchecked()`]: Self::set_unchecked #[inline] pub unsafe fn set_aliased_unchecked(&self, index: usize, value: bool) { self.as_bitptr().add(index).freeze().frozen_write_bit(value); } } /// Miscellaneous information. impl BitSlice where T: BitStore, O: BitOrder, { /// The inclusive maximum length of a `BitSlice<_, T>`. /// /// As `BitSlice` is zero-indexed, the largest possible *index* is one less /// than this value. /// /// |CPU word width| Value | /// |-------------:|----------------------:| /// | 32 bits | `0x1fff_ffff` | /// | 64 bits |`0x1fff_ffff_ffff_ffff`| pub const MAX_BITS: usize = BitSpan::::REGION_MAX_BITS; /// The inclusive maximum length that a `[T]` slice can be for /// `BitSlice<_, T>` to cover it. /// /// A `BitSlice<_, T>` that begins in the interior of an element and /// contains the maximum number of bits will extend one element past the /// cutoff that would occur if the bit-slice began at the zeroth bit. Such a /// bit-slice is difficult to manually construct, but would not otherwise /// fail. /// /// |Type Bits|Max Elements (32-bit)| Max Elements (64-bit) | /// |--------:|--------------------:|----------------------:| /// | 8| `0x0400_0001` |`0x0400_0000_0000_0001`| /// | 16| `0x0200_0001` |`0x0200_0000_0000_0001`| /// | 32| `0x0100_0001` |`0x0100_0000_0000_0001`| /// | 64| `0x0080_0001` |`0x0080_0000_0000_0001`| pub const MAX_ELTS: usize = BitSpan::::REGION_MAX_ELTS; } #[cfg(feature = "alloc")] impl BitSlice where T: BitStore, O: BitOrder, { /// Copies a bit-slice into an owned bit-vector. /// /// Since the new vector is freshly owned, this gets marked as `::Unalias` /// to remove any guards that may have been inserted by the bit-slice’s /// history. 
/// /// It does *not* use the underlying memory type, so that a `BitSlice<_, /// Cell<_>>` will produce a `BitVec<_, Cell<_>>`. /// /// ## Original /// /// [`slice::to_vec`](https://doc.rust-lang.org/std/primitive.slice.html#method.to_vec) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 1]; /// let bv = bits.to_bitvec(); /// assert_eq!(bits, bv); /// ``` #[inline] pub fn to_bitvec(&self) -> BitVec { self.domain() .map(::new) .collect::>() .pipe(BitVec::from_vec) .tap_mut(|bv| unsafe { bv.set_head(self.as_bitspan().head()); bv.set_len(self.len()); }) } } #[inline] #[doc = include_str!("../doc/slice/from_raw_parts_unchecked.md")] pub unsafe fn from_raw_parts_unchecked<'a, T, O>( ptr: BitPtr, len: usize, ) -> &'a BitSlice where O: BitOrder, T: 'a + BitStore, { ptr.span_unchecked(len).into_bitslice_ref() } #[inline] #[doc = include_str!("../doc/slice/from_raw_parts_unchecked_mut.md")] pub unsafe fn from_raw_parts_unchecked_mut<'a, T, O>( ptr: BitPtr, len: usize, ) -> &'a mut BitSlice where O: BitOrder, T: 'a + BitStore, { ptr.span_unchecked(len).into_bitslice_mut() } bitvec-1.0.1/src/store.rs000064400000000000000000000215221046102023000133720ustar 00000000000000#![doc = include_str!("../doc/store.md")] use core::{ cell::Cell, fmt::Debug, }; use funty::Integral; use crate::{ access::*, index::BitIdx, mem::{ self, BitRegister, }, order::BitOrder, }; #[doc = include_str!("../doc/store/BitStore.md")] pub trait BitStore: 'static + Debug { /// The element type used in the memory region underlying a `BitSlice`. It /// is *always* one of the unsigned integer fundamentals. type Mem: BitRegister + BitStore; /// A type that selects the appropriate load/store instructions when /// accessing the memory bus. It determines what instructions are used when /// moving a `Self::Mem` value between the processor and the memory system. /// /// This must be *at least* able to manage aliasing. type Access: BitAccess + BitStore; /// A sibling `BitStore` implementor that is known to be alias-safe. It is /// used when a `BitSlice` introduces multiple handles that view the same /// memory location, and at least one of them has write capabilities to it. /// It must have the same underlying memory type, and can only change access /// patterns or public-facing usage. type Alias: BitStore; /// The inverse of `::Alias`. It is used when a `BitSlice` removes the /// conditions that required a `T -> T::Alias` transition. type Unalias: BitStore; /// The zero constant. const ZERO: Self; /// Wraps a raw memory value as a `BitStore` type. fn new(value: Self::Mem) -> Self; /// Loads a value out of the memory system according to the `::Access` /// rules. This may be called when the value is aliased by a write-capable /// reference. fn load_value(&self) -> Self::Mem; /// Stores a value into the memory system. This is only called when there /// are no other handles to the value, and it may bypass `::Access` /// constraints. fn store_value(&mut self, value: Self::Mem); /// Reads a single bit out of the memory system according to the `::Access` /// rules. This is lifted from [`BitAccess`] so that it can be used /// elsewhere without additional casts. /// /// ## Type Parameters /// /// - `O`: The ordering of bits within `Self::Mem` governing the lookup. /// /// ## Parameters /// /// - `index`: The semantic index of a bit in `*self`. /// /// ## Returns /// /// The value of the bit in `*self` at `BitOrder::at(index)`. 
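///
/// ## Examples
///
/// A sketch of reading one bit out of a bare integer element. It assumes
/// the `BitIdx::new` constructor from this crate's `index` module for
/// building the semantic index.
///
/// ```rust
/// use bitvec::prelude::*;
/// use bitvec::index::BitIdx;
///
/// let elem = 4u8; // bit pattern `0b100`
/// // Assumed constructor: fallible, as indices are bounded by the width.
/// let idx = BitIdx::new(2).unwrap();
/// // Under `Lsb0`, semantic index 2 selects the `4` bit.
/// assert!(elem.get_bit::<Lsb0>(idx));
/// ```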
/// /// [`BitAccess`]: crate::access::BitAccess #[inline] fn get_bit(&self, index: BitIdx) -> bool where O: BitOrder { self.load_value() & index.select::().into_inner() != ::ZERO } /// All implementors are required to have their alignment match their size. /// /// Use [`mem::aligned_to_size::()`][0] to prove this. /// /// [0]: crate::mem::aligned_to_size const ALIGNED_TO_SIZE: [(); 1]; /// All implementors are required to have `Self` and `Self::Alias` be equal /// in representation. This is true by fiat for all types except the /// unsigned integers. /// /// Use [`mem::layout_eq::()`][0] to prove this. /// /// [0]: crate::mem::layout_eq const ALIAS_WIDTH: [(); 1]; } /// Generates `BitStore` implementations for ordinary integers and `Cell`s. macro_rules! store { ($($base:ty => $safe:ty);+ $(;)?) => { $( impl BitStore for $base { type Mem = Self; /// The unsigned integers will only be `BitStore` type parameters /// for handles to unaliased memory, following the normal Rust /// reference rules. type Access = Cell; type Alias = $safe; type Unalias = Self; const ZERO: Self = 0; #[inline] fn new(value: Self::Mem) -> Self { value } #[inline] fn load_value(&self) -> Self::Mem { *self } #[inline] fn store_value(&mut self, value: Self::Mem) { *self = value; } const ALIGNED_TO_SIZE: [(); 1] = [(); mem::aligned_to_size::() as usize]; const ALIAS_WIDTH: [(); 1] = [(); mem::layout_eq::() as usize]; } impl BitStore for $safe { type Mem = $base; type Access = ::Rad; type Alias = Self; type Unalias = $base; const ZERO: Self = ::ZERO; #[inline] fn new(value: Self::Mem) -> Self { ::new(value) } #[inline] fn load_value(&self) -> Self::Mem { self.load() } #[inline] fn store_value(&mut self, value: Self::Mem) { *self = Self::new(value); } const ALIGNED_TO_SIZE: [(); 1] = [(); mem::aligned_to_size::() as usize]; const ALIAS_WIDTH: [(); 1] = [()]; } impl BitStore for Cell<$base> { type Mem = $base; type Access = Self; type Alias = Self; type Unalias = Self; const ZERO: Self = Self::new(0); #[inline] fn new(value: Self::Mem) -> Self { ::new(value) } #[inline] fn load_value(&self) -> Self::Mem { self.get() } #[inline] fn store_value(&mut self, value: Self::Mem) { *self = Self::new(value); } const ALIGNED_TO_SIZE: [(); 1] = [(); mem::aligned_to_size::() as usize]; const ALIAS_WIDTH: [(); 1] = [()]; } )+ }; } store! { u8 => BitSafeU8; u16 => BitSafeU16; u32 => BitSafeU32; } #[cfg(target_pointer_width = "64")] store!(u64 => BitSafeU64); store!(usize => BitSafeUsize); /// Generates `BitStore` implementations for atomic types. macro_rules! atomic { ($($size:tt, $base:ty => $atom:ident);+ $(;)?) => { $( radium::if_atomic!(if atomic($size) { use core::sync::atomic::$atom; impl BitStore for $atom { type Mem = $base; type Access = Self; type Alias = Self; type Unalias = Self; const ZERO: Self = ::new(0); #[inline] fn new(value: Self::Mem) -> Self { ::new(value) } #[inline] fn load_value(&self) -> Self::Mem { self.load(core::sync::atomic::Ordering::Relaxed) } #[inline] fn store_value(&mut self, value: Self::Mem) { *self = Self::new(value); } const ALIGNED_TO_SIZE: [(); 1] = [(); mem::aligned_to_size::() as usize]; const ALIAS_WIDTH: [(); 1] = [()]; } }); )+ }; } atomic! 
{ 8, u8 => AtomicU8; 16, u16 => AtomicU16; 32, u32 => AtomicU32; } #[cfg(target_pointer_width = "64")] atomic!(64, u64 => AtomicU64); atomic!(size, usize => AtomicUsize); #[cfg(test)] mod tests { use static_assertions::*; use super::*; use crate::prelude::*; #[test] fn load_store() { let mut word = 0usize; word.store_value(39); assert_eq!(word.load_value(), 39); let mut safe = BitSafeUsize::new(word); safe.store_value(57); assert_eq!(safe.load_value(), 57); let mut cell = Cell::new(0usize); cell.store_value(39); assert_eq!(cell.load_value(), 39); radium::if_atomic!(if atomic(size) { let mut atom = AtomicUsize::new(0); atom.store_value(57); assert_eq!(atom.load_value(), 57); }); } /// Unaliased `BitSlice`s are universally threadsafe, because they satisfy /// Rust’s unsynchronized mutation rules. #[test] fn unaliased_send_sync() { assert_impl_all!(BitSlice: Send, Sync); assert_impl_all!(BitSlice: Send, Sync); assert_impl_all!(BitSlice: Send, Sync); assert_impl_all!(BitSlice: Send, Sync); #[cfg(target_pointer_width = "64")] assert_impl_all!(BitSlice: Send, Sync); } #[test] fn cell_unsend_unsync() { assert_not_impl_any!(BitSlice, LocalBits>: Send, Sync); assert_not_impl_any!(BitSlice, LocalBits>: Send, Sync); assert_not_impl_any!(BitSlice, LocalBits>: Send, Sync); assert_not_impl_any!(BitSlice, LocalBits>: Send, Sync); #[cfg(target_pointer_width = "64")] assert_not_impl_any!(BitSlice, LocalBits>: Send, Sync); } /// In non-atomic builds, aliased `BitSlice`s become universally /// thread-unsafe. An `&mut BitSlice` is an `&Cell`, and `&Cell` cannot be /// sent across threads. /// /// This test cannot be meaningfully expressed in atomic builds, because the /// atomicity of a `BitSafeUN` type is target-specific, and expressed in /// `radium` rather than in `bitvec`. #[test] #[cfg(not(feature = "atomic"))] fn aliased_non_atomic_unsend_unsync() { assert_not_impl_any!(BitSlice: Send, Sync); assert_not_impl_any!(BitSlice: Send, Sync); assert_not_impl_any!(BitSlice: Send, Sync); assert_not_impl_any!(BitSlice: Send, Sync); #[cfg(target_pointer_width = "64")] assert_not_impl_any!(BitSlice: Send, Sync); } #[test] #[cfg(feature = "atomic")] fn aliased_atomic_send_sync() { assert_impl_all!(BitSlice: Send, Sync); assert_impl_all!(BitSlice: Send, Sync); assert_impl_all!(BitSlice: Send, Sync); assert_impl_all!(BitSlice: Send, Sync); #[cfg(target_pointer_width = "64")] assert_impl_all!(BitSlice: Send, Sync); } } bitvec-1.0.1/src/vec/api.rs000064400000000000000000000643101046102023000135660ustar 00000000000000//! Port of the `Vec` inherent API. use alloc::vec::Vec; use core::{ mem::ManuallyDrop, ops::RangeBounds, }; use tap::Pipe; use wyz::{ comu::{ Const, Mut, }, range::RangeExt, }; use super::{ BitVec, Drain, Splice, }; use crate::{ boxed::BitBox, index::BitEnd, mem, order::BitOrder, ptr::{ AddressExt, BitPtr, BitSpan, }, slice::BitSlice, store::BitStore, }; /// Port of the `Vec` inherent API. impl BitVec where T: BitStore, O: BitOrder, { /// Constructs a new, empty, bit-vector. /// /// This does not allocate until bits are [`.push()`]ed into it, or space is /// explicitly [`.reserve()`]d. /// /// ## Original /// /// [`Vec::new`](alloc::vec::Vec::new) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = BitVec::::new(); /// assert!(bv.is_empty()); /// ``` /// /// [`.push()`]: Self::push /// [`.reserve()`]: Self::reserve #[inline] pub fn new() -> Self { Self::EMPTY } /// Allocates a new, empty, bit-vector with space for at least `capacity` /// bits before reallocating. 
/// /// ## Original /// /// [`Vec::with_capacity`](alloc::vec::Vec::with_capacity) /// /// ## Panics /// /// This panics if the requested capacity is longer than what the bit-vector /// can represent. See [`BitSlice::MAX_BITS`]. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv: BitVec = BitVec::with_capacity(128); /// /// assert!(bv.is_empty()); /// assert!(bv.capacity() >= 128); /// /// for i in 0 .. 128 { /// bv.push(i & 0xC0 == i); /// } /// assert_eq!(bv.len(), 128); /// assert!(bv.capacity() >= 128); /// /// bv.push(false); /// assert_eq!(bv.len(), 129); /// assert!(bv.capacity() >= 129); /// ``` /// /// [`BitSlice::MAX_BITS`]: crate::slice::BitSlice::MAX_BITS #[inline] pub fn with_capacity(capacity: usize) -> Self { Self::assert_len_encodable(capacity); let mut vec = capacity .pipe(crate::mem::elts::) .pipe(Vec::::with_capacity) .pipe(ManuallyDrop::new); let (addr, capacity) = (vec.as_mut_ptr(), vec.capacity()); let bitspan = BitSpan::uninhabited(unsafe { addr.into_address() }); Self { bitspan, capacity } } /// Constructs a bit-vector handle from its constituent fields. /// /// ## Original /// /// [`Vec::from_raw_parts`](alloc::vec::Vec::from_raw_parts) /// /// ## Safety /// /// The **only** acceptable argument values for this function are those that /// were previously produced by calling [`.into_raw_parts()`]. Furthermore, /// you may only call this **at most once** on any set of arguments. Using /// the same arguments in more than one call to this function will result in /// a double- or use-after free error. /// /// Attempting to conjure your own values and pass them into this function /// will break the allocator state. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = bitvec![0, 1, 0, 0, 1]; /// let (bitptr, len, capa) = bv.into_raw_parts(); /// let bv2 = unsafe { /// BitVec::from_raw_parts(bitptr, len, capa) /// }; /// assert_eq!(bv2, bits![0, 1, 0, 0, 1]); /// ``` /// /// [`.into_raw_parts()`]: Self::into_raw_parts #[inline] pub unsafe fn from_raw_parts( bitptr: BitPtr, length: usize, capacity: usize, ) -> Self { let bitspan = bitptr.span_unchecked(length); Self { bitspan, capacity: mem::elts::( capacity.saturating_add(bitspan.head().into_inner() as usize), ), } } /// Decomposes a bit-vector into its constituent member fields. /// /// This disarms the destructor. In order to prevent a memory leak, you must /// pass **these exact values** back into [`::from_raw_parts()`]. /// /// ## Original /// /// [`Vec::into_raw_parts`](alloc::vec::Vec::into_raw_parts) /// /// ## API Differences /// /// This method is still unstable as of 1.54. It is provided here as a /// convenience, under the expectation that the standard-library method will /// stabilize as-is. /// /// [`::from_raw_parts()`]: Self::from_raw_parts #[inline] pub fn into_raw_parts(self) -> (BitPtr, usize, usize) { let this = ManuallyDrop::new(self); ( this.bitspan.to_bitptr(), this.bitspan.len(), this.capacity(), ) } /// Gets the allocation capacity, measured in bits. /// /// This counts how many total bits the bit-vector can store before it must /// perform a reällocation to acquire more memory. /// /// If the capacity is not a multiple of 8, you should call /// [`.force_align()`]. 
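///
/// As a sketch of the invariant this reports: five bits always fit inside
/// one freshly allocated element, so the capacity is at least the length.
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let bv = bitvec![0, 1, 0, 0, 1];
/// assert!(bv.capacity() >= 5);
/// ```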
/// /// ## Original /// /// [`Vec::capacity`](alloc::vec::Vec::capacity) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = bitvec![0, 1, 0, 0, 1]; /// ``` /// /// [`.force_align()`]: Self::force_align #[inline] pub fn capacity(&self) -> usize { self.capacity .checked_mul(mem::bits_of::()) .expect("bit-vector capacity exceeded") .saturating_sub(self.bitspan.head().into_inner() as usize) } /// Ensures that the bit-vector has allocation capacity for *at least* /// `additional` more bits to be appended to it. /// /// For convenience, this method *guarantees* that the underlying memory for /// `self[.. self.len() + additional]` is initialized, and may be safely /// accessed directly without requiring use of `.push()` or `.extend()` to /// initialize it. /// /// Newly-allocated memory is always initialized to zero. It is still *dead* /// until the bit-vector is grown (by `.push()`, `.extend()`, or /// `.set_len()`), but direct access will not trigger UB. /// /// ## Original /// /// [`Vec::reserve`](alloc::vec::Vec::reserve) /// /// ## Panics /// /// This panics if the new capacity exceeds the bit-vector’s maximum. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv: BitVec = BitVec::with_capacity(80); /// assert!(bv.capacity() >= 80); /// bv.reserve(800); /// assert!(bv.capacity() >= 800); /// ``` #[inline] pub fn reserve(&mut self, additional: usize) { Self::assert_len_encodable(self.len() + additional); self.do_reservation(additional, Vec::::reserve); } /// Ensures that the bit-vector has allocation capacity for *at least* /// `additional` more bits to be appended to it. /// /// This differs from [`.reserve()`] by requesting that the allocator /// provide the minimum capacity necessary, rather than a potentially larger /// amount that the allocator may find more convenient. /// /// Remember that this is a *request*: the allocator provides what it /// provides, and you cannot rely on the new capacity to be exactly minimal. /// You should still prefer `.reserve()`, especially if you expect to append /// to the bit-vector in the future. /// /// ## Original /// /// [`Vec::reserve_exact`](alloc::vec::Vec::reserve_exact) /// /// ## Panics /// /// This panics if the new capacity exceeds the bit-vector’s maximum. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv: BitVec = BitVec::with_capacity(80); /// assert!(bv.capacity() >= 80); /// bv.reserve_exact(800); /// assert!(bv.capacity() >= 800); /// ``` /// /// [`.reserve()`]: Self::reserve #[inline] pub fn reserve_exact(&mut self, additional: usize) { self.do_reservation(additional, Vec::::reserve_exact); } /// Releases excess capacity back to the allocator. /// /// Like [`.reserve_exact()`], this is a *request* to the allocator, not a /// command. The allocator may reclaim excess memory or may not. 
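///
/// Because the request is advisory, the only portable post-condition is a
/// lower bound: the capacity never drops beneath the live length. A sketch:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv: BitVec = BitVec::with_capacity(1000);
/// bv.push(true);
/// bv.shrink_to_fit();
/// // The allocation may or may not shrink, but it still holds the live bit.
/// assert!(bv.capacity() >= 1);
/// ```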
///
/// ## Original
///
/// [`Vec::shrink_to_fit`](alloc::vec::Vec::shrink_to_fit)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv: BitVec = BitVec::with_capacity(1000);
/// bv.push(true);
/// bv.shrink_to_fit();
/// ```
///
/// [`.reserve_exact()`]: Self::reserve_exact
#[inline]
pub fn shrink_to_fit(&mut self) {
	self.with_vec(|vec| vec.shrink_to_fit());
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "prefer `.into_boxed_bitslice()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn into_boxed_slice(self) -> BitBox<T, O> {
	self.into_boxed_bitslice()
}

/// Shortens the bit-vector, keeping the first `new_len` bits and discarding
/// the rest.
///
/// If `new_len` is greater than the bit-vector’s current length, this has
/// no effect.
///
/// The [`.drain()`] method can emulate `.truncate()`, except that it yields
/// the excess bits rather than discarding them.
///
/// Note that this has no effect on the allocated capacity of the
/// bit-vector, **nor does it erase truncated memory**. Bits in the
/// allocated memory that are outside of the [`.as_bitslice()`] view are
/// always considered to have *initialized*, but **unspecified**, values,
/// and you cannot rely on them to be zero.
///
/// ## Original
///
/// [`Vec::truncate`](alloc::vec::Vec::truncate)
///
/// ## Examples
///
/// Truncating a five-bit vector to two bits:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0, 1, 0, 0, 1];
/// bv.truncate(2);
/// assert_eq!(bv.len(), 2);
/// assert!(bv.as_raw_slice()[0].count_ones() >= 2);
/// ```
///
/// No truncation occurs when `new_len` is greater than the bit-vector’s
/// current length:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0; 5];
/// bv.truncate(10);
/// assert_eq!(bv.len(), 5);
/// ```
///
/// [`.as_bitslice()`]: Self::as_bitslice
/// [`.drain()`]: Self::drain
#[inline]
pub fn truncate(&mut self, new_len: usize) {
	if new_len < self.len() {
		unsafe {
			self.set_len_unchecked(new_len);
		}
	}
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "use `.as_bitslice()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn as_slice(&self) -> &BitSlice<T, O> {
	self.as_bitslice()
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "use `.as_mut_bitslice()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn as_mut_slice(&mut self) -> &mut BitSlice<T, O> {
	self.as_mut_bitslice()
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "use `.as_bitptr()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn as_ptr(&self) -> BitPtr<Const, T, O> {
	self.as_bitptr()
}

#[inline]
#[cfg(not(tarpaulin_include))]
#[deprecated = "use `.as_mut_bitptr()` instead"]
#[allow(missing_docs, clippy::missing_docs_in_private_items)]
pub fn as_mut_ptr(&mut self) -> BitPtr<Mut, T, O> {
	self.as_mut_bitptr()
}

/// Resizes a bit-vector to a new length.
///
/// ## Original
///
/// [`Vec::set_len`](alloc::vec::Vec::set_len)
///
/// ## Safety
///
/// **NOT ALL MEMORY IN THE ALLOCATION IS INITIALIZED!**
///
/// Memory in a bit-vector’s allocation is only initialized when the
/// bit-vector grows into it normally (through [`.push()`] or one of the
/// various `.extend*()` methods). Setting the length to a value beyond what
/// was previously initialized, but still within the allocation, is
/// undefined behavior.
///
/// The caller is responsible for ensuring that all memory up to (but not
/// including) the new length has already been initialized.
///
/// ## Panics
///
/// This panics if `new_len` exceeds the capacity as reported by
/// [`.capacity()`].
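///
/// In practice, you can pair this with [`.reserve()`], which guarantees
/// that the memory for `self[.. self.len() + additional]` is
/// zero-initialized and therefore safe to cover with a later `set_len`
/// call.
///
/// [`.reserve()`]: Self::reserve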
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0, 1, 0, 0, 1];
/// unsafe {
///   // The default storage type, `usize`, is at least 32 bits.
///   bv.set_len(32);
/// }
/// assert_eq!(bv, bits![
///   0, 1, 0, 0, 1, 0, 0, 0,
///   0, 0, 0, 0, 0, 0, 0, 0,
///   0, 0, 0, 0, 0, 0, 0, 0,
///   0, 0, 0, 0, 0, 0, 0, 0,
/// ]);
/// // `BitVec` guarantees that newly-initialized memory is zeroed.
/// ```
///
/// [`.push()`]: Self::push
/// [`.capacity()`]: Self::capacity
#[inline]
pub unsafe fn set_len(&mut self, new_len: usize) {
	let capa = self.capacity();
	assert!(
		new_len <= capa,
		"bit-vector capacity exceeded: {} > {}",
		new_len,
		capa,
	);
	self.set_len_unchecked(new_len);
}

/// Takes a bit out of the bit-vector.
///
/// The empty slot is filled with the last bit in the bit-vector, rather
/// than shunting `index + 1 .. self.len()` down by one.
///
/// ## Original
///
/// [`Vec::swap_remove`](alloc::vec::Vec::swap_remove)
///
/// ## Panics
///
/// This panics if `index` is out of bounds (`self.len()` or greater).
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0, 1, 0, 0, 1];
/// assert!(!bv.swap_remove(2));
/// assert_eq!(bv, bits![0, 1, 1, 0]);
/// ```
#[inline]
pub fn swap_remove(&mut self, index: usize) -> bool {
	self.assert_in_bounds(index, 0 .. self.len());
	let last = self.len() - 1;
	unsafe {
		self.swap_unchecked(index, last);
		self.set_len(last);
		*self.get_unchecked(last)
	}
}

/// Inserts a bit at a given position, shifting all bits after it one spot
/// to the right.
///
/// `index` may be any value up to *and including* `self.len()`. If it is
/// `self.len()`, it behaves equivalently to `.push()`.
///
/// ## Original
///
/// [`Vec::insert`](alloc::vec::Vec::insert)
///
/// ## Panics
///
/// This panics if `index` is out of bounds (including `self.len()`).
#[inline]
pub fn insert(&mut self, index: usize, value: bool) {
	self.assert_in_bounds(index, 0 ..= self.len());
	self.push(value);
	unsafe { self.get_unchecked_mut(index ..) }.rotate_right(1);
}

/// Removes a bit at a given position, shifting all bits after it one spot
/// to the left.
///
/// `index` may be any value up to, but **not** including, `self.len()`.
///
/// ## Original
///
/// [`Vec::remove`](alloc::vec::Vec::remove)
///
/// ## Panics
///
/// This panics if `index` is out of bounds (excluding `self.len()`).
#[inline]
pub fn remove(&mut self, index: usize) -> bool {
	self.assert_in_bounds(index, 0 .. self.len());
	let last = self.len() - 1;
	unsafe {
		self.get_unchecked_mut(index ..).rotate_left(1);
		let out = *self.get_unchecked(last);
		self.set_len(last);
		out
	}
}

/// Retains only the bits that the predicate allows.
///
/// Bits are deleted from the vector when the predicate function returns
/// `false`. This function is linear in `self.len()`.
///
/// ## Original
///
/// [`Vec::retain`](alloc::vec::Vec::retain)
///
/// ## API Differences
///
/// The predicate receives both the index of the bit and its value, in
/// order to allow the predicate to have more than one bit of keep/discard
/// information.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0, 1, 0, 0, 1];
/// bv.retain(|idx, _| idx % 2 == 0);
/// assert_eq!(bv, bits![0, 0, 1]);
/// ```
#[inline]
pub fn retain<F>(&mut self, mut func: F)
where F: FnMut(usize, &bool) -> bool {
	let mut len = self.len();
	let mut hole_ptr = self.as_mut_bitptr();
	let mut reader = self.as_bitptr_range().enumerate();

	// Advance until the *first* hole is created.
	// This avoids writing into the bit-slice when no change takes place.
	for (idx, bitptr) in reader.by_ref() {
		let bit = unsafe { bitptr.read() };
		if func(idx, &bit) {
			hole_ptr = unsafe { hole_ptr.add(1) };
		}
		else {
			len -= 1;
			break;
		}
	}
	// Now that a hole exists, switch to a loop that always writes into the
	// hole pointer.
	for (idx, bitptr) in reader {
		let bit = unsafe { bitptr.read() };
		if func(idx, &bit) {
			hole_ptr = unsafe {
				hole_ptr.write(bit);
				hole_ptr.add(1)
			};
		}
		else {
			len -= 1;
		}
	}
	// Discard the bits that did not survive the predicate.
	unsafe {
		self.set_len_unchecked(len);
	}
}

/// Appends a single bit to the vector.
///
/// ## Original
///
/// [`Vec::push`](alloc::vec::Vec::push)
///
/// ## Panics
///
/// This panics if the push would cause the bit-vector to exceed its
/// maximum capacity.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0, 0];
/// bv.push(true);
/// assert_eq!(bv.as_bitslice(), bits![0, 0, 1]);
/// ```
#[inline]
pub fn push(&mut self, value: bool) {
	let len = self.len();
	let new_len = len + 1;
	Self::assert_len_encodable(new_len);
	// Push a new `T` into the underlying buffer if needed.
	if len == 0 || self.bitspan.tail() == BitEnd::MAX {
		self.with_vec(|vec| vec.push(T::ZERO));
	}
	// Write `value` into the now-safely-allocated `len` slot.
	unsafe {
		self.set_len_unchecked(new_len);
		self.set_unchecked(len, value);
	}
}

/// Attempts to remove the trailing bit from the bit-vector.
///
/// This returns `None` if the bit-vector is empty.
///
/// ## Original
///
/// [`Vec::pop`](alloc::vec::Vec::pop)
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![0, 1];
/// assert!(bv.pop().unwrap());
/// assert!(!bv.pop().unwrap());
/// assert!(bv.pop().is_none());
/// ```
#[inline]
pub fn pop(&mut self) -> Option<bool> {
	match self.len() {
		0 => None,
		n => unsafe {
			let new_len = n - 1;
			let out = Some(*self.get_unchecked(new_len));
			self.set_len_unchecked(new_len);
			out
		},
	}
}

/// Moves all the bits out of `other` into the back of `self`.
///
/// The `other` bit-vector is emptied after this occurs.
///
/// ## Original
///
/// [`Vec::append`](alloc::vec::Vec::append)
///
/// ## API Differences
///
/// This permits `other` to have different type parameters than `self`, and
/// does not require that it be literally `Self`.
///
/// ## Panics
///
/// This panics if `self.len() + other.len()` exceeds the maximum capacity
/// of a bit-vector.
///
/// ## Examples
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv1 = bitvec![u16, Msb0; 0; 10];
/// let mut bv2 = bitvec![u32, Lsb0; 1; 10];
///
/// bv1.append(&mut bv2);
///
/// assert_eq!(bv1.count_ones(), 10);
/// assert_eq!(bv1.count_zeros(), 10);
/// assert!(bv2.is_empty());
/// ```
#[inline]
pub fn append<T2, O2>(&mut self, other: &mut BitVec<T2, O2>)
where
	T2: BitStore,
	O2: BitOrder,
{
	self.extend_from_bitslice(other);
	other.clear();
}

/// Iterates over a portion of the bit-vector, *removing* all yielded bits
/// from it.
///
/// When the iterator drops, *all* bits in its coverage are removed from
/// `self`, even if the iterator did not yield them. If the iterator is
/// leaked or otherwise forgotten, and its destructor never runs, then the
/// number of un-yielded bits removed from the bit-vector is not specified.
///
/// ## Original
///
/// [`Vec::drain`](alloc::vec::Vec::drain)
///
/// ## Panics
///
/// This panics if `range` departs `0 .. self.len()`.
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0, 1, 0, 0, 1]; /// let bv2 = bv.drain(1 ..= 3).collect::(); /// assert_eq!(bv, bits![0, 1]); /// assert_eq!(bv2, bits![1, 0, 0]); /// /// // A full range clears the bit-vector. /// bv.drain(..); /// assert!(bv.is_empty()); /// ``` #[inline] pub fn drain(&mut self, range: R) -> Drain where R: RangeBounds { Drain::new(self, range) } /// Empties the bit-vector. /// /// This does not affect the allocated capacity. /// /// ## Original /// /// [`Vec::clear`](alloc::vec::Vec::clear) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0, 1, 0, 0, 1]; /// bv.clear(); /// assert!(bv.is_empty()); /// ``` #[inline] pub fn clear(&mut self) { self.truncate(0); } /// Gets the length of the bit-vector. /// /// This is equivalent to `BitSlice::len`; it is provided as an inherent /// method here rather than relying on `Deref` forwarding so that you can /// write `BitVec::len` as a named function item. /// /// ## Original /// /// [`Vec::len`](alloc::vec::Vec::len) #[inline] #[cfg(not(tarpaulin_include))] pub fn len(&self) -> usize { self.bitspan.len() } /// Tests if the bit-vector is empty. /// /// This is equivalent to `BitSlice::is_empty`; it is provided as an /// inherent method here rather than relying on `Deref` forwarding so that /// you can write `BitVec::is_empty` as a named function item. /// /// ## Original /// /// [`Vec::is_empty`](alloc::vec::Vec::is_empty) #[inline] #[cfg(not(tarpaulin_include))] pub fn is_empty(&self) -> bool { self.bitspan.len() == 0 } /// Splits the bit-vector in half at an index, moving `self[at ..]` out into /// a new bit-vector. /// /// ## Original /// /// [`Vec::split_off`](alloc::vec::Vec::split_off) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0, 1, 0, 0, 1]; /// let bv2 = bv.split_off(2); /// assert_eq!((&*bv, &*bv2), (bits![0, 1], bits![0, 0, 1])); /// ``` #[inline] pub fn split_off(&mut self, at: usize) -> Self { let len = self.len(); self.assert_in_bounds(at, 0 ..= len); let (this, that) = unsafe { self.bitspan .into_bitslice_mut() .split_at_unchecked_mut_noalias(at) }; self.bitspan = this.as_mut_bitspan(); Self::from_bitslice(that) } /// Resizes the bit-vector to a new length, using a function to produce each /// inserted bit. /// /// If `new_len` is less than `self.len()`, this is a truncate operation; if /// it is greater, then `self` is extended by repeatedly pushing `func()`. /// /// ## Original /// /// [`Vec::resize_with`](alloc::vec::Vec::resize_with) /// /// ## API Differences /// /// The generator function receives the index into which its bit will be /// placed. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![1; 2]; /// bv.resize_with(5, |idx| idx % 2 == 1); /// assert_eq!(bv, bits![1, 1, 0, 1, 0]); /// ``` #[inline] pub fn resize_with(&mut self, new_len: usize, mut func: F) where F: FnMut(usize) -> bool { let old_len = self.len(); self.resize(new_len, false); if new_len > old_len { for (bitptr, idx) in unsafe { self.get_unchecked_mut(old_len ..) } .as_mut_bitptr_range() .zip(old_len ..) { unsafe { bitptr.write(func(idx)); } } } } /// Destroys the `BitVec` handle without destroying the bit-vector /// allocation. The allocation is returned as an `&mut BitSlice` that lasts /// for the remaining program lifetime. 
/// /// You *may* call [`BitBox::from_raw`] on this slice handle exactly once in /// order to reap the allocation before program exit. That function takes a /// mutable pointer, not a mutable reference, so you must ensure that the /// returned reference is never used again after restoring the allocation /// handle. /// /// ## Original /// /// [`Vec::leak`](alloc::vec::Vec::leak) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = bitvec![0, 0, 1]; /// let static_bits: &'static mut BitSlice = bv.leak(); /// static_bits.set(0, true); /// assert_eq!(static_bits, bits![1, 0, 1]); /// /// let bb = unsafe { BitBox::from_raw(static_bits) }; /// // static_bits may no longer be used. /// drop(bb); // explicitly reap memory before program exit /// ``` /// /// [`BitBox::leak`]: crate::boxed::BitBox::leak #[inline] #[cfg(not(tarpaulin_include))] pub fn leak<'a>(self) -> &'a mut BitSlice { self.into_boxed_bitslice().pipe(BitBox::leak) } /// Resizes the bit-vector to a new length. New bits are initialized to /// `value`. /// /// ## Original /// /// [`Vec::resize`](alloc::vec::Vec::resize) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0; 2]; /// bv.resize(5, true); /// assert_eq!(bv, bits![0, 0, 1, 1, 1]); /// ``` #[inline] pub fn resize(&mut self, new_len: usize, value: bool) { let len = self.len(); if new_len > len { self.reserve(new_len - len); unsafe { self.set_len(new_len); self.get_unchecked_mut(len .. new_len).fill(value); } } else { self.truncate(new_len); } } #[inline] #[cfg(not(tarpaulin_include))] #[allow(missing_docs, clippy::missing_docs_in_private_items)] #[deprecated = "use `.extend_from_bitslice()` or `.extend_from_raw_slice()` \ instead"] pub fn extend_from_slice(&mut self, other: &BitSlice) where T2: BitStore, O2: BitOrder, { self.extend_from_bitslice(other); } /// Extends `self` by copying an internal range of its bit-slice as the /// region to append. /// /// ## Original /// /// [`Vec::extend_from_within`](alloc::vec::Vec::extend_from_within) /// /// ## Panics /// /// This panics if `src` is not within `0 .. self.len()`. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0, 1, 0, 0, 1]; /// bv.extend_from_within(1 .. 4); /// assert_eq!(bv, bits![0, 1, 0, 0, 1, 1, 0, 0]); /// ``` #[inline] pub fn extend_from_within(&mut self, src: R) where R: RangeExt { let old_len = self.len(); let src = src.normalize(0, old_len); self.assert_in_bounds(src.end, 0 .. old_len); self.resize(old_len + src.len(), false); unsafe { self.copy_within_unchecked(src, old_len); } } /// Modifies [`self.drain()`] so that the removed bit-slice is instead /// replaced with the contents of another bit-stream. /// /// As with `.drain()`, the specified range is always removed from the /// bit-vector even if the splicer is not fully consumed, and the splicer /// does not specify how many bits are removed if it leaks. /// /// The replacement source is only consumed when the splicer drops; however, /// it may be pulled before then. The replacement source cannot assume that /// there will be a delay between creation of the splicer and when it must /// begin producing bits. /// /// This copies the `Vec::splice` implementation; see its documentation for /// more details about how the replacement should act. /// /// ## Original /// /// [`Vec::splice`](alloc::vec::Vec::splice) /// /// ## Panics /// /// This panics if `range` departs `0 .. self.len()`. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0, 1, 1]; /// // a b c /// let mut yank = bv.splice( /// .. 2, /// bits![static 1, 1, 0].iter().by_vals(), /// // d e f /// ); /// /// assert!(!yank.next().unwrap()); // a /// assert!(yank.next().unwrap()); // b /// drop(yank); /// assert_eq!(bv, bits![1, 1, 0, 1]); /// // d e f c /// ``` /// /// [`self.drain()`]: Self::drain #[inline] pub fn splice( &mut self, range: R, replace_with: I, ) -> Splice where R: RangeBounds, I: IntoIterator, { Splice::new(self.drain(range), replace_with) } } bitvec-1.0.1/src/vec/iter.rs000064400000000000000000000366051046102023000137660ustar 00000000000000#![doc = include_str!("../../doc/vec/iter.md")] use alloc::vec::Vec; use core::{ fmt::{ self, Debug, Formatter, }, iter::{ FromIterator, FusedIterator, }, mem, ops::Range, }; use tap::{ Pipe, Tap, TapOptional, }; use wyz::{ comu::{ Mut, Mutability, }, range::RangeExt, }; use super::BitVec; use crate::{ boxed::BitBox, mem::bits_of, order::BitOrder, ptr::{ BitPtrRange, BitRef, }, slice::BitSlice, store::BitStore, view::BitView, }; #[doc = include_str!("../../doc/vec/iter/Extend_bool.md")] impl Extend for BitVec where T: BitStore, O: BitOrder, { #[inline] fn extend(&mut self, iter: I) where I: IntoIterator { let mut iter = iter.into_iter(); #[allow(irrefutable_let_patterns)] // Removing the `if` is unstable. if let (_, Some(n)) | (n, None) = iter.size_hint() { self.reserve(n); let len = self.len(); // If the reservation did not panic, then this will not overflow. let new_len = len.wrapping_add(n); let new = unsafe { self.get_unchecked_mut(len .. new_len) }; let pulled = new .as_mut_bitptr_range() .zip(iter.by_ref()) .map(|(ptr, bit)| unsafe { ptr.write(bit); }) .count(); unsafe { self.set_len(len + pulled); } } // If the iterator is well-behaved and finite, this should never // enter; if the iterator is infinite, then this will eventually crash. 
iter.for_each(|bit| self.push(bit)); } } #[cfg(not(tarpaulin_include))] impl<'a, T, O> Extend<&'a bool> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn extend(&mut self, iter: I) where I: IntoIterator { self.extend(iter.into_iter().copied()); } } #[cfg(not(tarpaulin_include))] #[doc = include_str!("../../doc/vec/iter/Extend_BitRef.md")] impl<'a, M, T1, T2, O1, O2> Extend> for BitVec where M: Mutability, T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn extend(&mut self, iter: I) where I: IntoIterator> { self.extend(iter.into_iter().map(|bit| *bit)); } } impl Extend for BitVec where T: BitStore, O: BitOrder, { #[inline] fn extend(&mut self, iter: I) where I: IntoIterator { let iter = iter.into_iter(); #[allow(irrefutable_let_patterns)] if let (_, Some(n)) | (n, None) = iter.size_hint() { self.reserve(n.checked_mul(bits_of::()).unwrap()); } iter.for_each(|elem| self.extend_from_bitslice(elem.view_bits::())); } } #[cfg(not(tarpaulin_include))] impl<'a, T, O> Extend<&'a T> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn extend(&mut self, iter: I) where I: IntoIterator { self.extend( iter.into_iter() .map(BitStore::load_value) .map(::new), ); } } #[cfg(not(tarpaulin_include))] #[doc = include_str!("../../doc/vec/iter/FromIterator_bool.md")] impl FromIterator for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from_iter(iter: I) -> Self where I: IntoIterator { Self::new().tap_mut(|bv| bv.extend(iter)) } } #[cfg(not(tarpaulin_include))] impl<'a, T, O> FromIterator<&'a bool> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from_iter(iter: I) -> Self where I: IntoIterator { iter.into_iter().copied().collect::() } } #[cfg(not(tarpaulin_include))] #[doc = include_str!("../../doc/vec/iter/FromIterator_BitRef.md")] impl<'a, M, T1, T2, O1, O2> FromIterator> for BitVec where M: Mutability, T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn from_iter(iter: I) -> Self where I: IntoIterator> { iter.into_iter().map(|br| *br).pipe(Self::from_iter) } } #[cfg(not(tarpaulin_include))] impl FromIterator for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from_iter(iter: I) -> Self where I: IntoIterator { iter.into_iter().collect::>().pipe(Self::from_vec) } } #[cfg(not(tarpaulin_include))] impl<'a, T, O> FromIterator<&'a T> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from_iter(iter: I) -> Self where I: IntoIterator { iter.into_iter() .map(::load_value) .map(::new) .collect::() } } #[doc = include_str!("../../doc/vec/iter/IntoIterator.md")] impl IntoIterator for BitVec where T: BitStore, O: BitOrder, { type IntoIter = as IntoIterator>::IntoIter; type Item = as IntoIterator>::Item; #[inline] fn into_iter(self) -> Self::IntoIter { self.into_boxed_bitslice().into_iter() } } #[cfg(not(tarpaulin_include))] /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-IntoIterator-1) impl<'a, T, O> IntoIterator for &'a BitVec where O: BitOrder, T: 'a + BitStore, { type IntoIter = <&'a BitSlice as IntoIterator>::IntoIter; type Item = <&'a BitSlice as IntoIterator>::Item; #[inline] fn into_iter(self) -> Self::IntoIter { self.as_bitslice().iter() } } #[cfg(not(tarpaulin_include))] /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Vec.html#impl-IntoIterator-2) impl<'a, T, O> IntoIterator for &'a mut BitVec where O: BitOrder, T: 'a + BitStore, { type IntoIter = <&'a mut BitSlice as IntoIterator>::IntoIter; type Item = <&'a mut BitSlice as IntoIterator>::Item; #[inline] fn into_iter(self) -> Self::IntoIter { 
self.as_mut_bitslice().iter_mut() } } #[doc = include_str!("../../doc/vec/iter/Drain.md")] pub struct Drain<'a, T, O> where O: BitOrder, T: 'a + BitStore, { /// Exclusive reference to the handle that created the drain. source: &'a mut BitVec, /// The range of the source bit-vector’s buffer that is being drained. drain: BitPtrRange, /// The range of the source bit-vector’s preserved back section. This runs /// from the first bit after the `.drain` to the first bit after the /// original bit-vector ends. tail: Range, } impl<'a, T, O> Drain<'a, T, O> where O: BitOrder, T: 'a + BitStore, { /// Produces a new drain over a region of a bit-vector. pub(super) fn new(source: &'a mut BitVec, range: R) -> Self where R: RangeExt { let len = source.len(); let region = range.normalize(None, len); assert!( region.end <= len, "drains cannot extend past the length of their source bit-vector", ); // The `.tail` region is everything in the bit-vector after the drain. let tail = region.end .. len; let drain = unsafe { // Artificially truncate the source bit-vector to before the drain // region. This is restored in the destructor. source.set_len_unchecked(region.start); let base = source.as_mut_bitptr(); BitPtrRange { start: base.add(region.start), end: base.add(region.end), } }; Self { source, drain, tail, } } /// Views the unyielded bits remaining in the drain. /// /// ## Original /// /// [`Drain::as_slice`](alloc::vec::Drain::as_slice) #[inline] #[cfg(not(tarpaulin_include))] pub fn as_bitslice(&self) -> &'a BitSlice { unsafe { self.drain.clone().into_bitspan().into_bitslice_ref() } } #[inline] #[cfg(not(tarpaulin_include))] #[deprecated = "use `.as_bitslice()` instead"] #[allow(missing_docs, clippy::missing_docs_in_private_items)] pub fn as_slice(&self) -> &'a BitSlice { self.as_bitslice() } /// Attempts to fill the `drain` region with the contents of another /// iterator. /// /// The source bit-vector is extended to include each bit that the /// replacement iterator provides, but is *not yet* extended to include the /// `tail` region, even if the replacement iterator completely fills the /// `drain` region. That work occurs in the destructor. /// /// This is only used by [`Splice`]. /// /// [`Splice`]: crate::vec::Splice #[inline] fn fill(&mut self, iter: &mut impl Iterator) -> FillStatus { let bv = &mut *self.source; let mut len = bv.len(); let span = unsafe { bv.as_mut_bitptr().add(len).range(self.tail.start - len) }; let mut out = FillStatus::FullSpan; for ptr in span { if let Some(bit) = iter.next() { unsafe { ptr.write(bit); } len += 1; } else { out = FillStatus::EmptyInput; break; } } unsafe { bv.set_len_unchecked(len); } out } /// Reserves space for `additional` more bits at the end of the `drain` /// region by moving the `tail` region upwards in memory. /// /// This has the same effects as [`BitVec::resize`], except that the bits /// are inserted between `drain` and `tail` rather than at the end. /// /// This does not modify the drain iteration cursor, including its endpoint. /// The newly inserted bits are not available for iteration. /// /// This is only used by [`Splice`], which may insert more bits than the /// drain removed. 
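///
/// Note that the reservation may reällocate the buffer, so the tail copy
/// is performed through the source handle only after the reservation
/// completes and the handle’s address has been refreshed.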
/// /// [`BitVec::resize`]: crate::vec::BitVec::resize /// [`Splice`]: crate::vec::Splice unsafe fn move_tail(&mut self, additional: usize) { if additional == 0 { return; } let bv = &mut *self.source; let tail_len = self.tail.len(); let full_len = additional + tail_len; bv.reserve(full_len); let new_tail_start = additional + self.tail.start; let orig_tail = mem::replace( &mut self.tail, new_tail_start .. new_tail_start + tail_len, ); let len = bv.len(); bv.set_len_unchecked(full_len); bv.copy_within_unchecked(orig_tail, new_tail_start); bv.set_len_unchecked(len); } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-AsRef%3C%5BT%5D%3E) #[cfg(not(tarpaulin_include))] impl AsRef> for Drain<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl Debug for Drain<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fmt.debug_tuple("Drain").field(&self.as_bitslice()).finish() } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Iterator) #[cfg(not(tarpaulin_include))] impl Iterator for Drain<'_, T, O> where T: BitStore, O: BitOrder, { type Item = bool; easy_iter!(); #[inline] fn next(&mut self) -> Option { self.drain.next().map(|bp| unsafe { bp.read() }) } #[inline] fn nth(&mut self, n: usize) -> Option { self.drain.nth(n).map(|bp| unsafe { bp.read() }) } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-DoubleEndedIterator) #[cfg(not(tarpaulin_include))] impl DoubleEndedIterator for Drain<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn next_back(&mut self) -> Option { self.drain.next_back().map(|bp| unsafe { bp.read() }) } #[inline] fn nth_back(&mut self, n: usize) -> Option { self.drain.nth_back(n).map(|bp| unsafe { bp.read() }) } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-ExactSizeIterator) #[cfg(not(tarpaulin_include))] impl ExactSizeIterator for Drain<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn len(&self) -> usize { self.drain.len() } } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-FusedIterator) impl FusedIterator for Drain<'_, T, O> where T: BitStore, O: BitOrder, { } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Send) // #[allow(clippy::non_send_fields_in_send_ty)] unsafe impl Send for Drain<'_, T, O> where T: BitStore, O: BitOrder, for<'a> &'a mut BitSlice: Send, { } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Sync) unsafe impl Sync for Drain<'_, T, O> where T: BitStore, O: BitOrder, BitSlice: Sync, { } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Drop) impl Drop for Drain<'_, T, O> where T: BitStore, O: BitOrder, { #[inline] fn drop(&mut self) { let tail = mem::take(&mut self.tail); let tail_len = tail.len(); if tail_len == 0 { return; } let bv = &mut *self.source; let old_len = bv.len(); unsafe { bv.set_len_unchecked(tail.end); bv.copy_within_unchecked(tail, old_len); bv.set_len_unchecked(old_len + tail_len); } } } #[repr(u8)] #[doc = include_str!("../../doc/vec/iter/FillStatus.md")] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] enum FillStatus { /// The drain span is completely filled. FullSpan = 0, /// The replacement source is completely exhausted. 
EmptyInput = 1, } #[derive(Debug)] #[doc = include_str!("../../doc/vec/iter/Splice.md")] pub struct Splice<'a, T, O, I> where O: BitOrder, T: 'a + BitStore, I: Iterator, { /// The region of the bit-vector being drained. drain: Drain<'a, T, O>, /// The bitstream that replaces drained bits. splice: I, } impl<'a, T, O, I> Splice<'a, T, O, I> where O: BitOrder, T: 'a + BitStore, I: Iterator, { /// Constructs a splice out of a drain and a replacement source. pub(super) fn new( drain: Drain<'a, T, O>, splice: impl IntoIterator, ) -> Self { let splice = splice.into_iter(); Self { drain, splice } } } impl Iterator for Splice<'_, T, O, I> where T: BitStore, O: BitOrder, I: Iterator, { type Item = bool; easy_iter!(); #[inline] fn next(&mut self) -> Option { self.drain.next().tap_some(|_| unsafe { if let Some(bit) = self.splice.next() { let bv = &mut *self.drain.source; let len = bv.len(); bv.set_len_unchecked(len + 1); bv.set_unchecked(len, bit); } }) } } #[cfg(not(tarpaulin_include))] impl DoubleEndedIterator for Splice<'_, T, O, I> where T: BitStore, O: BitOrder, I: Iterator, { #[inline] fn next_back(&mut self) -> Option { self.drain.next_back() } #[inline] fn nth_back(&mut self, n: usize) -> Option { self.drain.nth_back(n) } } #[cfg(not(tarpaulin_include))] impl ExactSizeIterator for Splice<'_, T, O, I> where T: BitStore, O: BitOrder, I: Iterator, { #[inline] fn len(&self) -> usize { self.drain.len() } } impl FusedIterator for Splice<'_, T, O, I> where T: BitStore, O: BitOrder, I: Iterator, { } /// [Original](https://doc.rust-lang.org/alloc/vec/struct.Drain.html#impl-Drop) impl Drop for Splice<'_, T, O, I> where T: BitStore, O: BitOrder, I: Iterator, { #[inline] fn drop(&mut self) { let tail = self.drain.tail.clone(); let tail_len = tail.len(); let bv = &mut *self.drain.source; if tail_len == 0 { bv.extend(self.splice.by_ref()); return; } if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) { return; } let len = match self.splice.size_hint() { (n, None) | (_, Some(n)) => n, }; unsafe { self.drain.move_tail(len); } if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) { return; } /* If the `.splice` *still* has bits to provide, then its * `.size_hint()` is untrustworthy. Collect the `.splice` into a * bit-vector, then insert the bit-vector into the spliced region. */ let mut collected = self.splice.by_ref().collect::>().into_iter(); let len = collected.len(); if len > 0 { unsafe { self.drain.move_tail(len); } let filled = self.drain.fill(collected.by_ref()); debug_assert_eq!(filled, FillStatus::EmptyInput); debug_assert_eq!(collected.len(), 0); } } } bitvec-1.0.1/src/vec/ops.rs000064400000000000000000000111441046102023000136130ustar 00000000000000//! Operator trait implementations for bit-vectors. 
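//!
//! A quick sketch of the operators implemented in this module (an
//! illustration using only the public prelude APIs):
//!
//! ```rust
//! use bitvec::prelude::*;
//!
//! let a = bitvec![0, 0, 1, 1];
//! let b = bitvec![0, 1, 0, 1];
//!
//! // The `&`, `|`, and `^` operators consume `self` and apply the
//! // bitwise operation across the two bit-streams.
//! assert_eq!(a.clone() & b.clone(), bitvec![0, 0, 0, 1]);
//! assert_eq!(a.clone() | b.clone(), bitvec![0, 1, 1, 1]);
//! assert_eq!(a ^ b, bitvec![0, 1, 1, 0]);
//!
//! // `!` inverts every live bit.
//! assert_eq!(!bitvec![0, 1], bits![1, 0]);
//! ```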
use core::{ mem::ManuallyDrop, ops::{ BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Deref, DerefMut, Index, IndexMut, Not, }, }; use wyz::comu::Mut; use super::BitVec; use crate::{ order::BitOrder, ptr::BitSpan, slice::BitSlice, store::BitStore, }; #[cfg(not(tarpaulin_include))] impl BitAndAssign> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitand_assign(&mut self, rhs: BitVec) { *self &= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitAndAssign<&BitVec> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitand_assign(&mut self, rhs: &BitVec) { *self &= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitAnd for BitVec where T: BitStore, O: BitOrder, BitSlice: BitAndAssign, { type Output = Self; #[inline] fn bitand(mut self, rhs: Rhs) -> Self::Output { self &= rhs; self } } #[cfg(not(tarpaulin_include))] impl BitAndAssign for BitVec where T: BitStore, O: BitOrder, BitSlice: BitAndAssign, { #[inline] fn bitand_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() &= rhs; } } #[cfg(not(tarpaulin_include))] impl BitOrAssign> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitor_assign(&mut self, rhs: BitVec) { *self |= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitOrAssign<&BitVec> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitor_assign(&mut self, rhs: &BitVec) { *self |= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitOr for BitVec where T: BitStore, O: BitOrder, BitSlice: BitOrAssign, { type Output = Self; #[inline] fn bitor(mut self, rhs: Rhs) -> Self::Output { self |= rhs; self } } #[cfg(not(tarpaulin_include))] impl BitOrAssign for BitVec where T: BitStore, O: BitOrder, BitSlice: BitOrAssign, { #[inline] fn bitor_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() |= rhs; } } #[cfg(not(tarpaulin_include))] impl BitXorAssign> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitxor_assign(&mut self, rhs: BitVec) { *self ^= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitXorAssign<&BitVec> for BitSlice where T: BitStore, O: BitOrder, { #[inline] fn bitxor_assign(&mut self, rhs: &BitVec) { *self ^= rhs.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BitXor for BitVec where T: BitStore, O: BitOrder, BitSlice: BitXorAssign, { type Output = Self; #[inline] fn bitxor(mut self, rhs: Rhs) -> Self::Output { self ^= rhs; self } } #[cfg(not(tarpaulin_include))] impl BitXorAssign for BitVec where T: BitStore, O: BitOrder, BitSlice: BitXorAssign, { #[inline] fn bitxor_assign(&mut self, rhs: Rhs) { *self.as_mut_bitslice() ^= rhs; } } impl Deref for BitVec where T: BitStore, O: BitOrder, { type Target = BitSlice; #[inline] fn deref(&self) -> &Self::Target { self.as_bitslice() } } impl DerefMut for BitVec where T: BitStore, O: BitOrder, { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_bitslice() } } impl Drop for BitVec where T: BitStore, O: BitOrder, { #[inline] fn drop(&mut self) { if self.bitspan != BitSpan::::EMPTY { self.with_vec(|slot| unsafe { ManuallyDrop::drop(slot) }); } } } #[cfg(not(tarpaulin_include))] impl Index for BitVec where T: BitStore, O: BitOrder, BitSlice: Index, { type Output = as Index>::Output; #[inline] fn index(&self, index: Idx) -> &Self::Output { &self.as_bitslice()[index] } } #[cfg(not(tarpaulin_include))] impl IndexMut for BitVec where T: BitStore, O: BitOrder, BitSlice: IndexMut, { #[inline] fn index_mut(&mut self, index: Idx) -> &mut Self::Output { &mut self.as_mut_bitslice()[index] } } /** This 
implementation inverts all elements in the live buffer. You cannot rely on the value of bits in the buffer that are outside the domain of [`BitVec::as_mut_bitslice`]. **/ impl Not for BitVec where T: BitStore, O: BitOrder, { type Output = Self; #[inline] fn not(mut self) -> Self::Output { for elem in self.as_raw_mut_slice() { elem.store_value(!elem.load_value()); } self } } bitvec-1.0.1/src/vec/tests/api.rs000064400000000000000000000027371046102023000147350ustar 00000000000000use crate::prelude::*; #[test] fn ins_del() { let mut bv = bitvec![0, 1, 0, 0, 1]; assert!(!bv.swap_remove(2)); assert_eq!(bv, bits![0, 1, 1, 0]); bv.insert(2, false); assert_eq!(bv, bits![0, 1, 0, 1, 0]); assert!(bv.remove(3)); assert_eq!(bv, bits![0, 1, 0, 0]); } #[test] fn walk() { let mut bv = bitvec![ 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0 ]; assert_eq!(bv.pop(), Some(false)); assert_eq!(bv.count_ones(), 8); bv.retain(|idx, &bit| bit && idx % 2 == 1); assert_eq!(bv, bits![1; 7]); let mut bv2 = bitvec![1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]; bv.append(&mut bv2); assert_eq!(bv.count_ones(), 14); assert!(bv2.is_empty()); let mut splice = bv.splice(2 .. 10, Some(false)); assert!(splice.all(|bit| bit)); drop(splice); assert_eq!(bv, bits![1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]); } #[test] fn misc() { let mut bv = bitvec![0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]; let bv2 = bv.split_off(10); assert_eq!(bv2, bits![0, 1, 0, 1]); bv.clear(); let mut a = 1; let mut b = 1; let fib = |idx| { if idx == a.max(b) { let c = a + b; b = a; a = c; true } else { false } }; bv.resize_with(22, fib); assert_eq!(bv, bits![ 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, ]); bv.resize(14, false); assert_eq!(bv, bits![0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]); let mut bv = bitvec![0, 0, 1, 1, 0, 0]; bv.extend_from_within(2 .. 4); assert_eq!(bv, bits![0, 0, 1, 1, 0, 0, 1, 1]); } bitvec-1.0.1/src/vec/tests/iter.rs000064400000000000000000000015521046102023000151210ustar 00000000000000use crate::prelude::*; #[test] fn extend() { let mut bv = bitvec![]; bv.extend(Some(true)); bv.extend([2usize]); let mut iter = bv.into_iter(); assert!(iter.next().unwrap()); } #[test] fn drain() { let mut bv = bitvec![0, 1, 1, 1, 0]; let mut drain = bv.drain(1 .. 4); assert!(drain.next().unwrap()); assert!(drain.next_back().unwrap()); drop(drain); assert_eq!(bv, bits![0; 2]); } #[test] fn splice() { let mut bv = bitvec![0, 1, 1, 1, 0]; let mut splice = bv.splice(1 .. 4, [false, true, true, false]); assert!(splice.next().unwrap()); assert!(splice.next_back().unwrap()); drop(splice); assert_eq!(bv, bits![0, 0, 1, 1, 0, 0]); let mut bv = bitvec![0, 1, 0, 0, 1]; drop(bv.splice(2 .., None)); assert_eq!(bv, bits![0, 1]); let mut bv = bitvec![0, 1, 0, 0, 1]; drop(bv.splice(2 .. 
2, Some(true))); assert_eq!(bv, bits![0, 1, 1, 0, 0, 1]); } bitvec-1.0.1/src/vec/tests/traits.rs000064400000000000000000000037441046102023000154710ustar 00000000000000use alloc::{ borrow::{ Borrow, BorrowMut, Cow, }, vec::Vec, }; use core::{ convert::TryFrom, fmt::Debug, hash::Hash, iter::FromIterator, ops::{ Deref, DerefMut, Index, Range, }, panic::{ RefUnwindSafe, UnwindSafe, }, }; #[cfg(feature = "std")] use std::io::Write; use static_assertions::*; use crate::prelude::*; #[test] fn alloc_impl() { assert_impl_all!(BitVec: AsMut>, AsMut>, AsRef>, AsRef>, Borrow>, BorrowMut>, Clone, Debug, Default, Deref, DerefMut, Drop, Eq, Extend<&'static bool>, Extend, From<&'static BitSlice>, From<&'static mut BitSlice>, From>, From>, From>>, FromIterator, Hash, Index, Index>, IntoIterator, Ord, PartialEq<&'static BitSlice>, PartialEq>, RefUnwindSafe, Send, Sync, TryFrom>, Unpin, UnwindSafe, ); } #[test] #[cfg(feature = "std")] fn std_impl() { assert_impl_all!(BitVec: Write); } #[test] fn format() { #[cfg(not(feature = "std"))] use alloc::format; let bv = bitvec![0, 0, 1, 1, 0, 1, 0, 1]; assert_eq!(format!("{}", bv), format!("{}", bv.as_bitslice())); assert_eq!(format!("{:b}", bv), format!("{:b}", bv.as_bitslice())); assert_eq!(format!("{:o}", bv), format!("{:o}", bv.as_bitslice())); assert_eq!(format!("{:x}", bv), format!("{:x}", bv.as_bitslice())); assert_eq!(format!("{:X}", bv), format!("{:X}", bv.as_bitslice())); let text = format!("{:?}", bitvec![u8, Msb0; 0, 1, 0, 0]); assert!( text.starts_with("BitVec { addr: 0x"), "{}", text ); assert!( text.contains(", head: 000, bits: 4, capacity: "), "{}", text ); assert!(text.ends_with(" } [0, 1, 0, 0]"), "{}", text); } bitvec-1.0.1/src/vec/tests.rs000064400000000000000000000034021046102023000141520ustar 00000000000000//! Unit tests for bit-vectors. #![cfg(test)] use core::mem; use rand::random; use crate::{ mem::bits_of, prelude::*, }; mod api; mod iter; mod traits; #[test] fn make_and_resize() { let mut bv: BitVec = BitVec::new(); assert!(bv.is_empty()); assert_eq!(bv.capacity(), 0); bv.reserve(20); // Capacity always rounds up to the storage size, which is an // at-least-32-bit `usize`. assert!(bv.capacity() >= 32); bv.reserve_exact(90); assert!(bv.capacity() >= 96); bv = BitVec::with_capacity(100); assert!(bv.is_empty()); assert!(bv.capacity() >= 128); bv.extend_from_bitslice(bits![0, 1, 0, 0, 1]); assert_eq!(bv.len(), 5); let (bitptr, length, capacity) = mem::take(&mut bv).into_raw_parts(); bv = unsafe { BitVec::from_raw_parts(bitptr, length, capacity) }; assert_eq!(bv, bits![0, 1, 0, 0, 1]); let capacity = bv.capacity(); bv.shrink_to_fit(); assert!(bv.capacity() <= capacity); bv.truncate(2); assert_eq!(bv.len(), 2); assert_eq!(bv, bits![0, 1]); bv.truncate(20); assert_eq!(bv.len(), 2); let capacity = bv.capacity(); unsafe { bv.set_len(capacity); bv.set_elements((&false) as *const bool as usize); } } #[test] fn misc() { let elem = random::(); let bv: BitVec = BitVec::from_element(elem); assert_eq!(bv, elem.view_bits::()); let array: [usize; 10] = random(); let mut bv: BitVec = BitVec::from_slice(&array[..]); assert_eq!(bv, array.view_bits::()); bv.extend_from_raw_slice(&[elem]); assert_eq!(bv[10 * bits_of::() ..], elem.view_bits::()); let elem = random::(); let bits = &elem.view_bits::()[4 .. 
28]; let mut bv = bits.to_bitvec(); bv.set_uninitialized(false); bv.force_align(); bv.set_uninitialized(true); bv.force_align(); assert_eq!(!bitvec![0, 1], bits![1, 0]); } bitvec-1.0.1/src/vec/traits.rs000064400000000000000000000151341046102023000143230ustar 00000000000000//! General trait implementations for bit-vectors. use alloc::{ borrow::{ Cow, ToOwned, }, vec::Vec, }; use core::{ borrow::{ Borrow, BorrowMut, }, cmp, convert::TryFrom, fmt::{ self, Debug, Display, Formatter, }, hash::{ Hash, Hasher, }, marker::Unpin, }; use super::BitVec; use crate::{ array::BitArray, boxed::BitBox, order::BitOrder, slice::BitSlice, store::BitStore, view::BitViewSized, }; #[cfg(not(tarpaulin_include))] impl Borrow> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn borrow(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl BorrowMut> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn borrow_mut(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } #[cfg(not(tarpaulin_include))] impl Clone for BitVec where T: BitStore, O: BitOrder, { #[inline] fn clone(&self) -> Self { Self::from_bitslice(self.as_bitslice()) } } impl Eq for BitVec where T: BitStore, O: BitOrder, { } #[cfg(not(tarpaulin_include))] impl Ord for BitVec where T: BitStore, O: BitOrder, { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_bitslice().cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl PartialEq> for BitSlice where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, other: &BitVec) -> bool { self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq> for &BitSlice where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, other: &BitVec) -> bool { *self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq> for &mut BitSlice where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn eq(&self, other: &BitVec) -> bool { **self == other.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialEq for BitVec where T: BitStore, O: BitOrder, Rhs: ?Sized + PartialEq>, { #[inline] fn eq(&self, other: &Rhs) -> bool { other == self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl PartialOrd> for BitSlice where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, other: &BitVec) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl<'a, T1, T2, O1, O2> PartialOrd> for &'a BitSlice where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, other: &BitVec) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl<'a, T1, T2, O1, O2> PartialOrd> for &'a mut BitSlice where T1: BitStore, T2: BitStore, O1: BitOrder, O2: BitOrder, { #[inline] fn partial_cmp(&self, other: &BitVec) -> Option { self.partial_cmp(other.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl PartialOrd for BitVec where T: BitStore, O: BitOrder, Rhs: ?Sized + PartialOrd>, { #[inline] fn partial_cmp(&self, other: &Rhs) -> Option { other.partial_cmp(self.as_bitslice()) } } #[cfg(not(tarpaulin_include))] impl AsRef> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &BitSlice { self.as_bitslice() } } #[cfg(not(tarpaulin_include))] impl AsMut> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn as_mut(&mut self) -> &mut BitSlice { self.as_mut_bitslice() } } #[cfg(not(tarpaulin_include))] impl AsRef> for BitVec where 
T: BitStore, O: BitOrder, { #[inline] fn as_ref(&self) -> &Self { self } } #[cfg(not(tarpaulin_include))] impl AsMut> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn as_mut(&mut self) -> &mut Self { self } } #[cfg(not(tarpaulin_include))] impl From<&'_ BitSlice> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from(slice: &BitSlice) -> Self { Self::from_bitslice(slice) } } #[cfg(not(tarpaulin_include))] impl From<&'_ mut BitSlice> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from(slice: &mut BitSlice) -> Self { Self::from_bitslice(slice) } } #[cfg(not(tarpaulin_include))] impl From> for BitVec where O: BitOrder, A: BitViewSized, { #[inline] fn from(array: BitArray) -> Self { array.as_bitslice().to_owned() } } #[cfg(not(tarpaulin_include))] impl From> for BitVec where T: BitStore, O: BitOrder, { #[inline] fn from(boxed: BitBox) -> Self { boxed.into_bitvec() } } #[cfg(not(tarpaulin_include))] impl From> for Vec where T: BitStore, O: BitOrder, { #[inline] fn from(bv: BitVec) -> Self { bv.into_vec() } } #[cfg(not(tarpaulin_include))] impl<'a, T, O> From>> for BitVec where O: BitOrder, T: 'a + BitStore, { #[inline] fn from(cow: Cow<'a, BitSlice>) -> Self { cow.into_owned() } } #[cfg(not(tarpaulin_include))] impl TryFrom> for BitVec where T: BitStore, O: BitOrder, { type Error = Vec; #[inline] fn try_from(vec: Vec) -> Result { Self::try_from_vec(vec) } } #[cfg(not(tarpaulin_include))] impl Default for BitVec where T: BitStore, O: BitOrder, { #[inline] fn default() -> Self { Self::new() } } impl Debug for BitVec where T: BitStore, O: BitOrder, { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.as_bitspan().render(fmt, "Vec", &[( "capacity", &self.capacity() as &dyn Debug, )])?; fmt.write_str(" ")?; Display::fmt(self, fmt) } } easy_fmt! { impl Binary impl Display impl LowerHex impl Octal impl Pointer impl UpperHex for BitVec } #[cfg(not(tarpaulin_include))] impl Hash for BitVec where T: BitStore, O: BitOrder, { #[inline] fn hash(&self, state: &mut H) where H: Hasher { self.as_bitslice().hash(state) } } unsafe impl Send for BitVec where T: BitStore, O: BitOrder, { } unsafe impl Sync for BitVec where T: BitStore, O: BitOrder, { } impl Unpin for BitVec where T: BitStore, O: BitOrder, { } bitvec-1.0.1/src/vec.rs000064400000000000000000000432111046102023000130120ustar 00000000000000#![doc = include_str!("../doc/vec.md")] #![cfg(feature = "alloc")] #[cfg(not(feature = "std"))] use alloc::vec; use alloc::vec::Vec; use core::{ mem::{ self, ManuallyDrop, }, ptr, slice, }; use tap::Pipe; use wyz::comu::{ Const, Mut, }; pub use self::iter::{ Drain, Splice, }; pub use crate::boxed::IntoIter; use crate::{ boxed::BitBox, index::BitIdx, mem::bits_of, order::{ BitOrder, Lsb0, }, ptr::{ AddressExt, BitPtr, BitSpan, BitSpanError, }, slice::BitSlice, store::BitStore, view::BitView, }; mod api; mod iter; mod ops; mod tests; mod traits; #[repr(C)] #[doc = include_str!("../doc/vec/BitVec.md")] pub struct BitVec where T: BitStore, O: BitOrder, { /// Span description of the live bits in the allocation. bitspan: BitSpan, /// Allocation capacity, measured in `T` elements. capacity: usize, } /// Constructors. impl BitVec where T: BitStore, O: BitOrder, { /// An empty bit-vector with no backing allocation. pub const EMPTY: Self = Self { bitspan: BitSpan::EMPTY, capacity: 0, }; /// Creates a new bit-vector by repeating a bit for the desired length. 
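///
/// Internally this fills whole storage elements at once, rather than
/// pushing one bit at a time, so its cost scales with the number of
/// elements rather than the number of bits.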
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let zeros = BitVec::::repeat(false, 50); /// let ones = BitVec::::repeat(true, 50); /// ``` #[inline] pub fn repeat(bit: bool, len: usize) -> Self { let mut out = Self::with_capacity(len); unsafe { out.set_len(len); out.as_raw_mut_slice().fill_with(|| { BitStore::new(if bit { !::ZERO } else { ::ZERO }) }); } out } /// Copies the contents of a bit-slice into a new heap allocation. /// /// This copies the raw underlying elements into a new allocation, and sets /// the produced bit-vector to use the same memory layout as the originating /// bit-slice. This means that it may begin at any bit in the first element, /// not just the zeroth bit. If you require this property, call /// [`.force_align()`]. /// /// Dead bits in the copied memory elements are guaranteed to be zeroed. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bits = bits![0, 1, 0, 0, 1]; /// let bv = BitVec::from_bitslice(bits); /// assert_eq!(bv, bits); /// ``` /// /// [`.force_align()`]: Self::force_align #[inline] pub fn from_bitslice(slice: &BitSlice) -> Self { let bitspan = slice.as_bitspan(); let mut vec = bitspan .elements() .pipe(Vec::with_capacity) .pipe(ManuallyDrop::new); vec.extend(slice.domain()); let bitspan = unsafe { BitSpan::new_unchecked( vec.as_mut_ptr().cast::().into_address(), bitspan.head(), bitspan.len(), ) }; let capacity = vec.capacity(); Self { bitspan, capacity } } /// Constructs a new bit-vector from a single element. /// /// This copies `elem` into a new heap allocation, and sets the bit-vector /// to cover it entirely. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = BitVec::<_, Msb0>::from_element(1u8); /// assert!(bv[7]); /// ``` #[inline] pub fn from_element(elem: T) -> Self { Self::from_vec(vec![elem]) } /// Constructs a new bit-vector from a slice of memory elements. /// /// This copies `slice` into a new heap allocation, and sets the bit-vector /// to cover it entirely. /// /// ## Panics /// /// This panics if `slice` exceeds bit-vector capacity. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let slice = &[0u8, 1, 2, 3]; /// let bv = BitVec::<_, Lsb0>::from_slice(slice); /// assert_eq!(bv.len(), 32); /// ``` #[inline] pub fn from_slice(slice: &[T]) -> Self { Self::try_from_slice(slice).unwrap() } /// Fallibly constructs a new bit-vector from a slice of memory elements. /// /// This fails early if `slice` exceeds bit-vector capacity. If it is not, /// then `slice` is copied into a new heap allocation and fully spanned by /// the returned bit-vector. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let slice = &[0u8, 1, 2, 3]; /// let bv = BitVec::<_, Lsb0>::try_from_slice(slice).unwrap(); /// assert_eq!(bv.len(), 32); /// ``` #[inline] pub fn try_from_slice(slice: &[T]) -> Result> { BitSlice::::try_from_slice(slice).map(Self::from_bitslice) } /// Converts a regular vector in-place into a bit-vector. /// /// The produced bit-vector spans every bit in the original vector. No /// reällocation occurs; this is purely a transform of the handle. /// /// ## Panics /// /// This panics if the source vector is too long to view as a bit-slice. 
/// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let v = vec![0u8, 1, 2, 3]; /// let bv = BitVec::<_, Msb0>::from_vec(v); /// assert_eq!(bv.len(), 32); /// ``` #[inline] pub fn from_vec(vec: Vec) -> Self { Self::try_from_vec(vec) .expect("vector was too long to be converted into a `BitVec`") } /// Attempts to convert a regular vector in-place into a bit-vector. /// /// This fails if the source vector is too long to view as a bit-slice. On /// success, the produced bit-vector spans every bit in the original vector. /// No reällocation occurs; this is purely a transform of the handle. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let v = vec![0u8; 20]; /// assert_eq!(BitVec::<_, Msb0>::try_from_vec(v).unwrap().len(), 160); /// ``` /// /// It is not practical to allocate a vector that will fail this conversion. #[inline] pub fn try_from_vec(vec: Vec) -> Result> { let mut vec = ManuallyDrop::new(vec); let capacity = vec.capacity(); BitPtr::from_mut_slice(vec.as_mut_slice()) .span(vec.len() * bits_of::()) .map(|bitspan| Self { bitspan, capacity }) .map_err(|_| ManuallyDrop::into_inner(vec)) } /// Appends the contents of a bit-slice to a bit-vector. /// /// This can extend from a bit-slice of any type parameters; it is not /// restricted to using the same parameters as `self`. However, when the /// type parameters *do* match, it is possible for this to use a batch-copy /// optimization to go faster than the individual-bit crawl that is /// necessary when they differ. /// /// Until Rust provides extensive support for specialization in trait /// implementations, you should use this method whenever you are extending /// from a `BitSlice` proper, and only use the general [`.extend()`] /// implementation if you are required to use a generic `bool` source. /// /// ## Original /// /// [`Vec::extend_from_slice`](alloc::vec::Vec::extend_from_slice) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let mut bv = bitvec![0, 1]; /// bv.extend_from_bitslice(bits![0, 1, 0, 0, 1]); /// assert_eq!(bv, bits![0, 1, 0, 1, 0, 0, 1]); /// ``` /// /// [`.extend()`]: https://docs.rs/bitvec/latest/bitvec/vec/struct.Vec.html#impl-Extend #[inline] pub fn extend_from_bitslice(&mut self, other: &BitSlice) where T2: BitStore, O2: BitOrder, { let len = self.len(); let olen = other.len(); self.resize(len + olen, false); unsafe { self.get_unchecked_mut(len ..) }.clone_from_bitslice(other); } /// Appends a slice of `T` elements to a bit-vector. /// /// The slice is viewed as a `BitSlice`, then appended directly to the /// bit-vector. /// /// ## Original /// /// [`Vec::extend_from_slice`](alloc::vec::Vec::extend_from_slice) #[inline] pub fn extend_from_raw_slice(&mut self, slice: &[T]) { self.extend_from_bitslice(slice.view_bits::()); } } /// Converters. impl BitVec where T: BitStore, O: BitOrder, { /// Explicitly views the bit-vector as a bit-slice. #[inline] pub fn as_bitslice(&self) -> &BitSlice { unsafe { self.bitspan.into_bitslice_ref() } } /// Explicitly views the bit-vector as a mutable bit-slice. #[inline] pub fn as_mut_bitslice(&mut self) -> &mut BitSlice { unsafe { self.bitspan.into_bitslice_mut() } } /// Views the bit-vector as a slice of its underlying memory elements. #[inline] pub fn as_raw_slice(&self) -> &[T] { let (data, len) = (self.bitspan.address().to_const(), self.bitspan.elements()); unsafe { slice::from_raw_parts(data, len) } } /// Views the bit-vector as a mutable slice of its underlying memory /// elements. 
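///
/// As a brief sketch, writes through this view change the bit-vector’s
/// contents directly:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut bv = bitvec![u8, Msb0; 0; 8];
/// // In `Msb0` ordering, bit index 0 is the most significant bit.
/// bv.as_raw_mut_slice()[0] = 0b1000_0001;
/// assert_eq!(bv, bits![1, 0, 0, 0, 0, 0, 0, 1]);
/// ```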
#[inline] pub fn as_raw_mut_slice(&mut self) -> &mut [T] { let (data, len) = (self.bitspan.address().to_mut(), self.bitspan.elements()); unsafe { slice::from_raw_parts_mut(data, len) } } /// Creates an unsafe shared bit-pointer to the start of the buffer. /// /// ## Original /// /// [`Vec::as_ptr`](alloc::vec::Vec::as_ptr) /// /// ## Safety /// /// You must initialize the contents of the underlying buffer before /// accessing memory through this pointer. See the `BitPtr` documentation /// for more details. #[inline] pub fn as_bitptr(&self) -> BitPtr { self.bitspan.to_bitptr().to_const() } /// Creates an unsafe writable bit-pointer to the start of the buffer. /// /// ## Original /// /// [`Vec::as_mut_ptr`](alloc::vec::Vec::as_mut_ptr) /// /// ## Safety /// /// You must initialize the contents of the underlying buffer before /// accessing memory through this pointer. See the `BitPtr` documentation /// for more details. #[inline] pub fn as_mut_bitptr(&mut self) -> BitPtr { self.bitspan.to_bitptr() } /// Converts a bit-vector into a boxed bit-slice. /// /// This may cause a reällocation to drop any excess capacity. /// /// ## Original /// /// [`Vec::into_boxed_slice`](alloc::vec::Vec::into_boxed_slice) /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = bitvec![0, 1, 0, 0, 1]; /// let bb = bv.into_boxed_bitslice(); /// ``` #[inline] pub fn into_boxed_bitslice(self) -> BitBox { let mut bitspan = self.bitspan; let mut boxed = self.into_vec().into_boxed_slice().pipe(ManuallyDrop::new); unsafe { bitspan.set_address(boxed.as_mut_ptr().into_address()); BitBox::from_raw(bitspan.into_bitslice_ptr_mut()) } } /// Converts a bit-vector into a `Vec` of its underlying storage. /// /// The produced vector contains all elements that contained live bits. Dead /// bits have an unspecified value; you should call [`.set_uninitialized()`] /// before converting into a vector. /// /// This does not affect the allocated memory; it is purely a conversion of /// the handle. /// /// ## Examples /// /// ```rust /// use bitvec::prelude::*; /// /// let bv = bitvec![u8, Msb0; 0, 1, 0, 0, 1]; /// let v = bv.into_vec(); /// assert_eq!(v[0] & 0xF8, 0b01001_000); /// ``` /// /// [`.set_uninitialized()`]: Self::set_uninitialized #[inline] pub fn into_vec(self) -> Vec { let (bitspan, capacity) = (self.bitspan, self.capacity); mem::forget(self); unsafe { Vec::from_raw_parts( bitspan.address().to_mut(), bitspan.elements(), capacity, ) } } } /// Utilities. impl BitVec where T: BitStore, O: BitOrder, { /// Overwrites each element (visible in [`.as_raw_mut_slice()`]) with a new /// bit-pattern. /// /// This unconditionally writes `element` into each element in the backing /// slice, without altering the bit-vector’s length or capacity. /// /// This guarantees that dead bits visible in [`.as_raw_slice()`] but not /// [`.as_bitslice()`] are initialized according to the bit-pattern of /// `element.` The elements not visible in the raw slice, but present in the /// allocation, do *not* specify a value. You may not rely on them being /// zeroed *or* being set to the `element` bit-pattern. /// /// ## Parameters /// /// - `&mut self` /// - `element`: The bit-pattern with which each live element in the backing /// store is initialized. 
/// Utilities.
impl<T, O> BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	/// Overwrites each element (visible in [`.as_raw_mut_slice()`]) with a
	/// new bit-pattern.
	///
	/// This unconditionally writes `element` into each element in the
	/// backing slice, without altering the bit-vector’s length or capacity.
	///
	/// This guarantees that dead bits visible in [`.as_raw_slice()`] but not
	/// [`.as_bitslice()`] are initialized according to the bit-pattern of
	/// `element`. The elements not visible in the raw slice, but present in
	/// the allocation, do *not* specify a value. You may not rely on them
	/// being zeroed *or* being set to the `element` bit-pattern.
	///
	/// ## Parameters
	///
	/// - `&mut self`
	/// - `element`: The bit-pattern with which each live element in the
	///   backing store is initialized.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut bv = bitvec![u8, Msb0; 0; 20];
	/// assert_eq!(bv.as_raw_slice(), [0; 3]);
	/// bv.set_elements(0xA5);
	/// assert_eq!(bv.as_raw_slice(), [0xA5; 3]);
	/// ```
	///
	/// [`.as_bitslice()`]: Self::as_bitslice
	/// [`.as_raw_mut_slice()`]: Self::as_raw_mut_slice
	/// [`.as_raw_slice()`]: Self::as_raw_slice
	#[inline]
	pub fn set_elements(&mut self, element: T::Mem) {
		self.as_raw_mut_slice()
			.iter_mut()
			.for_each(|elt| elt.store_value(element));
	}

	/// Sets the uninitialized bits of a bit-vector to a known value.
	///
	/// This method modifies all bits that are observable in
	/// [`.as_raw_slice()`] but *not* observable in [`.as_bitslice()`] to a
	/// known value. Memory beyond the raw-slice view, but still within the
	/// allocation, is considered fully dead and will never be seen.
	///
	/// This can be used to zero the unused memory so that when viewed as a
	/// raw slice, unused bits have a consistent and predictable value.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut bv = 0b1101_1100u8.view_bits::<Lsb0>().to_bitvec();
	/// assert_eq!(bv.as_raw_slice()[0], 0b1101_1100u8);
	///
	/// bv.truncate(4);
	/// assert_eq!(bv.count_ones(), 2);
	/// assert_eq!(bv.as_raw_slice()[0], 0b1101_1100u8);
	///
	/// bv.set_uninitialized(false);
	/// assert_eq!(bv.as_raw_slice()[0], 0b0000_1100u8);
	///
	/// bv.set_uninitialized(true);
	/// assert_eq!(bv.as_raw_slice()[0], 0b1111_1100u8);
	/// ```
	///
	/// [`.as_bitslice()`]: Self::as_bitslice
	/// [`.as_raw_slice()`]: Self::as_raw_slice
	#[inline]
	pub fn set_uninitialized(&mut self, value: bool) {
		let head = self.bitspan.head().into_inner() as usize;
		let last = head + self.len();
		let all = self.as_raw_mut_slice().view_bits_mut::<O>();
		unsafe {
			all.get_unchecked_mut(.. head).fill(value);
			all.get_unchecked_mut(last ..).fill(value);
		}
	}

	/// Ensures that the live region of the bit-vector’s contents begins at
	/// the front edge of the buffer.
	///
	/// `BitVec` has performance optimizations where it moves its view of its
	/// buffer contents in order to avoid needless moves of its data within
	/// the buffer. This can lead to unexpected contents of the raw memory
	/// values, so this method ensures that the semantic contents of the
	/// bit-vector match its in-memory storage.
	///
	/// ## Examples
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 0b00_1111_00u8;
	/// let bits = data.view_bits::<Msb0>();
	///
	/// let mut bv = bits[2 .. 6].to_bitvec();
	/// assert_eq!(bv, bits![1; 4]);
	/// assert_eq!(bv.as_raw_slice()[0], data);
	///
	/// bv.force_align();
	/// assert_eq!(bv, bits![1; 4]);
	/// // BitVec does not specify the value of dead bits in its buffer.
	/// assert_eq!(bv.as_raw_slice()[0] & 0xF0, 0xF0);
	/// ```
	#[inline]
	pub fn force_align(&mut self) {
		let mut bitspan = self.bitspan;
		let len = bitspan.len();
		let head = self.bitspan.head();
		if head == BitIdx::MIN {
			return;
		}
		let head = head.into_inner() as usize;
		let last = head + len;
		unsafe {
			bitspan.set_head(BitIdx::MIN);
			bitspan.set_len(last);
			bitspan
				.into_bitslice_mut()
				.copy_within_unchecked(head .., 0);
			bitspan.set_len(len);
		}
		self.bitspan = bitspan;
	}

	/// Sets the starting-bit index of the span descriptor.
	///
	/// ## Safety
	///
	/// The new `head` value must not cause the final bits of the bit-vector
	/// to depart allocated memory.
	pub(crate) unsafe fn set_head(&mut self, new_head: BitIdx<T::Mem>) {
		self.bitspan.set_head(new_head);
	}
	/// Sets a bit-vector’s length without checking that it fits in the
	/// allocated capacity.
	///
	/// ## Safety
	///
	/// `new_len` must not exceed `self.capacity()`.
	pub(crate) unsafe fn set_len_unchecked(&mut self, new_len: usize) {
		self.bitspan.set_len(new_len);
	}

	/// Asserts that a length can be encoded into the bit-vector handle.
	///
	/// ## Panics
	///
	/// This panics if `len` is too large to encode into a `BitSpan`.
	#[inline]
	fn assert_len_encodable(len: usize) {
		assert!(
			BitSpan::<Const, T, O>::len_encodable(len),
			"bit-vector capacity exceeded: {} > {}",
			len,
			BitSlice::<T, O>::MAX_BITS,
		);
	}

	/// Reserves some memory through the underlying vector.
	///
	/// ## Parameters
	///
	/// - `&mut self`
	/// - `additional`: The amount of additional space required after
	///   `self.len()` in the allocation.
	/// - `func`: A function that manipulates the memory reservation of the
	///   underlying vector.
	///
	/// ## Behavior
	///
	/// `func` should perform the appropriate action to allocate space for at
	/// least `additional` more bits. After it returns, the underlying vector
	/// is extended with zero-initialized elements until
	/// `self.len() + additional` bits have been given initialized memory.
	#[inline]
	fn do_reservation(
		&mut self,
		additional: usize,
		func: impl FnOnce(&mut Vec<T>, usize),
	) {
		let len = self.len();
		let new_len = len.saturating_add(additional);
		Self::assert_len_encodable(new_len);

		let (head, elts) = (self.bitspan.head(), self.bitspan.elements());
		let new_elts =
			crate::mem::elts::<T>(head.into_inner() as usize + new_len);
		let extra_elts = new_elts - elts;
		self.with_vec(|vec| {
			func(&mut **vec, extra_elts);
			// Ensure that any new elements are initialized.
			vec.resize_with(new_elts, || <T as BitStore>::ZERO);
		});
	}

	/// Briefly constructs an ordinary `Vec` controlling the buffer, allowing
	/// operations to be applied to the memory allocation.
	///
	/// ## Parameters
	///
	/// - `&mut self`
	/// - `func`: A function which may interact with the memory allocation.
	///
	/// After `func` runs, `self` is updated with the temporary `Vec`’s
	/// address and capacity.
	#[inline]
	fn with_vec<F, R>(&mut self, func: F) -> R
	where F: FnOnce(&mut ManuallyDrop<Vec<T>>) -> R {
		let mut vec = unsafe { ptr::read(self) }
			.into_vec()
			.pipe(ManuallyDrop::new);
		let out = func(&mut vec);

		unsafe {
			self.bitspan.set_address(vec.as_mut_ptr().into_address());
		}
		self.capacity = vec.capacity();
		out
	}
}
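// Editor's sketch: the `into_vec` documentation above recommends calling
// `set_uninitialized` before surrendering the buffer, so that dead bits
// carry a known value when read as raw elements. An editorial illustration,
// not an upstream test.
#[cfg(test)]
mod dead_bits_sketch {
	use alloc::vec;

	use crate::prelude::*;

	#[test]
	fn normalize_dead_bits() {
		// Four live bits occupy the high nibble of one `u8` under `Msb0`;
		// the low nibble is dead until explicitly normalized.
		let mut bv = bitvec![u8, Msb0; 1; 4];
		bv.set_uninitialized(false);
		assert_eq!(bv.into_vec(), vec![0b1111_0000u8]);
	}
}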
bitvec-1.0.1/src/view.rs000064400000000000000000000154241046102023000132140ustar 00000000000000
#![doc = include_str!("../doc/view.md")]

use core::slice;

use crate::{
	array::BitArray,
	order::BitOrder,
	ptr::BitSpanError,
	slice::BitSlice,
	store::BitStore,
};

#[doc = include_str!("../doc/view/BitView.md")]
pub trait BitView {
	/// The underlying element type.
	type Store: BitStore;

	/// Views a memory region as an immutable bit-slice.
	fn view_bits<O>(&self) -> &BitSlice<Self::Store, O>
	where O: BitOrder;

	/// Attempts to view a memory region as an immutable bit-slice.
	///
	/// This may return an error if `self` is too long to view as a
	/// bit-slice.
	fn try_view_bits<O>(
		&self,
	) -> Result<&BitSlice<Self::Store, O>, BitSpanError<Self::Store>>
	where O: BitOrder;

	/// Views a memory region as a mutable bit-slice.
	fn view_bits_mut<O>(&mut self) -> &mut BitSlice<Self::Store, O>
	where O: BitOrder;

	/// Attempts to view a memory region as a mutable bit-slice.
	///
	/// This may return an error if `self` is too long to view as a
	/// bit-slice.
	fn try_view_bits_mut<O>(
		&mut self,
	) -> Result<&mut BitSlice<Self::Store, O>, BitSpanError<Self::Store>>
	where O: BitOrder;
}

#[cfg(not(tarpaulin_include))]
impl<T> BitView for T
where T: BitStore
{
	type Store = Self;

	fn view_bits<O>(&self) -> &BitSlice<T, O>
	where O: BitOrder {
		BitSlice::from_element(self)
	}

	fn try_view_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		Ok(BitSlice::from_element(self))
	}

	fn view_bits_mut<O>(&mut self) -> &mut BitSlice<T, O>
	where O: BitOrder {
		BitSlice::from_element_mut(self)
	}

	fn try_view_bits_mut<O>(
		&mut self,
	) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		Ok(BitSlice::from_element_mut(self))
	}
}

/// Note that overly-large slices may cause the conversions to fail.
#[cfg(not(tarpaulin_include))]
impl<T> BitView for [T]
where T: BitStore
{
	type Store = T;

	#[inline]
	fn view_bits<O>(&self) -> &BitSlice<T, O>
	where O: BitOrder {
		BitSlice::from_slice(self)
	}

	#[inline]
	fn try_view_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		BitSlice::try_from_slice(self)
	}

	#[inline]
	fn view_bits_mut<O>(&mut self) -> &mut BitSlice<T, O>
	where O: BitOrder {
		BitSlice::from_slice_mut(self)
	}

	#[inline]
	fn try_view_bits_mut<O>(
		&mut self,
	) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		BitSlice::try_from_slice_mut(self)
	}
}

/// Note that overly-large arrays may cause the conversions to fail.
#[cfg(not(tarpaulin_include))]
impl<T, const N: usize> BitView for [T; N]
where T: BitStore
{
	type Store = T;

	#[inline]
	fn view_bits<O>(&self) -> &BitSlice<T, O>
	where O: BitOrder {
		BitSlice::from_slice(self)
	}

	#[inline]
	fn try_view_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		BitSlice::try_from_slice(self)
	}

	#[inline]
	fn view_bits_mut<O>(&mut self) -> &mut BitSlice<T, O>
	where O: BitOrder {
		BitSlice::from_slice_mut(self)
	}

	#[inline]
	fn try_view_bits_mut<O>(
		&mut self,
	) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		BitSlice::try_from_slice_mut(self)
	}
}

/// Helper trait for scalars and arrays, but not slices.
pub trait BitViewSized: BitView + Sized {
	/// The zero constant.
	const ZERO: Self;

	/// Wraps `self` in a `BitArray`.
	#[inline]
	fn into_bitarray<O>(self) -> BitArray<Self, O>
	where O: BitOrder {
		BitArray::new(self)
	}

	/// Views the type as a slice of its elements.
	fn as_raw_slice(&self) -> &[Self::Store];

	/// Views the type as a mutable slice of its elements.
	fn as_raw_mut_slice(&mut self) -> &mut [Self::Store];
}

impl<T> BitViewSized for T
where T: BitStore
{
	const ZERO: Self = <T as BitStore>::ZERO;

	#[inline]
	fn as_raw_slice(&self) -> &[Self::Store] {
		slice::from_ref(self)
	}

	#[inline]
	fn as_raw_mut_slice(&mut self) -> &mut [Self::Store] {
		slice::from_mut(self)
	}
}

impl<T, const N: usize> BitViewSized for [T; N]
where T: BitStore
{
	const ZERO: Self = [T::ZERO; N];

	#[inline]
	fn as_raw_slice(&self) -> &[Self::Store] {
		&self[..]
	}

	#[inline]
	fn as_raw_mut_slice(&mut self) -> &mut [Self::Store] {
		&mut self[..]
	}
}

#[doc = include_str!("../doc/view/AsBits.md")]
pub trait AsBits<T>
where T: BitStore
{
	/// Views `self` as an immutable bit-slice region with the `O` ordering.
	fn as_bits<O>(&self) -> &BitSlice<T, O>
	where O: BitOrder;

	/// Attempts to view `self` as an immutable bit-slice region with the
	/// `O` ordering.
	///
	/// This may return an error if `self` is too long to view as a
	/// bit-slice.
	fn try_as_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder;
}
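// Editor's sketch: `AsBits` is blanket-implemented for any `AsRef<[T]>`
// container, so arrays (and `Vec`s, boxes, etc.) gain `.as_bits::<O>()` for
// free. An editorial illustration, not an upstream test.
#[cfg(test)]
mod as_bits_sketch {
	use crate::prelude::*;

	#[test]
	fn containers_as_bits() {
		// `[u8; 2]` is `AsRef<[u8]>`, so it can be viewed as bits directly.
		let a = [0u8, 0xFF];
		let bits = a.as_bits::<Msb0>();
		assert_eq!(bits.len(), 16);
		assert_eq!(bits.count_ones(), 8);

		// The fallible form errs only for regions too long to encode.
		assert!(a.try_as_bits::<Msb0>().is_ok());
	}
}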
#[doc = include_str!("../doc/view/AsMutBits.md")]
pub trait AsMutBits<T>
where T: BitStore
{
	/// Views `self` as a mutable bit-slice region with the `O` ordering.
	fn as_mut_bits<O>(&mut self) -> &mut BitSlice<T, O>
	where O: BitOrder;

	/// Attempts to view `self` as a mutable bit-slice region with the `O`
	/// ordering.
	///
	/// This may return an error if `self` is too long to view as a
	/// bit-slice.
	fn try_as_mut_bits<O>(
		&mut self,
	) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder;
}

#[cfg(not(tarpaulin_include))]
impl<A, T> AsBits<T> for A
where
	A: AsRef<[T]>,
	T: BitStore,
{
	#[inline]
	fn as_bits<O>(&self) -> &BitSlice<T, O>
	where O: BitOrder {
		self.as_ref().view_bits::<O>()
	}

	#[inline]
	fn try_as_bits<O>(&self) -> Result<&BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		self.as_ref().try_view_bits::<O>()
	}
}

#[cfg(not(tarpaulin_include))]
impl<A, T> AsMutBits<T> for A
where
	A: AsMut<[T]>,
	T: BitStore,
{
	#[inline]
	fn as_mut_bits<O>(&mut self) -> &mut BitSlice<T, O>
	where O: BitOrder {
		self.as_mut().view_bits_mut::<O>()
	}

	#[inline]
	fn try_as_mut_bits<O>(
		&mut self,
	) -> Result<&mut BitSlice<T, O>, BitSpanError<T>>
	where O: BitOrder {
		self.as_mut().try_view_bits_mut::<O>()
	}
}

#[cfg(test)]
mod tests {
	use static_assertions::*;

	use super::*;
	use crate::prelude::*;

	#[test]
	fn implementations() {
		let mut byte = 0u8;
		let mut bytes = [0u8; 2];
		assert!(byte.view_bits::<LocalBits>().not_any());
		assert!(byte.view_bits_mut::<LocalBits>().not_any());
		assert!(bytes.view_bits::<LocalBits>().not_any());
		assert!(bytes.view_bits_mut::<LocalBits>().not_any());
		assert!(bytes[..].view_bits::<LocalBits>().not_any());
		assert!(bytes[..].view_bits_mut::<LocalBits>().not_any());

		let mut blank: [u8; 0] = [];
		assert!(blank.view_bits::<LocalBits>().is_empty());
		assert!(blank.view_bits_mut::<LocalBits>().is_empty());

		assert_eq!([0u32; 2].as_bits::<LocalBits>().len(), 64);
		assert_eq!([0u32; 2].as_mut_bits::<LocalBits>().len(), 64);

		assert_eq!(0usize.as_raw_slice().len(), 1);
		assert_eq!(0usize.as_raw_mut_slice().len(), 1);
		assert_eq!(0u32.into_bitarray::<LocalBits>().len(), 32);

		assert_impl_all!(
			[usize; 10]: AsBits<usize>,
			AsMutBits<usize>,
			BitViewSized,
			BitView,
		);
	}
}
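// Editor's sketch: because the blanket implementations forward through
// `AsRef`/`AsMut`, these traits serve well as generic bounds. `fill_ones`
// is a hypothetical helper written for illustration; it is not part of the
// crate's API.
#[cfg(test)]
mod as_mut_bits_sketch {
	use crate::prelude::*;

	/// Sets every bit in any mutable byte container, via `AsMutBits`.
	fn fill_ones<A: AsMutBits<u8>>(buf: &mut A) {
		buf.as_mut_bits::<Lsb0>().fill(true);
	}

	#[test]
	fn generic_bound() {
		let mut arr = [0u8; 2];
		fill_ones(&mut arr);
		assert_eq!(arr, [0xFF; 2]);
	}
}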