heapless-0.8.0/.cargo_vcs_info.json0000644000000001360000000000100126450ustar { "git": { "sha1": "9feb6647225316e9022f3ebdd63ab0c2380edeb8" }, "path_in_vcs": "" }heapless-0.8.0/.github/bors.toml000064400000000000000000000001131046102023000146320ustar 00000000000000block_labels = ["S-blocked"] delete_merged_branches = true status = ["ci"] heapless-0.8.0/.github/workflows/build.yml000064400000000000000000000224161046102023000166610ustar 00000000000000name: Build on: merge_group: pull_request: branches: [main] push: branches: [staging, trying] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: # Run MIRI tests on nightly # NOTE first because it takes the longest to complete testmiri: name: testmiri runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: nightly components: miri - name: Run miri run: MIRIFLAGS=-Zmiri-ignore-leaks cargo miri test # Run cargo test test: name: test runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Run cargo test run: cargo test # Run cargo fmt --check style: name: style runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable with: components: rustfmt - name: cargo fmt --check run: cargo fmt --all -- --check # Compilation check check: name: check runs-on: ubuntu-latest strategy: matrix: target: - x86_64-unknown-linux-gnu - i686-unknown-linux-musl - riscv32imc-unknown-none-elf - armv7r-none-eabi - thumbv6m-none-eabi - thumbv7m-none-eabi - thumbv8m.base-none-eabi - thumbv8m.main-none-eabi steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-build- - name: Install Rust with target (${{ matrix.target }}) uses: dtolnay/rust-toolchain@master with: toolchain: stable targets: ${{ matrix.target }} - name: cargo check run: | cargo check --target=${{ matrix.target }} cargo check --target=${{ matrix.target }} --features="portable-atomic-critical-section" cargo check --target=${{ 
matrix.target }} --features="ufmt serde defmt-03 mpmc_large" doc: name: doc runs-on: ubuntu-latest strategy: matrix: target: - x86_64-unknown-linux-gnu - thumbv7m-none-eabi steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-build- - name: Install nightly Rust with target (${{ matrix.target }}) uses: dtolnay/rust-toolchain@nightly with: targets: ${{ matrix.target }} - name: cargo rustdoc env: {"RUSTDOCFLAGS": "-D warnings --cfg docsrs"} run: cargo rustdoc --target=${{ matrix.target }} --features="ufmt serde defmt-03 mpmc_large portable-atomic-critical-section" # Run cpass tests testcpass: name: testcpass runs-on: ubuntu-latest strategy: matrix: target: - x86_64-unknown-linux-gnu - i686-unknown-linux-musl buildtype: - "" - "--release" steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-build- - name: Install Rust with target (${{ matrix.target }}) uses: dtolnay/rust-toolchain@master with: toolchain: stable targets: ${{ matrix.target }} - name: cargo test run: cargo test --test cpass --target=${{ matrix.target }} --features=serde ${{ matrix.buildtype }} # Run test suite for UI testtsan: name: testtsan runs-on: ubuntu-latest strategy: matrix: target: - x86_64-unknown-linux-gnu buildtype: - "" - "--release" steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust nightly with target (${{ matrix.target }}) uses: dtolnay/rust-toolchain@master with: toolchain: nightly target: ${{ matrix.target }} components: rust-src - name: Export variables run: | echo RUSTFLAGS="-Z sanitizer=thread" >> $GITHUB_ENV echo TSAN_OPTIONS="suppressions=$(pwd)/suppressions.txt" >> $GITHUB_ENV echo $GITHUB_ENV - name: cargo test run: cargo test -Zbuild-std --test tsan --target=${{ matrix.target }} --features=${{ matrix.features }} ${{ matrix.buildtype }} -- --test-threads=1 # Run cfail tests on MSRV testcfail: name: testcfail runs-on: ubuntu-latest defaults: run: working-directory: cfail steps: - name: Checkout uses: actions/checkout@v4 - name: Cache cargo dependencies uses: actions/cache@v3 
with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v3 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust uses: dtolnay/rust-toolchain@stable - name: Run cargo run: cargo run heapless-0.8.0/.github/workflows/changelog.yml000064400000000000000000000015201046102023000175020ustar 00000000000000# Check that the changelog is updated for all changes. # # This is only run for PRs. on: pull_request: # opened, reopened, synchronize are the default types for pull_request. # labeled, unlabeled ensure this check is also run if a label is added or removed. types: [opened, reopened, labeled, unlabeled, synchronize] name: Changelog jobs: changelog: name: Changelog runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v4 - name: Check that changelog updated uses: dangoslen/changelog-enforcer@v3 with: changeLogPath: CHANGELOG.md skipLabels: 'needs-changelog, skip-changelog' missingUpdateErrorMessage: 'Please add a changelog entry in the CHANGELOG.md file.' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}heapless-0.8.0/.github/workflows/properties/build.properties.json000064400000000000000000000001661046102023000234160ustar 00000000000000{ "name": "Build", "description": "Heapless Test Suite", "iconName": "rust", "categories": ["Rust"] } heapless-0.8.0/.gitignore000064400000000000000000000000421046102023000134210ustar 00000000000000**/*.rs.bk .#* Cargo.lock target/ heapless-0.8.0/CHANGELOG.md000064400000000000000000000442161046102023000132550ustar 00000000000000# Change Log All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## [v0.8.0] - 2023-11-07 ### Added - Add `Clone` and `PartialEq` implementations to `HistoryBuffer`. - Added an object pool API. see the `pool::object` module level doc for details - Add `HistoryBuffer::as_slices()` - Implemented `retain` for `IndexMap` and `IndexSet`. - Recover `StableDeref` trait for `pool::object::Object` and `pool::boxed::Box`. - Add polyfills for ESP32S2 - Added `String::from_utf8` and `String::from_utf8_unchecked`. ### Changed - updated from edition 2018 to edition 2021 - [breaking-change] `IndexMap` and `IndexSet` now require that keys implement the `core::hash::Hash` trait instead of the `hash32::Hash` (v0.2.0) trait - move `pool::singleton::Box` to the `pool::box` module - renamed `pool::singleton::Pool` to `BoxPool` and moved it into the `pool::box` module - move `pool::singleton::arc::Arc` to the `pool::arc` module - renamed `pool::singleton::arc::Pool` to `ArcPool` and moved it into the `pool::arc` module - [breaking-change] changed the target support of memory pool API to only support 32-bit x86 and a subset of ARM targets. See the module level documentation of the `pool` module for details - relax trait requirements on `IndexMap` and `IndexSet`. - export `IndexSet` and `IndexMap` iterator types. - [breaking-change] export `IndexMapKeys`, `IndexMapValues` and `IndexMapValuesMut` iterator types. - [breaking-change] this crate now uses `portable-atomic` v1.0 instead of `atomic-polyfill` for emulating CAS instructions on targets where they're not natively available. 
- [breaking-change] `From<&str>` for `String` was replaced with `TryFrom<&str>` because the `From` trait must not fail. - [breaking-change] Renamed Cargo features - `defmt-impl` is now `defmt-03` - `ufmt-impl` is now `ufmt` - `cas` is removed, atomic polyfilling is now opt-in via the `portable-atomic` feature. - `Vec::as_mut_slice` is now a public method. ### Fixed - Fixed a `dropping_references` warning in `LinearMap`. - Fixed `IndexMap` entry API returning the wrong slot after an insert on a vacant entry. (#360) ### Removed - [breaking-change] this crate no longer has a Minimum Supported Rust Version (MSRV) guarantee and should be used with the latest stable version of the Rust toolchain. - [breaking-change] removed the `Init` and `Uninit` type states from `pool::singleton::Box` - [breaking-change] removed the following `pool::singleton::Box` methods: `freeze`, `forget` and `init` - [breaking-change] removed the `pool::singleton::arc::ArcInner` type - [breaking-change] removed support for attributes from `pool!` and `arc_pool!` ## [v0.7.16] - 2022-08-09 ### Added - Add more `PartialEq` implementations to `Vec` where `Vec` is the RHS ### Changed ### Fixed - Clarify in the docs that the capacity of `heapless::String` is in bytes, not characters - Fixed some broken links in the documentation. ## [v0.7.15] - 2022-07-05 ### Added - Added `Vec::insert(index, element)` - Added `Vec::remove(index)` - Added `Vec::retain(f)` - Added `Vec::retain_mut(f)` ## [v0.7.14] - 2022-06-15 ### Added - Added support for AVR architecture. ### Fixed - `IndexSet` and `IndexMap`'s `default` method now checks at compile time that their capacity is a power of two. ## [v0.7.13] - 2022-05-16 ### Added - Added `into_vec` to `BinaryHeap` ## [v0.7.12] - 2022-05-12 ### Added - Added support for AVR architecture. - Add `entry` API to `IndexMap` - Implement `IntoIterator` trait for `IndexMap` - Implement `FromIterator` for `String` - Add `first` and `last` methods to `IndexMap` and `IndexSet` - Add `pop_{front_back}_unchecked` methods to `Deque` ### Changed - Optimize the codegen of `Vec::clone` - `riscv32i` and `riscv32imc` targets unconditionally (e.g. `build --no-default-features`) depend on `atomic-polyfill` ### Fixed - Inserting an item that replaces an already present item will no longer fail with an error ## [v0.7.11] - 2022-05-09 ### Fixed - Fixed `pool` example in docstring. - Fixed undefined behavior in `Vec::truncate()`, `Vec::swap_remove_unchecked()`, and `Hole::move_to()` (internal to the binary heap implementation).
- Fixed `BinaryHeap` elements being dropped twice ## [v0.7.10] - 2022-01-21 ### Fixed - `cargo test` can now run on non-`x86` hosts ### Added - Added `OldestOrdered` iterator for `HistoryBuffer` ### Changed - `atomic-polyfill` is now enabled and used for `cas` atomic emulation on `riscv` targets ## [v0.7.9] - 2021-12-16 ### Fixed - Fix `IndexMap` and `IndexSet` bounds - Make `IndexSet::new()` a `const fn` ## [v0.7.8] - 2021-11-11 ### Added - A span of `defmt` versions is now supported (`0.2` and `0.3`) ## [v0.7.7] - 2021-09-22 ### Fixed - Fixed `Pool` so that it is `Sync` on ARMv6 ## [v0.7.6] - 2021-09-21 ### Added - Added `ArcPool` - Added `Debug` impl for `Deque` ### Fixed - ZSTs in `Pool` now work correctly - Some MIRI errors were resolved - Allow `pool!` on thumbv6 - Fixed possible UB in `Pool` on x86 ## [v0.7.5] - 2021-08-16 ### Added - Added `SortedLinkedList` - Added `Vec::is_empty`; one no longer needs to go through a slice ### Changed - `Vec::pop_unchecked` is now public ## [v0.7.4] - 2021-08-06 ### Added - Implement `Default` for `MpMcQueue`, `Queue` and `HistoryBuffer` - Implement `PartialOrd` and `Ord` for `Vec` and `String` ### Fixed - Fixed comments in SPSC ## [v0.7.3] - 2021-07-01 ### Added - Added `Deque` ### Changed - `Box::freeze` is deprecated due to the possibility of undefined behavior. ## [v0.7.2] - 2021-06-30 ### Added - Added new `Vec::into_array` method - Added const-asserts to all data structures ## [v0.7.1] - 2021-05-23 ### Changed - MPMC is now more generic ### Added - `defmt` for `Vec` and `String` ## [v0.7.0] - 2021-04-23 ### Changed - [breaking-change] Converted all data structures to use the `const generics` MVP - [breaking-change] `HistoryBuffer` is now working with const constructors and non-`Copy` data - [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values - Added missing `Deref`, `AsRef` and `Debug` for `HistoryBuffer` - [breaking-change] `MultiCore`/`SingleCore` and `Uxx` are now removed from `spsc::Queue` - [breaking-change] `spsc::Queue` is now `usize` only - [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207), i.e. it creates an `N - 1` sized queue instead of the old behavior that generated a size `N` queue - [breaking-change] `String` has had `utf8`-related methods removed as this can be done via `str` - [breaking-change] No data structures implement the `AsSlice` traits any more; `AsRef` and `AsMut` are used instead, as they work with any array size ### Fixed - `Pool` and `MPMC` now work on `thumbv6m` - `IndexMap::new()` is now a `const fn` ## [v0.6.1] - 2021-03-02 ### Fixed - Security issue. ## [v0.6.0] - 2021-02-02 ### Changed - [breaking-change] The version of the `generic-array` dependency has been bumped to v0.14.2. ## [v0.5.6] - 2020-09-18 ### Added - Added `as_mut_vec` for `String` - Added `set_len` for `Vec` - Performance improvements in `histbuf` ### Fixed - `Producer` was made `Send` for single core applications ## [v0.5.5] - 2020-05-04 ### Added - Added `HistoryBuffer` - Added extra methods to `Vec`: `from_slice`, `starts_with`, `ends_with` (see the sketch below) - Optional `ufmt` support for `String` and `Vec` - Added `pool` support for bare-metal `armebv7r-` targets - Added `Sync` to `pool` for `x86` ## [v0.5.4] - 2020-04-06 ### Added - Added `StableDeref` implementation for `pool::Box` and `pool::singleton::Box`. ## [v0.5.3] - 2020-01-27 ### Added - Extend the ARMv7-A `Pool` support to the bare-metal `armv7a-` targets.
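A minimal sketch (not part of the original changelog) of the `Vec` helpers mentioned in the v0.5.5 entry above, which are still available in current releases; the element type and capacity are chosen only for illustration:

```rust
use heapless::Vec;

fn main() {
    // `from_slice` clones the elements into a fixed-capacity Vec and fails if they do not fit.
    let v: Vec<u8, 8> = Vec::from_slice(&[1, 2, 3, 4]).unwrap();

    // `starts_with` / `ends_with` behave like their slice counterparts.
    assert!(v.starts_with(&[1, 2]));
    assert!(v.ends_with(&[3, 4]));
}
```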
## [v0.5.2] - 2020-01-15 ### Fixed - Fixed incorrect overflow behavior in computation of capacities - Fixed edge case in `mpmc::Queue::dequeue` that led to an infinite loop - `IndexMap` and `LinearMap` are now deserialized as maps, rather than as sequences - Fixed compilation of this crate on built-in targets that don't have CAS instructions ### Changed - `spsc::Queue` iterators now implement the double ended iterator trait ### Added - An opt-out `cas` feature to disable parts of the API that use CAS instructions. Useful if using a custom (i.e. not built-in) rustc target that does not have CAS instructions. - singleton `Pool` support on ARMv7-A devices ## [v0.5.1] - 2019-08-29 ### Added - Added armv8 support - Added `Queue::peek` - Added `BinaryHeap::peek_mut` ## [v0.5.0] - 2019-07-12 ### Added - `Pool` now implements the `Sync` trait when targeting ARMv7-R. - Most data structures can now be constructed in "const context" (e.g. `static [mut]` variables) using a newtype in `heapless::i`. - `Pool` has gained a `grow_exact` method to more efficiently use statically allocated memory. - The `pool!` macro now accepts attributes. - `mpmc::Q*`, a family of fixed-capacity multiple-producer multiple-consumer lock-free queues (see the sketch below). ### Changed - [breaking-change] `binary_heap::Kind` is now a sealed trait. ### Removed - [breaking-change] The "smaller-atomics" feature has been removed. It is now always enabled. - [breaking-change] The "min-const-fn" feature has been removed. It is now always enabled. - [breaking-change] The MSRV has been bumped to Rust 1.36.0. - [breaking-change] The version of the `generic-array` dependency has been bumped to v0.13.0. ## [v0.4.4] - 2019-05-02 ### Added - Implemented `PartialEq`, `PartialOrd`, `Eq`, `Ord` and `Hash` for `pool::Box` and `pool::singleton::Box`. ### Fixed - Fixed UB in our internal, stable re-implementation of `core::mem::MaybeUninit` that occurred when using some of our data structures with types that implement `Drop`. ## [v0.4.3] - 2019-04-22 ### Added - Added a memory pool that's lock-free and interrupt-safe on the ARMv7-M architecture. - `IndexMap` has gained `Eq` and `PartialEq` implementations. ## [v0.4.2] - 2019-02-12 ### Added - All containers now implement `Clone` - `spsc::Queue` now implements `Debug`, `Hash`, `PartialEq` and `Eq` - `LinearMap` now implements `Debug`, `FromIterator`, `IntoIter`, `PartialEq`, `Eq` and `Default` - `BinaryHeap` now implements `Debug` and `Default` - `String` now implements `FromStr`, `Hash`, `From` and `Default` - `Vec` now implements `Hash` and `Default` - A "serde" Cargo feature that when enabled adds `serde::Serialize` and `serde::Deserialize` implementations to each collection. ## [v0.4.1] - 2018-12-16 ### Changed - Add a new type parameter to `spsc::Queue` that indicates whether the queue is only single-core safe, or multi-core safe. By default the queue is multi-core safe; this preserves the current semantics. New `unsafe` constructors have been added to create the single-core variant. ## [v0.4.0] - 2018-10-19 ### Changed - [breaking-change] All Cargo features are disabled by default. This crate now compiles on stable by default. - [breaking-change] RingBuffer has been renamed to spsc::Queue. The ring_buffer module has been renamed to spsc. - [breaking-change] The bounds on spsc::Queue have changed. ### Removed - [breaking-change] The sealed `Uxx` trait has been removed from the public API.
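For the `mpmc::Q*` queues noted in the v0.5.0 entry above, a minimal usage sketch (not part of the original changelog), assuming the `Q4` alias provided by recent releases; element type and size are chosen only for illustration:

```rust
use heapless::mpmc::Q4;

// A fixed-capacity lock-free queue; `new` is const, so the queue can live in a `static`.
static QUEUE: Q4<u8> = Q4::new();

fn main() {
    // Both ends take `&self`, so producers and consumers can share the queue.
    QUEUE.enqueue(42).unwrap();
    assert_eq!(QUEUE.dequeue(), Some(42));
}
```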
## [v0.3.7] - 2018-08-19 ### Added - Implemented `IntoIterator` and `FromIterator` for `Vec` (see the sketch below) - `ready` methods to `ring_buffer::{Consumer,Producer}` - An opt-out "const-fn" Cargo feature that turns `const` functions into normal functions when disabled. - An opt-out "smaller-atomics" Cargo feature that removes the ability to shrink the size of `RingBuffer` when disabled. ### Changed - This crate now compiles on stable when both the "const-fn" and "smaller-atomics" features are disabled. ### Fixed - The `RingBuffer.len` function - Compilation on recent nightlies ## [v0.3.6] - 2018-05-04 ### Fixed - The capacity of `RingBuffer`. It should be the requested capacity plus one, not twice that plus one. ## [v0.3.5] - 2018-05-03 ### Added - `RingBuffer.enqueue_unchecked`, an unchecked version of `RingBuffer.enqueue` ## [v0.3.4] - 2018-04-28 ### Added - `BinaryHeap.pop_unchecked`, an unchecked version of `BinaryHeap.pop` ## [v0.3.3] - 2018-04-28 ### Added - `BinaryHeap.push_unchecked`, an unchecked version of `BinaryHeap.push` ## [v0.3.2] - 2018-04-27 ### Added - A re-export of `generic_array::ArrayLength`, for convenience. ## [v0.3.1] - 2018-04-23 ### Added - Fixed capacity implementations of `IndexMap` and `IndexSet`. - An `Extend` implementation for `Vec` - More `PartialEq` implementations for `Vec` ## [v0.3.0] - 2018-04-22 ### Changed - [breaking-change] The capacity of all data structures must now be specified using type-level integers (cf. `typenum`). See documentation for details. - [breaking-change] `BufferFullError` has been removed in favor of (a) returning ownership of the item that couldn't be added to the collection (cf. `Vec.push`), or (b) returning the unit type when the argument was not consumed (cf. `Vec.extend_from_slice`). ## [v0.2.7] - 2018-04-20 ### Added - Unchecked methods to dequeue and enqueue items into a `RingBuffer` via the `Consumer` and `Producer` end points. ### Changed - `RingBuffer` now has a generic index type, which defaults to `usize` for backward compatibility. Changing the index type to `u8` or `u16` reduces the footprint of the `RingBuffer` but limits its maximum capacity (254 and 65534, respectively). ## [v0.2.6] - 2018-04-18 ### Added - A `BinaryHeap` implementation. `BinaryHeap` is a priority queue implemented with a binary heap. ## [v0.2.5] - 2018-04-13 ### Fixed - Dereferencing `heapless::Vec` no longer incurs a bounds check. ## [v0.2.4] - 2018-03-12 ### Fixed - `LinearMap::new` is now a const fn ## [v0.2.3] - 2018-03-11 ### Added - A `swap_remove` method to `Vec` - A `LinearMap` implementation. `LinearMap` is a map / dict backed by an array that performs lookups via linear search. ## [v0.2.2] - 2018-03-01 ### Added - Fixed-size version of `std::String` ## [v0.2.1] - 2017-12-21 ### Added - `Vec` now implements `fmt::Debug`, `PartialEq` and `Eq`. - `resize` and `resize_default` methods to `Vec`. ## [v0.2.0] - 2017-11-22 ### Added - A single producer single consumer mode to `RingBuffer`. - A `truncate` method to `Vec`. ### Changed - [breaking-change] Both `Vec::new` and `RingBuffer::new` no longer require an initial value. The signature of `new` is now `const fn() -> Self`. - [breaking-change] The error type of all operations that may fail has changed from `()` to `BufferFullError`. - Both `RingBuffer` and `Vec` now support arrays of _any_ size for their backup storage.
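For the `IntoIterator` / `FromIterator` implementations on `Vec` noted in the v0.3.7 entry above (both still present in current releases), a minimal sketch not taken from the changelog itself; the element type and capacity are arbitrary:

```rust
use heapless::Vec;

fn main() {
    // `FromIterator`: collect into a fixed-capacity Vec (the items must fit within `N`).
    let v: Vec<i32, 8> = (0..5).collect();

    // `IntoIterator` by value: consume the Vec.
    let sum: i32 = v.into_iter().sum();
    assert_eq!(sum, 10);
}
```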
## [v0.1.0] - 2017-04-27 - Initial release [Unreleased]: https://github.com/rust-embedded/heapless/compare/v0.8.0...HEAD [v0.8.0]: https://github.com/rust-embedded/heapless/compare/v0.7.16...v0.8.0 [v0.7.16]: https://github.com/rust-embedded/heapless/compare/v0.7.15...v0.7.16 [v0.7.15]: https://github.com/rust-embedded/heapless/compare/v0.7.14...v0.7.15 [v0.7.14]: https://github.com/rust-embedded/heapless/compare/v0.7.13...v0.7.14 [v0.7.13]: https://github.com/rust-embedded/heapless/compare/v0.7.12...v0.7.13 [v0.7.12]: https://github.com/rust-embedded/heapless/compare/v0.7.11...v0.7.12 [v0.7.11]: https://github.com/rust-embedded/heapless/compare/v0.7.10...v0.7.11 [v0.7.10]: https://github.com/rust-embedded/heapless/compare/v0.7.9...v0.7.10 [v0.7.9]: https://github.com/rust-embedded/heapless/compare/v0.7.8...v0.7.9 [v0.7.8]: https://github.com/rust-embedded/heapless/compare/v0.7.7...v0.7.8 [v0.7.7]: https://github.com/rust-embedded/heapless/compare/v0.7.6...v0.7.7 [v0.7.6]: https://github.com/rust-embedded/heapless/compare/v0.7.5...v0.7.6 [v0.7.5]: https://github.com/rust-embedded/heapless/compare/v0.7.4...v0.7.5 [v0.7.4]: https://github.com/rust-embedded/heapless/compare/v0.7.3...v0.7.4 [v0.7.3]: https://github.com/rust-embedded/heapless/compare/v0.7.2...v0.7.3 [v0.7.2]: https://github.com/rust-embedded/heapless/compare/v0.7.1...v0.7.2 [v0.7.1]: https://github.com/rust-embedded/heapless/compare/v0.7.0...v0.7.1 [v0.7.0]: https://github.com/rust-embedded/heapless/compare/v0.6.1...v0.7.0 [v0.6.1]: https://github.com/rust-embedded/heapless/compare/v0.6.0...v0.6.1 [v0.6.0]: https://github.com/rust-embedded/heapless/compare/v0.5.5...v0.6.0 [v0.5.5]: https://github.com/rust-embedded/heapless/compare/v0.5.4...v0.5.5 [v0.5.4]: https://github.com/rust-embedded/heapless/compare/v0.5.3...v0.5.4 [v0.5.3]: https://github.com/rust-embedded/heapless/compare/v0.5.2...v0.5.3 [v0.5.2]: https://github.com/rust-embedded/heapless/compare/v0.5.1...v0.5.2 [v0.5.1]: https://github.com/rust-embedded/heapless/compare/v0.5.0...v0.5.1 [v0.5.0]: https://github.com/rust-embedded/heapless/compare/v0.4.4...v0.5.0 [v0.4.4]: https://github.com/rust-embedded/heapless/compare/v0.4.3...v0.4.4 [v0.4.3]: https://github.com/rust-embedded/heapless/compare/v0.4.2...v0.4.3 [v0.4.2]: https://github.com/rust-embedded/heapless/compare/v0.4.1...v0.4.2 [v0.4.1]: https://github.com/rust-embedded/heapless/compare/v0.4.0...v0.4.1 [v0.4.0]: https://github.com/rust-embedded/heapless/compare/v0.3.7...v0.4.0 [v0.3.7]: https://github.com/rust-embedded/heapless/compare/v0.3.6...v0.3.7 [v0.3.6]: https://github.com/rust-embedded/heapless/compare/v0.3.5...v0.3.6 [v0.3.5]: https://github.com/rust-embedded/heapless/compare/v0.3.4...v0.3.5 [v0.3.4]: https://github.com/rust-embedded/heapless/compare/v0.3.3...v0.3.4 [v0.3.3]: https://github.com/rust-embedded/heapless/compare/v0.3.2...v0.3.3 [v0.3.2]: https://github.com/rust-embedded/heapless/compare/v0.3.1...v0.3.2 [v0.3.1]: https://github.com/rust-embedded/heapless/compare/v0.3.0...v0.3.1 [v0.3.0]: https://github.com/rust-embedded/heapless/compare/v0.2.7...v0.3.0 [v0.2.7]: https://github.com/rust-embedded/heapless/compare/v0.2.6...v0.2.7 [v0.2.6]: https://github.com/rust-embedded/heapless/compare/v0.2.5...v0.2.6 [v0.2.5]: https://github.com/rust-embedded/heapless/compare/v0.2.4...v0.2.5 [v0.2.4]: https://github.com/rust-embedded/heapless/compare/v0.2.3...v0.2.4 [v0.2.3]: https://github.com/rust-embedded/heapless/compare/v0.2.2...v0.2.3 [v0.2.2]: 
https://github.com/rust-embedded/heapless/compare/v0.2.1...v0.2.2 [v0.2.1]: https://github.com/rust-embedded/heapless/compare/v0.2.0...v0.2.1 [v0.2.0]: https://github.com/rust-embedded/heapless/compare/v0.1.0...v0.2.0 heapless-0.8.0/CODE_OF_CONDUCT.md000064400000000000000000000107561046102023000142450ustar 00000000000000# The Rust Code of Conduct ## Conduct **Contact**: [Libs team](https://github.com/rust-embedded/wg#the-libs-team) * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. * On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. * Please be kind and courteous. There's no need to be mean or rude. * Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. * We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups. * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Libs team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. * Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome. ## Moderation These are the policies for upholding our community's standards of conduct. 1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) 2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. 3. Moderators will first respond to such remarks with a warning. 4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off. 5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded. 6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. 7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed. 8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others. In the Rust community we strive to go the extra step to look out for each other. 
Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. The enforcement policies listed above apply to all official embedded WG venues; including official IRC channels (#rust-embedded); GitHub repositories under rust-embedded; and all forums under rust-embedded.org (forum.rust-embedded.org). *Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).* [team]: https://github.com/rust-embedded/wg#the-libs-team heapless-0.8.0/Cargo.toml0000644000000040350000000000100106450ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" name = "heapless" version = "0.8.0" authors = [ "Jorge Aparicio ", "Per Lindgren ", "Emil Fresk ", ] description = "`static` friendly data structures that don't require dynamic memory allocation" documentation = "https://docs.rs/heapless" readme = "README.md" keywords = [ "static", "no-heap", ] categories = [ "data-structures", "no-std", ] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-embedded/heapless" [package.metadata.docs.rs] features = [ "ufmt", "serde", "defmt-03", "mpmc_large", "portable-atomic-critical-section", ] rustdoc-args = [ "--cfg", "docsrs", ] targets = ["i686-unknown-linux-gnu"] [dependencies.defmt] version = ">=0.2.0,<0.4" optional = true [dependencies.hash32] version = "0.3.0" [dependencies.portable-atomic] version = "1.0" optional = true [dependencies.serde] version = "1" optional = true default-features = false [dependencies.stable_deref_trait] version = "1" default-features = false [dependencies.ufmt-write] version = "0.1" optional = true [dev-dependencies.ufmt] version = "0.2" [features] defmt-03 = ["dep:defmt"] mpmc_large = [] portable-atomic = ["dep:portable-atomic"] portable-atomic-critical-section = [ "dep:portable-atomic", "portable-atomic", "portable-atomic?/critical-section", ] portable-atomic-unsafe-assume-single-core = [ "dep:portable-atomic", "portable-atomic", "portable-atomic?/unsafe-assume-single-core", ] serde = ["dep:serde"] ufmt = ["dep:ufmt-write"] heapless-0.8.0/Cargo.toml.orig000064400000000000000000000040651046102023000143310ustar 00000000000000[package] authors = [ "Jorge Aparicio ", "Per Lindgren ", "Emil Fresk ", ] categories = ["data-structures", "no-std"] description = "`static` friendly data structures that don't require dynamic memory allocation" documentation = "https://docs.rs/heapless" edition = "2021" keywords = ["static", "no-heap"] license = "MIT OR Apache-2.0" name = "heapless" repository = "https://github.com/rust-embedded/heapless" version = "0.8.0" [features] # Enable polyfilling of atomics via `portable-atomic`. # `portable-atomic` polyfills some functionality by default, but to get full atomics you must # enable one of its features to tell it how to do it. See `portable-atomic` documentation for details. portable-atomic = ["dep:portable-atomic"] # Enable polyfilling of atomics via portable-atomic, using critical section for locking portable-atomic-critical-section = ["dep:portable-atomic", "portable-atomic", "portable-atomic?/critical-section"] # Enable polyfilling of atomics via portable-atomic, using disabling interrupts for locking. # WARNING: this is only sound for single-core bare-metal privileged-mode targets! portable-atomic-unsafe-assume-single-core = ["dep:portable-atomic", "portable-atomic", "portable-atomic?/unsafe-assume-single-core"] # implement serde traits. serde = ["dep:serde"] # implement ufmt traits. ufmt = ["dep:ufmt-write"] # Implement defmt::Format from defmt v0.3 defmt-03 = ["dep:defmt"] # Enable larger MPMC sizes. 
mpmc_large = [] [dependencies] portable-atomic = { version = "1.0", optional = true } hash32 = "0.3.0" serde = { version = "1", optional = true, default-features = false } stable_deref_trait = { version = "1", default-features = false } ufmt-write = { version = "0.1", optional = true } defmt = { version = ">=0.2.0,<0.4", optional = true } [dev-dependencies] ufmt = "0.2" [package.metadata.docs.rs] features = ["ufmt", "serde", "defmt-03", "mpmc_large", "portable-atomic-critical-section"] # for the pool module targets = ["i686-unknown-linux-gnu"] rustdoc-args = ["--cfg", "docsrs"] heapless-0.8.0/LICENSE-APACHE000064400000000000000000000251371046102023000133710ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. heapless-0.8.0/LICENSE-MIT000064400000000000000000000020421046102023000130670ustar 00000000000000Copyright (c) 2017 Jorge Aparicio Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. heapless-0.8.0/README.md000064400000000000000000000022001046102023000127060ustar 00000000000000[![crates.io](https://img.shields.io/crates/v/heapless.svg)](https://crates.io/crates/heapless) [![crates.io](https://img.shields.io/crates/d/heapless.svg)](https://crates.io/crates/heapless) # `heapless` > `static` friendly data structures that don't require dynamic memory allocation This project is developed and maintained by the [libs team]. ## [Documentation](https://docs.rs/heapless/latest/heapless) ## [Change log](CHANGELOG.md) ## Tests ``` console $ # run all $ cargo test --features serde $ # run only for example histbuf tests $ cargo test histbuf --features serde ``` ## License Licensed under either of - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. [libs team]: https://github.com/rust-embedded/wg#the-libs-team heapless-0.8.0/build.rs000064400000000000000000000051361046102023000131070ustar 00000000000000#![deny(warnings)] use std::{ env, error::Error, fs, path::Path, process::{Command, ExitStatus, Stdio}, }; fn main() -> Result<(), Box> { let target = env::var("TARGET")?; // Manually list targets that have atomic load/store, but no CAS. 
// Remove when `cfg(target_has_atomic_load_store)` is stable. // last updated nightly-2023-10-28 match &target[..] { "armv4t-none-eabi" | "armv5te-none-eabi" | "avr-unknown-gnu-atmega328" | "bpfeb-unknown-none" | "bpfel-unknown-none" | "thumbv4t-none-eabi" | "thumbv5te-none-eabi" | "thumbv6m-none-eabi" => println!("cargo:rustc-cfg=has_atomic_load_store"), _ => {} }; // AArch64 instruction set contains `clrex` but not `ldrex` or `strex`; the // probe will succeed when we already know to deny this target from LLSC. if !target.starts_with("aarch64") { match compile_probe(ARM_LLSC_PROBE) { Some(status) if status.success() => println!("cargo:rustc-cfg=arm_llsc"), _ => {} } } Ok(()) } const ARM_LLSC_PROBE: &str = r#" #![no_std] // `no_mangle` forces codegen, which makes llvm check the contents of the `asm!` macro #[no_mangle] unsafe fn asm() { core::arch::asm!("clrex"); } "#; // this function was taken from anyhow v1.0.63 build script // https://crates.io/crates/anyhow/1.0.63 (last visited 2022-09-02) // the code is licensed under 'MIT or APACHE-2.0' fn compile_probe(source: &str) -> Option { let rustc = env::var_os("RUSTC")?; let out_dir = env::var_os("OUT_DIR")?; let probefile = Path::new(&out_dir).join("probe.rs"); fs::write(&probefile, source).ok()?; // Make sure to pick up Cargo rustc configuration. let mut cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER") { let mut cmd = Command::new(wrapper); // The wrapper's first argument is supposed to be the path to rustc. cmd.arg(rustc); cmd } else { Command::new(rustc) }; cmd.stderr(Stdio::null()) .arg("--edition=2018") .arg("--crate-name=probe") .arg("--crate-type=lib") .arg("--out-dir") .arg(out_dir) .arg(probefile); if let Some(target) = env::var_os("TARGET") { cmd.arg("--target").arg(target); } // If Cargo wants to set RUSTFLAGS, use that. if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { if !rustflags.is_empty() { for arg in rustflags.split('\x1f') { cmd.arg(arg); } } } cmd.status().ok() } heapless-0.8.0/src/binary_heap.rs000064400000000000000000000463011046102023000150570ustar 00000000000000//! A priority queue implemented with a binary heap. //! //! Insertion and popping the largest element have `O(log n)` time complexity. Checking the largest //! / smallest element is `O(1)`. // TODO not yet implemented // Converting a vector to a binary heap can be done in-place, and has `O(n)` complexity. A binary // heap can also be converted to a sorted vector in-place, allowing it to be used for an `O(n log // n)` in-place heapsort. use core::{ cmp::Ordering, fmt, marker::PhantomData, mem::{self, ManuallyDrop}, ops::{Deref, DerefMut}, ptr, slice, }; use crate::vec::Vec; /// Min-heap pub enum Min {} /// Max-heap pub enum Max {} /// The binary heap kind: min-heap or max-heap pub trait Kind: private::Sealed { #[doc(hidden)] fn ordering() -> Ordering; } impl Kind for Min { fn ordering() -> Ordering { Ordering::Less } } impl Kind for Max { fn ordering() -> Ordering { Ordering::Greater } } /// Sealed traits mod private { pub trait Sealed {} } impl private::Sealed for Max {} impl private::Sealed for Min {} /// A priority queue implemented with a binary heap. /// /// This can be either a min-heap or a max-heap. /// /// It is a logic error for an item to be modified in such a way that the item's ordering relative /// to any other item, as determined by the `Ord` trait, changes while it is in the heap. This is /// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code. 
/// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// /// // We can use peek to look at the next item in the heap. In this case, /// // there's no items in there yet so we get None. /// assert_eq!(heap.peek(), None); /// /// // Let's add some scores... /// heap.push(1).unwrap(); /// heap.push(5).unwrap(); /// heap.push(2).unwrap(); /// /// // Now peek shows the most important item in the heap. /// assert_eq!(heap.peek(), Some(&5)); /// /// // We can check the length of a heap. /// assert_eq!(heap.len(), 3); /// /// // We can iterate over the items in the heap, although they are returned in /// // a random order. /// for x in &heap { /// println!("{}", x); /// } /// /// // If we instead pop these scores, they should come back in order. /// assert_eq!(heap.pop(), Some(5)); /// assert_eq!(heap.pop(), Some(2)); /// assert_eq!(heap.pop(), Some(1)); /// assert_eq!(heap.pop(), None); /// /// // We can clear the heap of any remaining items. /// heap.clear(); /// /// // The heap should now be empty. /// assert!(heap.is_empty()) /// ``` pub struct BinaryHeap { pub(crate) _kind: PhantomData, pub(crate) data: Vec, } impl BinaryHeap { /* Constructors */ /// Creates an empty BinaryHeap as a $K-heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// // allocate the binary heap on the stack /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(4).unwrap(); /// /// // allocate the binary heap in a static variable /// static mut HEAP: BinaryHeap = BinaryHeap::new(); /// ``` pub const fn new() -> Self { Self { _kind: PhantomData, data: Vec::new(), } } } impl BinaryHeap where T: Ord, K: Kind, { /* Public API */ /// Returns the capacity of the binary heap. pub fn capacity(&self) -> usize { self.data.capacity() } /// Drops all items from the binary heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert!(!heap.is_empty()); /// /// heap.clear(); /// /// assert!(heap.is_empty()); /// ``` pub fn clear(&mut self) { self.data.clear() } /// Returns the length of the binary heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert_eq!(heap.len(), 2); /// ``` pub fn len(&self) -> usize { self.data.len() } /// Checks if the binary heap is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// /// assert!(heap.is_empty()); /// /// heap.push(3).unwrap(); /// heap.push(5).unwrap(); /// heap.push(1).unwrap(); /// /// assert!(!heap.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns an iterator visiting all values in the underlying vector, in arbitrary order. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(2).unwrap(); /// heap.push(3).unwrap(); /// heap.push(4).unwrap(); /// /// // Print 1, 2, 3, 4 in arbitrary order /// for x in heap.iter() { /// println!("{}", x); /// /// } /// ``` pub fn iter(&self) -> slice::Iter<'_, T> { self.data.as_slice().iter() } /// Returns a mutable iterator visiting all values in the underlying vector, in arbitrary order. 
/// /// **WARNING** Mutating the items in the binary heap can leave the heap in an inconsistent /// state. pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> { self.data.as_mut_slice().iter_mut() } /// Returns the *top* (greatest if max-heap, smallest if min-heap) item in the binary heap, or /// None if it is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// assert_eq!(heap.peek(), None); /// /// heap.push(1).unwrap(); /// heap.push(5).unwrap(); /// heap.push(2).unwrap(); /// assert_eq!(heap.peek(), Some(&5)); /// ``` pub fn peek(&self) -> Option<&T> { self.data.as_slice().get(0) } /// Returns a mutable reference to the greatest item in the binary heap, or /// `None` if it is empty. /// /// Note: If the `PeekMut` value is leaked, the heap may be in an /// inconsistent state. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// assert!(heap.peek_mut().is_none()); /// /// heap.push(1); /// heap.push(5); /// heap.push(2); /// { /// let mut val = heap.peek_mut().unwrap(); /// *val = 0; /// } /// /// assert_eq!(heap.peek(), Some(&2)); /// ``` pub fn peek_mut(&mut self) -> Option> { if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: true, }) } } /// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and /// returns it, or None if it is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert_eq!(heap.pop(), Some(3)); /// assert_eq!(heap.pop(), Some(1)); /// assert_eq!(heap.pop(), None); /// ``` pub fn pop(&mut self) -> Option { if self.is_empty() { None } else { Some(unsafe { self.pop_unchecked() }) } } /// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and /// returns it, without checking if the binary heap is empty. pub unsafe fn pop_unchecked(&mut self) -> T { let mut item = self.data.pop_unchecked(); if !self.is_empty() { mem::swap(&mut item, self.data.as_mut_slice().get_unchecked_mut(0)); self.sift_down_to_bottom(0); } item } /// Pushes an item onto the binary heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(3).unwrap(); /// heap.push(5).unwrap(); /// heap.push(1).unwrap(); /// /// assert_eq!(heap.len(), 3); /// assert_eq!(heap.peek(), Some(&5)); /// ``` pub fn push(&mut self, item: T) -> Result<(), T> { if self.data.is_full() { return Err(item); } unsafe { self.push_unchecked(item) } Ok(()) } /// Pushes an item onto the binary heap without first checking if it's full. pub unsafe fn push_unchecked(&mut self, item: T) { let old_len = self.len(); self.data.push_unchecked(item); self.sift_up(0, old_len); } /// Returns the underlying ```Vec```. Order is arbitrary and time is O(1). 
pub fn into_vec(self) -> Vec { self.data } /* Private API */ fn sift_down_to_bottom(&mut self, mut pos: usize) { let end = self.len(); let start = pos; unsafe { let mut hole = Hole::new(self.data.as_mut_slice(), pos); let mut child = 2 * pos + 1; while child < end { let right = child + 1; // compare with the greater of the two children if right < end && hole.get(child).cmp(hole.get(right)) != K::ordering() { child = right; } hole.move_to(child); child = 2 * hole.pos() + 1; } pos = hole.pos; } self.sift_up(start, pos); } fn sift_up(&mut self, start: usize, pos: usize) -> usize { unsafe { // Take out the value at `pos` and create a hole. let mut hole = Hole::new(self.data.as_mut_slice(), pos); while hole.pos() > start { let parent = (hole.pos() - 1) / 2; if hole.element().cmp(hole.get(parent)) != K::ordering() { break; } hole.move_to(parent); } hole.pos() } } } /// Hole represents a hole in a slice i.e. an index without valid value /// (because it was moved from or duplicated). /// In drop, `Hole` will restore the slice by filling the hole /// position with the value that was originally removed. struct Hole<'a, T> { data: &'a mut [T], /// `elt` is always `Some` from new until drop. elt: ManuallyDrop, pos: usize, } impl<'a, T> Hole<'a, T> { /// Create a new Hole at index `pos`. /// /// Unsafe because pos must be within the data slice. #[inline] unsafe fn new(data: &'a mut [T], pos: usize) -> Self { debug_assert!(pos < data.len()); let elt = ptr::read(data.get_unchecked(pos)); Hole { data, elt: ManuallyDrop::new(elt), pos, } } #[inline] fn pos(&self) -> usize { self.pos } /// Returns a reference to the element removed. #[inline] fn element(&self) -> &T { &self.elt } /// Returns a reference to the element at `index`. /// /// Unsafe because index must be within the data slice and not equal to pos. #[inline] unsafe fn get(&self, index: usize) -> &T { debug_assert!(index != self.pos); debug_assert!(index < self.data.len()); self.data.get_unchecked(index) } /// Move hole to new location /// /// Unsafe because index must be within the data slice and not equal to pos. #[inline] unsafe fn move_to(&mut self, index: usize) { debug_assert!(index != self.pos); debug_assert!(index < self.data.len()); let ptr = self.data.as_mut_ptr(); let index_ptr: *const _ = ptr.add(index); let hole_ptr = ptr.add(self.pos); ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1); self.pos = index; } } /// Structure wrapping a mutable reference to the greatest item on a /// `BinaryHeap`. /// /// This `struct` is created by [`BinaryHeap::peek_mut`]. /// See its documentation for more. pub struct PeekMut<'a, T, K, const N: usize> where T: Ord, K: Kind, { heap: &'a mut BinaryHeap, sift: bool, } impl Drop for PeekMut<'_, T, K, N> where T: Ord, K: Kind, { fn drop(&mut self) { if self.sift { self.heap.sift_down_to_bottom(0); } } } impl Deref for PeekMut<'_, T, K, N> where T: Ord, K: Kind, { type Target = T; fn deref(&self) -> &T { debug_assert!(!self.heap.is_empty()); // SAFE: PeekMut is only instantiated for non-empty heaps unsafe { self.heap.data.as_slice().get_unchecked(0) } } } impl DerefMut for PeekMut<'_, T, K, N> where T: Ord, K: Kind, { fn deref_mut(&mut self) -> &mut T { debug_assert!(!self.heap.is_empty()); // SAFE: PeekMut is only instantiated for non-empty heaps unsafe { self.heap.data.as_mut_slice().get_unchecked_mut(0) } } } impl<'a, T, K, const N: usize> PeekMut<'a, T, K, N> where T: Ord, K: Kind, { /// Removes the peeked value from the heap and returns it. 
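///
/// A short usage sketch:
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max, PeekMut};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(5).unwrap();
///
/// // Remove the current top element through the `PeekMut` handle.
/// let top = PeekMut::pop(heap.peek_mut().unwrap());
/// assert_eq!(top, 5);
/// assert_eq!(heap.len(), 1);
/// ```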
pub fn pop(mut this: PeekMut<'a, T, K, N>) -> T { let value = this.heap.pop().unwrap(); this.sift = false; value } } impl<'a, T> Drop for Hole<'a, T> { #[inline] fn drop(&mut self) { // fill the hole again unsafe { let pos = self.pos; ptr::write(self.data.get_unchecked_mut(pos), ptr::read(&*self.elt)); } } } impl Default for BinaryHeap where T: Ord, K: Kind, { fn default() -> Self { Self::new() } } impl Clone for BinaryHeap where K: Kind, T: Ord + Clone, { fn clone(&self) -> Self { Self { _kind: self._kind, data: self.data.clone(), } } } impl fmt::Debug for BinaryHeap where K: Kind, T: Ord + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } impl<'a, T, K, const N: usize> IntoIterator for &'a BinaryHeap where K: Kind, T: Ord, { type Item = &'a T; type IntoIter = slice::Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } #[cfg(test)] mod tests { use std::vec::Vec; use crate::binary_heap::{BinaryHeap, Max, Min}; #[test] fn static_new() { static mut _B: BinaryHeap = BinaryHeap::new(); } #[test] fn drop() { droppable!(); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } #[test] fn into_vec() { droppable!(); let mut h: BinaryHeap = BinaryHeap::new(); h.push(Droppable::new()).ok().unwrap(); h.push(Droppable::new()).ok().unwrap(); h.pop().unwrap(); assert_eq!(Droppable::count(), 1); let v = h.into_vec(); assert_eq!(Droppable::count(), 1); core::mem::drop(v); assert_eq!(Droppable::count(), 0); } #[test] fn min() { let mut heap = BinaryHeap::<_, Min, 16>::new(); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(3).unwrap(); heap.push(17).unwrap(); heap.push(19).unwrap(); heap.push(36).unwrap(); heap.push(7).unwrap(); heap.push(25).unwrap(); heap.push(100).unwrap(); assert_eq!( heap.iter().cloned().collect::>(), [1, 2, 3, 17, 19, 36, 7, 25, 100] ); assert_eq!(heap.pop(), Some(1)); assert_eq!( heap.iter().cloned().collect::>(), [2, 17, 3, 25, 19, 36, 7, 100] ); assert_eq!(heap.pop(), Some(2)); assert_eq!(heap.pop(), Some(3)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(17)); assert_eq!(heap.pop(), Some(19)); assert_eq!(heap.pop(), Some(25)); assert_eq!(heap.pop(), Some(36)); assert_eq!(heap.pop(), Some(100)); assert_eq!(heap.pop(), None); assert!(heap.peek_mut().is_none()); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(10).unwrap(); { let mut val = heap.peek_mut().unwrap(); *val = 7; } assert_eq!(heap.pop(), Some(2)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(10)); assert_eq!(heap.pop(), None); } #[test] fn max() { let mut heap = BinaryHeap::<_, Max, 16>::new(); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(3).unwrap(); heap.push(17).unwrap(); heap.push(19).unwrap(); heap.push(36).unwrap(); heap.push(7).unwrap(); heap.push(25).unwrap(); heap.push(100).unwrap(); assert_eq!( heap.iter().cloned().collect::>(), [100, 36, 19, 25, 3, 2, 
7, 1, 17] ); assert_eq!(heap.pop(), Some(100)); assert_eq!( heap.iter().cloned().collect::>(), [36, 25, 19, 17, 3, 2, 7, 1] ); assert_eq!(heap.pop(), Some(36)); assert_eq!(heap.pop(), Some(25)); assert_eq!(heap.pop(), Some(19)); assert_eq!(heap.pop(), Some(17)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(3)); assert_eq!(heap.pop(), Some(2)); assert_eq!(heap.pop(), Some(1)); assert_eq!(heap.pop(), None); assert!(heap.peek_mut().is_none()); heap.push(1).unwrap(); heap.push(9).unwrap(); heap.push(10).unwrap(); { let mut val = heap.peek_mut().unwrap(); *val = 7; } assert_eq!(heap.pop(), Some(9)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(1)); assert_eq!(heap.pop(), None); } } heapless-0.8.0/src/de.rs000064400000000000000000000224551046102023000131720ustar 00000000000000use crate::{ binary_heap::Kind as BinaryHeapKind, BinaryHeap, Deque, IndexMap, IndexSet, LinearMap, String, Vec, }; use core::{ fmt, hash::{Hash, Hasher}, marker::PhantomData, }; use hash32::BuildHasherDefault; use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess}; // Sequential containers impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap where T: Ord + Deserialize<'de>, KIND: BinaryHeapKind, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>); impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N> where T: Ord + Deserialize<'de>, KIND: BinaryHeapKind, { type Value = BinaryHeap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = BinaryHeap::new(); while let Some(value) = seq.next_element()? { if values.push(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet, N> where T: Eq + Hash + Deserialize<'de>, S: Hasher + Default, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>); impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N> where T: Eq + Hash + Deserialize<'de>, S: Hasher + Default, { type Value = IndexSet, N>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = IndexSet::new(); while let Some(value) = seq.next_element()? { if values.insert(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } impl<'de, T, const N: usize> Deserialize<'de> for Vec where T: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>); impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N> where T: Deserialize<'de>, { type Value = Vec; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = Vec::new(); while let Some(value) = seq.next_element()? 
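// `push` fails when the fixed-capacity `Vec` is already full; that is reported as an
// `invalid_length` error of `capacity() + 1`, i.e. one element more than fits.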
{ if values.push(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } impl<'de, T, const N: usize> Deserialize<'de> for Deque where T: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>); impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N> where T: Deserialize<'de>, { type Value = Deque; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = Deque::new(); while let Some(value) = seq.next_element()? { if values.push_back(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } // Dictionaries impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, S: Default + Hasher, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>); impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, S: Default + Hasher, { type Value = IndexMap, N>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") } fn visit_map(self, mut map: A) -> Result where A: MapAccess<'de>, { let mut values = IndexMap::new(); while let Some((key, value)) = map.next_entry()? { if values.insert(key, value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_map(ValueVisitor(PhantomData)) } } impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap where K: Eq + Deserialize<'de>, V: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>); impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N> where K: Eq + Deserialize<'de>, V: Deserialize<'de>, { type Value = LinearMap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") } fn visit_map(self, mut map: A) -> Result where A: MapAccess<'de>, { let mut values = LinearMap::new(); while let Some((key, value)) = map.next_entry()? 
{ if values.insert(key, value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_map(ValueVisitor(PhantomData)) } } // String containers impl<'de, const N: usize> Deserialize<'de> for String { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, const N: usize>(PhantomData<&'de ()>); impl<'de, const N: usize> de::Visitor<'de> for ValueVisitor<'de, N> { type Value = String; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "a string no more than {} bytes long", N as u64) } fn visit_str(self, v: &str) -> Result where E: de::Error, { let mut s = String::new(); s.push_str(v) .map_err(|_| E::invalid_length(v.len(), &self))?; Ok(s) } fn visit_bytes(self, v: &[u8]) -> Result where E: de::Error, { let mut s = String::new(); s.push_str( core::str::from_utf8(v) .map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?, ) .map_err(|_| E::invalid_length(v.len(), &self))?; Ok(s) } } deserializer.deserialize_str(ValueVisitor::<'de, N>(PhantomData)) } } heapless-0.8.0/src/defmt.rs000064400000000000000000000007241046102023000136740ustar 00000000000000//! Defmt implementations for heapless types //! use crate::Vec; use defmt::Formatter; impl defmt::Format for Vec where T: defmt::Format, { fn format(&self, fmt: Formatter<'_>) { defmt::write!(fmt, "{=[?]}", self.as_slice()) } } impl defmt::Format for crate::String where u8: defmt::Format, { fn format(&self, fmt: Formatter<'_>) { defmt::write!(fmt, "{=str}", self.as_str()); } } heapless-0.8.0/src/deque.rs000064400000000000000000000561401046102023000137030ustar 00000000000000use core::fmt; use core::iter::FusedIterator; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::{ptr, slice}; /// A fixed capacity double-ended queue. /// /// # Examples /// /// ``` /// use heapless::Deque; /// /// // A deque with a fixed capacity of 8 elements allocated on the stack /// let mut deque = Deque::<_, 8>::new(); /// /// // You can use it as a good old FIFO queue. /// deque.push_back(1); /// deque.push_back(2); /// assert_eq!(deque.len(), 2); /// /// assert_eq!(deque.pop_front(), Some(1)); /// assert_eq!(deque.pop_front(), Some(2)); /// assert_eq!(deque.len(), 0); /// /// // Deque is double-ended, you can push and pop from the front and back. /// deque.push_back(1); /// deque.push_front(2); /// deque.push_back(3); /// deque.push_front(4); /// assert_eq!(deque.pop_front(), Some(4)); /// assert_eq!(deque.pop_front(), Some(2)); /// assert_eq!(deque.pop_front(), Some(1)); /// assert_eq!(deque.pop_front(), Some(3)); /// /// // You can iterate it, yielding all the elements front-to-back. /// for x in &deque { /// println!("{}", x); /// } /// ``` pub struct Deque { buffer: [MaybeUninit; N], /// Front index. Always 0..=(N-1) front: usize, /// Back index. Always 0..=(N-1). back: usize, /// Used to distinguish "empty" and "full" cases when `front == back`. /// May only be `true` if `front == back`, always `false` otherwise. 
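/// (When `front == back` the ring holds either zero or `N` elements; this flag records
/// which of the two it is.)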
full: bool, } impl Deque { const INIT: MaybeUninit = MaybeUninit::uninit(); /// Constructs a new, empty deque with a fixed capacity of `N` /// /// # Examples /// /// ``` /// use heapless::Deque; /// /// // allocate the deque on the stack /// let mut x: Deque = Deque::new(); /// /// // allocate the deque in a static variable /// static mut X: Deque = Deque::new(); /// ``` pub const fn new() -> Self { // Const assert N > 0 crate::sealed::greater_than_0::(); Self { buffer: [Self::INIT; N], front: 0, back: 0, full: false, } } fn increment(i: usize) -> usize { if i + 1 == N { 0 } else { i + 1 } } fn decrement(i: usize) -> usize { if i == 0 { N - 1 } else { i - 1 } } /// Returns the maximum number of elements the deque can hold. pub const fn capacity(&self) -> usize { N } /// Returns the number of elements currently in the deque. pub const fn len(&self) -> usize { if self.full { N } else if self.back < self.front { self.back + N - self.front } else { self.back - self.front } } /// Clears the deque, removing all values. pub fn clear(&mut self) { // safety: we're immediately setting a consistent empty state. unsafe { self.drop_contents() } self.front = 0; self.back = 0; self.full = false; } /// Drop all items in the `Deque`, leaving the state `back/front/full` unmodified. /// /// safety: leaves the `Deque` in an inconsistent state, so can cause duplicate drops. unsafe fn drop_contents(&mut self) { // We drop each element used in the deque by turning into a &mut[T] let (a, b) = self.as_mut_slices(); ptr::drop_in_place(a); ptr::drop_in_place(b); } /// Returns whether the deque is empty. pub fn is_empty(&self) -> bool { self.front == self.back && !self.full } /// Returns whether the deque is full (i.e. if `len() == capacity()`. pub fn is_full(&self) -> bool { self.full } /// Returns a pair of slices which contain, in order, the contents of the `Deque`. pub fn as_slices(&self) -> (&[T], &[T]) { // NOTE(unsafe) avoid bound checks in the slicing operation unsafe { if self.is_empty() { (&[], &[]) } else if self.back <= self.front { ( slice::from_raw_parts( self.buffer.as_ptr().add(self.front) as *const T, N - self.front, ), slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.back), ) } else { ( slice::from_raw_parts( self.buffer.as_ptr().add(self.front) as *const T, self.back - self.front, ), &[], ) } } } /// Returns a pair of mutable slices which contain, in order, the contents of the `Deque`. pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) { let ptr = self.buffer.as_mut_ptr(); // NOTE(unsafe) avoid bound checks in the slicing operation unsafe { if self.is_empty() { (&mut [], &mut []) } else if self.back <= self.front { ( slice::from_raw_parts_mut(ptr.add(self.front) as *mut T, N - self.front), slice::from_raw_parts_mut(ptr as *mut T, self.back), ) } else { ( slice::from_raw_parts_mut( ptr.add(self.front) as *mut T, self.back - self.front, ), &mut [], ) } } } /// Provides a reference to the front element, or None if the `Deque` is empty. pub fn front(&self) -> Option<&T> { if self.is_empty() { None } else { Some(unsafe { &*self.buffer.get_unchecked(self.front).as_ptr() }) } } /// Provides a mutable reference to the front element, or None if the `Deque` is empty. pub fn front_mut(&mut self) -> Option<&mut T> { if self.is_empty() { None } else { Some(unsafe { &mut *self.buffer.get_unchecked_mut(self.front).as_mut_ptr() }) } } /// Provides a reference to the back element, or None if the `Deque` is empty. 
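///
/// A short usage sketch:
///
/// ```
/// use heapless::Deque;
///
/// let mut deque: Deque<u8, 4> = Deque::new();
/// deque.push_back(1).unwrap();
/// deque.push_back(2).unwrap();
///
/// assert_eq!(deque.back(), Some(&2));
/// assert_eq!(deque.front(), Some(&1));
/// ```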
pub fn back(&self) -> Option<&T> { if self.is_empty() { None } else { let index = Self::decrement(self.back); Some(unsafe { &*self.buffer.get_unchecked(index).as_ptr() }) } } /// Provides a mutable reference to the back element, or None if the `Deque` is empty. pub fn back_mut(&mut self) -> Option<&mut T> { if self.is_empty() { None } else { let index = Self::decrement(self.back); Some(unsafe { &mut *self.buffer.get_unchecked_mut(index).as_mut_ptr() }) } } /// Removes the item from the front of the deque and returns it, or `None` if it's empty pub fn pop_front(&mut self) -> Option { if self.is_empty() { None } else { Some(unsafe { self.pop_front_unchecked() }) } } /// Removes the item from the back of the deque and returns it, or `None` if it's empty pub fn pop_back(&mut self) -> Option { if self.is_empty() { None } else { Some(unsafe { self.pop_back_unchecked() }) } } /// Appends an `item` to the front of the deque /// /// Returns back the `item` if the deque is full pub fn push_front(&mut self, item: T) -> Result<(), T> { if self.is_full() { Err(item) } else { unsafe { self.push_front_unchecked(item) } Ok(()) } } /// Appends an `item` to the back of the deque /// /// Returns back the `item` if the deque is full pub fn push_back(&mut self, item: T) -> Result<(), T> { if self.is_full() { Err(item) } else { unsafe { self.push_back_unchecked(item) } Ok(()) } } /// Removes an item from the front of the deque and returns it, without checking that the deque /// is not empty /// /// # Safety /// /// It's undefined behavior to call this on an empty deque pub unsafe fn pop_front_unchecked(&mut self) -> T { debug_assert!(!self.is_empty()); let index = self.front; self.full = false; self.front = Self::increment(self.front); (self.buffer.get_unchecked_mut(index).as_ptr() as *const T).read() } /// Removes an item from the back of the deque and returns it, without checking that the deque /// is not empty /// /// # Safety /// /// It's undefined behavior to call this on an empty deque pub unsafe fn pop_back_unchecked(&mut self) -> T { debug_assert!(!self.is_empty()); self.full = false; self.back = Self::decrement(self.back); (self.buffer.get_unchecked_mut(self.back).as_ptr() as *const T).read() } /// Appends an `item` to the front of the deque /// /// # Safety /// /// This assumes the deque is not full. pub unsafe fn push_front_unchecked(&mut self, item: T) { debug_assert!(!self.is_full()); let index = Self::decrement(self.front); // NOTE: the memory slot that we are about to write to is uninitialized. We assign // a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory *self.buffer.get_unchecked_mut(index) = MaybeUninit::new(item); self.front = index; if self.front == self.back { self.full = true; } } /// Appends an `item` to the back of the deque /// /// # Safety /// /// This assumes the deque is not full. pub unsafe fn push_back_unchecked(&mut self, item: T) { debug_assert!(!self.is_full()); // NOTE: the memory slot that we are about to write to is uninitialized. We assign // a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory *self.buffer.get_unchecked_mut(self.back) = MaybeUninit::new(item); self.back = Self::increment(self.back); if self.front == self.back { self.full = true; } } /// Returns an iterator over the deque. 
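///
/// Iteration yields the elements front to back, as in this short sketch:
///
/// ```
/// use heapless::Deque;
///
/// let mut deque: Deque<u8, 4> = Deque::new();
/// deque.push_back(1).unwrap();
/// deque.push_front(0).unwrap();
///
/// let mut iter = deque.iter();
/// assert_eq!(iter.next(), Some(&0));
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), None);
/// ```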
pub fn iter(&self) -> Iter<'_, T, N> { let done = self.is_empty(); Iter { _phantom: PhantomData, buffer: &self.buffer as *const MaybeUninit, front: self.front, back: self.back, done, } } /// Returns an iterator that allows modifying each value. pub fn iter_mut(&mut self) -> IterMut<'_, T, N> { let done = self.is_empty(); IterMut { _phantom: PhantomData, buffer: &mut self.buffer as *mut _ as *mut MaybeUninit, front: self.front, back: self.back, done, } } } // Trait implementations impl Default for Deque { fn default() -> Self { Self::new() } } impl Drop for Deque { fn drop(&mut self) { // safety: `self` is left in an inconsistent state but it doesn't matter since // it's getting dropped. Nothing should be able to observe `self` after drop. unsafe { self.drop_contents() } } } impl fmt::Debug for Deque { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self).finish() } } /// An iterator that moves out of a [`Deque`]. /// /// This struct is created by calling the `into_iter` method. /// #[derive(Clone)] pub struct IntoIter { deque: Deque, } impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { self.deque.pop_front() } } impl IntoIterator for Deque { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { deque: self } } } /// An iterator over the elements of a [`Deque`]. /// /// This struct is created by calling the `iter` method. #[derive(Clone)] pub struct Iter<'a, T, const N: usize> { buffer: *const MaybeUninit, _phantom: PhantomData<&'a T>, front: usize, back: usize, done: bool, } impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> { type Item = &'a T; fn next(&mut self) -> Option { if self.done { None } else { let index = self.front; self.front = Deque::::increment(self.front); if self.front == self.back { self.done = true; } Some(unsafe { &*(self.buffer.add(index) as *const T) }) } } fn size_hint(&self) -> (usize, Option) { let len = if self.done { 0 } else if self.back <= self.front { self.back + N - self.front } else { self.back - self.front }; (len, Some(len)) } } impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> { fn next_back(&mut self) -> Option { if self.done { None } else { self.back = Deque::::decrement(self.back); if self.front == self.back { self.done = true; } Some(unsafe { &*(self.buffer.add(self.back) as *const T) }) } } } impl<'a, T, const N: usize> ExactSizeIterator for Iter<'a, T, N> {} impl<'a, T, const N: usize> FusedIterator for Iter<'a, T, N> {} /// An iterator over the elements of a [`Deque`]. /// /// This struct is created by calling the `iter` method. 
pub struct IterMut<'a, T, const N: usize> { buffer: *mut MaybeUninit, _phantom: PhantomData<&'a mut T>, front: usize, back: usize, done: bool, } impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> { type Item = &'a mut T; fn next(&mut self) -> Option { if self.done { None } else { let index = self.front; self.front = Deque::::increment(self.front); if self.front == self.back { self.done = true; } Some(unsafe { &mut *(self.buffer.add(index) as *mut T) }) } } fn size_hint(&self) -> (usize, Option) { let len = if self.done { 0 } else if self.back <= self.front { self.back + N - self.front } else { self.back - self.front }; (len, Some(len)) } } impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> { fn next_back(&mut self) -> Option { if self.done { None } else { self.back = Deque::::decrement(self.back); if self.front == self.back { self.done = true; } Some(unsafe { &mut *(self.buffer.add(self.back) as *mut T) }) } } } impl<'a, T, const N: usize> ExactSizeIterator for IterMut<'a, T, N> {} impl<'a, T, const N: usize> FusedIterator for IterMut<'a, T, N> {} impl<'a, T, const N: usize> IntoIterator for &'a Deque { type Item = &'a T; type IntoIter = Iter<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, T, const N: usize> IntoIterator for &'a mut Deque { type Item = &'a mut T; type IntoIter = IterMut<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl Clone for Deque where T: Clone, { fn clone(&self) -> Self { let mut res = Deque::new(); for i in self { // safety: the original and new deques have the same capacity, so it can // not become full. unsafe { res.push_back_unchecked(i.clone()) } } res } } #[cfg(test)] mod tests { use crate::Deque; #[test] fn static_new() { static mut _V: Deque = Deque::new(); } #[test] fn stack_new() { let mut _v: Deque = Deque::new(); } #[test] fn drop() { droppable!(); { let mut v: Deque = Deque::new(); v.push_back(Droppable::new()).ok().unwrap(); v.push_back(Droppable::new()).ok().unwrap(); v.pop_front().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: Deque = Deque::new(); v.push_back(Droppable::new()).ok().unwrap(); v.push_back(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: Deque = Deque::new(); v.push_front(Droppable::new()).ok().unwrap(); v.push_front(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } #[test] fn full() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_front(1).unwrap(); v.push_back(2).unwrap(); v.push_back(3).unwrap(); assert!(v.push_front(4).is_err()); assert!(v.push_back(4).is_err()); assert!(v.is_full()); } #[test] fn empty() { let mut v: Deque = Deque::new(); assert!(v.is_empty()); v.push_back(0).unwrap(); assert!(!v.is_empty()); v.push_front(1).unwrap(); assert!(!v.is_empty()); v.pop_front().unwrap(); v.pop_front().unwrap(); assert!(v.pop_front().is_none()); assert!(v.pop_back().is_none()); assert!(v.is_empty()); } #[test] fn front_back() { let mut v: Deque = Deque::new(); assert_eq!(v.front(), None); assert_eq!(v.front_mut(), None); assert_eq!(v.back(), None); assert_eq!(v.back_mut(), None); v.push_back(4).unwrap(); assert_eq!(v.front(), Some(&4)); assert_eq!(v.front_mut(), Some(&mut 4)); assert_eq!(v.back(), Some(&4)); assert_eq!(v.back_mut(), Some(&mut 4)); v.push_front(3).unwrap(); assert_eq!(v.front(), Some(&3)); assert_eq!(v.front_mut(), Some(&mut 3)); assert_eq!(v.back(), Some(&4)); assert_eq!(v.back_mut(), Some(&mut 4)); v.pop_back().unwrap(); assert_eq!(v.front(), Some(&3)); 
assert_eq!(v.front_mut(), Some(&mut 3)); assert_eq!(v.back(), Some(&3)); assert_eq!(v.back_mut(), Some(&mut 3)); v.pop_front().unwrap(); assert_eq!(v.front(), None); assert_eq!(v.front_mut(), None); assert_eq!(v.back(), None); assert_eq!(v.back_mut(), None); } #[test] fn iter() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_back(1).unwrap(); v.push_front(2).unwrap(); v.push_front(3).unwrap(); v.pop_back().unwrap(); v.push_front(4).unwrap(); let mut items = v.iter(); assert_eq!(items.next(), Some(&4)); assert_eq!(items.next(), Some(&3)); assert_eq!(items.next(), Some(&2)); assert_eq!(items.next(), Some(&0)); assert_eq!(items.next(), None); } #[test] fn iter_mut() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_back(1).unwrap(); v.push_front(2).unwrap(); v.push_front(3).unwrap(); v.pop_back().unwrap(); v.push_front(4).unwrap(); let mut items = v.iter_mut(); assert_eq!(items.next(), Some(&mut 4)); assert_eq!(items.next(), Some(&mut 3)); assert_eq!(items.next(), Some(&mut 2)); assert_eq!(items.next(), Some(&mut 0)); assert_eq!(items.next(), None); } #[test] fn iter_move() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_back(1).unwrap(); v.push_back(2).unwrap(); v.push_back(3).unwrap(); let mut items = v.into_iter(); assert_eq!(items.next(), Some(0)); assert_eq!(items.next(), Some(1)); assert_eq!(items.next(), Some(2)); assert_eq!(items.next(), Some(3)); assert_eq!(items.next(), None); } #[test] fn iter_move_drop() { droppable!(); { let mut deque: Deque = Deque::new(); deque.push_back(Droppable::new()).ok().unwrap(); deque.push_back(Droppable::new()).ok().unwrap(); let mut items = deque.into_iter(); // Move all let _ = items.next(); let _ = items.next(); } assert_eq!(Droppable::count(), 0); { let mut deque: Deque = Deque::new(); deque.push_back(Droppable::new()).ok().unwrap(); deque.push_back(Droppable::new()).ok().unwrap(); let _items = deque.into_iter(); // Move none } assert_eq!(Droppable::count(), 0); { let mut deque: Deque = Deque::new(); deque.push_back(Droppable::new()).ok().unwrap(); deque.push_back(Droppable::new()).ok().unwrap(); let mut items = deque.into_iter(); let _ = items.next(); // Move partly } assert_eq!(Droppable::count(), 0); } #[test] fn push_and_pop() { let mut q: Deque = Deque::new(); assert_eq!(q.len(), 0); assert_eq!(q.pop_front(), None); assert_eq!(q.pop_back(), None); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); assert_eq!(q.len(), 1); assert_eq!(q.pop_back(), Some(0)); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); q.push_back(1).unwrap(); q.push_front(2).unwrap(); q.push_front(3).unwrap(); assert_eq!(q.len(), 4); // deque contains: 3 2 0 1 assert_eq!(q.pop_front(), Some(3)); assert_eq!(q.len(), 3); assert_eq!(q.pop_front(), Some(2)); assert_eq!(q.len(), 2); assert_eq!(q.pop_back(), Some(1)); assert_eq!(q.len(), 1); assert_eq!(q.pop_front(), Some(0)); assert_eq!(q.len(), 0); // deque is now empty assert_eq!(q.pop_front(), None); assert_eq!(q.pop_back(), None); assert_eq!(q.len(), 0); } #[test] fn as_slices() { let mut q: Deque = Deque::new(); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); q.push_back(1).unwrap(); q.push_back(2).unwrap(); q.push_back(3).unwrap(); assert_eq!(q.as_slices(), (&[0, 1, 2, 3][..], &[][..])); q.pop_front().unwrap(); assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[][..])); q.push_back(4).unwrap(); assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[4][..])); } #[test] fn clear() { let mut q: Deque = Deque::new(); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); q.push_back(1).unwrap(); 
q.push_back(2).unwrap(); q.push_back(3).unwrap(); assert_eq!(q.len(), 4); q.clear(); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); assert_eq!(q.len(), 1); } } heapless-0.8.0/src/histbuf.rs000064400000000000000000000366361046102023000142540ustar 00000000000000use core::fmt; use core::mem::MaybeUninit; use core::ops::Deref; use core::ptr; use core::slice; /// A "history buffer", similar to a write-only ring buffer of fixed length. /// /// This buffer keeps a fixed number of elements. On write, the oldest element /// is overwritten. Thus, the buffer is useful to keep a history of values with /// some desired depth, and for example calculate a rolling average. /// /// # Examples /// ``` /// use heapless::HistoryBuffer; /// /// // Initialize a new buffer with 8 elements. /// let mut buf = HistoryBuffer::<_, 8>::new(); /// /// // Starts with no data /// assert_eq!(buf.recent(), None); /// /// buf.write(3); /// buf.write(5); /// buf.extend(&[4, 4]); /// /// // The most recent written element is a four. /// assert_eq!(buf.recent(), Some(&4)); /// /// // To access all elements in an unspecified order, use `as_slice()`. /// for el in buf.as_slice() { println!("{:?}", el); } /// /// // Now we can prepare an average of all values, which comes out to 4. /// let avg = buf.as_slice().iter().sum::() / buf.len(); /// assert_eq!(avg, 4); /// ``` pub struct HistoryBuffer { data: [MaybeUninit; N], write_at: usize, filled: bool, } impl HistoryBuffer { const INIT: MaybeUninit = MaybeUninit::uninit(); /// Constructs a new history buffer. /// /// The construction of a `HistoryBuffer` works in `const` contexts. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// // Allocate a 16-element buffer on the stack /// let x: HistoryBuffer = HistoryBuffer::new(); /// assert_eq!(x.len(), 0); /// ``` #[inline] pub const fn new() -> Self { // Const assert crate::sealed::greater_than_0::(); Self { data: [Self::INIT; N], write_at: 0, filled: false, } } /// Clears the buffer, replacing every element with the default value of /// type `T`. pub fn clear(&mut self) { *self = Self::new(); } } impl HistoryBuffer where T: Copy + Clone, { /// Constructs a new history buffer, where every element is the given value. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// // Allocate a 16-element buffer on the stack /// let mut x: HistoryBuffer = HistoryBuffer::new_with(4); /// // All elements are four /// assert_eq!(x.as_slice(), [4; 16]); /// ``` #[inline] pub fn new_with(t: T) -> Self { Self { data: [MaybeUninit::new(t); N], write_at: 0, filled: true, } } /// Clears the buffer, replacing every element with the given value. pub fn clear_with(&mut self, t: T) { *self = Self::new_with(t); } } impl HistoryBuffer { /// Returns the current fill level of the buffer. #[inline] pub fn len(&self) -> usize { if self.filled { N } else { self.write_at } } /// Returns the capacity of the buffer, which is the length of the /// underlying backing array. #[inline] pub fn capacity(&self) -> usize { N } /// Writes an element to the buffer, overwriting the oldest value. pub fn write(&mut self, t: T) { if self.filled { // Drop the old before we overwrite it. unsafe { ptr::drop_in_place(self.data[self.write_at].as_mut_ptr()) } } self.data[self.write_at] = MaybeUninit::new(t); self.write_at += 1; if self.write_at == self.capacity() { self.write_at = 0; self.filled = true; } } /// Clones and writes all elements in a slice to the buffer. 
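///
/// A short usage sketch:
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut buf: HistoryBuffer<u8, 4> = HistoryBuffer::new();
/// buf.extend_from_slice(&[1, 2, 3]);
///
/// assert_eq!(buf.len(), 3);
/// assert_eq!(buf.recent(), Some(&3));
/// ```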
/// /// If the slice is longer than the buffer, only the last `self.len()` /// elements will actually be stored. pub fn extend_from_slice(&mut self, other: &[T]) where T: Clone, { for item in other { self.write(item.clone()); } } /// Returns a reference to the most recently written value. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// let mut x: HistoryBuffer = HistoryBuffer::new(); /// x.write(4); /// x.write(10); /// assert_eq!(x.recent(), Some(&10)); /// ``` pub fn recent(&self) -> Option<&T> { if self.write_at == 0 { if self.filled { Some(unsafe { &*self.data[self.capacity() - 1].as_ptr() }) } else { None } } else { Some(unsafe { &*self.data[self.write_at - 1].as_ptr() }) } } /// Returns the array slice backing the buffer, without keeping track /// of the write position. Therefore, the element order is unspecified. pub fn as_slice(&self) -> &[T] { unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.len()) } } /// Returns a pair of slices which contain, in order, the contents of the buffer. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// let mut buffer: HistoryBuffer = HistoryBuffer::new(); /// buffer.extend([0, 0, 0]); /// buffer.extend([1, 2, 3, 4, 5, 6]); /// assert_eq!(buffer.as_slices(), (&[1, 2, 3][..], &[4, 5, 6][..])); /// ``` pub fn as_slices(&self) -> (&[T], &[T]) { let buffer = self.as_slice(); if !self.filled { (buffer, &[]) } else { (&buffer[self.write_at..], &buffer[..self.write_at]) } } /// Returns an iterator for iterating over the buffer from oldest to newest. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// let mut buffer: HistoryBuffer = HistoryBuffer::new(); /// buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]); /// let expected = [1, 2, 3, 4, 5, 6]; /// for (x, y) in buffer.oldest_ordered().zip(expected.iter()) { /// assert_eq!(x, y) /// } /// /// ``` pub fn oldest_ordered<'a>(&'a self) -> OldestOrdered<'a, T, N> { if self.filled { OldestOrdered { buf: self, cur: self.write_at, wrapped: false, } } else { // special case: act like we wrapped already to handle empty buffer. 
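// With `cur == 0` and `wrapped == true`, iteration stops as soon as `cur` reaches
// `write_at`, so an empty buffer yields `None` on the very first call to `next()`.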
OldestOrdered { buf: self, cur: 0, wrapped: true, } } } } impl Extend for HistoryBuffer { fn extend(&mut self, iter: I) where I: IntoIterator, { for item in iter.into_iter() { self.write(item); } } } impl<'a, T, const N: usize> Extend<&'a T> for HistoryBuffer where T: 'a + Clone, { fn extend(&mut self, iter: I) where I: IntoIterator, { self.extend(iter.into_iter().cloned()) } } impl Clone for HistoryBuffer where T: Clone, { fn clone(&self) -> Self { let mut ret = Self::new(); for (new, old) in ret.data.iter_mut().zip(self.as_slice()) { new.write(old.clone()); } ret.filled = self.filled; ret.write_at = self.write_at; ret } } impl Drop for HistoryBuffer { fn drop(&mut self) { unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut( self.data.as_mut_ptr() as *mut T, self.len(), )) } } } impl Deref for HistoryBuffer { type Target = [T]; fn deref(&self) -> &[T] { self.as_slice() } } impl AsRef<[T]> for HistoryBuffer { #[inline] fn as_ref(&self) -> &[T] { self } } impl fmt::Debug for HistoryBuffer where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { <[T] as fmt::Debug>::fmt(self, f) } } impl Default for HistoryBuffer { fn default() -> Self { Self::new() } } impl PartialEq for HistoryBuffer where T: PartialEq, { fn eq(&self, other: &Self) -> bool { self.oldest_ordered().eq(other.oldest_ordered()) } } /// An iterator on the underlying buffer ordered from oldest data to newest #[derive(Clone)] pub struct OldestOrdered<'a, T, const N: usize> { buf: &'a HistoryBuffer, cur: usize, wrapped: bool, } impl<'a, T, const N: usize> Iterator for OldestOrdered<'a, T, N> { type Item = &'a T; fn next(&mut self) -> Option<&'a T> { if self.cur == self.buf.len() && self.buf.filled { // roll-over self.cur = 0; self.wrapped = true; } if self.cur == self.buf.write_at && self.wrapped { return None; } let item = &self.buf[self.cur]; self.cur += 1; Some(item) } } #[cfg(test)] mod tests { use crate::HistoryBuffer; use core::fmt::Debug; use core::sync::atomic::{AtomicUsize, Ordering}; #[test] fn new() { let x: HistoryBuffer = HistoryBuffer::new_with(1); assert_eq!(x.len(), 4); assert_eq!(x.as_slice(), [1; 4]); assert_eq!(*x, [1; 4]); let x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.as_slice(), []); } #[test] fn write() { let mut x: HistoryBuffer = HistoryBuffer::new(); x.write(1); x.write(4); assert_eq!(x.as_slice(), [1, 4]); x.write(5); x.write(6); x.write(10); assert_eq!(x.as_slice(), [10, 4, 5, 6]); x.extend([11, 12].iter()); assert_eq!(x.as_slice(), [10, 11, 12, 6]); } #[test] fn clear() { let mut x: HistoryBuffer = HistoryBuffer::new_with(1); x.clear(); assert_eq!(x.as_slice(), []); let mut x: HistoryBuffer = HistoryBuffer::new(); x.clear_with(1); assert_eq!(x.as_slice(), [1; 4]); } #[test] fn clone() { let mut x: HistoryBuffer = HistoryBuffer::new(); for i in 0..10 { assert_eq!(x.as_slice(), x.clone().as_slice()); x.write(i); } // Records number of clones locally and globally. 
static GLOBAL: AtomicUsize = AtomicUsize::new(0); #[derive(Default, PartialEq, Debug)] struct InstrumentedClone(usize); impl Clone for InstrumentedClone { fn clone(&self) -> Self { GLOBAL.fetch_add(1, Ordering::Relaxed); Self(self.0 + 1) } } let mut y: HistoryBuffer = HistoryBuffer::new(); let _ = y.clone(); assert_eq!(GLOBAL.load(Ordering::Relaxed), 0); y.write(InstrumentedClone(0)); assert_eq!(GLOBAL.load(Ordering::Relaxed), 0); assert_eq!(y.clone().as_slice(), [InstrumentedClone(1)]); assert_eq!(GLOBAL.load(Ordering::Relaxed), 1); y.write(InstrumentedClone(0)); assert_eq!(GLOBAL.load(Ordering::Relaxed), 1); assert_eq!( y.clone().as_slice(), [InstrumentedClone(1), InstrumentedClone(1)] ); assert_eq!(GLOBAL.load(Ordering::Relaxed), 3); assert_eq!( y.clone().clone().clone().as_slice(), [InstrumentedClone(3), InstrumentedClone(3)] ); assert_eq!(GLOBAL.load(Ordering::Relaxed), 9); } #[test] fn recent() { let mut x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.recent(), None); x.write(1); x.write(4); assert_eq!(x.recent(), Some(&4)); x.write(5); x.write(6); x.write(10); assert_eq!(x.recent(), Some(&10)); } #[test] fn as_slice() { let mut x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.as_slice(), []); x.extend([1, 2, 3, 4, 5].iter()); assert_eq!(x.as_slice(), [5, 2, 3, 4]); } /// Test whether .as_slices() behaves as expected. #[test] fn as_slices() { let mut buffer: HistoryBuffer = HistoryBuffer::new(); let mut extend_then_assert = |extend: &[u8], assert: (&[u8], &[u8])| { buffer.extend(extend); assert_eq!(buffer.as_slices(), assert); }; extend_then_assert(b"a", (b"a", b"")); extend_then_assert(b"bcd", (b"abcd", b"")); extend_then_assert(b"efg", (b"d", b"efg")); extend_then_assert(b"h", (b"efgh", b"")); extend_then_assert(b"123456", (b"34", b"56")); } /// Test whether .as_slices() and .oldest_ordered() produce elements in the same order. #[test] fn as_slices_equals_ordered() { let mut buffer: HistoryBuffer = HistoryBuffer::new(); for n in 0..20 { buffer.write(n); let (head, tail) = buffer.as_slices(); assert_eq_iter( [head, tail].iter().copied().flatten(), buffer.oldest_ordered(), ) } } #[test] fn ordered() { // test on an empty buffer let buffer: HistoryBuffer = HistoryBuffer::new(); let mut iter = buffer.oldest_ordered(); assert_eq!(iter.next(), None); assert_eq!(iter.next(), None); // test on a un-filled buffer let mut buffer: HistoryBuffer = HistoryBuffer::new(); buffer.extend([1, 2, 3]); assert_eq!(buffer.len(), 3); assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3]); // test on a filled buffer let mut buffer: HistoryBuffer = HistoryBuffer::new(); buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]); assert_eq!(buffer.len(), 6); assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3, 4, 5, 6]); // comprehensive test all cases for n in 0..50 { const N: usize = 7; let mut buffer: HistoryBuffer = HistoryBuffer::new(); buffer.extend(0..n); assert_eq_iter( buffer.oldest_ordered().copied(), n.saturating_sub(N as u8)..n, ); } } /// Compares two iterators item by item, making sure they stop at the same time. 
fn assert_eq_iter( a: impl IntoIterator, b: impl IntoIterator, ) { let mut a = a.into_iter(); let mut b = b.into_iter(); let mut i = 0; loop { let a_item = a.next(); let b_item = b.next(); assert_eq!(a_item, b_item, "{}", i); i += 1; if b_item.is_none() { break; } } } #[test] fn partial_eq() { let mut x: HistoryBuffer = HistoryBuffer::new(); let mut y: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x, y); x.write(1); assert_ne!(x, y); y.write(1); assert_eq!(x, y); for _ in 0..4 { x.write(2); assert_ne!(x, y); for i in 0..5 { x.write(i); y.write(i); } assert_eq!( x, y, "{:?} {:?}", x.iter().collect::>(), y.iter().collect::>() ); } } } heapless-0.8.0/src/indexmap.rs000064400000000000000000001303271046102023000144050ustar 00000000000000use core::{ borrow::Borrow, fmt, hash::{BuildHasher, Hash, Hasher as _}, iter::FromIterator, mem, num::NonZeroU32, ops, slice, }; use hash32::{BuildHasherDefault, FnvHasher}; use crate::Vec; /// A [`IndexMap`] using the default FNV hasher /// /// A list of all Methods and Traits available for `FnvIndexMap` can be found in /// the [`IndexMap`] documentation. /// /// # Examples /// ``` /// use heapless::FnvIndexMap; /// /// // A hash map with a capacity of 16 key-value pairs allocated on the stack /// let mut book_reviews = FnvIndexMap::<_, _, 16>::new(); /// /// // review some books. /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap(); /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.").unwrap(); /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.").unwrap(); /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.").unwrap(); /// /// // check for a specific one. /// if !book_reviews.contains_key("Les Misérables") { /// println!("We've got {} reviews, but Les Misérables ain't one.", /// book_reviews.len()); /// } /// /// // oops, this review has a lot of spelling mistakes, let's delete it. /// book_reviews.remove("The Adventures of Sherlock Holmes"); /// /// // look up the values associated with some keys. /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; /// for book in &to_find { /// match book_reviews.get(book) { /// Some(review) => println!("{}: {}", book, review), /// None => println!("{} is unreviewed.", book) /// } /// } /// /// // iterate over everything. /// for (book, review) in &book_reviews { /// println!("{}: \"{}\"", book, review); /// } /// ``` pub type FnvIndexMap = IndexMap, N>; #[derive(Clone, Copy, Eq, PartialEq)] struct HashValue(u16); impl HashValue { fn desired_pos(&self, mask: usize) -> usize { usize::from(self.0) & mask } fn probe_distance(&self, mask: usize, current: usize) -> usize { current.wrapping_sub(self.desired_pos(mask) as usize) & mask } } #[doc(hidden)] #[derive(Clone)] pub struct Bucket { hash: HashValue, key: K, value: V, } #[doc(hidden)] #[derive(Clone, Copy, PartialEq)] pub struct Pos { // compact representation of `{ hash_value: u16, index: u16 }` // To get the most from `NonZero` we store the *value minus 1*. 
This way `None::Option` // is equivalent to the very unlikely value of `{ hash_value: 0xffff, index: 0xffff }` instead // the more likely of `{ hash_value: 0x00, index: 0x00 }` nz: NonZeroU32, } impl Pos { fn new(index: usize, hash: HashValue) -> Self { Pos { nz: unsafe { NonZeroU32::new_unchecked( ((u32::from(hash.0) << 16) + index as u32).wrapping_add(1), ) }, } } fn hash(&self) -> HashValue { HashValue((self.nz.get().wrapping_sub(1) >> 16) as u16) } fn index(&self) -> usize { self.nz.get().wrapping_sub(1) as u16 as usize } } enum Insert { Success(Inserted), Full((K, V)), } struct Inserted { index: usize, old_value: Option, } macro_rules! probe_loop { ($probe_var: ident < $len: expr, $body: expr) => { loop { if $probe_var < $len { $body $probe_var += 1; } else { $probe_var = 0; } } } } struct CoreMap { entries: Vec, N>, indices: [Option; N], } impl CoreMap { const fn new() -> Self { const INIT: Option = None; CoreMap { entries: Vec::new(), indices: [INIT; N], } } } impl CoreMap where K: Eq + Hash, { fn capacity() -> usize { N } fn mask() -> usize { Self::capacity() - 1 } fn find(&self, hash: HashValue, query: &Q) -> Option<(usize, usize)> where K: Borrow, Q: ?Sized + Eq, { let mut probe = hash.desired_pos(Self::mask()); let mut dist = 0; probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { let entry_hash = pos.hash(); // NOTE(i) we use unchecked indexing below let i = pos.index(); debug_assert!(i < self.entries.len()); if dist > entry_hash.probe_distance(Self::mask(), probe) { // give up when probe distance is too long return None; } else if entry_hash == hash && unsafe { self.entries.get_unchecked(i).key.borrow() == query } { return Some((probe, i)); } } else { return None; } dist += 1; }); } fn insert(&mut self, hash: HashValue, key: K, value: V) -> Insert { let mut probe = hash.desired_pos(Self::mask()); let mut dist = 0; probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if let Some(pos) = *pos { let entry_hash = pos.hash(); // NOTE(i) we use unchecked indexing below let i = pos.index(); debug_assert!(i < self.entries.len()); let their_dist = entry_hash.probe_distance(Self::mask(), probe); if their_dist < dist { if self.entries.is_full() { return Insert::Full((key, value)); } // robin hood: steal the spot if it's better for us let index = self.entries.len(); unsafe { self.entries.push_unchecked(Bucket { hash, key, value }) }; Self::insert_phase_2(&mut self.indices, probe, Pos::new(index, hash)); return Insert::Success(Inserted { index, old_value: None, }); } else if entry_hash == hash && unsafe { self.entries.get_unchecked(i).key == key } { return Insert::Success(Inserted { index: i, old_value: Some(mem::replace( unsafe { &mut self.entries.get_unchecked_mut(i).value }, value, )), }); } } else { if self.entries.is_full() { return Insert::Full((key, value)); } // empty bucket, insert here let index = self.entries.len(); *pos = Some(Pos::new(index, hash)); unsafe { self.entries.push_unchecked(Bucket { hash, key, value }) }; return Insert::Success(Inserted { index, old_value: None, }); } dist += 1; }); } // phase 2 is post-insert where we forward-shift `Pos` in the indices. 
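// Starting at `probe`, the displaced `Pos` is swapped forward through occupied slots
// until a free slot is found; the index of that final slot is returned.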
fn insert_phase_2(indices: &mut [Option; N], mut probe: usize, mut old_pos: Pos) -> usize { probe_loop!(probe < indices.len(), { let pos = unsafe { indices.get_unchecked_mut(probe) }; let mut is_none = true; // work around lack of NLL if let Some(pos) = pos.as_mut() { old_pos = mem::replace(pos, old_pos); is_none = false; } if is_none { *pos = Some(old_pos); return probe; } }); } fn remove_found(&mut self, probe: usize, found: usize) -> (K, V) { // index `probe` and entry `found` is to be removed // use swap_remove, but then we need to update the index that points // to the other entry that has to move self.indices[probe] = None; let entry = unsafe { self.entries.swap_remove_unchecked(found) }; // correct index that points to the entry that had to swap places if let Some(entry) = self.entries.get(found) { // was not last element // examine new element in `found` and find it in indices let mut probe = entry.hash.desired_pos(Self::mask()); probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { if pos.index() >= self.entries.len() { // found it self.indices[probe] = Some(Pos::new(found, entry.hash)); break; } } }); } self.backward_shift_after_removal(probe); (entry.key, entry.value) } fn retain_in_order(&mut self, mut keep: F) where F: FnMut(&mut K, &mut V) -> bool, { const INIT: Option = None; self.entries .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); if self.entries.len() < self.indices.len() { for index in self.indices.iter_mut() { *index = INIT; } for (index, entry) in self.entries.iter().enumerate() { let mut probe = entry.hash.desired_pos(Self::mask()); let mut dist = 0; probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if let Some(pos) = *pos { let entry_hash = pos.hash(); // robin hood: steal the spot if it's better for us let their_dist = entry_hash.probe_distance(Self::mask(), probe); if their_dist < dist { Self::insert_phase_2( &mut self.indices, probe, Pos::new(index, entry.hash), ); break; } } else { *pos = Some(Pos::new(index, entry.hash)); break; } dist += 1; }); } } } fn backward_shift_after_removal(&mut self, probe_at_remove: usize) { // backward shift deletion in self.indices // after probe, shift all non-ideally placed indices backward let mut last_probe = probe_at_remove; let mut probe = probe_at_remove + 1; probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { let entry_hash = pos.hash(); if entry_hash.probe_distance(Self::mask(), probe) > 0 { unsafe { *self.indices.get_unchecked_mut(last_probe) = self.indices[probe] } self.indices[probe] = None; } else { break; } } else { break; } last_probe = probe; }); } } impl Clone for CoreMap where K: Clone, V: Clone, { fn clone(&self) -> Self { Self { entries: self.entries.clone(), indices: self.indices.clone(), } } } /// A view into an entry in the map pub enum Entry<'a, K, V, const N: usize> { /// The entry corresponding to the key `K` exists in the map Occupied(OccupiedEntry<'a, K, V, N>), /// The entry corresponding to the key `K` does not exist in the map Vacant(VacantEntry<'a, K, V, N>), } /// An occupied entry which can be manipulated pub struct OccupiedEntry<'a, K, V, const N: usize> { key: K, probe: usize, pos: usize, core: &'a mut CoreMap, } impl<'a, K, V, const N: usize> OccupiedEntry<'a, K, V, N> where K: Eq + Hash, { /// Gets a reference to the key that this entity corresponds to pub fn key(&self) -> &K { &self.key } /// Removes this entry from the map and yields its corresponding key and value pub fn remove_entry(self) 
-> (K, V) { self.core.remove_found(self.probe, self.pos) } /// Gets a reference to the value associated with this entry pub fn get(&self) -> &V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { &self.core.entries.get_unchecked(self.pos).value } } /// Gets a mutable reference to the value associated with this entry pub fn get_mut(&mut self) -> &mut V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { &mut self.core.entries.get_unchecked_mut(self.pos).value } } /// Consumes this entry and yields a reference to the underlying value pub fn into_mut(self) -> &'a mut V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { &mut self.core.entries.get_unchecked_mut(self.pos).value } } /// Overwrites the underlying map's value with this entry's value pub fn insert(self, value: V) -> V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { mem::replace( &mut self.core.entries.get_unchecked_mut(self.pos).value, value, ) } } /// Removes this entry from the map and yields its value pub fn remove(self) -> V { self.remove_entry().1 } } /// A view into an empty slot in the underlying map pub struct VacantEntry<'a, K, V, const N: usize> { key: K, hash_val: HashValue, core: &'a mut CoreMap, } impl<'a, K, V, const N: usize> VacantEntry<'a, K, V, N> where K: Eq + Hash, { /// Get the key associated with this entry pub fn key(&self) -> &K { &self.key } /// Consumes this entry to yield to key associated with it pub fn into_key(self) -> K { self.key } /// Inserts this entry into to underlying map, yields a mutable reference to the inserted value. /// If the map is at capacity the value is returned instead. pub fn insert(self, value: V) -> Result<&'a mut V, V> { if self.core.entries.is_full() { Err(value) } else { match self.core.insert(self.hash_val, self.key, value) { Insert::Success(inserted) => { unsafe { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. Ok(&mut (*self.core.entries.as_mut_ptr().add(inserted.index)).value) } } Insert::Full((_, v)) => Err(v), } } } } /// Fixed capacity [`IndexMap`](https://docs.rs/indexmap/2/indexmap/map/struct.IndexMap.html) /// /// Note that you cannot use `IndexMap` directly, since it is generic around the hashing algorithm /// in use. Pick a concrete instantiation like [`FnvIndexMap`] instead /// or create your own. /// /// Note that the capacity of the `IndexMap` must be a power of 2. /// /// # Examples /// /// Since `IndexMap` cannot be used directly, we're using its `FnvIndexMap` instantiation /// for this example. /// /// ``` /// use heapless::FnvIndexMap; /// /// // A hash map with a capacity of 16 key-value pairs allocated on the stack /// let mut book_reviews = FnvIndexMap::<_, _, 16>::new(); /// /// // review some books. /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap(); /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.").unwrap(); /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.").unwrap(); /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.").unwrap(); /// /// // check for a specific one. 
/// if !book_reviews.contains_key("Les Misérables") { /// println!("We've got {} reviews, but Les Misérables ain't one.", /// book_reviews.len()); /// } /// /// // oops, this review has a lot of spelling mistakes, let's delete it. /// book_reviews.remove("The Adventures of Sherlock Holmes"); /// /// // look up the values associated with some keys. /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; /// for book in &to_find { /// match book_reviews.get(book) { /// Some(review) => println!("{}: {}", book, review), /// None => println!("{} is unreviewed.", book) /// } /// } /// /// // iterate over everything. /// for (book, review) in &book_reviews { /// println!("{}: \"{}\"", book, review); /// } /// ``` pub struct IndexMap { core: CoreMap, build_hasher: S, } impl IndexMap, N> { /// Creates an empty `IndexMap`. pub const fn new() -> Self { // Const assert crate::sealed::greater_than_1::(); crate::sealed::power_of_two::(); IndexMap { build_hasher: BuildHasherDefault::new(), core: CoreMap::new(), } } } impl IndexMap { /// Returns the number of elements the map can hold pub fn capacity(&self) -> usize { N } /// Return an iterator over the keys of the map, in insertion order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for key in map.keys() { /// println!("{}", key); /// } /// ``` pub fn keys(&self) -> Keys<'_, K, V> { Keys { iter: self.core.entries.iter(), } } /// Return an iterator over the values of the map, in insertion order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values(&self) -> Values<'_, K, V> { Values { iter: self.core.entries.iter(), } } /// Return an iterator over mutable references to the the values of the map, in insertion order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values_mut() { /// *val += 10; /// } /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { ValuesMut { iter: self.core.entries.iter_mut(), } } /// Return an iterator over the key-value pairs of the map, in insertion order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for (key, val) in map.iter() { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter(&self) -> Iter<'_, K, V> { Iter { iter: self.core.entries.iter(), } } /// Return an iterator over the key-value pairs of the map, in insertion order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for (_, val) in map.iter_mut() { /// *val = 2; /// } /// /// for (key, val) in &map { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut { iter: self.core.entries.iter_mut(), } } /// Get the first key-value pair /// /// Computes in 
**O(1)** time pub fn first(&self) -> Option<(&K, &V)> { self.core .entries .first() .map(|bucket| (&bucket.key, &bucket.value)) } /// Get the first key-value pair, with mutable access to the value /// /// Computes in **O(1)** time pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { self.core .entries .first_mut() .map(|bucket| (&bucket.key, &mut bucket.value)) } /// Get the last key-value pair /// /// Computes in **O(1)** time pub fn last(&self) -> Option<(&K, &V)> { self.core .entries .last() .map(|bucket| (&bucket.key, &bucket.value)) } /// Get the last key-value pair, with mutable access to the value /// /// Computes in **O(1)** time pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { self.core .entries .last_mut() .map(|bucket| (&bucket.key, &mut bucket.value)) } /// Return the number of key-value pairs in the map. /// /// Computes in **O(1)** time. /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a").unwrap(); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { self.core.entries.len() } /// Returns true if the map contains no elements. /// /// Computes in **O(1)** time. /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// assert!(a.is_empty()); /// a.insert(1, "a"); /// assert!(!a.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } /// Remove all key-value pairs in the map, while preserving its capacity. /// /// Computes in **O(n)** time. /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// a.insert(1, "a"); /// a.clear(); /// assert!(a.is_empty()); /// ``` pub fn clear(&mut self) { self.core.entries.clear(); for pos in self.core.indices.iter_mut() { *pos = None; } } } impl IndexMap where K: Eq + Hash, S: BuildHasher, { /* Public API */ /// Returns an entry for the corresponding key /// ``` /// use heapless::FnvIndexMap; /// use heapless::Entry; /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// if let Entry::Vacant(v) = map.entry("a") { /// v.insert(1).unwrap(); /// } /// if let Entry::Occupied(mut o) = map.entry("a") { /// println!("found {}", *o.get()); // Prints 1 /// o.insert(2); /// } /// // Prints 2 /// println!("val: {}", *map.get("a").unwrap()); /// ``` pub fn entry(&mut self, key: K) -> Entry<'_, K, V, N> { let hash_val = hash_with(&key, &self.build_hasher); if let Some((probe, pos)) = self.core.find(hash_val, &key) { Entry::Occupied(OccupiedEntry { key, probe, pos, core: &mut self.core, }) } else { Entry::Vacant(VacantEntry { key, hash_val, core: &mut self.core, }) } } /// Returns a reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form *must* match those for the key type. /// /// Computes in **O(1)** time (average). /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); /// ``` pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: ?Sized + Hash + Eq, { self.find(key) .map(|(_, found)| unsafe { &self.core.entries.get_unchecked(found).value }) } /// Returns true if the map contains a value for the specified key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form *must* match those for the key type. /// /// Computes in **O(1)** time (average). 
/// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` pub fn contains_key(&self, key: &Q) -> bool where K: Borrow, Q: ?Sized + Eq + Hash, { self.find(key).is_some() } /// Returns a mutable reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form *must* match those for the key type. /// /// Computes in **O(1)** time (average). /// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; /// } /// assert_eq!(map[&1], "b"); /// ``` pub fn get_mut<'v, Q>(&'v mut self, key: &Q) -> Option<&'v mut V> where K: Borrow, Q: ?Sized + Hash + Eq, { if let Some((_, found)) = self.find(key) { Some(unsafe { &mut self.core.entries.get_unchecked_mut(found).value }) } else { None } } /// Inserts a key-value pair into the map. /// /// If an equivalent key already exists in the map: the key remains and retains in its place in /// the order, its corresponding value is updated with `value` and the older value is returned /// inside `Some(_)`. /// /// If no equivalent key existed in the map: the new key-value pair is inserted, last in order, /// and `None` is returned. /// /// Computes in **O(1)** time (average). /// /// See also entry if you you want to insert or modify or if you need to get the index of the /// corresponding key-value pair. /// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// assert_eq!(map.insert(37, "a"), Ok(None)); /// assert_eq!(map.is_empty(), false); /// /// map.insert(37, "b"); /// assert_eq!(map.insert(37, "c"), Ok(Some("b"))); /// assert_eq!(map[&37], "c"); /// ``` pub fn insert(&mut self, key: K, value: V) -> Result, (K, V)> { let hash = hash_with(&key, &self.build_hasher); match self.core.insert(hash, key, value) { Insert::Success(inserted) => Ok(inserted.old_value), Insert::Full((k, v)) => Err((k, v)), } } /// Same as [`swap_remove`](Self::swap_remove) /// /// Computes in **O(1)** time (average). /// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// ``` pub fn remove(&mut self, key: &Q) -> Option where K: Borrow, Q: ?Sized + Hash + Eq, { self.swap_remove(key) } /// Remove the key-value pair equivalent to `key` and return its value. /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the last element of the map /// and popping it off. **This perturbs the postion of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). pub fn swap_remove(&mut self, key: &Q) -> Option where K: Borrow, Q: ?Sized + Hash + Eq, { self.find(key) .map(|(probe, found)| self.core.remove_found(probe, found).1) } /// Retains only the elements specified by the predicate. /// /// In other words, remove all pairs `(k, v)` for which `f(&k, &mut v)` returns `false`. 
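/// A minimal sketch of `retain` (the concrete keys and values are arbitrary):
///
/// ```
/// use heapless::FnvIndexMap;
///
/// let mut map = FnvIndexMap::<_, _, 8>::new();
/// map.insert(1, 10).unwrap();
/// map.insert(2, 20).unwrap();
/// map.insert(3, 30).unwrap();
///
/// // keep only the pairs with even keys
/// map.retain(|&k, _| k % 2 == 0);
///
/// assert_eq!(map.len(), 1);
/// assert_eq!(map.get(&2), Some(&20));
/// ```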
pub fn retain(&mut self, mut f: F) where F: FnMut(&K, &mut V) -> bool, { self.core.retain_in_order(move |k, v| f(k, v)); } /* Private API */ /// Return probe (indices) and position (entries) fn find(&self, key: &Q) -> Option<(usize, usize)> where K: Borrow, Q: ?Sized + Hash + Eq, { if self.len() == 0 { return None; } let h = hash_with(key, &self.build_hasher); self.core.find(h, key) } } impl<'a, K, Q, V, S, const N: usize> ops::Index<&'a Q> for IndexMap where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, { type Output = V; fn index(&self, key: &Q) -> &V { self.get(key).expect("key not found") } } impl<'a, K, Q, V, S, const N: usize> ops::IndexMut<&'a Q> for IndexMap where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, { fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("key not found") } } impl Clone for IndexMap where K: Clone, V: Clone, S: Clone, { fn clone(&self) -> Self { Self { core: self.core.clone(), build_hasher: self.build_hasher.clone(), } } } impl fmt::Debug for IndexMap where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl Default for IndexMap where S: Default, { fn default() -> Self { // Const assert crate::sealed::greater_than_1::(); crate::sealed::power_of_two::(); IndexMap { build_hasher: <_>::default(), core: CoreMap::new(), } } } impl PartialEq> for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, S2: BuildHasher, { fn eq(&self, other: &IndexMap) -> bool { self.len() == other.len() && self .iter() .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, { } impl Extend<(K, V)> for IndexMap where K: Eq + Hash, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { for (k, v) in iterable { self.insert(k, v).ok().unwrap(); } } } impl<'a, K, V, S, const N: usize> Extend<(&'a K, &'a V)> for IndexMap where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))) } } impl FromIterator<(K, V)> for IndexMap where K: Eq + Hash, S: BuildHasher + Default, { fn from_iter(iterable: I) -> Self where I: IntoIterator, { let mut map = IndexMap::default(); map.extend(iterable); map } } #[derive(Clone)] pub struct IntoIter { entries: Vec, N>, } impl Iterator for IntoIter { type Item = (K, V); fn next(&mut self) -> Option { self.entries.pop().map(|bucket| (bucket.key, bucket.value)) } } impl IntoIterator for IndexMap { type Item = (K, V); type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { entries: self.core.entries, } } } impl<'a, K, V, S, const N: usize> IntoIterator for &'a IndexMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, K, V, S, const N: usize> IntoIterator for &'a mut IndexMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } /// An iterator over the items of a [`IndexMap`]. /// /// This `struct` is created by the [`iter`](IndexMap::iter) method on [`IndexMap`]. See its /// documentation for more. 
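/// A minimal sketch of iterating (the map contents are arbitrary):
///
/// ```
/// use heapless::FnvIndexMap;
///
/// let mut map = FnvIndexMap::<_, _, 4>::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
///
/// let mut iter = map.iter();
/// // items are yielded as `(&K, &V)` pairs, in insertion order
/// assert_eq!(iter.next(), Some((&"a", &1)));
/// assert_eq!(iter.next(), Some((&"b", &2)));
/// assert_eq!(iter.next(), None);
/// ```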
pub struct Iter<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.iter.next().map(|bucket| (&bucket.key, &bucket.value)) } } impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Self { Self { iter: self.iter.clone(), } } } /// A mutable iterator over the items of a [`IndexMap`]. /// /// This `struct` is created by the [`iter_mut`](IndexMap::iter_mut) method on [`IndexMap`]. See its /// documentation for more. pub struct IterMut<'a, K, V> { iter: slice::IterMut<'a, Bucket>, } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option { self.iter .next() .map(|bucket| (&bucket.key, &mut bucket.value)) } } /// An iterator over the keys of a [`IndexMap`]. /// /// This `struct` is created by the [`keys`](IndexMap::keys) method on [`IndexMap`]. See its /// documentation for more. pub struct Keys<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; fn next(&mut self) -> Option { self.iter.next().map(|bucket| &bucket.key) } } /// An iterator over the values of a [`IndexMap`]. /// /// This `struct` is created by the [`values`](IndexMap::values) method on [`IndexMap`]. See its /// documentation for more. pub struct Values<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; fn next(&mut self) -> Option { self.iter.next().map(|bucket| &bucket.value) } } /// A mutable iterator over the values of a [`IndexMap`]. /// /// This `struct` is created by the [`values_mut`](IndexMap::values_mut) method on [`IndexMap`]. See its /// documentation for more. pub struct ValuesMut<'a, K, V> { iter: slice::IterMut<'a, Bucket>, } impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { type Item = &'a mut V; fn next(&mut self) -> Option { self.iter.next().map(|bucket| &mut bucket.value) } } fn hash_with(key: &K, build_hasher: &S) -> HashValue where K: ?Sized + Hash, S: BuildHasher, { let mut h = build_hasher.build_hasher(); key.hash(&mut h); HashValue(h.finish() as u16) } #[cfg(test)] mod tests { use crate::{indexmap::Entry, FnvIndexMap}; use core::mem; #[test] fn size() { const CAP: usize = 4; assert_eq!( mem::size_of::>(), CAP * mem::size_of::() + // indices CAP * (mem::size_of::() + // key mem::size_of::() + // value mem::size_of::() // hash ) + // buckets mem::size_of::() // entries.length ) } #[test] fn partial_eq() { { let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); b.insert("k1", "v1").unwrap(); assert!(a == b); b.insert("k2", "v2").unwrap(); assert!(a != b); } { let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); b.insert("k2", "v2").unwrap(); b.insert("k1", "v1").unwrap(); assert!(a == b); } } #[test] fn into_iter() { let mut src: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); src.insert("k1", "v1").unwrap(); src.insert("k2", "v2").unwrap(); src.insert("k3", "v3").unwrap(); src.insert("k4", "v4").unwrap(); let clone = src.clone(); for (k, v) in clone.into_iter() { assert_eq!(v, *src.get(k).unwrap()); } } #[test] fn insert_replaces_on_full_map() { let mut a: FnvIndexMap<_, _, 2> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); a.insert("k1", "v2").unwrap(); assert_eq!(a.get("k1"), a.get("k2")); } // tests that 
use this constant take too long to run under miri, specially on CI, with a map of // this size so make the map smaller when using miri #[cfg(not(miri))] const MAP_SLOTS: usize = 4096; #[cfg(miri)] const MAP_SLOTS: usize = 64; fn almost_filled_map() -> FnvIndexMap { let mut almost_filled = FnvIndexMap::new(); for i in 1..MAP_SLOTS { almost_filled.insert(i, i).unwrap(); } almost_filled } #[test] fn entry_find() { let key = 0; let value = 0; let mut src = almost_filled_map(); let entry = src.entry(key); match entry { Entry::Occupied(_) => { panic!("Found entry without inserting"); } Entry::Vacant(v) => { assert_eq!(&key, v.key()); assert_eq!(key, v.into_key()); } } src.insert(key, value).unwrap(); let entry = src.entry(key); match entry { Entry::Occupied(mut o) => { assert_eq!(&key, o.key()); assert_eq!(&value, o.get()); assert_eq!(&value, o.get_mut()); assert_eq!(&value, o.into_mut()); } Entry::Vacant(_) => { panic!("Entry not found"); } } } #[test] fn entry_vacant_insert() { let key = 0; let value = 0; let mut src = almost_filled_map(); assert_eq!(MAP_SLOTS - 1, src.len()); let entry = src.entry(key); match entry { Entry::Occupied(_) => { panic!("Entry found when empty"); } Entry::Vacant(v) => { assert_eq!(value, *v.insert(value).unwrap()); } }; assert_eq!(value, *src.get(&key).unwrap()) } #[test] fn entry_occupied_insert() { let key = 0; let value = 0; let value2 = 5; let mut src = almost_filled_map(); assert_eq!(MAP_SLOTS - 1, src.len()); src.insert(key, value).unwrap(); let entry = src.entry(key); match entry { Entry::Occupied(o) => { assert_eq!(value, o.insert(value2)); } Entry::Vacant(_) => { panic!("Entry not found"); } }; assert_eq!(value2, *src.get(&key).unwrap()) } #[test] fn entry_remove_entry() { let key = 0; let value = 0; let mut src = almost_filled_map(); src.insert(key, value).unwrap(); assert_eq!(MAP_SLOTS, src.len()); let entry = src.entry(key); match entry { Entry::Occupied(o) => { assert_eq!((key, value), o.remove_entry()); } Entry::Vacant(_) => { panic!("Entry not found") } }; assert_eq!(MAP_SLOTS - 1, src.len()); } #[test] fn entry_remove() { let key = 0; let value = 0; let mut src = almost_filled_map(); src.insert(key, value).unwrap(); assert_eq!(MAP_SLOTS, src.len()); let entry = src.entry(key); match entry { Entry::Occupied(o) => { assert_eq!(value, o.remove()); } Entry::Vacant(_) => { panic!("Entry not found"); } }; assert_eq!(MAP_SLOTS - 1, src.len()); } #[test] fn retain() { let mut none = almost_filled_map(); none.retain(|_, _| false); assert!(none.is_empty()); let mut all = almost_filled_map(); all.retain(|_, _| true); assert_eq!(all.len(), MAP_SLOTS - 1); let mut even = almost_filled_map(); even.retain(|_, &mut v| v % 2 == 0); assert_eq!(even.len(), (MAP_SLOTS - 1) / 2); for &v in even.values() { assert_eq!(v % 2, 0); } let mut odd = almost_filled_map(); odd.retain(|_, &mut v| v % 2 != 0); assert_eq!(odd.len(), MAP_SLOTS / 2); for &v in odd.values() { assert_ne!(v % 2, 0); } assert_eq!(odd.insert(2, 2), Ok(None)); assert_eq!(odd.len(), (MAP_SLOTS / 2) + 1); } #[test] fn entry_roll_through_all() { let mut src: FnvIndexMap = FnvIndexMap::new(); for i in 0..MAP_SLOTS { match src.entry(i) { Entry::Occupied(_) => { panic!("Entry found before insert"); } Entry::Vacant(v) => { assert_eq!(i, *v.insert(i).unwrap()); } } } let add_mod = 99; for i in 0..MAP_SLOTS { match src.entry(i) { Entry::Occupied(o) => { assert_eq!(i, o.insert(i + add_mod)); } Entry::Vacant(_) => { panic!("Entry not found after insert"); } } } for i in 0..MAP_SLOTS { match src.entry(i) { 
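// third pass: every key is now occupied (holding `i + add_mod` from the previous pass);
// remove each entry through the `Entry` API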
Entry::Occupied(o) => { assert_eq!((i, i + add_mod), o.remove_entry()); } Entry::Vacant(_) => { panic!("Entry not found after insert"); } } } for i in 0..MAP_SLOTS { assert!(matches!(src.entry(i), Entry::Vacant(_))); } assert!(src.is_empty()); } #[test] fn first_last() { let mut map = FnvIndexMap::<_, _, 4>::new(); assert_eq!(None, map.first()); assert_eq!(None, map.last()); map.insert(0, 0).unwrap(); map.insert(2, 2).unwrap(); assert_eq!(Some((&0, &0)), map.first()); assert_eq!(Some((&2, &2)), map.last()); map.insert(1, 1).unwrap(); assert_eq!(Some((&1, &1)), map.last()); *map.first_mut().unwrap().1 += 1; *map.last_mut().unwrap().1 += 1; assert_eq!(Some((&0, &1)), map.first()); assert_eq!(Some((&1, &2)), map.last()); } #[test] fn keys_iter() { let map = almost_filled_map(); for (&key, i) in map.keys().zip(1..MAP_SLOTS) { assert_eq!(key, i); } } #[test] fn values_iter() { let map = almost_filled_map(); for (&value, i) in map.values().zip(1..MAP_SLOTS) { assert_eq!(value, i); } } #[test] fn values_mut_iter() { let mut map = almost_filled_map(); for value in map.values_mut() { *value += 1; } for (&value, i) in map.values().zip(1..MAP_SLOTS) { assert_eq!(value, i + 1); } } } heapless-0.8.0/src/indexset.rs000064400000000000000000000416421046102023000144240ustar 00000000000000use crate::indexmap::{self, IndexMap}; use core::{ borrow::Borrow, fmt, hash::{BuildHasher, Hash}, iter::FromIterator, }; use hash32::{BuildHasherDefault, FnvHasher}; /// A [`IndexSet`] using the /// default FNV hasher. /// A list of all Methods and Traits available for `FnvIndexSet` can be found in /// the [`IndexSet`] documentation. /// /// # Examples /// ``` /// use heapless::FnvIndexSet; /// /// // A hash set with a capacity of 16 elements allocated on the stack /// let mut books = FnvIndexSet::<_, 16>::new(); /// /// // Add some books. /// books.insert("A Dance With Dragons").unwrap(); /// books.insert("To Kill a Mockingbird").unwrap(); /// books.insert("The Odyssey").unwrap(); /// books.insert("The Great Gatsby").unwrap(); /// /// // Check for a specific one. /// if !books.contains("The Winds of Winter") { /// println!("We have {} books, but The Winds of Winter ain't one.", /// books.len()); /// } /// /// // Remove a book. /// books.remove("The Odyssey"); /// /// // Iterate over everything. /// for book in &books { /// println!("{}", book); /// } /// ``` pub type FnvIndexSet = IndexSet, N>; /// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/2/indexmap/set/struct.IndexSet.html). /// /// Note that you cannot use `IndexSet` directly, since it is generic around the hashing algorithm /// in use. Pick a concrete instantiation like [`FnvIndexSet`] instead /// or create your own. /// /// Note that the capacity of the `IndexSet` must be a power of 2. /// /// # Examples /// Since `IndexSet` cannot be used directly, we're using its `FnvIndexSet` instantiation /// for this example. /// /// ``` /// use heapless::FnvIndexSet; /// /// // A hash set with a capacity of 16 elements allocated on the stack /// let mut books = FnvIndexSet::<_, 16>::new(); /// /// // Add some books. /// books.insert("A Dance With Dragons").unwrap(); /// books.insert("To Kill a Mockingbird").unwrap(); /// books.insert("The Odyssey").unwrap(); /// books.insert("The Great Gatsby").unwrap(); /// /// // Check for a specific one. /// if !books.contains("The Winds of Winter") { /// println!("We have {} books, but The Winds of Winter ain't one.", /// books.len()); /// } /// /// // Remove a book. 
/// books.remove("The Odyssey"); /// /// // Iterate over everything. /// for book in &books { /// println!("{}", book); /// } /// ``` pub struct IndexSet { map: IndexMap, } impl IndexSet, N> { /// Creates an empty `IndexSet` pub const fn new() -> Self { IndexSet { map: IndexMap::new(), } } } impl IndexSet { /// Returns the number of elements the set can hold /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let set = FnvIndexSet::::new(); /// assert_eq!(set.capacity(), 16); /// ``` pub fn capacity(&self) -> usize { self.map.capacity() } /// Return an iterator over the values of the set, in insertion order /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut set = FnvIndexSet::<_, 16>::new(); /// set.insert("a").unwrap(); /// set.insert("b").unwrap(); /// /// // Will print in insertion order: a, b /// for x in set.iter() { /// println!("{}", x); /// } /// ``` pub fn iter(&self) -> Iter<'_, T> { Iter { iter: self.map.iter(), } } /// Get the first value /// /// Computes in **O(1)** time pub fn first(&self) -> Option<&T> { self.map.first().map(|(k, _v)| k) } /// Get the last value /// /// Computes in **O(1)** time pub fn last(&self) -> Option<&T> { self.map.last().map(|(k, _v)| k) } /// Returns the number of elements in the set. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// assert_eq!(v.len(), 0); /// v.insert(1).unwrap(); /// assert_eq!(v.len(), 1); /// ``` pub fn len(&self) -> usize { self.map.len() } /// Returns `true` if the set contains no elements. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// assert!(v.is_empty()); /// v.insert(1).unwrap(); /// assert!(!v.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.map.is_empty() } /// Clears the set, removing all values. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// v.insert(1).unwrap(); /// v.clear(); /// assert!(v.is_empty()); /// ``` pub fn clear(&mut self) { self.map.clear() } } impl IndexSet where T: Eq + Hash, S: BuildHasher, { /// Visits the values representing the difference, i.e. the values that are in `self` but not in /// `other`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Can be seen as `a - b`. /// for x in a.difference(&b) { /// println!("{}", x); // Print 1 /// } /// /// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect(); /// assert_eq!(diff, [1].iter().collect::>()); /// /// // Note that difference is not symmetric, /// // and `b - a` means something else: /// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect(); /// assert_eq!(diff, [4].iter().collect::>()); /// ``` pub fn difference<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> Difference<'a, T, S2, N2> where S2: BuildHasher, { Difference { iter: self.iter(), other, } } /// Visits the values representing the symmetric difference, i.e. the values that are in `self` /// or in `other` but not in both. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 1, 4 in that order. 
/// for x in a.symmetric_difference(&b) { /// println!("{}", x); /// } /// /// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect(); /// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect(); /// /// assert_eq!(diff1, diff2); /// assert_eq!(diff1, [1, 4].iter().collect::>()); /// ``` pub fn symmetric_difference<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> impl Iterator where S2: BuildHasher, { self.difference(other).chain(other.difference(self)) } /// Visits the values representing the intersection, i.e. the values that are both in `self` and /// `other`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 2, 3 in that order. /// for x in a.intersection(&b) { /// println!("{}", x); /// } /// /// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect(); /// assert_eq!(intersection, [2, 3].iter().collect::>()); /// ``` pub fn intersection<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> Intersection<'a, T, S2, N2> where S2: BuildHasher, { Intersection { iter: self.iter(), other, } } /// Visits the values representing the union, i.e. all the values in `self` or `other`, without /// duplicates. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 1, 2, 3, 4 in that order. /// for x in a.union(&b) { /// println!("{}", x); /// } /// /// let union: FnvIndexSet<_, 16> = a.union(&b).collect(); /// assert_eq!(union, [1, 2, 3, 4].iter().collect::>()); /// ``` pub fn union<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> impl Iterator where S2: BuildHasher, { self.iter().chain(other.difference(self)) } /// Returns `true` if the set contains a value. /// /// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the /// borrowed form must match those for the value type. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// assert_eq!(set.contains(&1), true); /// assert_eq!(set.contains(&4), false); /// ``` pub fn contains(&self, value: &Q) -> bool where T: Borrow, Q: ?Sized + Eq + Hash, { self.map.contains_key(value) } /// Returns `true` if `self` has no elements in common with `other`. This is equivalent to /// checking for an empty intersection. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(a.is_disjoint(&b), true); /// b.insert(4).unwrap(); /// assert_eq!(a.is_disjoint(&b), true); /// b.insert(1).unwrap(); /// assert_eq!(a.is_disjoint(&b), false); /// ``` pub fn is_disjoint(&self, other: &IndexSet) -> bool where S2: BuildHasher, { self.iter().all(|v| !other.contains(v)) } /// Returns `true` if the set is a subset of another, i.e. `other` contains at least all the /// values in `self`. 
/// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.is_subset(&sup), true); /// set.insert(2).unwrap(); /// assert_eq!(set.is_subset(&sup), true); /// set.insert(4).unwrap(); /// assert_eq!(set.is_subset(&sup), false); /// ``` pub fn is_subset(&self, other: &IndexSet) -> bool where S2: BuildHasher, { self.iter().all(|v| other.contains(v)) } // Returns `true` if the set is a superset of another, i.e. `self` contains at least all the // values in `other`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect(); /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.is_superset(&sub), false); /// /// set.insert(0).unwrap(); /// set.insert(1).unwrap(); /// assert_eq!(set.is_superset(&sub), false); /// /// set.insert(2).unwrap(); /// assert_eq!(set.is_superset(&sub), true); /// ``` pub fn is_superset(&self, other: &IndexSet) -> bool where S2: BuildHasher, { other.is_subset(self) } /// Adds a value to the set. /// /// If the set did not have this value present, `true` is returned. /// /// If the set did have this value present, `false` is returned. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.insert(2).unwrap(), true); /// assert_eq!(set.insert(2).unwrap(), false); /// assert_eq!(set.len(), 1); /// ``` pub fn insert(&mut self, value: T) -> Result { self.map .insert(value, ()) .map(|old| old.is_none()) .map_err(|(k, _)| k) } /// Removes a value from the set. Returns `true` if the value was present in the set. /// /// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the /// borrowed form must match those for the value type. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// set.insert(2).unwrap(); /// assert_eq!(set.remove(&2), true); /// assert_eq!(set.remove(&2), false); /// ``` pub fn remove(&mut self, value: &Q) -> bool where T: Borrow, Q: ?Sized + Eq + Hash, { self.map.remove(value).is_some() } /// Retains only the elements specified by the predicate. /// /// In other words, remove all elements `e` for which `f(&e)` returns `false`. 
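/// A minimal sketch of `retain` (the element values are arbitrary):
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set: FnvIndexSet<_, 16> = [1, 2, 3, 4, 5, 6].iter().cloned().collect();
/// // keep only the even elements
/// set.retain(|&x| x % 2 == 0);
/// assert_eq!(set.len(), 3);
/// assert!(set.contains(&4));
/// ```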
pub fn retain(&mut self, mut f: F) where F: FnMut(&T) -> bool, { self.map.retain(move |k, _| f(k)); } } impl Clone for IndexSet where T: Clone, S: Clone, { fn clone(&self) -> Self { Self { map: self.map.clone(), } } } impl fmt::Debug for IndexSet where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } } impl Default for IndexSet where S: Default, { fn default() -> Self { IndexSet { map: <_>::default(), } } } impl PartialEq> for IndexSet where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn eq(&self, other: &IndexSet) -> bool { self.len() == other.len() && self.is_subset(other) } } impl Extend for IndexSet where T: Eq + Hash, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { self.map.extend(iterable.into_iter().map(|k| (k, ()))) } } impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet where T: 'a + Eq + Hash + Copy, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { self.extend(iterable.into_iter().cloned()) } } impl FromIterator for IndexSet where T: Eq + Hash, S: BuildHasher + Default, { fn from_iter(iter: I) -> Self where I: IntoIterator, { let mut set = IndexSet::default(); set.extend(iter); set } } impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } /// An iterator over the items of a [`IndexSet`]. /// /// This `struct` is created by the [`iter`](IndexSet::iter) method on [`IndexSet`]. See its /// documentation for more. pub struct Iter<'a, T> { iter: indexmap::Iter<'a, T, ()>, } impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; fn next(&mut self) -> Option { self.iter.next().map(|(k, _)| k) } } impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Self { Self { iter: self.iter.clone(), } } } pub struct Difference<'a, T, S, const N: usize> where S: BuildHasher, T: Eq + Hash, { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N> where S: BuildHasher, T: Eq + Hash, { type Item = &'a T; fn next(&mut self) -> Option { loop { let elt = self.iter.next()?; if !self.other.contains(elt) { return Some(elt); } } } } pub struct Intersection<'a, T, S, const N: usize> where S: BuildHasher, T: Eq + Hash, { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N> where S: BuildHasher, T: Eq + Hash, { type Item = &'a T; fn next(&mut self) -> Option { loop { let elt = self.iter.next()?; if self.other.contains(elt) { return Some(elt); } } } } heapless-0.8.0/src/lib.rs000064400000000000000000000124341046102023000133440ustar 00000000000000//! `static` friendly data structures that don't require dynamic memory allocation //! //! The core principle behind `heapless` is that its data structures are backed by a *static* memory //! allocation. For example, you can think of `heapless::Vec` as an alternative version of //! `std::Vec` with fixed capacity and that can't be re-allocated on the fly (e.g. via `push`). //! //! All `heapless` data structures store their memory allocation *inline* and specify their capacity //! via their type parameter `N`. This means that you can instantiate a `heapless` data structure on //! the stack, in a `static` variable, or even in the heap. //! //! ``` //! use heapless::Vec; // fixed capacity `std::Vec` //! //! // on the stack //! 
let mut xs: Vec = Vec::new(); // can hold up to 8 elements //! xs.push(42).unwrap(); //! assert_eq!(xs.pop(), Some(42)); //! //! // in a `static` variable //! static mut XS: Vec = Vec::new(); //! //! let xs = unsafe { &mut XS }; //! //! xs.push(42); //! assert_eq!(xs.pop(), Some(42)); //! //! // in the heap (though kind of pointless because no reallocation) //! let mut ys: Box> = Box::new(Vec::new()); //! ys.push(42).unwrap(); //! assert_eq!(ys.pop(), Some(42)); //! ``` //! //! Because they have fixed capacity `heapless` data structures don't implicitly reallocate. This //! means that operations like `heapless::Vec.push` are *truly* constant time rather than amortized //! constant time with potentially unbounded (depends on the allocator) worst case execution time //! (which is bad / unacceptable for hard real time applications). //! //! `heapless` data structures don't use a memory allocator which means no risk of an uncatchable //! Out Of Memory (OOM) condition while performing operations on them. It's certainly possible to //! run out of capacity while growing `heapless` data structures, but the API lets you handle this //! possibility by returning a `Result` on operations that may exhaust the capacity of the data //! structure. //! //! List of currently implemented data structures: //! #![cfg_attr( any(arm_llsc, target_arch = "x86"), doc = "- [`Arc`](pool::arc::Arc) -- like `std::sync::Arc` but backed by a lock-free memory pool rather than `#[global_allocator]`" )] #![cfg_attr( any(arm_llsc, target_arch = "x86"), doc = "- [`Box`](pool::boxed::Box) -- like `std::boxed::Box` but backed by a lock-free memory pool rather than `#[global_allocator]`" )] //! - [`BinaryHeap`] -- priority queue //! - [`IndexMap`] -- hash table //! - [`IndexSet`] -- hash set //! - [`LinearMap`] #![cfg_attr( any(arm_llsc, target_arch = "x86"), doc = "- [`Object`](pool::object::Object) -- objects managed by an object pool" )] //! - [`String`] //! - [`Vec`] //! - [`mpmc::Q*`](mpmc) -- multiple producer multiple consumer lock-free queue //! - [`spsc::Queue`] -- single producer single consumer lock-free queue //! //! # Optional Features //! //! The `heapless` crate provides the following optional Cargo features: //! //! - `ufmt`: Implement [`ufmt_write::uWrite`] for `String` and `Vec` //! //! [`ufmt_write::uWrite`]: https://docs.rs/ufmt-write/ //! //! # Minimum Supported Rust Version (MSRV) //! //! This crate does *not* have a Minimum Supported Rust Version (MSRV) and may make use of language //! features and API in the standard library available in the latest stable Rust version. //! //! In other words, changes in the Rust version requirement of this crate are not considered semver //! breaking change and may occur in patch version releases. 
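//!
//! As noted above, operations that can exhaust capacity return a `Result`; a minimal sketch
//! (the element type and capacity here are arbitrary):
//!
//! ```
//! use heapless::Vec;
//!
//! let mut xs: Vec<u8, 2> = Vec::new();
//! xs.push(1).unwrap();
//! xs.push(2).unwrap();
//! // the vector is full; the rejected element is handed back instead of panicking
//! assert_eq!(xs.push(3), Err(3));
//! ```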
#![cfg_attr(docsrs, feature(doc_cfg), feature(doc_auto_cfg))] #![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![deny(warnings)] pub use binary_heap::BinaryHeap; pub use deque::Deque; pub use histbuf::{HistoryBuffer, OldestOrdered}; pub use indexmap::{ Bucket, Entry, FnvIndexMap, IndexMap, Iter as IndexMapIter, IterMut as IndexMapIterMut, Keys as IndexMapKeys, OccupiedEntry, Pos, VacantEntry, Values as IndexMapValues, ValuesMut as IndexMapValuesMut, }; pub use indexset::{FnvIndexSet, IndexSet, Iter as IndexSetIter}; pub use linear_map::LinearMap; pub use string::String; pub use vec::Vec; #[macro_use] #[cfg(test)] mod test_helpers; mod deque; mod histbuf; mod indexmap; mod indexset; mod linear_map; mod string; mod vec; #[cfg(feature = "serde")] mod de; #[cfg(feature = "serde")] mod ser; pub mod binary_heap; #[cfg(feature = "defmt-03")] mod defmt; #[cfg(any( // assume we have all atomics available if we're using portable-atomic feature = "portable-atomic", // target has native atomic CAS (mpmc_large requires usize, otherwise just u8) all(feature = "mpmc_large", target_has_atomic = "ptr"), all(not(feature = "mpmc_large"), target_has_atomic = "8") ))] pub mod mpmc; #[cfg(any(arm_llsc, target_arch = "x86"))] pub mod pool; pub mod sorted_linked_list; #[cfg(any( // assume we have all atomics available if we're using portable-atomic feature = "portable-atomic", // target has native atomic CAS. Note this is too restrictive, spsc requires load/store only, not CAS. // This should be `cfg(target_has_atomic_load_store)`, but that's not stable yet. target_has_atomic = "ptr", // or the current target is in a list in build.rs of targets known to have load/store but no CAS. has_atomic_load_store ))] pub mod spsc; #[cfg(feature = "ufmt")] mod ufmt; mod sealed; heapless-0.8.0/src/linear_map.rs000064400000000000000000000321461046102023000147070ustar 00000000000000use crate::Vec; use core::{borrow::Borrow, fmt, iter::FromIterator, mem, ops, slice}; /// A fixed capacity map / dictionary that performs lookups via linear search /// /// Note that as this map doesn't use hashing so most operations are **O(N)** instead of O(1) pub struct LinearMap { pub(crate) buffer: Vec<(K, V), N>, } impl LinearMap { /// Creates an empty `LinearMap` /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// // allocate the map on the stack /// let mut map: LinearMap<&str, isize, 8> = LinearMap::new(); /// /// // allocate the map in a static variable /// static mut MAP: LinearMap<&str, isize, 8> = LinearMap::new(); /// ``` pub const fn new() -> Self { Self { buffer: Vec::new() } } } impl LinearMap where K: Eq, { /// Returns the number of elements that the map can hold /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let map: LinearMap<&str, isize, 8> = LinearMap::new(); /// assert_eq!(map.capacity(), 8); /// ``` pub fn capacity(&self) -> usize { N } /// Clears the map, removing all key-value pairs /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// map.clear(); /// assert!(map.is_empty()); /// ``` pub fn clear(&mut self) { self.buffer.clear() } /// Returns true if the map contains a value for the specified key. 
/// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` pub fn contains_key(&self, key: &K) -> bool { self.get(key).is_some() } /// Returns a reference to the value corresponding to the key /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); /// ``` pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: Eq + ?Sized, { self.iter() .find(|&(k, _)| k.borrow() == key) .map(|(_, v)| v) } /// Returns a mutable reference to the value corresponding to the key /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; /// } /// assert_eq!(map[&1], "b"); /// ``` pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> where K: Borrow, Q: Eq + ?Sized, { self.iter_mut() .find(|&(k, _)| k.borrow() == key) .map(|(_, v)| v) } /// Returns the number of elements in this map /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut a: LinearMap<_, _, 8> = LinearMap::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a").unwrap(); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { self.buffer.len() } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If the map did have this key present, the value is updated, and the old value is returned. /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// assert_eq!(map.insert(37, "a").unwrap(), None); /// assert_eq!(map.is_empty(), false); /// /// map.insert(37, "b").unwrap(); /// assert_eq!(map.insert(37, "c").unwrap(), Some("b")); /// assert_eq!(map[&37], "c"); /// ``` pub fn insert(&mut self, key: K, mut value: V) -> Result, (K, V)> { if let Some((_, v)) = self.iter_mut().find(|&(k, _)| *k == key) { mem::swap(v, &mut value); return Ok(Some(value)); } self.buffer.push((key, value))?; Ok(None) } /// Returns true if the map contains no elements /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut a: LinearMap<_, _, 8> = LinearMap::new(); /// assert!(a.is_empty()); /// a.insert(1, "a").unwrap(); /// assert!(!a.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } /// An iterator visiting all key-value pairs in arbitrary order. 
/// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for (key, val) in map.iter() { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter(&self) -> Iter<'_, K, V> { Iter { iter: self.buffer.as_slice().iter(), } } /// An iterator visiting all key-value pairs in arbitrary order, with mutable references to the /// values /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// // Update all values /// for (_, val) in map.iter_mut() { /// *val = 2; /// } /// /// for (key, val) in &map { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut { iter: self.buffer.as_mut_slice().iter_mut(), } } /// An iterator visiting all keys in arbitrary order /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for key in map.keys() { /// println!("{}", key); /// } /// ``` pub fn keys(&self) -> impl Iterator { self.iter().map(|(k, _)| k) } /// Removes a key from the map, returning the value at the key if the key was previously in the /// map /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// ``` pub fn remove(&mut self, key: &Q) -> Option where K: Borrow, Q: Eq + ?Sized, { let idx = self .keys() .enumerate() .find(|&(_, k)| k.borrow() == key) .map(|(idx, _)| idx); idx.map(|idx| self.buffer.swap_remove(idx).1) } /// An iterator visiting all values in arbitrary order /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values(&self) -> impl Iterator { self.iter().map(|(_, v)| v) } /// An iterator visiting all values mutably in arbitrary order /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values_mut() { /// *val += 10; /// } /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values_mut(&mut self) -> impl Iterator { self.iter_mut().map(|(_, v)| v) } } impl<'a, K, V, Q, const N: usize> ops::Index<&'a Q> for LinearMap where K: Borrow + Eq, Q: Eq + ?Sized, { type Output = V; fn index(&self, key: &Q) -> &V { self.get(key).expect("no entry found for key") } } impl<'a, K, V, Q, const N: usize> ops::IndexMut<&'a Q> for LinearMap where K: Borrow + Eq, Q: Eq + ?Sized, { fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("no entry found for key") } } impl Default for LinearMap where K: Eq, { fn default() -> Self { Self::new() } } impl Clone for LinearMap where K: Eq + Clone, V: Clone, { fn clone(&self) -> Self { Self { buffer: 
self.buffer.clone(), } } } impl fmt::Debug for LinearMap where K: Eq + fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl FromIterator<(K, V)> for LinearMap where K: Eq, { fn from_iter(iter: I) -> Self where I: IntoIterator, { let mut out = Self::new(); out.buffer.extend(iter); out } } pub struct IntoIter where K: Eq, { inner: as IntoIterator>::IntoIter, } impl Iterator for IntoIter where K: Eq, { type Item = (K, V); fn next(&mut self) -> Option { self.inner.next() } } impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap where K: Eq, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } pub struct Iter<'a, K, V> { iter: slice::Iter<'a, (K, V)>, } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.iter.next().map(|&(ref k, ref v)| (k, v)) } } impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Self { Self { iter: self.iter.clone(), } } } pub struct IterMut<'a, K, V> { iter: slice::IterMut<'a, (K, V)>, } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option { self.iter.next().map(|&mut (ref k, ref mut v)| (k, v)) } } impl PartialEq> for LinearMap where K: Eq, V: PartialEq, { fn eq(&self, other: &LinearMap) -> bool { self.len() == other.len() && self .iter() .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for LinearMap where K: Eq, V: PartialEq, { } #[cfg(test)] mod test { use crate::LinearMap; #[test] fn static_new() { static mut _L: LinearMap = LinearMap::new(); } #[test] fn partial_eq() { { let mut a = LinearMap::<_, _, 1>::new(); a.insert("k1", "v1").unwrap(); let mut b = LinearMap::<_, _, 2>::new(); b.insert("k1", "v1").unwrap(); assert!(a == b); b.insert("k2", "v2").unwrap(); assert!(a != b); } { let mut a = LinearMap::<_, _, 2>::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); let mut b = LinearMap::<_, _, 2>::new(); b.insert("k2", "v2").unwrap(); b.insert("k1", "v1").unwrap(); assert!(a == b); } } #[test] fn drop() { droppable!(); { let mut v: LinearMap = LinearMap::new(); v.insert(0, Droppable::new()).ok().unwrap(); v.insert(1, Droppable::new()).ok().unwrap(); v.remove(&1).unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: LinearMap = LinearMap::new(); v.insert(0, Droppable::new()).ok().unwrap(); v.insert(1, Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } } heapless-0.8.0/src/mpmc.rs000064400000000000000000000224711046102023000135340ustar 00000000000000//! A fixed capacity Multiple-Producer Multiple-Consumer (MPMC) lock-free queue //! //! NOTE: This module requires atomic CAS operations. On targets where they're not natively available, //! they are emulated by the [`portable-atomic`](https://crates.io/crates/portable-atomic) crate. //! //! # Example //! //! This queue can be constructed in "const context". Placing it in a `static` variable lets *all* //! contexts (interrupts / threads / `main`) safely enqueue and dequeue items from it. //! //! ``` ignore //! #![no_main] //! #![no_std] //! //! use panic_semihosting as _; //! //! use cortex_m::{asm, peripheral::syst::SystClkSource}; //! use cortex_m_rt::{entry, exception}; //! use cortex_m_semihosting::hprintln; //! use heapless::mpmc::Q2; //! //! static Q: Q2 = Q2::new(); //! //! #[entry] //! fn main() -> ! { //! if let Some(p) = cortex_m::Peripherals::take() { //! 
let mut syst = p.SYST; //! //! // configures the system timer to trigger a SysTick exception every second //! syst.set_clock_source(SystClkSource::Core); //! syst.set_reload(12_000_000); //! syst.enable_counter(); //! syst.enable_interrupt(); //! } //! //! loop { //! if let Some(x) = Q.dequeue() { //! hprintln!("{}", x).ok(); //! } else { //! asm::wfi(); //! } //! } //! } //! //! #[exception] //! fn SysTick() { //! static mut COUNT: u8 = 0; //! //! Q.enqueue(*COUNT).ok(); //! *COUNT += 1; //! } //! ``` //! //! # Benchmark //! //! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles //! //! N| `Q8::::enqueue().ok()` (`z`) | `Q8::::dequeue()` (`z`) | //! -|----------------------------------|-----------------------------| //! 0|34 |35 | //! 1|52 |53 | //! 2|69 |71 | //! //! - `N` denotes the number of *interruptions*. On Cortex-M, an interruption consists of an //! interrupt handler preempting the would-be atomic section of the `enqueue` / `dequeue` //! operation. Note that it does *not* matter if the higher priority handler uses the queue or //! not. //! - All execution times are in clock cycles. 1 clock cycle = 125 ns. //! - Execution time is *dependent* of `mem::size_of::()`. Both operations include one //! `memcpy(T)` in their successful path. //! - The optimization level is indicated in parentheses. //! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue` //! and `Ok` is returned by `enqueue`). //! //! # Portability //! //! This module requires CAS atomic instructions which are not available on all architectures //! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can be //! emulated however with [`portable-atomic`](https://crates.io/crates/portable-atomic), which is //! enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32` //! targets. //! //! # References //! //! This is an implementation of Dmitry Vyukov's ["Bounded MPMC queue"][0] minus the cache padding. //! //! [0]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue use core::{cell::UnsafeCell, mem::MaybeUninit}; #[cfg(not(feature = "portable-atomic"))] use core::sync::atomic; #[cfg(feature = "portable-atomic")] use portable_atomic as atomic; use atomic::Ordering; #[cfg(feature = "mpmc_large")] type AtomicTargetSize = atomic::AtomicUsize; #[cfg(not(feature = "mpmc_large"))] type AtomicTargetSize = atomic::AtomicU8; #[cfg(feature = "mpmc_large")] type IntSize = usize; #[cfg(not(feature = "mpmc_large"))] type IntSize = u8; /// MPMC queue with a capability for 2 elements. pub type Q2 = MpMcQueue; /// MPMC queue with a capability for 4 elements. pub type Q4 = MpMcQueue; /// MPMC queue with a capability for 8 elements. pub type Q8 = MpMcQueue; /// MPMC queue with a capability for 16 elements. pub type Q16 = MpMcQueue; /// MPMC queue with a capability for 32 elements. pub type Q32 = MpMcQueue; /// MPMC queue with a capability for 64 elements. pub type Q64 = MpMcQueue; /// MPMC queue with a capacity for N elements /// N must be a power of 2 /// The max value of N is u8::MAX - 1 if `mpmc_large` feature is not enabled. 
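/// A minimal usage sketch (the element type and capacity are arbitrary):
///
/// ```
/// use heapless::mpmc::MpMcQueue;
///
/// let q: MpMcQueue<u8, 4> = MpMcQueue::new();
/// assert!(q.enqueue(42).is_ok());
/// assert_eq!(q.dequeue(), Some(42));
/// assert_eq!(q.dequeue(), None);
/// ```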
pub struct MpMcQueue { buffer: UnsafeCell<[Cell; N]>, dequeue_pos: AtomicTargetSize, enqueue_pos: AtomicTargetSize, } impl MpMcQueue { const MASK: IntSize = (N - 1) as IntSize; const EMPTY_CELL: Cell = Cell::new(0); const ASSERT: [(); 1] = [()]; /// Creates an empty queue pub const fn new() -> Self { // Const assert crate::sealed::greater_than_1::(); crate::sealed::power_of_two::(); // Const assert on size. Self::ASSERT[!(N < (IntSize::MAX as usize)) as usize]; let mut cell_count = 0; let mut result_cells: [Cell; N] = [Self::EMPTY_CELL; N]; while cell_count != N { result_cells[cell_count] = Cell::new(cell_count); cell_count += 1; } Self { buffer: UnsafeCell::new(result_cells), dequeue_pos: AtomicTargetSize::new(0), enqueue_pos: AtomicTargetSize::new(0), } } /// Returns the item in the front of the queue, or `None` if the queue is empty pub fn dequeue(&self) -> Option { unsafe { dequeue(self.buffer.get() as *mut _, &self.dequeue_pos, Self::MASK) } } /// Adds an `item` to the end of the queue /// /// Returns back the `item` if the queue is full pub fn enqueue(&self, item: T) -> Result<(), T> { unsafe { enqueue( self.buffer.get() as *mut _, &self.enqueue_pos, Self::MASK, item, ) } } } impl Default for MpMcQueue { fn default() -> Self { Self::new() } } unsafe impl Sync for MpMcQueue where T: Send {} struct Cell { data: MaybeUninit, sequence: AtomicTargetSize, } impl Cell { const fn new(seq: usize) -> Self { Self { data: MaybeUninit::uninit(), sequence: AtomicTargetSize::new(seq as IntSize), } } } unsafe fn dequeue( buffer: *mut Cell, dequeue_pos: &AtomicTargetSize, mask: IntSize, ) -> Option { let mut pos = dequeue_pos.load(Ordering::Relaxed); let mut cell; loop { cell = buffer.add(usize::from(pos & mask)); let seq = (*cell).sequence.load(Ordering::Acquire); let dif = (seq as i8).wrapping_sub((pos.wrapping_add(1)) as i8); if dif == 0 { if dequeue_pos .compare_exchange_weak( pos, pos.wrapping_add(1), Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { break; } } else if dif < 0 { return None; } else { pos = dequeue_pos.load(Ordering::Relaxed); } } let data = (*cell).data.as_ptr().read(); (*cell) .sequence .store(pos.wrapping_add(mask).wrapping_add(1), Ordering::Release); Some(data) } unsafe fn enqueue( buffer: *mut Cell, enqueue_pos: &AtomicTargetSize, mask: IntSize, item: T, ) -> Result<(), T> { let mut pos = enqueue_pos.load(Ordering::Relaxed); let mut cell; loop { cell = buffer.add(usize::from(pos & mask)); let seq = (*cell).sequence.load(Ordering::Acquire); let dif = (seq as i8).wrapping_sub(pos as i8); if dif == 0 { if enqueue_pos .compare_exchange_weak( pos, pos.wrapping_add(1), Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { break; } } else if dif < 0 { return Err(item); } else { pos = enqueue_pos.load(Ordering::Relaxed); } } (*cell).data.as_mut_ptr().write(item); (*cell) .sequence .store(pos.wrapping_add(1), Ordering::Release); Ok(()) } #[cfg(test)] mod tests { use super::Q2; #[test] fn sanity() { let q = Q2::new(); q.enqueue(0).unwrap(); q.enqueue(1).unwrap(); assert!(q.enqueue(2).is_err()); assert_eq!(q.dequeue(), Some(0)); assert_eq!(q.dequeue(), Some(1)); assert_eq!(q.dequeue(), None); } #[test] fn drain_at_pos255() { let q = Q2::new(); for _ in 0..255 { assert!(q.enqueue(0).is_ok()); assert_eq!(q.dequeue(), Some(0)); } // this should not block forever assert_eq!(q.dequeue(), None); } #[test] fn full_at_wrapped_pos0() { let q = Q2::new(); for _ in 0..254 { assert!(q.enqueue(0).is_ok()); assert_eq!(q.dequeue(), Some(0)); } assert!(q.enqueue(0).is_ok()); 
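// after 254 enqueue/dequeue pairs both positions sit at 254; the enqueue above and the
// one below fill the queue to its capacity of 2 while the `u8` enqueue position wraps
// around to 0, which is the situation the final assertion checks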
assert!(q.enqueue(0).is_ok()); // this should not block forever assert!(q.enqueue(0).is_err()); } } heapless-0.8.0/src/pool/arc.rs000064400000000000000000000302561046102023000143160ustar 00000000000000//! `std::sync::Arc`-like API on top of a lock-free memory pool //! //! # Example usage //! //! ``` //! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}}; //! //! arc_pool!(P: u128); //! //! // cannot allocate without first giving memory blocks to the pool //! assert!(P.alloc(42).is_err()); //! //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) //! let block: &'static mut ArcBlock = unsafe { //! static mut B: ArcBlock = ArcBlock::new(); //! &mut B //! }; //! //! P.manage(block); //! //! let arc = P.alloc(1).unwrap(); //! //! // number of smart pointers is limited to the number of blocks managed by the pool //! let res = P.alloc(2); //! assert!(res.is_err()); //! //! // but cloning does not consume an `ArcBlock` //! let arc2 = arc.clone(); //! //! assert_eq!(1, *arc2); //! //! // `arc`'s destructor returns the memory block to the pool //! drop(arc2); // decrease reference counter //! drop(arc); // release memory //! //! // it's now possible to allocate a new `Arc` smart pointer //! let res = P.alloc(3); //! //! assert!(res.is_ok()); //! ``` //! //! # Array block initialization //! //! You can create a static variable that contains an array of memory blocks and give all the blocks //! to the `ArcPool`. This requires an intermediate `const` value as shown below: //! //! ``` //! use heapless::{arc_pool, pool::arc::ArcBlock}; //! //! arc_pool!(P: u128); //! //! const POOL_CAPACITY: usize = 8; //! //! let blocks: &'static mut [ArcBlock] = { //! const BLOCK: ArcBlock = ArcBlock::new(); // <= //! static mut BLOCKS: [ArcBlock; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]; //! unsafe { &mut BLOCKS } //! }; //! //! for block in blocks { //! P.manage(block); //! } //! ``` // reference counting logic is based on version 1.63.0 of the Rust standard library (`alloc` crate) // which is licensed under 'MIT or APACHE-2.0' // https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited // 2022-09-05) use core::{ fmt, hash::{Hash, Hasher}, mem::{ManuallyDrop, MaybeUninit}, ops, ptr, sync::atomic::{self, AtomicUsize, Ordering}, }; use super::treiber::{NonNullPtr, Stack, UnionNode}; /// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type` /// /// For more extensive documentation see the [module level documentation](crate::pool::arc) #[macro_export] macro_rules! 
arc_pool { ($name:ident: $data_type:ty) => { pub struct $name; impl $crate::pool::arc::ArcPool for $name { type Data = $data_type; fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> { static $name: $crate::pool::arc::ArcPoolImpl<$data_type> = $crate::pool::arc::ArcPoolImpl::new(); &$name } } impl $name { /// Inherent method version of `ArcPool::alloc` #[allow(dead_code)] pub fn alloc( &self, value: $data_type, ) -> Result<$crate::pool::arc::Arc<$name>, $data_type> { <$name as $crate::pool::arc::ArcPool>::alloc(value) } /// Inherent method version of `ArcPool::manage` #[allow(dead_code)] pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) { <$name as $crate::pool::arc::ArcPool>::manage(block) } } }; } /// A singleton that manages `pool::arc::Arc` smart pointers pub trait ArcPool: Sized { /// The data type managed by the memory pool type Data: 'static; /// `arc_pool!` implementation detail #[doc(hidden)] fn singleton() -> &'static ArcPoolImpl; /// Allocate a new `Arc` smart pointer initialized to the given `value` /// /// `manage` should be called at least once before calling `alloc` /// /// # Errors /// /// The `Err`or variant is returned when the memory pool has run out of memory blocks fn alloc(value: Self::Data) -> Result, Self::Data> { Ok(Arc { node_ptr: Self::singleton().alloc(value)?, }) } /// Add a statically allocated memory block to the memory pool fn manage(block: &'static mut ArcBlock) { Self::singleton().manage(block) } } /// `arc_pool!` implementation detail // newtype to avoid having to make field types public #[doc(hidden)] pub struct ArcPoolImpl { stack: Stack>>>, } impl ArcPoolImpl { /// `arc_pool!` implementation detail #[doc(hidden)] pub const fn new() -> Self { Self { stack: Stack::new(), } } fn alloc(&self, value: T) -> Result>>>, T> { if let Some(node_ptr) = self.stack.try_pop() { let inner = ArcInner { data: value, strong: AtomicUsize::new(1), }; unsafe { node_ptr.as_ptr().cast::>().write(inner) } Ok(node_ptr) } else { Err(value) } } fn manage(&self, block: &'static mut ArcBlock) { let node: &'static mut _ = &mut block.node; unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) } } } unsafe impl Sync for ArcPoolImpl {} /// Like `std::sync::Arc` but managed by memory pool `P` pub struct Arc
<P>
where P: ArcPool, { node_ptr: NonNullPtr>>>, } impl

Arc

where P: ArcPool, { fn inner(&self) -> &ArcInner { unsafe { &*self.node_ptr.as_ptr().cast::>() } } fn from_inner(node_ptr: NonNullPtr>>>) -> Self { Self { node_ptr } } unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data { &mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::>()).data) } #[inline(never)] unsafe fn drop_slow(&mut self) { // run `P::Data`'s destructor ptr::drop_in_place(Self::get_mut_unchecked(self)); // return memory to pool P::singleton().stack.push(self.node_ptr); } } impl

AsRef for Arc

where P: ArcPool, { fn as_ref(&self) -> &P::Data { &**self } } const MAX_REFCOUNT: usize = (isize::MAX) as usize; impl

Clone for Arc

ops::Deref for Arc

where P: ArcPool, { type Target = P::Data; fn deref(&self) -> &Self::Target { unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::>()).data) } } } impl fmt::Display for Arc where A: ArcPool, A::Data: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { A::Data::fmt(self, f) } } impl Drop for Arc where A: ArcPool, { fn drop(&mut self) { if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 { return; } atomic::fence(Ordering::Acquire); unsafe { self.drop_slow() } } } impl Eq for Arc where A: ArcPool, A::Data: Eq, { } impl Hash for Arc where A: ArcPool, A::Data: Hash, { fn hash(&self, state: &mut H) where H: Hasher, { (**self).hash(state) } } impl Ord for Arc where A: ArcPool, A::Data: Ord, { fn cmp(&self, other: &Self) -> core::cmp::Ordering { A::Data::cmp(self, other) } } impl PartialEq> for Arc where A: ArcPool, B: ArcPool, A::Data: PartialEq, { fn eq(&self, other: &Arc) -> bool { A::Data::eq(self, &**other) } } impl PartialOrd> for Arc where A: ArcPool, B: ArcPool, A::Data: PartialOrd, { fn partial_cmp(&self, other: &Arc) -> Option { A::Data::partial_cmp(self, &**other) } } unsafe impl Send for Arc where A: ArcPool, A::Data: Sync + Send, { } unsafe impl Sync for Arc where A: ArcPool, A::Data: Sync + Send, { } impl Unpin for Arc where A: ArcPool {} struct ArcInner { data: T, strong: AtomicUsize, } /// A chunk of memory that an `ArcPool` can manage pub struct ArcBlock { node: UnionNode>>, } impl ArcBlock { /// Creates a new memory block pub const fn new() -> Self { Self { node: UnionNode { data: ManuallyDrop::new(MaybeUninit::uninit()), }, } } } #[cfg(test)] mod tests { use super::*; #[test] fn cannot_alloc_if_empty() { arc_pool!(P: i32); assert_eq!(Err(42), P.alloc(42),); } #[test] fn can_alloc_if_manages_one_block() { arc_pool!(P: i32); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); assert_eq!(42, *P.alloc(42).unwrap()); } #[test] fn alloc_drop_alloc() { arc_pool!(P: i32); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); let arc = P.alloc(1).unwrap(); drop(arc); assert_eq!(2, *P.alloc(2).unwrap()); } #[test] fn strong_count_starts_at_one() { arc_pool!(P: i32); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); let arc = P.alloc(1).ok().unwrap(); assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed)); } #[test] fn clone_increases_strong_count() { arc_pool!(P: i32); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); let arc = P.alloc(1).ok().unwrap(); let before = arc.inner().strong.load(Ordering::Relaxed); let arc2 = arc.clone(); let expected = before + 1; assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed)); assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed)); } #[test] fn drop_decreases_strong_count() { arc_pool!(P: i32); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); let arc = P.alloc(1).ok().unwrap(); let arc2 = arc.clone(); let before = arc.inner().strong.load(Ordering::Relaxed); drop(arc); let expected = before - 1; assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed)); } #[test] fn runs_destructor_exactly_once_when_strong_count_reaches_zero() { static COUNT: AtomicUsize = AtomicUsize::new(0); pub struct S; impl Drop for S { fn drop(&mut self) { COUNT.fetch_add(1, Ordering::Relaxed); } } arc_pool!(P: S); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); let arc = 
P.alloc(S).ok().unwrap(); assert_eq!(0, COUNT.load(Ordering::Relaxed)); drop(arc); assert_eq!(1, COUNT.load(Ordering::Relaxed)); } #[test] fn zst_is_well_aligned() { #[repr(align(4096))] pub struct Zst4096; arc_pool!(P: Zst4096); let block = unsafe { static mut B: ArcBlock = ArcBlock::new(); &mut B }; P.manage(block); let arc = P.alloc(Zst4096).ok().unwrap(); let raw = &*arc as *const Zst4096; assert_eq!(0, raw as usize % 4096); } } heapless-0.8.0/src/pool/boxed.rs000064400000000000000000000321451046102023000146510ustar 00000000000000//! `std::boxed::Box`-like API on top of a lock-free memory pool //! //! # Example usage //! //! ``` //! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}}; //! //! box_pool!(P: u128); //! //! // cannot allocate without first giving memory blocks to the pool //! assert!(P.alloc(42).is_err()); //! //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) //! let block: &'static mut BoxBlock = unsafe { //! static mut B: BoxBlock = BoxBlock::new(); //! &mut B //! }; //! //! // give block of memory to the pool //! P.manage(block); //! //! // it's now possible to allocate //! let mut boxed = P.alloc(1).unwrap(); //! //! // mutation is possible //! *boxed += 1; //! assert_eq!(2, *boxed); //! //! // number of boxes is limited to the number of blocks managed by the pool //! let res = P.alloc(3); //! assert!(res.is_err()); //! //! // give another memory block to the pool //! P.manage(unsafe { //! static mut B: BoxBlock = BoxBlock::new(); //! &mut B //! }); //! //! // cloning also consumes a memory block from the pool //! let mut separate_box = boxed.clone(); //! *separate_box += 1; //! assert_eq!(3, *separate_box); //! //! // after the clone it's not possible to allocate again //! let res = P.alloc(4); //! assert!(res.is_err()); //! //! // `boxed`'s destructor returns the memory block to the pool //! drop(boxed); //! //! // it's possible to allocate again //! let res = P.alloc(5); //! //! assert!(res.is_ok()); //! ``` //! //! # Array block initialization //! //! You can create a static variable that contains an array of memory blocks and give all the blocks //! to the `BoxPool`. This requires an intermediate `const` value as shown below: //! //! ``` //! use heapless::{box_pool, pool::boxed::BoxBlock}; //! //! box_pool!(P: u128); //! //! const POOL_CAPACITY: usize = 8; //! //! let blocks: &'static mut [BoxBlock] = { //! const BLOCK: BoxBlock = BoxBlock::new(); // <= //! static mut BLOCKS: [BoxBlock; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]; //! unsafe { &mut BLOCKS } //! }; //! //! for block in blocks { //! P.manage(block); //! } //! ``` use core::{ fmt, hash::{Hash, Hasher}, mem::{ManuallyDrop, MaybeUninit}, ops, ptr, }; use stable_deref_trait::StableDeref; use super::treiber::{NonNullPtr, Stack, UnionNode}; /// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type` /// /// For more extensive documentation see the [module level documentation](crate::pool::boxed) #[macro_export] macro_rules! 
box_pool { ($name:ident: $data_type:ty) => { pub struct $name; impl $crate::pool::boxed::BoxPool for $name { type Data = $data_type; fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> { static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> = $crate::pool::boxed::BoxPoolImpl::new(); &$name } } impl $name { /// Inherent method version of `BoxPool::alloc` #[allow(dead_code)] pub fn alloc( &self, value: $data_type, ) -> Result<$crate::pool::boxed::Box<$name>, $data_type> { <$name as $crate::pool::boxed::BoxPool>::alloc(value) } /// Inherent method version of `BoxPool::manage` #[allow(dead_code)] pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) { <$name as $crate::pool::boxed::BoxPool>::manage(block) } } }; } /// A singleton that manages `pool::boxed::Box`-es /// /// # Usage /// /// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that /// implements this trait. /// /// # Semver guarantees /// /// *Implementing* this trait is exempt from semver guarantees. /// i.e. a new patch release is allowed to break downstream `BoxPool` implementations. /// /// *Using* the trait, e.g. in generic code, does fall under semver guarantees. pub trait BoxPool: Sized { /// The data type managed by the memory pool type Data: 'static; /// `box_pool!` implementation detail #[doc(hidden)] fn singleton() -> &'static BoxPoolImpl; /// Allocate a new `Box` initialized to the given `value` /// /// `manage` should be called at least once before calling `alloc` /// /// # Errors /// /// The `Err`or variant is returned when the memory pool has run out of memory blocks fn alloc(value: Self::Data) -> Result, Self::Data> { Ok(Box { node_ptr: Self::singleton().alloc(value)?, }) } /// Add a statically allocated memory block to the memory pool fn manage(block: &'static mut BoxBlock) { Self::singleton().manage(block) } } /// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]` pub struct Box
<P>
where P: BoxPool, { node_ptr: NonNullPtr>>, } impl Clone for Box where A: BoxPool, A::Data: Clone, { fn clone(&self) -> Self { A::alloc((**self).clone()).ok().expect("OOM") } } impl fmt::Debug for Box where A: BoxPool, A::Data: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { A::Data::fmt(self, f) } } impl

ops::Deref for Box

where P: BoxPool, { type Target = P::Data; fn deref(&self) -> &Self::Target { unsafe { &*self.node_ptr.as_ptr().cast::() } } } impl

ops::DerefMut for Box

where P: BoxPool, { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { &mut *self.node_ptr.as_ptr().cast::() } } } unsafe impl

StableDeref for Box

where P: BoxPool {} impl fmt::Display for Box where A: BoxPool, A::Data: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { A::Data::fmt(self, f) } } impl

Drop for Box

where P: BoxPool, { fn drop(&mut self) { let node = self.node_ptr; unsafe { ptr::drop_in_place(node.as_ptr().cast::()) } unsafe { P::singleton().stack.push(node) } } } impl Eq for Box where A: BoxPool, A::Data: Eq, { } impl Hash for Box where A: BoxPool, A::Data: Hash, { fn hash(&self, state: &mut H) where H: Hasher, { (**self).hash(state) } } impl Ord for Box where A: BoxPool, A::Data: Ord, { fn cmp(&self, other: &Self) -> core::cmp::Ordering { A::Data::cmp(self, other) } } impl PartialEq> for Box where A: BoxPool, B: BoxPool, A::Data: PartialEq, { fn eq(&self, other: &Box) -> bool { A::Data::eq(self, other) } } impl PartialOrd> for Box where A: BoxPool, B: BoxPool, A::Data: PartialOrd, { fn partial_cmp(&self, other: &Box) -> Option { A::Data::partial_cmp(self, other) } } unsafe impl

Send for Box

where P: BoxPool, P::Data: Send, { } unsafe impl

Sync for Box

where P: BoxPool, P::Data: Sync, { } /// `box_pool!` implementation detail // newtype to avoid having to make field types public #[doc(hidden)] pub struct BoxPoolImpl { stack: Stack>>, } impl BoxPoolImpl { pub const fn new() -> Self { Self { stack: Stack::new(), } } fn alloc(&self, value: T) -> Result>>, T> { if let Some(node_ptr) = self.stack.try_pop() { unsafe { node_ptr.as_ptr().cast::().write(value) } Ok(node_ptr) } else { Err(value) } } fn manage(&self, block: &'static mut BoxBlock) { let node: &'static mut _ = &mut block.node; unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) } } } unsafe impl Sync for BoxPoolImpl {} /// A chunk of memory that a `BoxPool` singleton can manage pub struct BoxBlock { node: UnionNode>, } impl BoxBlock { /// Creates a new memory block pub const fn new() -> Self { Self { node: UnionNode { data: ManuallyDrop::new(MaybeUninit::uninit()), }, } } } #[cfg(test)] mod tests { use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::thread; use super::*; #[test] fn cannot_alloc_if_empty() { box_pool!(P: i32); assert_eq!(Err(42), P.alloc(42)); } #[test] fn can_alloc_if_pool_manages_one_block() { box_pool!(P: i32); let block = unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }; P.manage(block); assert_eq!(42, *P.alloc(42).unwrap()); } #[test] fn alloc_drop_alloc() { box_pool!(P: i32); let block = unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }; P.manage(block); let boxed = P.alloc(1).unwrap(); drop(boxed); assert_eq!(2, *P.alloc(2).unwrap()); } #[test] fn runs_destructor_exactly_once_on_drop() { static COUNT: AtomicUsize = AtomicUsize::new(0); pub struct S; impl Drop for S { fn drop(&mut self) { COUNT.fetch_add(1, Ordering::Relaxed); } } box_pool!(P: S); let block = unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }; P.manage(block); let boxed = P.alloc(S).ok().unwrap(); assert_eq!(0, COUNT.load(Ordering::Relaxed)); drop(boxed); assert_eq!(1, COUNT.load(Ordering::Relaxed)); } #[test] fn zst_is_well_aligned() { #[repr(align(4096))] pub struct Zst4096; box_pool!(P: Zst4096); let block = unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }; P.manage(block); let boxed = P.alloc(Zst4096).ok().unwrap(); let raw = &*boxed as *const Zst4096; assert_eq!(0, raw as usize % 4096); } #[allow(clippy::redundant_clone)] #[test] fn can_clone_if_pool_is_not_exhausted() { static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); pub struct S; impl Clone for S { fn clone(&self) -> Self { STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); Self } } box_pool!(P: S); P.manage(unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }); P.manage(unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }); let first = P.alloc(S).ok().unwrap(); let _second = first.clone(); assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); let is_oom = P.alloc(S).is_err(); assert!(is_oom); } #[allow(clippy::redundant_clone)] #[test] fn clone_panics_if_pool_exhausted() { static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); pub struct S; impl Clone for S { fn clone(&self) -> Self { STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); Self } } box_pool!(P: S); P.manage(unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }); let first = P.alloc(S).ok().unwrap(); let thread = thread::spawn(move || { let _second = first.clone(); }); let thread_panicked = thread.join().is_err(); assert!(thread_panicked); // we diverge from `alloc::Box` in that we call `T::clone` first and then request // memory from the 
allocator whereas `alloc::Box` does it the other way around // assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); } #[allow(clippy::redundant_clone)] #[test] fn panicking_clone_does_not_leak_memory() { static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); pub struct S; impl Clone for S { fn clone(&self) -> Self { STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); panic!() } } box_pool!(P: S); P.manage(unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }); P.manage(unsafe { static mut B: BoxBlock = BoxBlock::new(); &mut B }); let boxed = P.alloc(S).ok().unwrap(); let thread = thread::spawn(move || { let _boxed = boxed.clone(); }); let thread_panicked = thread.join().is_err(); assert!(thread_panicked); assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); let once = P.alloc(S); let twice = P.alloc(S); assert!(once.is_ok()); assert!(twice.is_ok()); } } heapless-0.8.0/src/pool/object.rs000064400000000000000000000233431046102023000150160ustar 00000000000000//! Object pool API //! //! # Example usage //! //! ``` //! use heapless::{object_pool, pool::object::{Object, ObjectBlock}}; //! //! object_pool!(P: [u8; 128]); //! //! // cannot request objects without first giving object blocks to the pool //! assert!(P.request().is_none()); //! //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) //! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe { //! // unlike the memory pool APIs, an initial value must be specified here //! static mut B: ObjectBlock<[u8; 128]>= ObjectBlock::new([0; 128]); //! &mut B //! }; //! //! // give object block to the pool //! P.manage(block); //! //! // it's now possible to request objects //! // unlike the memory pool APIs, no initial value is required here //! let mut object = P.request().unwrap(); //! //! // mutation is possible //! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1)); //! //! // the number of live objects is limited to the number of blocks managed by the pool //! let res = P.request(); //! assert!(res.is_none()); //! //! // `object`'s destructor returns the object to the pool //! drop(object); //! //! // it's possible to request an `Object` again //! let res = P.request(); //! //! assert!(res.is_some()); //! ``` //! //! # Array block initialization //! //! You can create a static variable that contains an array of memory blocks and give all the blocks //! to the `ObjectPool`. This requires an intermediate `const` value as shown below: //! //! ``` //! use heapless::{object_pool, pool::object::ObjectBlock}; //! //! object_pool!(P: [u8; 128]); //! //! const POOL_CAPACITY: usize = 8; //! //! let blocks: &'static mut [ObjectBlock<[u8; 128]>] = { //! const BLOCK: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]); // <= //! static mut BLOCKS: [ObjectBlock<[u8; 128]>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]; //! unsafe { &mut BLOCKS } //! }; //! //! for block in blocks { //! P.manage(block); //! } //! ``` use core::{ cmp::Ordering, fmt, hash::{Hash, Hasher}, mem::ManuallyDrop, ops, ptr, }; use stable_deref_trait::StableDeref; use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode}; /// Creates a new `ObjectPool` singleton with the given `$name` that manages the specified /// `$data_type` /// /// For more extensive documentation see the [module level documentation](crate::pool::object) #[macro_export] macro_rules! 
object_pool { ($name:ident: $data_type:ty) => { pub struct $name; impl $crate::pool::object::ObjectPool for $name { type Data = $data_type; fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> { static $name: $crate::pool::object::ObjectPoolImpl<$data_type> = $crate::pool::object::ObjectPoolImpl::new(); &$name } } impl $name { /// Inherent method version of `ObjectPool::request` #[allow(dead_code)] pub fn request(&self) -> Option<$crate::pool::object::Object<$name>> { <$name as $crate::pool::object::ObjectPool>::request() } /// Inherent method version of `ObjectPool::manage` #[allow(dead_code)] pub fn manage( &self, block: &'static mut $crate::pool::object::ObjectBlock<$data_type>, ) { <$name as $crate::pool::object::ObjectPool>::manage(block) } } }; } /// A singleton that manages `pool::object::Object`s pub trait ObjectPool: Sized { /// The data type of the objects managed by the object pool type Data: 'static; /// `object_pool!` implementation detail #[doc(hidden)] fn singleton() -> &'static ObjectPoolImpl; /// Request a new object from the pool fn request() -> Option> { Self::singleton() .request() .map(|node_ptr| Object { node_ptr }) } /// Adds a statically allocate object to the pool fn manage(block: &'static mut ObjectBlock) { Self::singleton().manage(block) } } /// `object_pool!` implementation detail #[doc(hidden)] pub struct ObjectPoolImpl { stack: Stack>, } impl ObjectPoolImpl { /// `object_pool!` implementation detail #[doc(hidden)] pub const fn new() -> Self { Self { stack: Stack::new(), } } fn request(&self) -> Option>> { self.stack.try_pop() } fn manage(&self, block: &'static mut ObjectBlock) { let node: &'static mut _ = &mut block.node; unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) } } } // `T needs` to be Send because returning an object from a thread and then // requesting it from another is effectively a cross-thread 'send' operation unsafe impl Sync for ObjectPoolImpl where T: Send {} /// An object managed by object pool `P` pub struct Object
<P>
where P: ObjectPool, { node_ptr: NonNullPtr>, } impl AsMut<[T]> for Object where A: ObjectPool, { fn as_mut(&mut self) -> &mut [T] { &mut **self } } impl AsRef<[T]> for Object where A: ObjectPool, { fn as_ref(&self) -> &[T] { &**self } } impl fmt::Debug for Object where A: ObjectPool, A::Data: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { A::Data::fmt(self, f) } } impl ops::Deref for Object where A: ObjectPool, { type Target = A::Data; fn deref(&self) -> &Self::Target { unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr()).data) } } } impl ops::DerefMut for Object where A: ObjectPool, { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { &mut *ptr::addr_of_mut!((*self.node_ptr.as_ptr()).data) } } } unsafe impl StableDeref for Object where A: ObjectPool {} impl fmt::Display for Object where A: ObjectPool, A::Data: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { A::Data::fmt(self, f) } } impl

Drop for Object

where P: ObjectPool, { fn drop(&mut self) { unsafe { P::singleton().stack.push(self.node_ptr) } } } impl Eq for Object where A: ObjectPool, A::Data: Eq, { } impl Hash for Object where A: ObjectPool, A::Data: Hash, { fn hash(&self, state: &mut H) where H: Hasher, { (**self).hash(state) } } impl Ord for Object where A: ObjectPool, A::Data: Ord, { fn cmp(&self, other: &Self) -> Ordering { A::Data::cmp(self, other) } } impl PartialEq> for Object where A: ObjectPool, B: ObjectPool, A::Data: PartialEq, { fn eq(&self, other: &Object) -> bool { A::Data::eq(self, other) } } impl PartialOrd> for Object where A: ObjectPool, B: ObjectPool, A::Data: PartialOrd, { fn partial_cmp(&self, other: &Object) -> Option { A::Data::partial_cmp(self, other) } } unsafe impl

Send for Object

where P: ObjectPool, P::Data: Send, { } unsafe impl

Sync for Object

where P: ObjectPool, P::Data: Sync, { } /// An object "block" of data type `T` that has not yet been associated to an `ObjectPool` pub struct ObjectBlock { node: StructNode, } impl ObjectBlock { /// Creates a new object block with the given `initial_value` pub const fn new(initial_value: T) -> Self { Self { node: StructNode { next: ManuallyDrop::new(AtomicPtr::null()), data: ManuallyDrop::new(initial_value), }, } } } #[cfg(test)] mod tests { use core::sync::atomic::{self, AtomicUsize}; use super::*; #[test] fn cannot_request_if_empty() { object_pool!(P: i32); assert_eq!(None, P.request()); } #[test] fn can_request_if_manages_one_block() { object_pool!(P: i32); let block = unsafe { static mut B: ObjectBlock = ObjectBlock::new(1); &mut B }; P.manage(block); assert_eq!(1, *P.request().unwrap()); } #[test] fn request_drop_request() { object_pool!(P: i32); let block = unsafe { static mut B: ObjectBlock = ObjectBlock::new(1); &mut B }; P.manage(block); let mut object = P.request().unwrap(); *object = 2; drop(object); assert_eq!(2, *P.request().unwrap()); } #[test] fn destructor_does_not_run_on_drop() { static COUNT: AtomicUsize = AtomicUsize::new(0); pub struct S; impl Drop for S { fn drop(&mut self) { COUNT.fetch_add(1, atomic::Ordering::Relaxed); } } object_pool!(P: S); let block = unsafe { static mut B: ObjectBlock = ObjectBlock::new(S); &mut B }; P.manage(block); let object = P.request().unwrap(); assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed)); drop(object); assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed)); } #[test] fn zst_is_well_aligned() { #[repr(align(4096))] pub struct Zst4096; object_pool!(P: Zst4096); let block = unsafe { static mut B: ObjectBlock = ObjectBlock::new(Zst4096); &mut B }; P.manage(block); let object = P.request().unwrap(); let raw = &*object as *const Zst4096; assert_eq!(0, raw as usize % 4096); } } heapless-0.8.0/src/pool/treiber/cas.rs000064400000000000000000000105761046102023000157560ustar 00000000000000use core::{ marker::PhantomData, num::{NonZeroU32, NonZeroU64}, ptr::NonNull, sync::atomic::{AtomicU64, Ordering}, }; use super::{Node, Stack}; pub struct AtomicPtr where N: Node, { inner: AtomicU64, _marker: PhantomData<*mut N>, } impl AtomicPtr where N: Node, { pub const fn null() -> Self { Self { inner: AtomicU64::new(0), _marker: PhantomData, } } fn compare_and_exchange_weak( &self, current: Option>, new: Option>, success: Ordering, failure: Ordering, ) -> Result<(), Option>> { self.inner .compare_exchange_weak( current .map(|pointer| pointer.into_u64()) .unwrap_or_default(), new.map(|pointer| pointer.into_u64()).unwrap_or_default(), success, failure, ) .map(drop) .map_err(NonNullPtr::from_u64) } fn load(&self, order: Ordering) -> Option> { NonZeroU64::new(self.inner.load(order)).map(|inner| NonNullPtr { inner, _marker: PhantomData, }) } fn store(&self, value: Option>, order: Ordering) { self.inner.store( value.map(|pointer| pointer.into_u64()).unwrap_or_default(), order, ) } } pub struct NonNullPtr where N: Node, { inner: NonZeroU64, _marker: PhantomData<*mut N>, } impl Clone for NonNullPtr where N: Node, { fn clone(&self) -> Self { *self } } impl Copy for NonNullPtr where N: Node {} impl NonNullPtr where N: Node, { pub fn as_ptr(&self) -> *mut N { self.inner.get() as *mut N } pub fn from_static_mut_ref(ref_: &'static mut N) -> NonNullPtr { let non_null = NonNull::from(ref_); Self::from_non_null(non_null) } fn from_non_null(ptr: NonNull) -> Self { let address = ptr.as_ptr() as u32; let tag = initial_tag().get(); let value = (u64::from(tag) << 32) | 
u64::from(address); Self { inner: unsafe { NonZeroU64::new_unchecked(value) }, _marker: PhantomData, } } fn from_u64(value: u64) -> Option { NonZeroU64::new(value).map(|inner| Self { inner, _marker: PhantomData, }) } fn non_null(&self) -> NonNull { unsafe { NonNull::new_unchecked(self.inner.get() as *mut N) } } fn tag(&self) -> NonZeroU32 { unsafe { NonZeroU32::new_unchecked((self.inner.get() >> 32) as u32) } } fn into_u64(self) -> u64 { self.inner.get() } fn increase_tag(&mut self) { let address = self.as_ptr() as u32; let new_tag = self .tag() .get() .checked_add(1) .map(|val| unsafe { NonZeroU32::new_unchecked(val) }) .unwrap_or_else(initial_tag) .get(); let value = (u64::from(new_tag) << 32) | u64::from(address); self.inner = unsafe { NonZeroU64::new_unchecked(value) }; } } fn initial_tag() -> NonZeroU32 { unsafe { NonZeroU32::new_unchecked(1) } } pub unsafe fn push(stack: &Stack, new_top: NonNullPtr) where N: Node, { let mut top = stack.top.load(Ordering::Relaxed); loop { new_top .non_null() .as_ref() .next() .store(top, Ordering::Relaxed); if let Err(p) = stack.top.compare_and_exchange_weak( top, Some(new_top), Ordering::Release, Ordering::Relaxed, ) { top = p; } else { return; } } } pub fn try_pop(stack: &Stack) -> Option> where N: Node, { loop { if let Some(mut top) = stack.top.load(Ordering::Acquire) { let next = unsafe { top.non_null().as_ref().next().load(Ordering::Relaxed) }; if stack .top .compare_and_exchange_weak(Some(top), next, Ordering::Release, Ordering::Relaxed) .is_ok() { top.increase_tag(); return Some(top); } } else { // stack observed as empty return None; } } } heapless-0.8.0/src/pool/treiber/llsc.rs000064400000000000000000000057231046102023000161430ustar 00000000000000use core::{ cell::UnsafeCell, ptr::{self, NonNull}, }; use super::{Node, Stack}; pub struct AtomicPtr where N: Node, { inner: UnsafeCell>>, } impl AtomicPtr where N: Node, { pub const fn null() -> Self { Self { inner: UnsafeCell::new(None), } } } pub struct NonNullPtr where N: Node, { inner: NonNull, } impl NonNullPtr where N: Node, { pub fn as_ptr(&self) -> *mut N { self.inner.as_ptr().cast() } pub fn from_static_mut_ref(ref_: &'static mut N) -> Self { Self { inner: NonNull::from(ref_), } } } impl Clone for NonNullPtr where N: Node, { fn clone(&self) -> Self { Self { inner: self.inner } } } impl Copy for NonNullPtr where N: Node {} pub unsafe fn push(stack: &Stack, mut node: NonNullPtr) where N: Node, { let top_addr = ptr::addr_of!(stack.top) as *mut usize; loop { let top = arch::load_link(top_addr); node.inner .as_mut() .next_mut() .inner .get() .write(NonNull::new(top as *mut _)); if arch::store_conditional(node.inner.as_ptr() as usize, top_addr).is_ok() { break; } } } pub fn try_pop(stack: &Stack) -> Option> where N: Node, { unsafe { let top_addr = ptr::addr_of!(stack.top) as *mut usize; loop { let top = arch::load_link(top_addr); if let Some(top) = NonNull::new(top as *mut N) { let next = &top.as_ref().next(); if arch::store_conditional( next.inner .get() .read() .map(|non_null| non_null.as_ptr() as usize) .unwrap_or_default(), top_addr, ) .is_ok() { break Some(NonNullPtr { inner: top }); } } else { arch::clear_load_link(); break None; } } } } #[cfg(arm_llsc)] mod arch { use core::arch::asm; #[inline(always)] pub fn clear_load_link() { unsafe { asm!("clrex", options(nomem, nostack)) } } /// # Safety /// - `addr` must be a valid pointer #[inline(always)] pub unsafe fn load_link(addr: *const usize) -> usize { let value; asm!("ldrex {}, [{}]", out(reg) value, in(reg) addr, options(nostack)); value 
} /// # Safety /// - `addr` must be a valid pointer #[inline(always)] pub unsafe fn store_conditional(value: usize, addr: *mut usize) -> Result<(), ()> { let outcome: usize; asm!("strex {}, {}, [{}]", out(reg) outcome, in(reg) value, in(reg) addr, options(nostack)); if outcome == 0 { Ok(()) } else { Err(()) } } } heapless-0.8.0/src/pool/treiber.rs000064400000000000000000000033061046102023000152010ustar 00000000000000use core::mem::ManuallyDrop; #[cfg_attr(target_arch = "x86", path = "treiber/cas.rs")] #[cfg_attr(arm_llsc, path = "treiber/llsc.rs")] mod impl_; pub use impl_::{AtomicPtr, NonNullPtr}; pub struct Stack where N: Node, { top: AtomicPtr, } impl Stack where N: Node, { pub const fn new() -> Self { Self { top: AtomicPtr::null(), } } /// # Safety /// - `node` must be a valid pointer /// - aliasing rules must be enforced by the caller. e.g, the same `node` may not be pushed more than once pub unsafe fn push(&self, node: NonNullPtr) { impl_::push(self, node) } pub fn try_pop(&self) -> Option> { impl_::try_pop(self) } } pub trait Node: Sized { type Data; fn next(&self) -> &AtomicPtr; fn next_mut(&mut self) -> &mut AtomicPtr; } pub union UnionNode { next: ManuallyDrop>>, pub data: ManuallyDrop, } impl Node for UnionNode { type Data = T; fn next(&self) -> &AtomicPtr { unsafe { &self.next } } fn next_mut(&mut self) -> &mut AtomicPtr { unsafe { &mut self.next } } } pub struct StructNode { pub next: ManuallyDrop>>, pub data: ManuallyDrop, } impl Node for StructNode { type Data = T; fn next(&self) -> &AtomicPtr { &self.next } fn next_mut(&mut self) -> &mut AtomicPtr { &mut self.next } } #[cfg(test)] mod tests { use core::mem; use super::*; #[test] fn node_is_never_zero_sized() { struct Zst; assert_ne!(mem::size_of::>(), 0); } } heapless-0.8.0/src/pool.rs000064400000000000000000000032711046102023000135460ustar 00000000000000//! Memory and object pools //! //! # Target support //! //! This module / API is only available on these compilation targets: //! //! - ARM architectures which instruction set include the LDREX, CLREX and STREX instructions, e.g. //! `thumbv7m-none-eabi` but not `thumbv6m-none-eabi` //! - 32-bit x86, e.g. `i686-unknown-linux-gnu` //! //! # Benchmarks //! //! - compilation settings //! - `codegen-units = 1` //! - `lto = 'fat'` //! - `opt-level = 'z'` //! - compilation target: `thumbv7em-none-eabihf` //! - CPU: ARM Cortex-M4F //! //! - test program: //! //! ``` no_run //! use heapless::box_pool; //! //! box_pool!(P: ()); // or `arc_pool!` or `object_pool!` //! //! bkpt(); //! let res = P.alloc(()); //! bkpt(); //! //! if let Ok(boxed) = res { //! bkpt(); //! drop(boxed); //! bkpt(); //! } //! # fn bkpt() {} //! ``` //! //! - measurement method: the cycle counter (CYCCNT) register was sampled each time a breakpoint //! (`bkpt`) was hit. the difference between the "after" and the "before" value of CYCCNT yields the //! execution time in clock cycles. //! //! | API | clock cycles | //! |------------------------------|--------------| //! | `BoxPool::alloc` | 23 | //! | `pool::boxed::Box::drop` | 23 | //! | `ArcPool::alloc` | 28 | //! | `pool::arc::Arc::drop` | 59 | //! | `ObjectPool::request` | 23 | //! | `pool::object::Object::drop` | 23 | //! //! Note that the execution time won't include `T`'s initialization nor `T`'s destructor which will //! be present in the general case for `Box` and `Arc`. 
mod treiber; pub mod arc; pub mod boxed; pub mod object; heapless-0.8.0/src/sealed.rs000064400000000000000000000025631046102023000140350ustar 00000000000000#[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn smaller_than() { Assert::::LESS; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn greater_than_eq_0() { Assert::::GREATER_EQ; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn greater_than_0() { Assert::::GREATER; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn greater_than_1() { Assert::::GREATER; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn power_of_two() { Assert::::GREATER; Assert::::POWER_OF_TWO; } #[allow(dead_code)] /// Const assert hack pub struct Assert; #[allow(dead_code)] impl Assert { /// Const assert hack pub const GREATER_EQ: usize = L - R; /// Const assert hack pub const LESS_EQ: usize = R - L; /// Const assert hack pub const NOT_EQ: isize = 0 / (R as isize - L as isize); /// Const assert hack pub const EQ: usize = (R - L) + (L - R); /// Const assert hack pub const GREATER: usize = L - R - 1; /// Const assert hack pub const LESS: usize = R - L - 1; /// Const assert hack pub const POWER_OF_TWO: usize = 0 - (L & (L - 1)); } heapless-0.8.0/src/ser.rs000064400000000000000000000055031046102023000133660ustar 00000000000000use core::hash::{BuildHasher, Hash}; use crate::{ binary_heap::Kind as BinaryHeapKind, BinaryHeap, Deque, IndexMap, IndexSet, LinearMap, String, Vec, }; use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; // Sequential containers impl Serialize for BinaryHeap where T: Ord + Serialize, KIND: BinaryHeapKind, { fn serialize(&self, serializer: S) -> Result where S: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } impl Serialize for IndexSet where T: Eq + Hash + Serialize, S: BuildHasher, { fn serialize(&self, serializer: SER) -> Result where SER: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } impl Serialize for Vec where T: Serialize, { fn serialize(&self, serializer: S) -> Result where S: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } impl Serialize for Deque where T: Serialize, { fn serialize(&self, serializer: S) -> Result where S: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } // Dictionaries impl Serialize for IndexMap where K: Eq + Hash + Serialize, S: BuildHasher, V: Serialize, { fn serialize(&self, serializer: SER) -> Result where SER: Serializer, { let mut map = serializer.serialize_map(Some(self.len()))?; for (k, v) in self { map.serialize_entry(k, v)?; } map.end() } } impl Serialize for LinearMap where K: Eq + Serialize, V: Serialize, { fn serialize(&self, serializer: SER) -> Result where SER: Serializer, { let mut map = serializer.serialize_map(Some(self.len()))?; for (k, v) in self { map.serialize_entry(k, v)?; } map.end() } } // String containers impl Serialize for String { fn serialize(&self, serializer: S) -> Result where S: Serializer, { serializer.serialize_str(&*self) } } heapless-0.8.0/src/sorted_linked_list.rs000064400000000000000000000577241046102023000164720ustar 00000000000000//! 
A fixed sorted priority linked list, similar to [`BinaryHeap`] but with different properties //! on `push`, `pop`, etc. //! For example, the sorting of the list will never `memcpy` the underlying value, so having large //! objects in the list will not cause a performance hit. //! //! # Examples //! //! ``` //! use heapless::sorted_linked_list::{SortedLinkedList, Max}; //! let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); //! //! // The largest value will always be first //! ll.push(1).unwrap(); //! assert_eq!(ll.peek(), Some(&1)); //! //! ll.push(2).unwrap(); //! assert_eq!(ll.peek(), Some(&2)); //! //! ll.push(3).unwrap(); //! assert_eq!(ll.peek(), Some(&3)); //! //! // This will not fit in the queue. //! assert_eq!(ll.push(4), Err(4)); //! ``` //! //! [`BinaryHeap`]: `crate::binary_heap::BinaryHeap` use core::cmp::Ordering; use core::fmt; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::ops::{Deref, DerefMut}; use core::ptr; /// Trait for defining an index for the linked list, never implemented by users. pub trait SortedLinkedListIndex: Copy { #[doc(hidden)] unsafe fn new_unchecked(val: usize) -> Self; #[doc(hidden)] unsafe fn get_unchecked(self) -> usize; #[doc(hidden)] fn option(self) -> Option; #[doc(hidden)] fn none() -> Self; } /// Marker for Min sorted [`SortedLinkedList`]. pub struct Min; /// Marker for Max sorted [`SortedLinkedList`]. pub struct Max; /// The linked list kind: min-list or max-list pub trait Kind: private::Sealed { #[doc(hidden)] fn ordering() -> Ordering; } impl Kind for Min { fn ordering() -> Ordering { Ordering::Less } } impl Kind for Max { fn ordering() -> Ordering { Ordering::Greater } } /// Sealed traits mod private { pub trait Sealed {} } impl private::Sealed for Max {} impl private::Sealed for Min {} /// A node in the [`SortedLinkedList`]. pub struct Node { val: MaybeUninit, next: Idx, } /// The linked list. pub struct SortedLinkedList where Idx: SortedLinkedListIndex, { list: [Node; N], head: Idx, free: Idx, _kind: PhantomData, } // Internal macro for generating indexes for the linkedlist and const new for the linked list macro_rules! impl_index_and_const_new { ($name:ident, $ty:ty, $new_name:ident, $max_val:expr) => { /// Index for the [`SortedLinkedList`] with specific backing storage. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct $name($ty); impl SortedLinkedListIndex for $name { #[inline(always)] unsafe fn new_unchecked(val: usize) -> Self { Self::new_unchecked(val as $ty) } /// This is only valid if `self.option()` is not `None`. #[inline(always)] unsafe fn get_unchecked(self) -> usize { self.0 as usize } #[inline(always)] fn option(self) -> Option { if self.0 == <$ty>::MAX { None } else { Some(self.0 as usize) } } #[inline(always)] fn none() -> Self { Self::none() } } impl $name { /// Needed for a `const fn new()`. #[inline] const unsafe fn new_unchecked(value: $ty) -> Self { $name(value) } /// Needed for a `const fn new()`. #[inline] const fn none() -> Self { $name(<$ty>::MAX) } } impl SortedLinkedList { const UNINIT: Node = Node { val: MaybeUninit::uninit(), next: $name::none(), }; /// Create a new linked list. 
pub const fn $new_name() -> Self { // Const assert N < MAX crate::sealed::smaller_than::(); let mut list = SortedLinkedList { list: [Self::UNINIT; N], head: $name::none(), free: unsafe { $name::new_unchecked(0) }, _kind: PhantomData, }; if N == 0 { list.free = $name::none(); return list; } let mut free = 0; // Initialize indexes while free < N - 1 { list.list[free].next = unsafe { $name::new_unchecked(free as $ty + 1) }; free += 1; } list } } }; } impl_index_and_const_new!(LinkedIndexU8, u8, new_u8, { u8::MAX as usize - 1 }); impl_index_and_const_new!(LinkedIndexU16, u16, new_u16, { u16::MAX as usize - 1 }); impl_index_and_const_new!(LinkedIndexUsize, usize, new_usize, { usize::MAX - 1 }); impl SortedLinkedList where Idx: SortedLinkedListIndex, { /// Internal access helper #[inline(always)] fn node_at(&self, index: usize) -> &Node { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.list.get_unchecked(index) } } /// Internal access helper #[inline(always)] fn node_at_mut(&mut self, index: usize) -> &mut Node { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.list.get_unchecked_mut(index) } } /// Internal access helper #[inline(always)] fn write_data_in_node_at(&mut self, index: usize, data: T) { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.node_at_mut(index).val.as_mut_ptr().write(data); } } /// Internal access helper #[inline(always)] fn read_data_in_node_at(&self, index: usize) -> &T { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { &*self.node_at(index).val.as_ptr() } } /// Internal access helper #[inline(always)] fn read_mut_data_in_node_at(&mut self, index: usize) -> &mut T { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { &mut *self.node_at_mut(index).val.as_mut_ptr() } } /// Internal access helper #[inline(always)] fn extract_data_in_node_at(&mut self, index: usize) -> T { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.node_at(index).val.as_ptr().read() } } } impl SortedLinkedList where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { /// Pushes a value onto the list without checking if the list is full. /// /// Complexity is worst-case `O(N)`. /// /// # Safety /// /// Assumes that the list is not full. pub unsafe fn push_unchecked(&mut self, value: T) { let new = self.free.get_unchecked(); // Store the data and update the next free spot self.write_data_in_node_at(new, value); self.free = self.node_at(new).next; if let Some(head) = self.head.option() { // Check if we need to replace head if self .read_data_in_node_at(head) .cmp(self.read_data_in_node_at(new)) != K::ordering() { self.node_at_mut(new).next = self.head; self.head = Idx::new_unchecked(new); } else { // It's not head, search the list for the correct placement let mut current = head; while let Some(next) = self.node_at(current).next.option() { if self .read_data_in_node_at(next) .cmp(self.read_data_in_node_at(new)) != K::ordering() { break; } current = next; } self.node_at_mut(new).next = self.node_at(current).next; self.node_at_mut(current).next = Idx::new_unchecked(new); } } else { self.node_at_mut(new).next = self.head; self.head = Idx::new_unchecked(new); } } /// Pushes an element to the linked list and sorts it into place. /// /// Complexity is worst-case `O(N)`. 
/// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// // The largest value will always be first /// ll.push(1).unwrap(); /// assert_eq!(ll.peek(), Some(&1)); /// /// ll.push(2).unwrap(); /// assert_eq!(ll.peek(), Some(&2)); /// /// ll.push(3).unwrap(); /// assert_eq!(ll.peek(), Some(&3)); /// /// // This will not fit in the queue. /// assert_eq!(ll.push(4), Err(4)); /// ``` pub fn push(&mut self, value: T) -> Result<(), T> { if !self.is_full() { Ok(unsafe { self.push_unchecked(value) }) } else { Err(value) } } /// Get an iterator over the sorted list. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// /// let mut iter = ll.iter(); /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), None); /// ``` pub fn iter(&self) -> Iter<'_, T, Idx, K, N> { Iter { list: self, index: self.head, } } /// Find an element in the list that can be changed and resorted. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// ll.push(3).unwrap(); /// /// // Find a value and update it /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// *find += 1000; /// find.finish(); /// /// assert_eq!(ll.pop(), Ok(1002)); /// assert_eq!(ll.pop(), Ok(3)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` pub fn find_mut(&mut self, mut f: F) -> Option> where F: FnMut(&T) -> bool, { let head = self.head.option()?; // Special-case, first element if f(self.read_data_in_node_at(head)) { return Some(FindMut { is_head: true, prev_index: Idx::none(), index: self.head, list: self, maybe_changed: false, }); } let mut current = head; while let Some(next) = self.node_at(current).next.option() { if f(self.read_data_in_node_at(next)) { return Some(FindMut { is_head: false, prev_index: unsafe { Idx::new_unchecked(current) }, index: unsafe { Idx::new_unchecked(next) }, list: self, maybe_changed: false, }); } current = next; } None } /// Peek at the first element. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max, Min}; /// let mut ll_max: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// // The largest value will always be first /// ll_max.push(1).unwrap(); /// assert_eq!(ll_max.peek(), Some(&1)); /// ll_max.push(2).unwrap(); /// assert_eq!(ll_max.peek(), Some(&2)); /// ll_max.push(3).unwrap(); /// assert_eq!(ll_max.peek(), Some(&3)); /// /// let mut ll_min: SortedLinkedList<_, _, Min, 3> = SortedLinkedList::new_usize(); /// /// // The Smallest value will always be first /// ll_min.push(3).unwrap(); /// assert_eq!(ll_min.peek(), Some(&3)); /// ll_min.push(2).unwrap(); /// assert_eq!(ll_min.peek(), Some(&2)); /// ll_min.push(1).unwrap(); /// assert_eq!(ll_min.peek(), Some(&1)); /// ``` pub fn peek(&self) -> Option<&T> { self.head .option() .map(|head| self.read_data_in_node_at(head)) } /// Pop an element from the list without checking so the list is not empty. /// /// # Safety /// /// Assumes that the list is not empty. 
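    ///
    /// # Example
    ///
    /// A minimal sketch of sound usage (illustrative; the preceding `push` guarantees the list
    /// is non-empty, which is what makes the `unsafe` call acceptable here):
    ///
    /// ```
    /// use heapless::sorted_linked_list::{SortedLinkedList, Max};
    /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
    ///
    /// ll.push(1).unwrap();
    /// // SAFETY: the list contains exactly one element, so it is not empty.
    /// assert_eq!(unsafe { ll.pop_unchecked() }, 1);
    /// ```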
pub unsafe fn pop_unchecked(&mut self) -> T { let head = self.head.get_unchecked(); let current = head; self.head = self.node_at(head).next; self.node_at_mut(current).next = self.free; self.free = Idx::new_unchecked(current); self.extract_data_in_node_at(current) } /// Pops the first element in the list. /// /// Complexity is worst-case `O(1)`. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// /// assert_eq!(ll.pop(), Ok(2)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` pub fn pop(&mut self) -> Result { if !self.is_empty() { Ok(unsafe { self.pop_unchecked() }) } else { Err(()) } } /// Checks if the linked list is full. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// assert_eq!(ll.is_full(), false); /// /// ll.push(1).unwrap(); /// assert_eq!(ll.is_full(), false); /// ll.push(2).unwrap(); /// assert_eq!(ll.is_full(), false); /// ll.push(3).unwrap(); /// assert_eq!(ll.is_full(), true); /// ``` #[inline] pub fn is_full(&self) -> bool { self.free.option().is_none() } /// Checks if the linked list is empty. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// assert_eq!(ll.is_empty(), true); /// /// ll.push(1).unwrap(); /// assert_eq!(ll.is_empty(), false); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.head.option().is_none() } } /// Iterator for the linked list. pub struct Iter<'a, T, Idx, K, const N: usize> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { list: &'a SortedLinkedList, index: Idx, } impl<'a, T, Idx, K, const N: usize> Iterator for Iter<'a, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { type Item = &'a T; fn next(&mut self) -> Option { let index = self.index.option()?; let node = self.list.node_at(index); self.index = node.next; Some(self.list.read_data_in_node_at(index)) } } /// Comes from [`SortedLinkedList::find_mut`]. pub struct FindMut<'a, T, Idx, K, const N: usize> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { list: &'a mut SortedLinkedList, is_head: bool, prev_index: Idx, index: Idx, maybe_changed: bool, } impl<'a, T, Idx, K, const N: usize> FindMut<'a, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { fn pop_internal(&mut self) -> T { if self.is_head { // If it is the head element, we can do a normal pop unsafe { self.list.pop_unchecked() } } else { // Somewhere in the list let prev = unsafe { self.prev_index.get_unchecked() }; let curr = unsafe { self.index.get_unchecked() }; // Re-point the previous index self.list.node_at_mut(prev).next = self.list.node_at_mut(curr).next; // Release the index into the free queue self.list.node_at_mut(curr).next = self.list.free; self.list.free = self.index; self.list.extract_data_in_node_at(curr) } } /// This will pop the element from the list. /// /// Complexity is worst-case `O(1)`. 
/// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// ll.push(3).unwrap(); /// /// // Find a value and update it /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// find.pop(); /// /// assert_eq!(ll.pop(), Ok(3)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` #[inline] pub fn pop(mut self) -> T { self.pop_internal() } /// This will resort the element into the correct position in the list if needed. The resorting /// will only happen if the element has been accessed mutably. /// /// Same as calling `drop`. /// /// Complexity is worst-case `O(N)`. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// ll.push(3).unwrap(); /// /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// find.finish(); // No resort, we did not access the value. /// /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// *find += 1000; /// find.finish(); // Will resort, we accessed (and updated) the value. /// /// assert_eq!(ll.pop(), Ok(1002)); /// assert_eq!(ll.pop(), Ok(3)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` #[inline] pub fn finish(self) { drop(self) } } impl Drop for FindMut<'_, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { fn drop(&mut self) { // Only resort the list if the element has changed if self.maybe_changed { let val = self.pop_internal(); unsafe { self.list.push_unchecked(val) }; } } } impl Deref for FindMut<'_, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { type Target = T; fn deref(&self) -> &Self::Target { self.list .read_data_in_node_at(unsafe { self.index.get_unchecked() }) } } impl DerefMut for FindMut<'_, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { fn deref_mut(&mut self) -> &mut Self::Target { self.maybe_changed = true; self.list .read_mut_data_in_node_at(unsafe { self.index.get_unchecked() }) } } // /// Useful for debug during development. 
// impl fmt::Debug for FindMut<'_, T, Idx, K, N> // where // T: Ord + core::fmt::Debug, // Idx: SortedLinkedListIndex, // K: Kind, // { // fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // f.debug_struct("FindMut") // .field("prev_index", &self.prev_index.option()) // .field("index", &self.index.option()) // .field( // "prev_value", // &self // .list // .read_data_in_node_at(self.prev_index.option().unwrap()), // ) // .field( // "value", // &self.list.read_data_in_node_at(self.index.option().unwrap()), // ) // .finish() // } // } impl fmt::Debug for SortedLinkedList where T: Ord + core::fmt::Debug, Idx: SortedLinkedListIndex, K: Kind, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } impl Drop for SortedLinkedList where Idx: SortedLinkedListIndex, { fn drop(&mut self) { let mut index = self.head; while let Some(i) = index.option() { let node = self.node_at_mut(i); index = node.next; unsafe { ptr::drop_in_place(node.val.as_mut_ptr()); } } } } #[cfg(test)] mod tests { use super::*; #[test] fn const_new() { static mut _V1: SortedLinkedList = SortedLinkedList::new_u8(); static mut _V2: SortedLinkedList = SortedLinkedList::new_u16(); static mut _V3: SortedLinkedList = SortedLinkedList::new_usize(); } #[test] fn test_peek() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); assert_eq!(ll.peek().unwrap(), &1); ll.push(2).unwrap(); assert_eq!(ll.peek().unwrap(), &2); ll.push(3).unwrap(); assert_eq!(ll.peek().unwrap(), &3); let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(2).unwrap(); assert_eq!(ll.peek().unwrap(), &2); ll.push(1).unwrap(); assert_eq!(ll.peek().unwrap(), &1); ll.push(3).unwrap(); assert_eq!(ll.peek().unwrap(), &1); } #[test] fn test_full() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); ll.push(2).unwrap(); ll.push(3).unwrap(); assert!(ll.is_full()) } #[test] fn test_empty() { let ll: SortedLinkedList = SortedLinkedList::new_usize(); assert!(ll.is_empty()) } #[test] fn test_zero_size() { let ll: SortedLinkedList = SortedLinkedList::new_usize(); assert!(ll.is_empty()); assert!(ll.is_full()); } #[test] fn test_rejected_push() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); ll.push(2).unwrap(); ll.push(3).unwrap(); // This won't fit let r = ll.push(4); assert_eq!(r, Err(4)); } #[test] fn test_updating() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); ll.push(2).unwrap(); ll.push(3).unwrap(); let mut find = ll.find_mut(|v| *v == 2).unwrap(); *find += 1000; find.finish(); assert_eq!(ll.peek().unwrap(), &1002); let mut find = ll.find_mut(|v| *v == 3).unwrap(); *find += 1000; find.finish(); assert_eq!(ll.peek().unwrap(), &1003); // Remove largest element ll.find_mut(|v| *v == 1003).unwrap().pop(); assert_eq!(ll.peek().unwrap(), &1002); } #[test] fn test_updating_1() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); let v = ll.pop().unwrap(); assert_eq!(v, 1); } #[test] fn test_updating_2() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); let mut find = ll.find_mut(|v| *v == 1).unwrap(); *find += 1000; find.finish(); assert_eq!(ll.peek().unwrap(), &1001); } } heapless-0.8.0/src/spsc.rs000064400000000000000000000626321046102023000135530ustar 00000000000000//! Fixed capacity Single Producer Single Consumer (SPSC) queue //! //! Implementation based on //! //! # Portability //! //! 
//! This module requires CAS atomic instructions which are not available on all architectures
//! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can,
//! however, be emulated with [`portable-atomic`](https://crates.io/crates/portable-atomic), which
//! is enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and
//! `riscv32` targets.
//!
//! # Examples
//!
//! - `Queue` can be used as a plain queue
//!
//! ```
//! use heapless::spsc::Queue;
//!
//! let mut rb: Queue<u8, 4> = Queue::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
//! assert!(rb.enqueue(2).is_ok());
//! assert!(rb.enqueue(3).is_err()); // full
//!
//! assert_eq!(rb.dequeue(), Some(0));
//! ```
//!
//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode.
//!
//! "no alloc" applications can create a `&'static mut` reference to a `Queue` -- using a static
//! variable -- and then `split` it: this consumes the static reference. The resulting `Consumer`
//! and `Producer` can then be moved into different execution contexts (threads, interrupt
//! handlers, etc.)
//!
//! ```
//! use heapless::spsc::{Producer, Queue};
//!
//! enum Event { A, B }
//!
//! fn main() {
//!     let queue: &'static mut Queue<Event, 4> = {
//!         static mut Q: Queue<Event, 4> = Queue::new();
//!         unsafe { &mut Q }
//!     };
//!
//!     let (producer, mut consumer) = queue.split();
//!
//!     // `producer` can be moved into `interrupt_handler` using a static mutex or the mechanism
//!     // provided by the concurrency framework you are using (e.g. a resource in RTIC)
//!
//!     loop {
//!         match consumer.dequeue() {
//!             Some(Event::A) => { /* .. */ },
//!             Some(Event::B) => { /* .. */ },
//!             None => { /* sleep */ },
//!         }
//!         # break
//!     }
//! }
//!
//! // this is a different execution context that can preempt `main`
//! fn interrupt_handler(producer: &mut Producer<'static, Event, 4>) {
//!     # let condition = true;
//!
//!     // ..
//!
//!     if condition {
//!         producer.enqueue(Event::A).ok().unwrap();
//!     } else {
//!         producer.enqueue(Event::B).ok().unwrap();
//!     }
//!
//!     // ..
//! }
//! ```
//!
//! # Benchmarks
//!
//! Measured on an ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles
//!
//! `-C opt-level`          |`3`|
//! ------------------------|---|
//! `Consumer<u8>::dequeue` | 15|
//! `Queue<u8>::dequeue`    | 12|
//! `Producer<u8>::enqueue` | 16|
//! `Queue<u8>::enqueue`    | 14|
//!
//! - All execution times are in clock cycles. 1 clock cycle = 125 ns.
//! - Execution time is *dependent* on `mem::size_of::<T>()`. Both operations include one
//!   `memcpy(T)` in their successful path.
//! - The optimization level is indicated in the first row.
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//!   and `Ok` is returned by `enqueue`).

use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr};

#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;

use atomic::{AtomicUsize, Ordering};

/// A statically allocated single producer single consumer queue with a capacity of `N - 1` elements
///
/// *IMPORTANT*: To get better performance use a value for `N` that is a power of 2 (e.g. `16`,
/// `32`, etc.).
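///
/// A short usage sketch (illustrative; the concrete `u8`/`4` parameters are arbitrary stand-ins
/// for the `T`/`N` generics): a queue with `N` slots stores at most `N - 1` elements.
///
/// ```
/// use heapless::spsc::Queue;
///
/// let mut q: Queue<u8, 4> = Queue::new();
/// assert_eq!(q.capacity(), 3); // one slot is reserved by the implementation
///
/// assert!(q.enqueue(10).is_ok());
/// assert!(q.enqueue(20).is_ok());
/// assert!(q.enqueue(30).is_ok());
/// assert!(q.enqueue(40).is_err()); // full after `N - 1` elements
/// ```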
pub struct Queue { // this is from where we dequeue items pub(crate) head: AtomicUsize, // this is where we enqueue new items pub(crate) tail: AtomicUsize, pub(crate) buffer: [UnsafeCell>; N], } impl Queue { const INIT: UnsafeCell> = UnsafeCell::new(MaybeUninit::uninit()); #[inline] fn increment(val: usize) -> usize { (val + 1) % N } /// Creates an empty queue with a fixed capacity of `N - 1` pub const fn new() -> Self { // Const assert N > 1 crate::sealed::greater_than_1::(); Queue { head: AtomicUsize::new(0), tail: AtomicUsize::new(0), buffer: [Self::INIT; N], } } /// Returns the maximum number of elements the queue can hold #[inline] pub const fn capacity(&self) -> usize { N - 1 } /// Returns the number of elements in the queue #[inline] pub fn len(&self) -> usize { let current_head = self.head.load(Ordering::Relaxed); let current_tail = self.tail.load(Ordering::Relaxed); current_tail.wrapping_sub(current_head).wrapping_add(N) % N } /// Returns `true` if the queue is empty #[inline] pub fn is_empty(&self) -> bool { self.head.load(Ordering::Relaxed) == self.tail.load(Ordering::Relaxed) } /// Returns `true` if the queue is full #[inline] pub fn is_full(&self) -> bool { Self::increment(self.tail.load(Ordering::Relaxed)) == self.head.load(Ordering::Relaxed) } /// Iterates from the front of the queue to the back pub fn iter(&self) -> Iter<'_, T, N> { Iter { rb: self, index: 0, len: self.len(), } } /// Returns an iterator that allows modifying each value pub fn iter_mut(&mut self) -> IterMut<'_, T, N> { let len = self.len(); IterMut { rb: self, index: 0, len, } } /// Adds an `item` to the end of the queue /// /// Returns back the `item` if the queue is full #[inline] pub fn enqueue(&mut self, val: T) -> Result<(), T> { unsafe { self.inner_enqueue(val) } } /// Returns the item in the front of the queue, or `None` if the queue is empty #[inline] pub fn dequeue(&mut self) -> Option { unsafe { self.inner_dequeue() } } /// Returns a reference to the item in the front of the queue without dequeuing, or /// `None` if the queue is empty. /// /// # Examples /// ``` /// use heapless::spsc::Queue; /// /// let mut queue: Queue = Queue::new(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); /// assert_eq!(Some(&1), consumer.peek()); /// assert_eq!(Some(1), consumer.dequeue()); /// assert_eq!(None, consumer.peek()); /// ``` pub fn peek(&self) -> Option<&T> { if !self.is_empty() { let head = self.head.load(Ordering::Relaxed); Some(unsafe { &*(self.buffer.get_unchecked(head).get() as *const T) }) } else { None } } // The memory for enqueueing is "owned" by the tail pointer. // NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue // items without doing pointer arithmetic and accessing internal fields of this type. unsafe fn inner_enqueue(&self, val: T) -> Result<(), T> { let current_tail = self.tail.load(Ordering::Relaxed); let next_tail = Self::increment(current_tail); if next_tail != self.head.load(Ordering::Acquire) { (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); self.tail.store(next_tail, Ordering::Release); Ok(()) } else { Err(val) } } // The memory for enqueueing is "owned" by the tail pointer. // NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue // items without doing pointer arithmetic and accessing internal fields of this type. 
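    // (Illustrative note on the synchronization scheme used by `inner_enqueue` above and
    // `inner_dequeue` below:) each side loads the index it "owns" -- the producer owns `tail`,
    // the consumer owns `head` -- with `Relaxed` ordering, because only that side ever stores to
    // it. The `Acquire` load of the *other* side's index pairs with that side's `Release` store,
    // which publishes the written element before the updated index becomes visible.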
unsafe fn inner_enqueue_unchecked(&self, val: T) { let current_tail = self.tail.load(Ordering::Relaxed); (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); self.tail .store(Self::increment(current_tail), Ordering::Release); } /// Adds an `item` to the end of the queue, without checking if it's full /// /// # Unsafety /// /// If the queue is full this operation will leak a value (T's destructor won't run on /// the value that got overwritten by `item`), *and* will allow the `dequeue` operation /// to create a copy of `item`, which could result in `T`'s destructor running on `item` /// twice. pub unsafe fn enqueue_unchecked(&mut self, val: T) { self.inner_enqueue_unchecked(val) } // The memory for dequeuing is "owned" by the head pointer,. // NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue // items without doing pointer arithmetic and accessing internal fields of this type. unsafe fn inner_dequeue(&self) -> Option { let current_head = self.head.load(Ordering::Relaxed); if current_head == self.tail.load(Ordering::Acquire) { None } else { let v = (self.buffer.get_unchecked(current_head).get() as *const T).read(); self.head .store(Self::increment(current_head), Ordering::Release); Some(v) } } // The memory for dequeuing is "owned" by the head pointer,. // NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue // items without doing pointer arithmetic and accessing internal fields of this type. unsafe fn inner_dequeue_unchecked(&self) -> T { let current_head = self.head.load(Ordering::Relaxed); let v = (self.buffer.get_unchecked(current_head).get() as *const T).read(); self.head .store(Self::increment(current_head), Ordering::Release); v } /// Returns the item in the front of the queue, without checking if there is something in the /// queue /// /// # Unsafety /// /// If the queue is empty this operation will return uninitialized memory. 
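    ///
    /// A hedged usage sketch (illustrative only; `u8`/`4` are arbitrary): pair this call with
    /// your own emptiness check.
    ///
    /// ```
    /// use heapless::spsc::Queue;
    ///
    /// let mut q: Queue<u8, 4> = Queue::new();
    /// q.enqueue(42).unwrap();
    ///
    /// if !q.is_empty() {
    ///     // SAFETY: the queue was checked to be non-empty just above.
    ///     assert_eq!(unsafe { q.dequeue_unchecked() }, 42);
    /// }
    /// ```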
pub unsafe fn dequeue_unchecked(&mut self) -> T { self.inner_dequeue_unchecked() } /// Splits a queue into producer and consumer endpoints pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) { (Producer { rb: self }, Consumer { rb: self }) } } impl Default for Queue { fn default() -> Self { Self::new() } } impl Clone for Queue where T: Clone, { fn clone(&self) -> Self { let mut new: Queue = Queue::new(); for s in self.iter() { unsafe { // NOTE(unsafe) new.capacity() == self.capacity() >= self.len() // no overflow possible new.enqueue_unchecked(s.clone()); } } new } } impl PartialEq> for Queue where T: PartialEq, { fn eq(&self, other: &Queue) -> bool { self.len() == other.len() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2) } } impl Eq for Queue where T: Eq {} /// An iterator over the items of a queue pub struct Iter<'a, T, const N: usize> { rb: &'a Queue, index: usize, len: usize, } impl<'a, T, const N: usize> Clone for Iter<'a, T, N> { fn clone(&self) -> Self { Self { rb: self.rb, index: self.index, len: self.len, } } } /// A mutable iterator over the items of a queue pub struct IterMut<'a, T, const N: usize> { rb: &'a mut Queue, index: usize, len: usize, } impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> { type Item = &'a T; fn next(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); let i = (head + self.index) % N; self.index += 1; Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) }) } else { None } } } impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> { type Item = &'a mut T; fn next(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); let i = (head + self.index) % N; self.index += 1; Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) }) } else { None } } } impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> { fn next_back(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); // self.len > 0, since it's larger than self.index > 0 let i = (head + self.len - 1) % N; self.len -= 1; Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) }) } else { None } } } impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> { fn next_back(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); // self.len > 0, since it's larger than self.index > 0 let i = (head + self.len - 1) % N; self.len -= 1; Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) }) } else { None } } } impl Drop for Queue { fn drop(&mut self) { for item in self { unsafe { ptr::drop_in_place(item); } } } } impl fmt::Debug for Queue where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } impl hash::Hash for Queue where T: hash::Hash, { fn hash(&self, state: &mut H) { // iterate over self in order for t in self.iter() { hash::Hash::hash(t, state); } } } impl<'a, T, const N: usize> IntoIterator for &'a Queue { type Item = &'a T; type IntoIter = Iter<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, T, const N: usize> IntoIterator for &'a mut Queue { type Item = &'a mut T; type IntoIter = IterMut<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } /// A queue "consumer"; it can dequeue items from the queue /// NOTE the consumer semantically owns the `head` pointer of the queue pub struct Consumer<'a, T, 
const N: usize> { rb: &'a Queue, } unsafe impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where T: Send {} /// A queue "producer"; it can enqueue items into the queue /// NOTE the producer semantically owns the `tail` pointer of the queue pub struct Producer<'a, T, const N: usize> { rb: &'a Queue, } unsafe impl<'a, T, const N: usize> Send for Producer<'a, T, N> where T: Send {} impl<'a, T, const N: usize> Consumer<'a, T, N> { /// Returns the item in the front of the queue, or `None` if the queue is empty #[inline] pub fn dequeue(&mut self) -> Option { unsafe { self.rb.inner_dequeue() } } /// Returns the item in the front of the queue, without checking if there are elements in the /// queue /// /// See [`Queue::dequeue_unchecked`] for safety #[inline] pub unsafe fn dequeue_unchecked(&mut self) -> T { self.rb.inner_dequeue_unchecked() } /// Returns if there are any items to dequeue. When this returns `true`, at least the /// first subsequent dequeue will succeed #[inline] pub fn ready(&self) -> bool { !self.rb.is_empty() } /// Returns the number of elements in the queue #[inline] pub fn len(&self) -> usize { self.rb.len() } /// Returns the maximum number of elements the queue can hold #[inline] pub fn capacity(&self) -> usize { self.rb.capacity() } /// Returns the item in the front of the queue without dequeuing, or `None` if the queue is /// empty /// /// # Examples /// ``` /// use heapless::spsc::Queue; /// /// let mut queue: Queue = Queue::new(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); /// assert_eq!(Some(&1), consumer.peek()); /// assert_eq!(Some(1), consumer.dequeue()); /// assert_eq!(None, consumer.peek()); /// ``` #[inline] pub fn peek(&self) -> Option<&T> { self.rb.peek() } } impl<'a, T, const N: usize> Producer<'a, T, N> { /// Adds an `item` to the end of the queue, returns back the `item` if the queue is full #[inline] pub fn enqueue(&mut self, val: T) -> Result<(), T> { unsafe { self.rb.inner_enqueue(val) } } /// Adds an `item` to the end of the queue, without checking if the queue is full /// /// See [`Queue::enqueue_unchecked`] for safety #[inline] pub unsafe fn enqueue_unchecked(&mut self, val: T) { self.rb.inner_enqueue_unchecked(val) } /// Returns if there is any space to enqueue a new item. When this returns true, at /// least the first subsequent enqueue will succeed. 
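    ///
    /// A small illustrative sketch (the `u8`/`4` parameters are arbitrary):
    ///
    /// ```
    /// use heapless::spsc::Queue;
    ///
    /// let mut queue: Queue<u8, 4> = Queue::new();
    /// let (mut producer, _consumer) = queue.split();
    ///
    /// assert!(producer.ready());
    /// producer.enqueue(1).unwrap();
    /// assert!(producer.ready()); // capacity is 3, so there is still room
    /// ```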
#[inline] pub fn ready(&self) -> bool { !self.rb.is_full() } /// Returns the number of elements in the queue #[inline] pub fn len(&self) -> usize { self.rb.len() } /// Returns the maximum number of elements the queue can hold #[inline] pub fn capacity(&self) -> usize { self.rb.capacity() } } #[cfg(test)] mod tests { use std::hash::{Hash, Hasher}; use crate::spsc::Queue; #[test] fn full() { let mut rb: Queue = Queue::new(); assert_eq!(rb.is_full(), false); rb.enqueue(1).unwrap(); assert_eq!(rb.is_full(), false); rb.enqueue(2).unwrap(); assert_eq!(rb.is_full(), true); } #[test] fn empty() { let mut rb: Queue = Queue::new(); assert_eq!(rb.is_empty(), true); rb.enqueue(1).unwrap(); assert_eq!(rb.is_empty(), false); rb.enqueue(2).unwrap(); assert_eq!(rb.is_empty(), false); } #[test] #[cfg_attr(miri, ignore)] // too slow fn len() { let mut rb: Queue = Queue::new(); assert_eq!(rb.len(), 0); rb.enqueue(1).unwrap(); assert_eq!(rb.len(), 1); rb.enqueue(2).unwrap(); assert_eq!(rb.len(), 2); for _ in 0..1_000_000 { let v = rb.dequeue().unwrap(); println!("{}", v); rb.enqueue(v).unwrap(); assert_eq!(rb.len(), 2); } } #[test] #[cfg_attr(miri, ignore)] // too slow fn try_overflow() { const N: usize = 23; let mut rb: Queue = Queue::new(); for i in 0..N as i32 - 1 { rb.enqueue(i).unwrap(); } for _ in 0..1_000_000 { for i in 0..N as i32 - 1 { let d = rb.dequeue().unwrap(); assert_eq!(d, i); rb.enqueue(i).unwrap(); } } } #[test] fn sanity() { let mut rb: Queue = Queue::new(); let (mut p, mut c) = rb.split(); assert_eq!(p.ready(), true); assert_eq!(c.ready(), false); assert_eq!(c.dequeue(), None); p.enqueue(0).unwrap(); assert_eq!(c.dequeue(), Some(0)); } #[test] fn static_new() { static mut _Q: Queue = Queue::new(); } #[test] fn drop() { struct Droppable; impl Droppable { fn new() -> Self { unsafe { COUNT += 1; } Droppable } } impl Drop for Droppable { fn drop(&mut self) { unsafe { COUNT -= 1; } } } static mut COUNT: i32 = 0; { let mut v: Queue = Queue::new(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); v.dequeue().unwrap(); } assert_eq!(unsafe { COUNT }, 0); { let mut v: Queue = Queue::new(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); } assert_eq!(unsafe { COUNT }, 0); } #[test] fn iter() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.dequeue().unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); rb.enqueue(3).unwrap(); let mut items = rb.iter(); // assert_eq!(items.next(), Some(&0)); assert_eq!(items.next(), Some(&1)); assert_eq!(items.next(), Some(&2)); assert_eq!(items.next(), Some(&3)); assert_eq!(items.next(), None); } #[test] fn iter_double_ended() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); let mut items = rb.iter(); assert_eq!(items.next(), Some(&0)); assert_eq!(items.next_back(), Some(&2)); assert_eq!(items.next(), Some(&1)); assert_eq!(items.next(), None); assert_eq!(items.next_back(), None); } #[test] fn iter_mut() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); let mut items = rb.iter_mut(); assert_eq!(items.next(), Some(&mut 0)); assert_eq!(items.next(), Some(&mut 1)); assert_eq!(items.next(), Some(&mut 2)); assert_eq!(items.next(), None); } #[test] fn iter_mut_double_ended() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); let mut items = rb.iter_mut(); assert_eq!(items.next(), Some(&mut 0)); 
assert_eq!(items.next_back(), Some(&mut 2)); assert_eq!(items.next(), Some(&mut 1)); assert_eq!(items.next(), None); assert_eq!(items.next_back(), None); } #[test] fn wrap_around() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); rb.dequeue().unwrap(); rb.dequeue().unwrap(); rb.dequeue().unwrap(); rb.enqueue(3).unwrap(); rb.enqueue(4).unwrap(); assert_eq!(rb.len(), 2); } #[test] fn ready_flag() { let mut rb: Queue = Queue::new(); let (mut p, mut c) = rb.split(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); p.enqueue(0).unwrap(); assert_eq!(c.ready(), true); assert_eq!(p.ready(), true); p.enqueue(1).unwrap(); assert_eq!(c.ready(), true); assert_eq!(p.ready(), false); c.dequeue().unwrap(); assert_eq!(c.ready(), true); assert_eq!(p.ready(), true); c.dequeue().unwrap(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); } #[test] fn clone() { let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); let rb2 = rb1.clone(); assert_eq!(rb1.capacity(), rb2.capacity()); assert_eq!(rb1.len(), rb2.len()); assert!(rb1.iter().zip(rb2.iter()).all(|(v1, v2)| v1 == v2)); } #[test] fn eq() { // generate two queues with same content // but different buffer alignment let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); let mut rb2: Queue = Queue::new(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); assert!(rb1 == rb2); // test for symmetry assert!(rb2 == rb1); // test for changes in content rb1.enqueue(0).unwrap(); assert!(rb1 != rb2); rb2.enqueue(1).unwrap(); assert!(rb1 != rb2); // test for refexive relation assert!(rb1 == rb1); assert!(rb2 == rb2); } #[test] fn hash_equality() { // generate two queues with same content // but different buffer alignment let rb1 = { let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); rb1 }; let rb2 = { let mut rb2: Queue = Queue::new(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); rb2 }; let hash1 = { let mut hasher1 = hash32::FnvHasher::default(); rb1.hash(&mut hasher1); let hash1 = hasher1.finish(); hash1 }; let hash2 = { let mut hasher2 = hash32::FnvHasher::default(); rb2.hash(&mut hasher2); let hash2 = hasher2.finish(); hash2 }; assert_eq!(hash1, hash2); } } heapless-0.8.0/src/string.rs000064400000000000000000000523651046102023000141130ustar 00000000000000use core::{ cmp::Ordering, fmt, fmt::Write, hash, iter, ops, str::{self, Utf8Error}, }; use crate::Vec; /// A fixed capacity [`String`](https://doc.rust-lang.org/std/string/struct.String.html) pub struct String { vec: Vec, } impl String { /// Constructs a new, empty `String` with a fixed capacity of `N` bytes /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// // allocate the string on the stack /// let mut s: String<4> = String::new(); /// /// // allocate the string in a static variable /// static mut S: String<4> = String::new(); /// ``` #[inline] pub const fn new() -> Self { Self { vec: Vec::new() } } /// Convert UTF-8 bytes into a `String`. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::{String, Vec}; /// /// let mut sparkle_heart = Vec::::new(); /// sparkle_heart.extend_from_slice(&[240, 159, 146, 150]); /// /// let sparkle_heart: String<4> = String::from_utf8(sparkle_heart)?; /// assert_eq!("💖", sparkle_heart); /// # Ok::<(), core::str::Utf8Error>(()) /// ``` /// /// Invalid UTF-8: /// /// ``` /// use core::str::Utf8Error; /// use heapless::{String, Vec}; /// /// let mut vec = Vec::::new(); /// vec.extend_from_slice(&[0, 159, 146, 150]); /// /// let e: Utf8Error = String::from_utf8(vec).unwrap_err(); /// assert_eq!(e.valid_up_to(), 1); /// # Ok::<(), core::str::Utf8Error>(()) /// ``` #[inline] pub fn from_utf8(vec: Vec) -> Result { core::str::from_utf8(&vec)?; Ok(Self { vec }) } /// Convert UTF-8 bytes into a `String`, without checking that the string /// contains valid UTF-8. /// /// # Safety /// /// The bytes passed in must be valid UTF-8. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::{String, Vec}; /// /// let mut sparkle_heart = Vec::::new(); /// sparkle_heart.extend_from_slice(&[240, 159, 146, 150]); /// /// // Safety: `sparkle_heart` Vec is known to contain valid UTF-8 /// let sparkle_heart: String<4> = unsafe { String::from_utf8_unchecked(sparkle_heart) }; /// assert_eq!("💖", sparkle_heart); /// ``` #[inline] pub unsafe fn from_utf8_unchecked(vec: Vec) -> Self { Self { vec } } /// Converts a `String` into a byte vector. /// /// This consumes the `String`, so we do not need to copy its contents. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let s: String<4> = String::try_from("ab")?; /// let b = s.into_bytes(); /// assert!(b.len() == 2); /// /// assert_eq!(&['a' as u8, 'b' as u8], &b[..]); /// # Ok::<(), ()>(()) /// ``` #[inline] pub fn into_bytes(self) -> Vec { self.vec } /// Extracts a string slice containing the entire string. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<4> = String::try_from("ab")?; /// assert!(s.as_str() == "ab"); /// /// let _s = s.as_str(); /// // s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable /// # Ok::<(), ()>(()) /// ``` #[inline] pub fn as_str(&self) -> &str { unsafe { str::from_utf8_unchecked(self.vec.as_slice()) } } /// Converts a `String` into a mutable string slice. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<4> = String::try_from("ab")?; /// let s = s.as_mut_str(); /// s.make_ascii_uppercase(); /// # Ok::<(), ()>(()) /// ``` #[inline] pub fn as_mut_str(&mut self) -> &mut str { unsafe { str::from_utf8_unchecked_mut(self.vec.as_mut_slice()) } } /// Returns a mutable reference to the contents of this `String`. /// /// # Safety /// /// This function is unsafe because it does not check that the bytes passed /// to it are valid UTF-8. If this constraint is violated, it may cause /// memory unsafety issues with future users of the `String`, as the rest of /// the library assumes that `String`s are valid UTF-8. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("hello")?; /// /// unsafe { /// let vec = s.as_mut_vec(); /// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]); /// /// vec.reverse(); /// } /// assert_eq!(s, "olleh"); /// # Ok::<(), ()>(()) /// ``` pub unsafe fn as_mut_vec(&mut self) -> &mut Vec { &mut self.vec } /// Appends a given string slice onto the end of this `String`. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("foo")?; /// /// assert!(s.push_str("bar").is_ok()); /// /// assert_eq!("foobar", s); /// /// assert!(s.push_str("tender").is_err()); /// # Ok::<(), ()>(()) /// ``` #[inline] pub fn push_str(&mut self, string: &str) -> Result<(), ()> { self.vec.extend_from_slice(string.as_bytes()) } /// Returns the maximum number of elements the String can hold /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<4> = String::new(); /// assert!(s.capacity() == 4); /// ``` #[inline] pub fn capacity(&self) -> usize { self.vec.capacity() } /// Appends the given [`char`] to the end of this `String`. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("abc")?; /// /// s.push('1').unwrap(); /// s.push('2').unwrap(); /// s.push('3').unwrap(); /// /// assert!("abc123" == s.as_str()); /// /// assert_eq!("abc123", s); /// # Ok::<(), ()>(()) /// ``` #[inline] pub fn push(&mut self, c: char) -> Result<(), ()> { match c.len_utf8() { 1 => self.vec.push(c as u8).map_err(|_| {}), _ => self .vec .extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()), } } /// Shortens this `String` to the specified length. /// /// If `new_len` is greater than the string's current length, this has no /// effect. /// /// Note that this method has no effect on the allocated capacity /// of the string /// /// # Panics /// /// Panics if `new_len` does not lie on a [`char`] boundary. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("hello")?; /// /// s.truncate(2); /// /// assert_eq!("he", s); /// # Ok::<(), ()>(()) /// ``` #[inline] pub fn truncate(&mut self, new_len: usize) { if new_len <= self.len() { assert!(self.is_char_boundary(new_len)); self.vec.truncate(new_len) } } /// Removes the last character from the string buffer and returns it. /// /// Returns [`None`] if this `String` is empty. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("foo")?; /// /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('f')); /// /// assert_eq!(s.pop(), None); /// Ok::<(), ()>(()) /// ``` pub fn pop(&mut self) -> Option { let ch = self.chars().rev().next()?; // pop bytes that correspond to `ch` for _ in 0..ch.len_utf8() { unsafe { self.vec.pop_unchecked(); } } Some(ch) } /// Removes a [`char`] from this `String` at a byte position and returns it. /// /// Note: Because this shifts over the remaining elements, it has a /// worst-case performance of *O*(*n*). /// /// # Panics /// /// Panics if `idx` is larger than or equal to the `String`'s length, /// or if it does not lie on a [`char`] boundary. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("foo").unwrap(); /// /// assert_eq!(s.remove(0), 'f'); /// assert_eq!(s.remove(1), 'o'); /// assert_eq!(s.remove(0), 'o'); /// ``` #[inline] pub fn remove(&mut self, index: usize) -> char { let ch = match self[index..].chars().next() { Some(ch) => ch, None => panic!("cannot remove a char from the end of a string"), }; let next = index + ch.len_utf8(); let len = self.len(); let ptr = self.vec.as_mut_ptr(); unsafe { core::ptr::copy(ptr.add(next), ptr.add(index), len - next); self.vec.set_len(len - (next - index)); } ch } /// Truncates this `String`, removing all contents. /// /// While this means the `String` will have a length of zero, it does not /// touch its capacity. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::try_from("foo")?; /// /// s.clear(); /// /// assert!(s.is_empty()); /// assert_eq!(0, s.len()); /// assert_eq!(8, s.capacity()); /// Ok::<(), ()>(()) /// ``` #[inline] pub fn clear(&mut self) { self.vec.clear() } } impl Default for String { fn default() -> Self { Self::new() } } impl<'a, const N: usize> TryFrom<&'a str> for String { type Error = (); fn try_from(s: &'a str) -> Result { let mut new = String::new(); new.push_str(s)?; Ok(new) } } impl str::FromStr for String { type Err = (); fn from_str(s: &str) -> Result { let mut new = String::new(); new.push_str(s)?; Ok(new) } } impl iter::FromIterator for String { fn from_iter>(iter: T) -> Self { let mut new = String::new(); for c in iter { new.push(c).unwrap(); } new } } impl<'a, const N: usize> iter::FromIterator<&'a char> for String { fn from_iter>(iter: T) -> Self { let mut new = String::new(); for c in iter { new.push(*c).unwrap(); } new } } impl<'a, const N: usize> iter::FromIterator<&'a str> for String { fn from_iter>(iter: T) -> Self { let mut new = String::new(); for c in iter { new.push_str(c).unwrap(); } new } } impl Clone for String { fn clone(&self) -> Self { Self { vec: self.vec.clone(), } } } impl fmt::Debug for String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl fmt::Display for String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl hash::Hash for String { #[inline] fn hash(&self, hasher: &mut H) { ::hash(self, hasher) } } impl fmt::Write for String { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s).map_err(|_| fmt::Error) } fn write_char(&mut self, c: char) -> Result<(), fmt::Error> { self.push(c).map_err(|_| fmt::Error) } } impl ops::Deref for String { type Target = str; fn deref(&self) -> &str { self.as_str() } } impl ops::DerefMut for String { fn deref_mut(&mut self) -> &mut str { self.as_mut_str() } } impl AsRef for String { #[inline] fn as_ref(&self) -> &str { self } } impl AsRef<[u8]> for String { #[inline] fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl PartialEq> for String { fn eq(&self, rhs: &String) -> bool { str::eq(&**self, &**rhs) } fn ne(&self, rhs: &String) -> bool { str::ne(&**self, &**rhs) } } // String == str impl PartialEq for String { #[inline] fn eq(&self, other: &str) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &str) -> bool { str::ne(&self[..], &other[..]) } } // String == &'str impl PartialEq<&str> for String { #[inline] fn eq(&self, other: &&str) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &&str) -> bool { str::ne(&self[..], 
&other[..]) } } // str == String impl PartialEq> for str { #[inline] fn eq(&self, other: &String) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &String) -> bool { str::ne(&self[..], &other[..]) } } // &'str == String impl PartialEq> for &str { #[inline] fn eq(&self, other: &String) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &String) -> bool { str::ne(&self[..], &other[..]) } } impl Eq for String {} impl PartialOrd> for String { #[inline] fn partial_cmp(&self, other: &String) -> Option { PartialOrd::partial_cmp(&**self, &**other) } } impl Ord for String { #[inline] fn cmp(&self, other: &Self) -> Ordering { Ord::cmp(&**self, &**other) } } macro_rules! impl_try_from_num { ($num:ty, $size:expr) => { impl core::convert::TryFrom<$num> for String { type Error = (); fn try_from(s: $num) -> Result { let mut new = String::new(); write!(&mut new, "{}", s).map_err(|_| ())?; Ok(new) } } }; } impl_try_from_num!(i8, 4); impl_try_from_num!(i16, 6); impl_try_from_num!(i32, 11); impl_try_from_num!(i64, 20); impl_try_from_num!(u8, 3); impl_try_from_num!(u16, 5); impl_try_from_num!(u32, 10); impl_try_from_num!(u64, 20); #[cfg(test)] mod tests { use crate::{String, Vec}; use core::convert::TryFrom; #[test] fn static_new() { static mut _S: String<8> = String::new(); } #[test] fn clone() { let s1: String<20> = String::try_from("abcd").unwrap(); let mut s2 = s1.clone(); s2.push_str(" efgh").unwrap(); assert_eq!(s1, "abcd"); assert_eq!(s2, "abcd efgh"); } #[test] fn cmp() { let s1: String<4> = String::try_from("abcd").unwrap(); let s2: String<4> = String::try_from("zzzz").unwrap(); assert!(s1 < s2); } #[test] fn cmp_heterogenous_size() { let s1: String<4> = String::try_from("abcd").unwrap(); let s2: String<8> = String::try_from("zzzz").unwrap(); assert!(s1 < s2); } #[test] fn debug() { use core::fmt::Write; let s: String<8> = String::try_from("abcd").unwrap(); let mut std_s = std::string::String::new(); write!(std_s, "{:?}", s).unwrap(); assert_eq!("\"abcd\"", std_s); } #[test] fn display() { use core::fmt::Write; let s: String<8> = String::try_from("abcd").unwrap(); let mut std_s = std::string::String::new(); write!(std_s, "{}", s).unwrap(); assert_eq!("abcd", std_s); } #[test] fn empty() { let s: String<4> = String::new(); assert!(s.capacity() == 4); assert_eq!(s, ""); assert_eq!(s.len(), 0); assert_ne!(s.len(), 4); } #[test] fn try_from() { let s: String<4> = String::try_from("123").unwrap(); assert!(s.len() == 3); assert_eq!(s, "123"); let e: () = String::<2>::try_from("123").unwrap_err(); assert_eq!(e, ()); } #[test] fn from_str() { use core::str::FromStr; let s: String<4> = String::<4>::from_str("123").unwrap(); assert!(s.len() == 3); assert_eq!(s, "123"); let e: () = String::<2>::from_str("123").unwrap_err(); assert_eq!(e, ()); } #[test] fn from_iter() { let mut v: Vec = Vec::new(); v.push('h').unwrap(); v.push('e').unwrap(); v.push('l').unwrap(); v.push('l').unwrap(); v.push('o').unwrap(); let string1: String<5> = v.iter().collect(); //&char let string2: String<5> = "hello".chars().collect(); //char assert_eq!(string1, "hello"); assert_eq!(string2, "hello"); } #[test] #[should_panic] fn from_panic() { let _: String<4> = String::try_from("12345").unwrap(); } #[test] fn try_from_num() { let v: String<20> = String::try_from(18446744073709551615 as u64).unwrap(); assert_eq!(v, "18446744073709551615"); let e: () = String::<2>::try_from(18446744073709551615 as u64).unwrap_err(); assert_eq!(e, ()); } #[test] fn into_bytes() { let s: String<4> = 
String::try_from("ab").unwrap(); let b: Vec = s.into_bytes(); assert_eq!(b.len(), 2); assert_eq!(&['a' as u8, 'b' as u8], &b[..]); } #[test] fn as_str() { let s: String<4> = String::try_from("ab").unwrap(); assert_eq!(s.as_str(), "ab"); // should be moved to fail test // let _s = s.as_str(); // s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable } #[test] fn as_mut_str() { let mut s: String<4> = String::try_from("ab").unwrap(); let s = s.as_mut_str(); s.make_ascii_uppercase(); assert_eq!(s, "AB"); } #[test] fn push_str() { let mut s: String<8> = String::try_from("foo").unwrap(); assert!(s.push_str("bar").is_ok()); assert_eq!("foobar", s); assert_eq!(s, "foobar"); assert!(s.push_str("tender").is_err()); assert_eq!("foobar", s); assert_eq!(s, "foobar"); } #[test] fn push() { let mut s: String<6> = String::try_from("abc").unwrap(); assert!(s.push('1').is_ok()); assert!(s.push('2').is_ok()); assert!(s.push('3').is_ok()); assert!(s.push('4').is_err()); assert!("abc123" == s.as_str()); } #[test] fn as_bytes() { let s: String<8> = String::try_from("hello").unwrap(); assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes()); } #[test] fn truncate() { let mut s: String<8> = String::try_from("hello").unwrap(); s.truncate(6); assert_eq!(s.len(), 5); s.truncate(2); assert_eq!(s.len(), 2); assert_eq!("he", s); assert_eq!(s, "he"); } #[test] fn pop() { let mut s: String<8> = String::try_from("foo").unwrap(); assert_eq!(s.pop(), Some('o')); assert_eq!(s.pop(), Some('o')); assert_eq!(s.pop(), Some('f')); assert_eq!(s.pop(), None); } #[test] fn pop_uenc() { let mut s: String<8> = String::try_from("é").unwrap(); assert_eq!(s.len(), 3); match s.pop() { Some(c) => { assert_eq!(s.len(), 1); assert_eq!(c, '\u{0301}'); // accute accent of e () } None => assert!(false), }; } #[test] fn is_empty() { let mut v: String<8> = String::new(); assert!(v.is_empty()); let _ = v.push('a'); assert!(!v.is_empty()); } #[test] fn clear() { let mut s: String<8> = String::try_from("foo").unwrap(); s.clear(); assert!(s.is_empty()); assert_eq!(0, s.len()); assert_eq!(8, s.capacity()); } #[test] fn remove() { let mut s: String<8> = String::try_from("foo").unwrap(); assert_eq!(s.remove(0), 'f'); assert_eq!(s.as_str(), "oo"); } #[test] fn remove_uenc() { let mut s: String<8> = String::try_from("ĝėēƶ").unwrap(); assert_eq!(s.remove(2), 'ė'); assert_eq!(s.remove(2), 'ē'); assert_eq!(s.remove(2), 'ƶ'); assert_eq!(s.as_str(), "ĝ"); } #[test] fn remove_uenc_combo_characters() { let mut s: String<8> = String::try_from("héy").unwrap(); assert_eq!(s.remove(2), '\u{0301}'); assert_eq!(s.as_str(), "hey"); } } heapless-0.8.0/src/test_helpers.rs000064400000000000000000000012701046102023000152730ustar 00000000000000macro_rules! 
droppable { () => { static COUNT: core::sync::atomic::AtomicI32 = core::sync::atomic::AtomicI32::new(0); #[derive(Eq, Ord, PartialEq, PartialOrd)] struct Droppable(i32); impl Droppable { fn new() -> Self { COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed); Droppable(Self::count()) } fn count() -> i32 { COUNT.load(core::sync::atomic::Ordering::Relaxed) } } impl Drop for Droppable { fn drop(&mut self) { COUNT.fetch_sub(1, core::sync::atomic::Ordering::Relaxed); } } }; } heapless-0.8.0/src/ufmt.rs000064400000000000000000000023501046102023000135450ustar 00000000000000use crate::{string::String, vec::Vec}; use ufmt_write::uWrite; impl uWrite for String { type Error = (); fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { self.push_str(s) } } impl uWrite for Vec { type Error = (); fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { self.extend_from_slice(s.as_bytes()) } } #[cfg(test)] mod tests { use super::*; use ufmt::{derive::uDebug, uwrite}; #[derive(uDebug)] struct Pair { x: u32, y: u32, } #[test] fn test_string() { let a = 123; let b = Pair { x: 0, y: 1234 }; let mut s = String::<32>::new(); uwrite!(s, "{} -> {:?}", a, b).unwrap(); assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }"); } #[test] fn test_string_err() { let p = Pair { x: 0, y: 1234 }; let mut s = String::<4>::new(); assert!(uwrite!(s, "{:?}", p).is_err()); } #[test] fn test_vec() { let a = 123; let b = Pair { x: 0, y: 1234 }; let mut v = Vec::::new(); uwrite!(v, "{} -> {:?}", a, b).unwrap(); assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }"); } } heapless-0.8.0/src/vec.rs000064400000000000000000001262641046102023000133620ustar 00000000000000use core::{cmp::Ordering, fmt, hash, iter::FromIterator, mem::MaybeUninit, ops, ptr, slice}; /// A fixed capacity [`Vec`](https://doc.rust-lang.org/std/vec/struct.Vec.html) /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// /// // A vector with a fixed capacity of 8 elements allocated on the stack /// let mut vec = Vec::<_, 8>::new(); /// vec.push(1); /// vec.push(2); /// /// assert_eq!(vec.len(), 2); /// assert_eq!(vec[0], 1); /// /// assert_eq!(vec.pop(), Some(2)); /// assert_eq!(vec.len(), 1); /// /// vec[0] = 7; /// assert_eq!(vec[0], 7); /// /// vec.extend([1, 2, 3].iter().cloned()); /// /// for x in &vec { /// println!("{}", x); /// } /// assert_eq!(*vec, [7, 1, 2, 3]); /// ``` pub struct Vec { // NOTE order is important for optimizations. the `len` first layout lets the compiler optimize // `new` to: reserve stack space and zero the first word. With the fields in the reverse order // the compiler optimizes `new` to `memclr`-ing the *entire* stack space, including the `buffer` // field which should be left uninitialized. Optimizations were last checked with Rust 1.60 len: usize, buffer: [MaybeUninit; N], } impl Vec { const ELEM: MaybeUninit = MaybeUninit::uninit(); const INIT: [MaybeUninit; N] = [Self::ELEM; N]; // important for optimization of `new` /// Constructs a new, empty vector with a fixed capacity of `N` /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// // allocate the vector on the stack /// let mut x: Vec = Vec::new(); /// /// // allocate the vector in a static variable /// static mut X: Vec = Vec::new(); /// ``` /// `Vec` `const` constructor; wrap the returned value in [`Vec`]. pub const fn new() -> Self { Self { len: 0, buffer: Self::INIT, } } /// Constructs a new vector with a fixed capacity of `N` and fills it /// with the provided slice. 
/// /// This is equivalent to the following code: /// /// ``` /// use heapless::Vec; /// /// let mut v: Vec = Vec::new(); /// v.extend_from_slice(&[1, 2, 3]).unwrap(); /// ``` #[inline] pub fn from_slice(other: &[T]) -> Result where T: Clone, { let mut v = Vec::new(); v.extend_from_slice(other)?; Ok(v) } /// Clones a vec into a new vec pub(crate) fn clone(&self) -> Self where T: Clone, { let mut new = Self::new(); // avoid `extend_from_slice` as that introduces a runtime check / panicking branch for elem in self { unsafe { new.push_unchecked(elem.clone()); } } new } /// Returns a raw pointer to the vector’s buffer. pub fn as_ptr(&self) -> *const T { self.buffer.as_ptr() as *const T } /// Returns a raw pointer to the vector’s buffer, which may be mutated through. pub fn as_mut_ptr(&mut self) -> *mut T { self.buffer.as_mut_ptr() as *mut T } /// Extracts a slice containing the entire vector. /// /// Equivalent to `&s[..]`. /// /// # Examples /// /// ``` /// use heapless::Vec; /// let buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); /// assert_eq!(buffer.as_slice(), &[1, 2, 3, 5, 8]); /// ``` pub fn as_slice(&self) -> &[T] { // NOTE(unsafe) avoid bound checks in the slicing operation // &buffer[..self.len] unsafe { slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.len) } } /// Returns the contents of the vector as an array of length `M` if the length /// of the vector is exactly `M`, otherwise returns `Err(self)`. /// /// # Examples /// /// ``` /// use heapless::Vec; /// let buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); /// let array: [u8; 5] = buffer.into_array().unwrap(); /// assert_eq!(array, [1, 2, 3, 5, 8]); /// ``` pub fn into_array(self) -> Result<[T; M], Self> { if self.len() == M { // This is how the unstable `MaybeUninit::array_assume_init` method does it let array = unsafe { (&self.buffer as *const _ as *const [T; M]).read() }; // We don't want `self`'s destructor to be called because that would drop all the // items in the array core::mem::forget(self); Ok(array) } else { Err(self) } } /// Extracts a mutable slice containing the entire vector. /// /// Equivalent to `&mut s[..]`. /// /// # Examples /// /// ``` /// use heapless::Vec; /// let mut buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); /// buffer[0] = 9; /// assert_eq!(buffer.as_slice(), &[9, 2, 3, 5, 8]); /// ``` pub fn as_mut_slice(&mut self) -> &mut [T] { // NOTE(unsafe) avoid bound checks in the slicing operation // &mut buffer[..self.len] unsafe { slice::from_raw_parts_mut(self.buffer.as_mut_ptr() as *mut T, self.len) } } /// Returns the maximum number of elements the vector can hold. pub const fn capacity(&self) -> usize { N } /// Clears the vector, removing all values. pub fn clear(&mut self) { self.truncate(0); } /// Extends the vec from an iterator. /// /// # Panic /// /// Panics if the vec cannot hold all elements of the iterator. pub fn extend(&mut self, iter: I) where I: IntoIterator, { for elem in iter { self.push(elem).ok().unwrap() } } /// Clones and appends all elements in a slice to the `Vec`. /// /// Iterates over the slice `other`, clones each element, and then appends /// it to this `Vec`. The `other` vector is traversed in-order. 
/// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec = Vec::::new(); /// vec.push(1).unwrap(); /// vec.extend_from_slice(&[2, 3, 4]).unwrap(); /// assert_eq!(*vec, [1, 2, 3, 4]); /// ``` pub fn extend_from_slice(&mut self, other: &[T]) -> Result<(), ()> where T: Clone, { if self.len + other.len() > self.capacity() { // won't fit in the `Vec`; don't modify anything and return an error Err(()) } else { for elem in other { unsafe { self.push_unchecked(elem.clone()); } } Ok(()) } } /// Removes the last element from a vector and returns it, or `None` if it's empty pub fn pop(&mut self) -> Option { if self.len != 0 { Some(unsafe { self.pop_unchecked() }) } else { None } } /// Appends an `item` to the back of the collection /// /// Returns back the `item` if the vector is full pub fn push(&mut self, item: T) -> Result<(), T> { if self.len < self.capacity() { unsafe { self.push_unchecked(item) } Ok(()) } else { Err(item) } } /// Removes the last element from a vector and returns it /// /// # Safety /// /// This assumes the vec to have at least one element. pub unsafe fn pop_unchecked(&mut self) -> T { debug_assert!(!self.is_empty()); self.len -= 1; (self.buffer.get_unchecked_mut(self.len).as_ptr() as *const T).read() } /// Appends an `item` to the back of the collection /// /// # Safety /// /// This assumes the vec is not full. pub unsafe fn push_unchecked(&mut self, item: T) { // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory debug_assert!(!self.is_full()); *self.buffer.get_unchecked_mut(self.len) = MaybeUninit::new(item); self.len += 1; } /// Shortens the vector, keeping the first `len` elements and dropping the rest. pub fn truncate(&mut self, len: usize) { // This is safe because: // // * the slice passed to `drop_in_place` is valid; the `len > self.len` // case avoids creating an invalid slice, and // * the `len` of the vector is shrunk before calling `drop_in_place`, // such that no value will be dropped twice in case `drop_in_place` // were to panic once (if it panics twice, the program aborts). unsafe { // Note: It's intentional that this is `>` and not `>=`. // Changing it to `>=` has negative performance // implications in some cases. See rust-lang/rust#78884 for more. if len > self.len { return; } let remaining_len = self.len - len; let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len); self.len = len; ptr::drop_in_place(s); } } /// Resizes the Vec in-place so that len is equal to new_len. /// /// If new_len is greater than len, the Vec is extended by the /// difference, with each additional slot filled with value. If /// new_len is less than len, the Vec is simply truncated. /// /// See also [`resize_default`](Self::resize_default). pub fn resize(&mut self, new_len: usize, value: T) -> Result<(), ()> where T: Clone, { if new_len > self.capacity() { return Err(()); } if new_len > self.len { while self.len < new_len { self.push(value.clone()).ok(); } } else { self.truncate(new_len); } Ok(()) } /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. /// /// If `new_len` is greater than `len`, the `Vec` is extended by the /// difference, with each additional slot filled with `Default::default()`. /// If `new_len` is less than `len`, the `Vec` is simply truncated. /// /// See also [`resize`](Self::resize). 
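    ///
    /// A brief illustrative example (the `u8` element type and capacity `8` are arbitrary):
    ///
    /// ```
    /// use heapless::Vec;
    ///
    /// let mut vec: Vec<u8, 8> = Vec::from_slice(&[1, 2, 3]).unwrap();
    ///
    /// vec.resize_default(5).unwrap(); // grow: new slots are filled with `u8::default()` (0)
    /// assert_eq!(*vec, [1, 2, 3, 0, 0]);
    ///
    /// vec.resize_default(2).unwrap(); // shrink: simply truncates
    /// assert_eq!(*vec, [1, 2]);
    /// ```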
pub fn resize_default(&mut self, new_len: usize) -> Result<(), ()> where T: Clone + Default, { self.resize(new_len, T::default()) } /// Forces the length of the vector to `new_len`. /// /// This is a low-level operation that maintains none of the normal /// invariants of the type. Normally changing the length of a vector /// is done using one of the safe operations instead, such as /// [`truncate`], [`resize`], [`extend`], or [`clear`]. /// /// [`truncate`]: Self::truncate /// [`resize`]: Self::resize /// [`extend`]: core::iter::Extend /// [`clear`]: Self::clear /// /// # Safety /// /// - `new_len` must be less than or equal to [`capacity()`]. /// - The elements at `old_len..new_len` must be initialized. /// /// [`capacity()`]: Self::capacity /// /// # Examples /// /// This method can be useful for situations in which the vector /// is serving as a buffer for other code, particularly over FFI: /// /// ```no_run /// # #![allow(dead_code)] /// use heapless::Vec; /// /// # // This is just a minimal skeleton for the doc example; /// # // don't use this as a starting point for a real library. /// # pub struct StreamWrapper { strm: *mut core::ffi::c_void } /// # const Z_OK: i32 = 0; /// # extern "C" { /// # fn deflateGetDictionary( /// # strm: *mut core::ffi::c_void, /// # dictionary: *mut u8, /// # dictLength: *mut usize, /// # ) -> i32; /// # } /// # impl StreamWrapper { /// pub fn get_dictionary(&self) -> Option> { /// // Per the FFI method's docs, "32768 bytes is always enough". /// let mut dict = Vec::new(); /// let mut dict_length = 0; /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that: /// // 1. `dict_length` elements were initialized. /// // 2. `dict_length` <= the capacity (32_768) /// // which makes `set_len` safe to call. /// unsafe { /// // Make the FFI call... /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length); /// if r == Z_OK { /// // ...and update the length to what was initialized. /// dict.set_len(dict_length); /// Some(dict) /// } else { /// None /// } /// } /// } /// # } /// ``` /// /// While the following example is sound, there is a memory leak since /// the inner vectors were not freed prior to the `set_len` call: /// /// ``` /// use core::iter::FromIterator; /// use heapless::Vec; /// /// let mut vec = Vec::, 3>::from_iter( /// [ /// Vec::from_iter([1, 0, 0].iter().cloned()), /// Vec::from_iter([0, 1, 0].iter().cloned()), /// Vec::from_iter([0, 0, 1].iter().cloned()), /// ] /// .iter() /// .cloned() /// ); /// // SAFETY: /// // 1. `old_len..0` is empty so no elements need to be initialized. /// // 2. `0 <= capacity` always holds whatever `capacity` is. /// unsafe { /// vec.set_len(0); /// } /// ``` /// /// Normally, here, one would use [`clear`] instead to correctly drop /// the contents and thus not leak memory. pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); self.len = new_len } /// Removes an element from the vector and returns it. /// /// The removed element is replaced by the last element of the vector. /// /// This does not preserve ordering, but is O(1). /// /// # Panics /// /// Panics if `index` is out of bounds. 
/// /// # Examples /// /// ``` /// use heapless::Vec; ///// use heapless::consts::*; /// /// let mut v: Vec<_, 8> = Vec::new(); /// v.push("foo").unwrap(); /// v.push("bar").unwrap(); /// v.push("baz").unwrap(); /// v.push("qux").unwrap(); /// /// assert_eq!(v.swap_remove(1), "bar"); /// assert_eq!(&*v, ["foo", "qux", "baz"]); /// /// assert_eq!(v.swap_remove(0), "foo"); /// assert_eq!(&*v, ["baz", "qux"]); /// ``` pub fn swap_remove(&mut self, index: usize) -> T { assert!(index < self.len); unsafe { self.swap_remove_unchecked(index) } } /// Removes an element from the vector and returns it. /// /// The removed element is replaced by the last element of the vector. /// /// This does not preserve ordering, but is O(1). /// /// # Safety /// /// Assumes `index` within bounds. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut v: Vec<_, 8> = Vec::new(); /// v.push("foo").unwrap(); /// v.push("bar").unwrap(); /// v.push("baz").unwrap(); /// v.push("qux").unwrap(); /// /// assert_eq!(unsafe { v.swap_remove_unchecked(1) }, "bar"); /// assert_eq!(&*v, ["foo", "qux", "baz"]); /// /// assert_eq!(unsafe { v.swap_remove_unchecked(0) }, "foo"); /// assert_eq!(&*v, ["baz", "qux"]); /// ``` pub unsafe fn swap_remove_unchecked(&mut self, index: usize) -> T { let length = self.len(); debug_assert!(index < length); let value = ptr::read(self.as_ptr().add(index)); let base_ptr = self.as_mut_ptr(); ptr::copy(base_ptr.add(length - 1), base_ptr.add(index), 1); self.len -= 1; value } /// Returns true if the vec is full #[inline] pub fn is_full(&self) -> bool { self.len == self.capacity() } /// Returns true if the vec is empty #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns `true` if `needle` is a prefix of the Vec. /// /// Always returns `true` if `needle` is an empty slice. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let v: Vec<_, 8> = Vec::from_slice(b"abc").unwrap(); /// assert_eq!(v.starts_with(b""), true); /// assert_eq!(v.starts_with(b"ab"), true); /// assert_eq!(v.starts_with(b"bc"), false); /// ``` #[inline] pub fn starts_with(&self, needle: &[T]) -> bool where T: PartialEq, { let n = needle.len(); self.len >= n && needle == &self[..n] } /// Returns `true` if `needle` is a suffix of the Vec. /// /// Always returns `true` if `needle` is an empty slice. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let v: Vec<_, 8> = Vec::from_slice(b"abc").unwrap(); /// assert_eq!(v.ends_with(b""), true); /// assert_eq!(v.ends_with(b"ab"), false); /// assert_eq!(v.ends_with(b"bc"), true); /// ``` #[inline] pub fn ends_with(&self, needle: &[T]) -> bool where T: PartialEq, { let (v, n) = (self.len(), needle.len()); v >= n && needle == &self[v - n..] } /// Inserts an element at position `index` within the vector, shifting all /// elements after it to the right. /// /// Returns back the `element` if the vector is full. /// /// # Panics /// /// Panics if `index > len`. 
/// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3]).unwrap(); /// vec.insert(1, 4); /// assert_eq!(vec, [1, 4, 2, 3]); /// vec.insert(4, 5); /// assert_eq!(vec, [1, 4, 2, 3, 5]); /// ``` pub fn insert(&mut self, index: usize, element: T) -> Result<(), T> { let len = self.len(); if index > len { panic!( "insertion index (is {}) should be <= len (is {})", index, len ); } // check there's space for the new element if self.is_full() { return Err(element); } unsafe { // infallible // The spot to put the new value { let p = self.as_mut_ptr().add(index); // Shift everything over to make space. (Duplicating the // `index`th element into two consecutive places.) ptr::copy(p, p.offset(1), len - index); // Write it in, overwriting the first copy of the `index`th // element. ptr::write(p, element); } self.set_len(len + 1); } Ok(()) } /// Removes and returns the element at position `index` within the vector, /// shifting all elements after it to the left. /// /// Note: Because this shifts over the remaining elements, it has a /// worst-case performance of *O*(*n*). If you don't need the order of /// elements to be preserved, use [`swap_remove`] instead. If you'd like to /// remove elements from the beginning of the `Vec`, consider using /// [`Deque::pop_front`] instead. /// /// [`swap_remove`]: Vec::swap_remove /// [`Deque::pop_front`]: crate::Deque::pop_front /// /// # Panics /// /// Panics if `index` is out of bounds. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut v: Vec<_, 8> = Vec::from_slice(&[1, 2, 3]).unwrap(); /// assert_eq!(v.remove(1), 2); /// assert_eq!(v, [1, 3]); /// ``` pub fn remove(&mut self, index: usize) -> T { let len = self.len(); if index >= len { panic!("removal index (is {}) should be < len (is {})", index, len); } unsafe { // infallible let ret; { // the place we are taking from. let ptr = self.as_mut_ptr().add(index); // copy it out, unsafely having a copy of the value on // the stack and in the vector at the same time. ret = ptr::read(ptr); // Shift everything down to fill in that spot. ptr::copy(ptr.offset(1), ptr, len - index - 1); } self.set_len(len - 1); ret } } /// Retains only the elements specified by the predicate. /// /// In other words, remove all elements `e` for which `f(&e)` returns `false`. /// This method operates in place, visiting each element exactly once in the /// original order, and preserves the order of the retained elements. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3, 4]).unwrap(); /// vec.retain(|&x| x % 2 == 0); /// assert_eq!(vec, [2, 4]); /// ``` /// /// Because the elements are visited exactly once in the original order, /// external state may be used to decide which elements to keep. /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3, 4, 5]).unwrap(); /// let keep = [false, true, true, false, true]; /// let mut iter = keep.iter(); /// vec.retain(|_| *iter.next().unwrap()); /// assert_eq!(vec, [2, 3, 5]); /// ``` pub fn retain(&mut self, mut f: F) where F: FnMut(&T) -> bool, { self.retain_mut(|elem| f(elem)); } /// Retains only the elements specified by the predicate, passing a mutable reference to it. /// /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`. /// This method operates in place, visiting each element exactly once in the /// original order, and preserves the order of the retained elements. 
    /// Retains only the elements specified by the predicate, passing a mutable reference to it.
    ///
    /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
    /// This method operates in place, visiting each element exactly once in the
    /// original order, and preserves the order of the retained elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use heapless::Vec;
    ///
    /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3, 4]).unwrap();
    /// vec.retain_mut(|x| if *x <= 3 {
    ///     *x += 1;
    ///     true
    /// } else {
    ///     false
    /// });
    /// assert_eq!(vec, [2, 3, 4]);
    /// ```
    pub fn retain_mut<F>(&mut self, mut f: F)
    where
        F: FnMut(&mut T) -> bool,
    {
        let original_len = self.len();
        // Avoid double drop if the drop guard is not executed,
        // since we may make some holes during the process.
        unsafe { self.set_len(0) };

        // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
        //      |<-          processed len         ->| ^- next to check
        //                  |<-  deleted cnt      ->|
        //      |<-                 original_len                        ->|
        // Kept: Elements which predicate returns true on.
        // Hole: Moved or dropped element slot.
        // Unchecked: Unchecked valid elements.
        //
        // This drop guard will be invoked when predicate or `drop` of element panicked.
        // It shifts unchecked elements to cover holes and `set_len` to the correct length.
        // In cases when predicate and `drop` never panic, it will be optimized out.
        struct BackshiftOnDrop<'a, T, const N: usize> {
            v: &'a mut Vec<T, N>,
            processed_len: usize,
            deleted_cnt: usize,
            original_len: usize,
        }

        impl<T, const N: usize> Drop for BackshiftOnDrop<'_, T, N> {
            fn drop(&mut self) {
                if self.deleted_cnt > 0 {
                    // SAFETY: Trailing unchecked items must be valid since we never touch them.
                    unsafe {
                        ptr::copy(
                            self.v.as_ptr().add(self.processed_len),
                            self.v
                                .as_mut_ptr()
                                .add(self.processed_len - self.deleted_cnt),
                            self.original_len - self.processed_len,
                        );
                    }
                }
                // SAFETY: After filling holes, all items are in contiguous memory.
                unsafe {
                    self.v.set_len(self.original_len - self.deleted_cnt);
                }
            }
        }

        let mut g = BackshiftOnDrop {
            v: self,
            processed_len: 0,
            deleted_cnt: 0,
            original_len,
        };

        fn process_loop<F, T, const N: usize, const DELETED: bool>(
            original_len: usize,
            f: &mut F,
            g: &mut BackshiftOnDrop<'_, T, N>,
        ) where
            F: FnMut(&mut T) -> bool,
        {
            while g.processed_len != original_len {
                let p = g.v.as_mut_ptr();
                // SAFETY: Unchecked element must be valid.
                let cur = unsafe { &mut *p.add(g.processed_len) };
                if !f(cur) {
                    // Advance early to avoid double drop if `drop_in_place` panicked.
                    g.processed_len += 1;
                    g.deleted_cnt += 1;
                    // SAFETY: We never touch this element again after dropped.
                    unsafe { ptr::drop_in_place(cur) };
                    // We already advanced the counter.
                    if DELETED {
                        continue;
                    } else {
                        break;
                    }
                }
                if DELETED {
                    // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
                    // We use copy for move, and never touch this element again.
                    unsafe {
                        let hole_slot = p.add(g.processed_len - g.deleted_cnt);
                        ptr::copy_nonoverlapping(cur, hole_slot, 1);
                    }
                }
                g.processed_len += 1;
            }
        }

        // Stage 1: Nothing was deleted.
        process_loop::<F, T, N, false>(original_len, &mut f, &mut g);

        // Stage 2: Some elements were deleted.
        process_loop::<F, T, N, true>(original_len, &mut f, &mut g);

        // All items are processed. This can be optimized to `set_len` by LLVM.
        drop(g);
    }
}
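// Sketch tests (the module name, test names and values are illustrative
// assumptions, not part of this crate's own test suite below) showing the
// fallible `insert` contract and the ordering difference between `remove`
// and `swap_remove` documented above.
#[cfg(test)]
mod element_removal_sketch {
    use crate::Vec;

    #[test]
    fn insert_gives_the_element_back_when_full() {
        let mut v: Vec<_, 2> = Vec::from_slice(&[1, 2]).unwrap();
        // The vector is at capacity, so the element is handed back instead of panicking.
        assert_eq!(v.insert(1, 7), Err(7));
        assert_eq!(v, [1, 2]);
    }

    #[test]
    fn remove_preserves_order_swap_remove_does_not() {
        let mut a: Vec<_, 4> = Vec::from_slice(&[1, 2, 3, 4]).unwrap();
        let mut b = a.clone();

        // `remove` shifts the tail left: O(n), but order preserving.
        assert_eq!(a.remove(0), 1);
        assert_eq!(a, [2, 3, 4]);

        // `swap_remove` moves the last element into the hole: O(1), order not preserved.
        assert_eq!(b.swap_remove(0), 1);
        assert_eq!(b, [4, 2, 3]);
    }
}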
// Trait implementations

impl<T, const N: usize> Default for Vec<T, N> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T, const N: usize> fmt::Debug for Vec<T, N>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        <[T] as fmt::Debug>::fmt(self, f)
    }
}

impl<const N: usize> fmt::Write for Vec<u8, N> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        match self.extend_from_slice(s.as_bytes()) {
            Ok(()) => Ok(()),
            Err(_) => Err(fmt::Error),
        }
    }
}

impl<T, const N: usize> Drop for Vec<T, N> {
    fn drop(&mut self) {
        // We drop each element used in the vector by turning into a &mut[T]
        unsafe {
            ptr::drop_in_place(self.as_mut_slice());
        }
    }
}

impl<'a, T: Clone, const N: usize> TryFrom<&'a [T]> for Vec<T, N> {
    type Error = ();

    fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> {
        Vec::from_slice(slice)
    }
}

impl<T, const N: usize> Extend<T> for Vec<T, N> {
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
    {
        self.extend(iter)
    }
}

impl<'a, T, const N: usize> Extend<&'a T> for Vec<T, N>
where
    T: 'a + Copy,
{
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = &'a T>,
    {
        self.extend(iter.into_iter().cloned())
    }
}

impl<T, const N: usize> hash::Hash for Vec<T, N>
where
    T: core::hash::Hash,
{
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        <[T] as hash::Hash>::hash(self, state)
    }
}

impl<'a, T, const N: usize> IntoIterator for &'a Vec<T, N> {
    type Item = &'a T;
    type IntoIter = slice::Iter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, T, const N: usize> IntoIterator for &'a mut Vec<T, N> {
    type Item = &'a mut T;
    type IntoIter = slice::IterMut<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<T, const N: usize> FromIterator<T> for Vec<T, N> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = T>,
    {
        let mut vec = Vec::new();
        for i in iter {
            vec.push(i).ok().expect("Vec::from_iter overflow");
        }
        vec
    }
}

/// An iterator that moves out of a [`Vec`][`Vec`].
///
/// This struct is created by calling the `into_iter` method on [`Vec`][`Vec`].
pub struct IntoIter { vec: Vec, next: usize, } impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { if self.next < self.vec.len() { let item = unsafe { (self.vec.buffer.get_unchecked_mut(self.next).as_ptr() as *const T).read() }; self.next += 1; Some(item) } else { None } } } impl Clone for IntoIter where T: Clone, { fn clone(&self) -> Self { let mut vec = Vec::new(); if self.next < self.vec.len() { let s = unsafe { slice::from_raw_parts( (self.vec.buffer.as_ptr() as *const T).add(self.next), self.vec.len() - self.next, ) }; vec.extend_from_slice(s).ok(); } Self { vec, next: 0 } } } impl Drop for IntoIter { fn drop(&mut self) { unsafe { // Drop all the elements that have not been moved out of vec ptr::drop_in_place(&mut self.vec.as_mut_slice()[self.next..]); // Prevent dropping of other elements self.vec.len = 0; } } } impl IntoIterator for Vec { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { vec: self, next: 0 } } } impl PartialEq> for Vec where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(self, &**other) } } // Vec == [B] impl PartialEq<[B]> for Vec where A: PartialEq, { fn eq(&self, other: &[B]) -> bool { <[A]>::eq(self, &other[..]) } } // [B] == Vec impl PartialEq> for [B] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == &[B] impl PartialEq<&[B]> for Vec where A: PartialEq, { fn eq(&self, other: &&[B]) -> bool { <[A]>::eq(self, &other[..]) } } // &[B] == Vec impl PartialEq> for &[B] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == &mut [B] impl PartialEq<&mut [B]> for Vec where A: PartialEq, { fn eq(&self, other: &&mut [B]) -> bool { <[A]>::eq(self, &other[..]) } } // &mut [B] == Vec impl PartialEq> for &mut [B] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == [B; M] // Equality does not require equal capacity impl PartialEq<[B; M]> for Vec where A: PartialEq, { fn eq(&self, other: &[B; M]) -> bool { <[A]>::eq(self, &other[..]) } } // [B; M] == Vec // Equality does not require equal capacity impl PartialEq> for [B; M] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == &[B; M] // Equality does not require equal capacity impl PartialEq<&[B; M]> for Vec where A: PartialEq, { fn eq(&self, other: &&[B; M]) -> bool { <[A]>::eq(self, &other[..]) } } // &[B; M] == Vec // Equality does not require equal capacity impl PartialEq> for &[B; M] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Implements Eq if underlying data is Eq impl Eq for Vec where T: Eq {} impl PartialOrd> for Vec where T: PartialOrd, { fn partial_cmp(&self, other: &Vec) -> Option { PartialOrd::partial_cmp(&**self, &**other) } } impl Ord for Vec where T: Ord, { #[inline] fn cmp(&self, other: &Self) -> Ordering { Ord::cmp(&**self, &**other) } } impl ops::Deref for Vec { type Target = [T]; fn deref(&self) -> &[T] { self.as_slice() } } impl ops::DerefMut for Vec { fn deref_mut(&mut self) -> &mut [T] { self.as_mut_slice() } } impl AsRef> for Vec { #[inline] fn as_ref(&self) -> &Self { self } } impl AsMut> for Vec { #[inline] fn as_mut(&mut self) -> &mut Self { self } } impl AsRef<[T]> for Vec { #[inline] fn as_ref(&self) -> &[T] { self } } impl AsMut<[T]> for Vec { #[inline] fn as_mut(&mut self) -> &mut [T] { self } } impl Clone for Vec where T: Clone, { fn clone(&self) -> Self { self.clone() } } #[cfg(test)] 
mod tests { use crate::Vec; use core::fmt::Write; #[test] fn static_new() { static mut _V: Vec = Vec::new(); } #[test] fn stack_new() { let mut _v: Vec = Vec::new(); } #[test] fn is_full_empty() { let mut v: Vec = Vec::new(); assert!(v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(v.is_full()); } #[test] fn drop() { droppable!(); { let mut v: Vec = Vec::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: Vec = Vec::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } #[test] fn eq() { let mut xs: Vec = Vec::new(); let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); xs.push(1).unwrap(); ys.push(1).unwrap(); assert_eq!(xs, ys); } #[test] fn cmp() { let mut xs: Vec = Vec::new(); let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); xs.push(1).unwrap(); ys.push(2).unwrap(); assert!(xs < ys); } #[test] fn cmp_heterogenous_size() { let mut xs: Vec = Vec::new(); let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); xs.push(1).unwrap(); ys.push(2).unwrap(); assert!(xs < ys); } #[test] fn cmp_with_arrays_and_slices() { let mut xs: Vec = Vec::new(); xs.push(1).unwrap(); let array = [1]; assert_eq!(xs, array); assert_eq!(array, xs); assert_eq!(xs, array.as_slice()); assert_eq!(array.as_slice(), xs); assert_eq!(xs, &array); assert_eq!(&array, xs); let longer_array = [1; 20]; assert_ne!(xs, longer_array); assert_ne!(longer_array, xs); } #[test] fn full() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); assert!(v.push(4).is_err()); } #[test] fn iter() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); let mut items = v.iter(); assert_eq!(items.next(), Some(&0)); assert_eq!(items.next(), Some(&1)); assert_eq!(items.next(), Some(&2)); assert_eq!(items.next(), Some(&3)); assert_eq!(items.next(), None); } #[test] fn iter_mut() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); let mut items = v.iter_mut(); assert_eq!(items.next(), Some(&mut 0)); assert_eq!(items.next(), Some(&mut 1)); assert_eq!(items.next(), Some(&mut 2)); assert_eq!(items.next(), Some(&mut 3)); assert_eq!(items.next(), None); } #[test] fn collect_from_iter() { let slice = &[1, 2, 3]; let vec: Vec = slice.iter().cloned().collect(); assert_eq!(&vec, slice); } #[test] #[should_panic] fn collect_from_iter_overfull() { let slice = &[1, 2, 3]; let _vec = slice.iter().cloned().collect::>(); } #[test] fn iter_move() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); let mut items = v.into_iter(); assert_eq!(items.next(), Some(0)); assert_eq!(items.next(), Some(1)); assert_eq!(items.next(), Some(2)); assert_eq!(items.next(), Some(3)); assert_eq!(items.next(), None); } #[test] fn iter_move_drop() { droppable!(); { let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let mut items = vec.into_iter(); // Move all let _ = items.next(); let _ = items.next(); } assert_eq!(Droppable::count(), 0); { let mut vec: Vec = Vec::new(); 
vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let _items = vec.into_iter(); // Move none } assert_eq!(Droppable::count(), 0); { let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let mut items = vec.into_iter(); let _ = items.next(); // Move partly } assert_eq!(Droppable::count(), 0); } #[test] fn push_and_pop() { let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); assert_eq!(v.pop(), None); assert_eq!(v.len(), 0); v.push(0).unwrap(); assert_eq!(v.len(), 1); assert_eq!(v.pop(), Some(0)); assert_eq!(v.len(), 0); assert_eq!(v.pop(), None); assert_eq!(v.len(), 0); } #[test] fn resize_size_limit() { let mut v: Vec = Vec::new(); v.resize(0, 0).unwrap(); v.resize(4, 0).unwrap(); v.resize(5, 0).err().expect("full"); } #[test] fn resize_length_cases() { let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); // Grow by 1 v.resize(1, 0).unwrap(); assert_eq!(v.len(), 1); // Grow by 2 v.resize(3, 0).unwrap(); assert_eq!(v.len(), 3); // Resize to current size v.resize(3, 0).unwrap(); assert_eq!(v.len(), 3); // Shrink by 1 v.resize(2, 0).unwrap(); assert_eq!(v.len(), 2); // Shrink by 2 v.resize(0, 0).unwrap(); assert_eq!(v.len(), 0); } #[test] fn resize_contents() { let mut v: Vec = Vec::new(); // New entries take supplied value when growing v.resize(1, 17).unwrap(); assert_eq!(v[0], 17); // Old values aren't changed when growing v.resize(2, 18).unwrap(); assert_eq!(v[0], 17); assert_eq!(v[1], 18); // Old values aren't changed when length unchanged v.resize(2, 0).unwrap(); assert_eq!(v[0], 17); assert_eq!(v[1], 18); // Old values aren't changed when shrinking v.resize(1, 0).unwrap(); assert_eq!(v[0], 17); } #[test] fn resize_default() { let mut v: Vec = Vec::new(); // resize_default is implemented using resize, so just check the // correct value is being written. 
v.resize_default(1).unwrap(); assert_eq!(v[0], 0); } #[test] fn write() { let mut v: Vec = Vec::new(); write!(v, "{:x}", 1234).unwrap(); assert_eq!(&v[..], b"4d2"); } #[test] fn extend_from_slice() { let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); v.extend_from_slice(&[1, 2]).unwrap(); assert_eq!(v.len(), 2); assert_eq!(v.as_slice(), &[1, 2]); v.extend_from_slice(&[3]).unwrap(); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); assert!(v.extend_from_slice(&[4, 5]).is_err()); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); } #[test] fn from_slice() { // Successful construction let v: Vec = Vec::from_slice(&[1, 2, 3]).unwrap(); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); // Slice too large assert!(Vec::::from_slice(&[1, 2, 3]).is_err()); } #[test] fn starts_with() { let v: Vec<_, 8> = Vec::from_slice(b"ab").unwrap(); assert!(v.starts_with(&[])); assert!(v.starts_with(b"")); assert!(v.starts_with(b"a")); assert!(v.starts_with(b"ab")); assert!(!v.starts_with(b"abc")); assert!(!v.starts_with(b"ba")); assert!(!v.starts_with(b"b")); } #[test] fn ends_with() { let v: Vec<_, 8> = Vec::from_slice(b"ab").unwrap(); assert!(v.ends_with(&[])); assert!(v.ends_with(b"")); assert!(v.ends_with(b"b")); assert!(v.ends_with(b"ab")); assert!(!v.ends_with(b"abc")); assert!(!v.ends_with(b"ba")); assert!(!v.ends_with(b"a")); } #[test] fn zero_capacity() { let mut v: Vec = Vec::new(); // Validate capacity assert_eq!(v.capacity(), 0); // Make sure there is no capacity assert!(v.push(1).is_err()); // Validate length assert_eq!(v.len(), 0); // Validate pop assert_eq!(v.pop(), None); // Validate slice assert_eq!(v.as_slice(), &[]); // Validate empty assert!(v.is_empty()); // Validate full assert!(v.is_full()); } } heapless-0.8.0/suppressions.txt000064400000000000000000000004331046102023000147530ustar 00000000000000race:std::panic::catch_unwind race:std::thread::scope # std::thread::spawn false positive; seen on Ubuntu 20.04 but not on Arch Linux (2022-04-29) race:drop_in_place*JoinHandle race:alloc::sync::Arc<*>::drop_slow race:__call_tls_dtors # false positives in memcpy (?) race:*memcpy* heapless-0.8.0/tests/cpass.rs000064400000000000000000000007121046102023000142560ustar 00000000000000//! 
Collections of `Send`-able things are `Send` use heapless::{ spsc::{Consumer, Producer, Queue}, HistoryBuffer, Vec, }; #[test] fn send() { struct IsSend; unsafe impl Send for IsSend {} fn is_send() where T: Send, { } is_send::>(); is_send::>(); is_send::>(); is_send::>(); is_send::>(); } heapless-0.8.0/tests/tsan.rs000064400000000000000000000121171046102023000141140ustar 00000000000000#![deny(rust_2018_compatibility)] #![deny(rust_2018_idioms)] #![deny(warnings)] use std::thread; use heapless::spsc; #[test] fn once() { static mut RB: spsc::Queue = spsc::Queue::new(); let rb = unsafe { &mut RB }; rb.enqueue(0).unwrap(); let (mut p, mut c) = rb.split(); p.enqueue(1).unwrap(); thread::spawn(move || { p.enqueue(1).unwrap(); }); thread::spawn(move || { c.dequeue().unwrap(); }); } #[test] fn twice() { static mut RB: spsc::Queue = spsc::Queue::new(); let rb = unsafe { &mut RB }; rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); let (mut p, mut c) = rb.split(); thread::spawn(move || { p.enqueue(2).unwrap(); p.enqueue(3).unwrap(); }); thread::spawn(move || { c.dequeue().unwrap(); c.dequeue().unwrap(); }); } #[test] fn scoped() { let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(0).unwrap(); { let (mut p, mut c) = rb.split(); thread::scope(move |scope| { scope.spawn(move || { p.enqueue(1).unwrap(); }); scope.spawn(move || { c.dequeue().unwrap(); }); }); } rb.dequeue().unwrap(); } #[test] #[cfg_attr(miri, ignore)] // too slow fn contention() { const N: usize = 1024; let mut rb: spsc::Queue = spsc::Queue::new(); { let (mut p, mut c) = rb.split(); thread::scope(move |scope| { scope.spawn(move || { let mut sum: u32 = 0; for i in 0..(2 * N) { sum = sum.wrapping_add(i as u32); while let Err(_) = p.enqueue(i as u8) {} } println!("producer: {}", sum); }); scope.spawn(move || { let mut sum: u32 = 0; for _ in 0..(2 * N) { loop { match c.dequeue() { Some(v) => { sum = sum.wrapping_add(v as u32); break; } _ => {} } } } println!("consumer: {}", sum); }); }); } assert!(rb.is_empty()); } #[test] #[cfg_attr(miri, ignore)] // too slow fn mpmc_contention() { use std::sync::mpsc; use heapless::mpmc::Q64; const N: u32 = 64; static Q: Q64 = Q64::new(); let (s, r) = mpsc::channel(); thread::scope(|scope| { let s1 = s.clone(); scope.spawn(move || { let mut sum: u32 = 0; for i in 0..(16 * N) { sum = sum.wrapping_add(i); println!("enqueue {}", i); while let Err(_) = Q.enqueue(i) {} } s1.send(sum).unwrap(); }); let s2 = s.clone(); scope.spawn(move || { let mut sum: u32 = 0; for _ in 0..(16 * N) { loop { match Q.dequeue() { Some(v) => { sum = sum.wrapping_add(v); println!("dequeue {}", v); break; } _ => {} } } } s2.send(sum).unwrap(); }); }); assert_eq!(r.recv().unwrap(), r.recv().unwrap()); } #[test] #[cfg_attr(miri, ignore)] // too slow fn unchecked() { const N: usize = 1024; let mut rb: spsc::Queue = spsc::Queue::new(); for _ in 0..N / 2 - 1 { rb.enqueue(1).unwrap(); } { let (mut p, mut c) = rb.split(); thread::scope(move |scope| { scope.spawn(move || { for _ in 0..N / 2 - 1 { p.enqueue(2).unwrap(); } }); scope.spawn(move || { let mut sum: usize = 0; for _ in 0..N / 2 - 1 { sum = sum.wrapping_add(usize::from(c.dequeue().unwrap())); } assert_eq!(sum, N / 2 - 1); }); }); } assert_eq!(rb.len(), N / 2 - 1); } #[test] fn len_properly_wraps() { const N: usize = 4; let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(1).unwrap(); assert_eq!(rb.len(), 1); rb.dequeue(); assert_eq!(rb.len(), 0); rb.enqueue(2).unwrap(); assert_eq!(rb.len(), 1); rb.enqueue(3).unwrap(); assert_eq!(rb.len(), 2); rb.enqueue(4).unwrap(); 
    assert_eq!(rb.len(), 3);
}

#[test]
fn iterator_properly_wraps() {
    const N: usize = 4;
    let mut rb: spsc::Queue<i32, N> = spsc::Queue::new();

    rb.enqueue(1).unwrap();
    rb.dequeue();
    rb.enqueue(2).unwrap();
    rb.enqueue(3).unwrap();
    rb.enqueue(4).unwrap();
    let expected = [2, 3, 4];
    let mut actual = [0, 0, 0];
    for (idx, el) in rb.iter().enumerate() {
        actual[idx] = *el;
    }
    assert_eq!(expected, actual)
}
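// Sketch: a minimal check of the `spsc::Queue` capacity contract implied by
// the tests above. The test name and element type are illustrative
// assumptions: the SPSC ring buffer keeps one slot free to distinguish
// "empty" from "full", so a queue declared with N = 4 holds at most 3 items.
#[test]
fn capacity_is_one_less_than_n() {
    let mut rb: spsc::Queue<i32, 4> = spsc::Queue::new();

    // Three enqueues succeed...
    rb.enqueue(1).unwrap();
    rb.enqueue(2).unwrap();
    rb.enqueue(3).unwrap();
    assert_eq!(rb.len(), 3);

    // ...and the fourth is rejected, handing the value back to the caller.
    assert_eq!(rb.enqueue(4), Err(4));
}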

where
    P: ArcPool,
{
    fn clone(&self) -> Self {
        let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);

        if old_size > MAX_REFCOUNT {
            // XXX original code calls `intrinsics::abort` which is unstable API
            panic!();
        }

        Self::from_inner(self.node_ptr)
    }
}

impl<A> fmt::Debug for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl