heapless-0.7.16/.cargo_vcs_info.json0000644000000001360000000000100127330ustar { "git": { "sha1": "8a98b2650deceda3bc6a1d0c8aef3f7778cf37c4" }, "path_in_vcs": "" }heapless-0.7.16/.github/bors.toml000064400000000000000000000001130072674642500147500ustar 00000000000000block_labels = ["S-blocked"] delete_merged_branches = true status = ["ci"] heapless-0.7.16/.github/workflows/build.yml000064400000000000000000000203320072674642500167720ustar 00000000000000name: Build on: pull_request: push: branches: - master - staging - trying env: CARGO_TERM_COLOR: always jobs: # Run cargo fmt --check style: name: style runs-on: ubuntu-20.04 steps: - name: Checkout uses: actions/checkout@v1 - name: Install Rust uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: rustfmt - name: cargo fmt --check uses: actions-rs/cargo@v1 with: command: fmt args: --all -- --check # Compilation check check: name: check runs-on: ubuntu-20.04 strategy: matrix: target: - x86_64-unknown-linux-gnu - i686-unknown-linux-musl - riscv32imc-unknown-none-elf - armv7r-none-eabi - thumbv6m-none-eabi - thumbv7m-none-eabi - thumbv8m.base-none-eabi - thumbv8m.main-none-eabi toolchain: - stable - nightly features: - "" - "cas" - "serde" steps: - name: Checkout uses: actions/checkout@v2 - name: Cache cargo dependencies uses: actions/cache@v2 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v2 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-build- - name: Install Rust ${{ matrix.toolchain }} with target (${{ matrix.target }}) uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.toolchain }} target: ${{ matrix.target }} override: true - name: cargo check uses: actions-rs/cargo@v1 with: use-cross: false command: check args: --target=${{ matrix.target }} --no-default-features --features=${{ matrix.features }} # Run cpass tests testcpass: name: testcpass runs-on: ubuntu-20.04 strategy: matrix: target: - x86_64-unknown-linux-gnu - i686-unknown-linux-musl toolchain: - stable - nightly - 1.51.0 features: - serde buildtype: - "" - "--release" steps: - name: Checkout uses: actions/checkout@v2 - name: Cache cargo dependencies uses: actions/cache@v2 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v2 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} ${{ runner.OS }}-build- - name: Install Rust ${{ matrix.toolchain }} with target (${{ matrix.target }}) uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.toolchain }} target: ${{ matrix.target }} override: true - uses: actions-rs/cargo@v1 with: use-cross: false command: test args: --test cpass --target=${{ matrix.target }} --features=${{ matrix.features }} ${{ matrix.buildtype }} # Run test suite for UI testtsan: name: testtsan runs-on: ubuntu-20.04 strategy: matrix: target: - 
x86_64-unknown-linux-gnu toolchain: - nightly features: - x86-sync-pool buildtype: - "" - "--release" steps: - name: Checkout uses: actions/checkout@v2 - name: Cache cargo dependencies uses: actions/cache@v2 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v2 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust ${{ matrix.toolchain }} with target (${{ matrix.target }}) uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.toolchain }} target: ${{ matrix.target }} override: true - name: Export variables run: | echo RUSTFLAGS="-Z sanitizer=thread" >> $GITHUB_ENV echo TSAN_OPTIONS="suppressions=$(pwd)/suppressions.txt" >> $GITHUB_ENV echo $GITHUB_ENV - uses: actions-rs/cargo@v1 with: use-cross: false command: test args: --test tsan --target=${{ matrix.target }} --features=${{ matrix.features }} ${{ matrix.buildtype }} -- --test-threads=1 # Run cfail tests on MSRV testcfail: name: testcfail runs-on: ubuntu-20.04 defaults: run: working-directory: cfail steps: - name: Checkout uses: actions/checkout@v2 - name: Cache cargo dependencies uses: actions/cache@v2 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v2 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust uses: actions-rs/toolchain@v1 with: toolchain: 1.51.0 target: x86_64-unknown-linux-gnu override: true - name: Run cargo run: cargo run # Run MIRI tests on nightly testmiri: name: testmiri runs-on: ubuntu-20.04 steps: - name: Checkout uses: actions/checkout@v2 - name: Cache cargo dependencies uses: actions/cache@v2 with: path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-cargo- - name: Cache build output dependencies uses: actions/cache@v2 with: path: target key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.OS }}-build- - name: Install Rust uses: actions-rs/toolchain@v1 with: toolchain: nightly target: x86_64-unknown-linux-gnu components: miri override: true - name: Run miri run: MIRIFLAGS=-Zmiri-ignore-leaks cargo miri test # Refs: https://github.com/rust-lang/crater/blob/9ab6f9697c901c4a44025cf0a39b73ad5b37d198/.github/workflows/bors.yml#L125-L149 # # ALL THE PREVIOUS JOBS NEEDS TO BE ADDED TO THE `needs` SECTION OF THIS JOB! ci-success: name: ci if: github.event_name == 'push' && success() needs: - style - check - testcpass - testtsan - testcfail runs-on: ubuntu-20.04 steps: - name: Mark the job as a success run: exit 0 heapless-0.7.16/.github/workflows/changelog.yml000064400000000000000000000015200072674642500176200ustar 00000000000000# Check that the changelog is updated for all changes. # # This is only run for PRs. on: pull_request: # opened, reopened, synchronize are the default types for pull_request. # labeled, unlabeled ensure this check is also run if a label is added or removed. 
types: [opened, reopened, labeled, unlabeled, synchronize] name: Changelog jobs: changelog: name: Changelog runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v2 - name: Check that changelog updated uses: dangoslen/changelog-enforcer@v3 with: changeLogPath: CHANGELOG.md skipLabels: 'needs-changelog, skip-changelog' missingUpdateErrorMessage: 'Please add a changelog entry in the CHANGELOG.md file.' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}heapless-0.7.16/.github/workflows/properties/build.properties.json000064400000000000000000000001660072674642500235340ustar 00000000000000{ "name": "Build", "description": "Heapless Test Suite", "iconName": "rust", "categories": ["Rust"] } heapless-0.7.16/.gitignore000064400000000000000000000000420072674642500135370ustar 00000000000000**/*.rs.bk .#* Cargo.lock target/ heapless-0.7.16/CHANGELOG.md000064400000000000000000000364560072674642500134020ustar 00000000000000# Change Log All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ### Added ### Changed ### Fixed ## [v0.7.16] - 2022-08-09 ### Added - add more `PartialEq` implementations to `Vec` where `Vec` is the RHS ### Changed ### Fixed - clarify in the docs that the capacity `heapless::String` is in bytes, not characters - Fixed some broken links in the documentation. ## [v0.7.15] - 2022-07-05 ### Added - Added `Vec::insert(index, element)` - Added `Vec::remove(index)` - Added `Vec::retain(f)` - Added `Vec::retain_mut(f)` ## [v0.7.14] - 2022-06-15 ### Added - Added support for AVR architecture. ### Fixed - `IndexSet` and `IndexMap`'s `default` method now compile time checks that their capacity is a power of two. ## [v0.7.13] - 2022-05-16 ### Added - Added `into_vec` to `BinaryHeap` ## [v0.7.12] - 2022-05-12 ### Added * Added support for AVR architecture. * Add `entry` API to `IndexMap` * Implement `IntoIterator` trait for `Indexmap` * Implement `FromIterator` for `String` * Add `first` and `last` methods to `IndexMap` and `IndexSet` * Add `pop_{front_back}_unchecked` methods to `Deque` ### Changed * Optimize the codegen of `Vec::clone` * `riscv32i` and `riscv32imc` targets unconditionally (e.g. `build --no-default-features`) depends on `atomic-polyfill` ### Fixed * Inserting an item that replaces an already present item will no longer fail with an error ## [v0.7.11] - 2022-05-09 ### Fixed * Fixed `pool` example in docstring. * Fixed undefined behavior in `Vec::truncate()`, `Vec::swap_remove_unchecked()`, and `Hole::move_to()` (internal to the binary heap implementation). 
* Fixed `BinaryHeap` elements are being dropped twice ## [v0.7.10] - 2022-01-21 ### Fixed - `cargo test` can now run on non-`x86` hosts ### Added - Added `OldestOrdered` iterator for `HistoryBuffer` ### Changed - `atomic-polyfill` is now enabled and used for `cas` atomic emulation on `riscv` targets ## [v0.7.9] - 2021-12-16 ### Fixed - Fix `IndexMap` and `IndexSet` bounds - Make `IndexSet::new()` a `const fn` ## [v0.7.8] - 2021-11-11 ### Added - A span of `defmt` versions is now supported (`0.2` and `0.3`) ## [v0.7.7] - 2021-09-22 ### Fixed - Fixed so `Pool` is `Sync` on ARMv6 ## [v0.7.6] - 2021-09-21 ### Added - Added `ArcPool` - Added `Debug` impl for `Deque` ### Fixed - ZSTs in `Pool` now works correctly - Some MIRI errors were resolved - Allow `pool!` on thumbv6 - Fixed possible UB in `Pool` on x86 ## [v0.7.5] - 2021-08-16 ### Added - Added `SortedLinkedList` - Added `Vec::is_empty`, one does not need to go through a slice anymore ### Changed - `Vec::pop_unchecked` is now public ## [v0.7.4] - 2021-08-06 ### Added - Implement `Default` for `MpMcQueue`, `Queue` and `HistoryBuffer` - Implement `PartialOrd` and `Ord` for `Vec` and `String` ### Fixed - Fixed comments in SPSC ## [v0.7.3] - 2021-07-01 ### Added - Added `Deque` ### Changed - `Box::freeze` is deprecated due to possibility of undefined behavior. ## [v0.7.2] - 2021-06-30 ### Added - Added new `Vec::into_array` method - Added const-asserts to all data structures ## [v0.7.1] - 2021-05-23 ### Changed - MPMC is now more generic ### Added - `defmt` for `Vec` and `String` ## [v0.7.0] - 2021-04-23 ### Changed - [breaking-change] Converted all data structures to use the `const generics` MVP - [breaking-change] `HistoryBuffer` is now working with const constructors and non-`Copy` data - [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values - Added missing `Deref`, `AsRef` and `Debug` for `HistoryBuffer` - [breaking-change] `MultiCore`/`SingleCore` and `Uxx` is now removed from `spsc::Queue` - [breaking-change] `spsc::Queue` is now `usize` only - [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207), i.e. it creates an `N - 1` sized queue instead of the old that generated an size `N` queue - [breaking-change] `String` has had `utf8` related methods removed as this can be done via `str` - [breaking-change] No data structures implement `AsSlice` traits any more, now using `AsRef` and `AsMut` as they work with any size of array now ### Fixed - `Pool` and `MPMC` now works on `thumbv6m` - `IndexMap::new()` is now a `const-fn` ## [v0.6.1] - 2021-03-02 ### Fixed - Security issue. ## [v0.6.0] - 2021-02-02 ### Changed - [breaking-change] The version of the `generic-array` dependency has been bumped to v0.14.2. ## [v0.5.6] - 2020-09-18 ### Added - Added `as_mut_vec` for `String` - Added `set_len` for `Vec` - Performance improvements in `histbuf` ### Fixed - `Producer` was made `Send` for single core applications ## [v0.5.5] - 2020-05-04 ### Added - Added `HistoryBuffer` - Added extra methods to `Vec`: `from_slice`, `starts_with`, `ends_with` - Optional `ufmt` support for `String` and `Vec` - Added `pool` support for bare-metal `armebv7r-` targets - Added Sync to `pool` for `x86` ## [v0.5.4] - 2020-04-06 ### Added - Added `StableDeref` implementation for `pool::Box` and `pool::singleton::Box`. ## [v0.5.3] - 2020-01-27 ### Added - Extend the ARMv7-A `Pool` support to the bare-metal `armv7a-` targets. 
## [v0.5.2] - 2020-01-15 ### Fixed - Fixed incorrect overflow behavior in computation of capacities - Fixed edge case in `mpmc::Queue::dqueue` that led to an infinite loop - IndexMap and LinerMap are now deserialized as maps, rather than as sequences - Fixed compilation of this crates on built-in targets that don't have CAS instructions ### Changed - `spsc::Queue` iterators now implement the double ended iterator trait ### Added - opt-out `cas` feature to disable parts of the API that use CAS instructions. Useful if using a custom (i.e. not built-in) rustc target that does not have CAS instructions. - singleton `Pool` support on ARMv7-A devices ## [v0.5.1] - 2019-08-29 ### Added - Added armv8 support - Added `Queue::peek` - Added `BinaryHeap::peek_mut` ## [v0.5.0] - 2019-07-12 ### Added - `Pool` now implements the `Sync` trait when targeting ARMv7-R. - Most data structures can now be constructed in "const context" (e.g. `static [mut]` variables) using a newtype in `heapless::i`. - `Pool` has gained a `grow_exact` method to more efficiently use statically allocated memory. - The `pool!` macro now accepts attributes. - `mpmc::Q*` a family of fixed capacity multiple-producer multiple-consumer lock-free queues. ### Changed - [breaking-change] `binary_heap::Kind` is now a sealed trait. ### Removed - [breaking-change] The "smaller-atomics" feature has been removed. It is now always enabled. - [breaking-change] The "min-const-fn" feature has been removed. It is now always enabled. - [breaking-change] The MSRV has been bumped to Rust 1.36.0. - [breaking-change] The version of the `generic-array` dependency has been bumped to v0.13.0. ## [v0.4.4] - 2019-05-02 ### Added - Implemented `PartialEq`, `PartialOrd`, `Eq`, `Ord` and `Hash` for `pool::Box` and `pool::singleton::Box`. ### Fixed - Fixed UB in our internal, stable re-implementation of `core::mem::MaybeUninit` that occurred when using some of our data structures with types that implement `Drop`. ## [v0.4.3] - 2019-04-22 ### Added - Added a memory pool that's lock-free and interrupt-safe on the ARMv7-M architecture. - `IndexMap` have gained `Eq` and `PartialEq` implementations. ## [v0.4.2] - 2019-02-12 ### Added - All containers now implement `Clone` - `spsc::Queue` now implements `Debug`, `Hash`, `PartialEq` and `Eq` - `LinearMap` now implements `Debug`, `FromIterator`, `IntoIter`, `PartialEq`, `Eq` and `Default` - `BinaryHeap` now implements `Debug` and `Default` - `String` now implements `FromStr`, `Hash`, `From` and `Default` - `Vec` now implements `Hash` and `Default` - A "serde" Cargo feature that when enabled adds a `serde::Serialize` and `serde::Deserialize` implementations to each collection. ## [v0.4.1] - 2018-12-16 ### Changed - Add a new type parameter to `spsc::Queue` that indicates whether the queue is only single-core safe, or multi-core safe. By default the queue is multi-core safe; this preserves the current semantics. New `unsafe` constructors have been added to create the single-core variant. ## [v0.4.0] - 2018-10-19 ### Changed - [breaking-change] All Cargo features are disabled by default. This crate now compiles on stable by default. - [breaking-change] RingBuffer has been renamed to spsc::Queue. The ring_buffer module has been renamed to spsc. - [breaking-change] The bounds on spsc::Queue have changed. ### Removed - [breaking-change] The sealed `Uxx` trait has been removed from the public API. 
## [v0.3.7] - 2018-08-19 ### Added - Implemented `IntoIterator` and `FromIterator` for `Vec` - `ready` methods to `ring_buffer::{Consumer,Producer}` - An opt-out "const-fn" Cargo feature that turns `const` functions into normal functions when disabled. - An opt-out "smaller-atomics" Cargo feature that removes the ability to shrink the size of `RingBuffer` when disabled. ### Changed - This crate now compiles on stable when both the "const-fn" and "smaller-atomics" features are disabled. ### Fixed - The `RingBuffer.len` function - Compilation on recent nightlies ## [v0.3.6] - 2018-05-04 ### Fixed - The capacity of `RingBuffer`. It should be the requested capacity plus not twice that plus one. ## [v0.3.5] - 2018-05-03 ### Added - `RingBuffer.enqueue_unchecked` an unchecked version of `RingBuffer.enqueue` ## [v0.3.4] - 2018-04-28 ### Added - `BinaryHeap.pop_unchecked` an unchecked version of `BinaryHeap.pop` ## [v0.3.3] - 2018-04-28 ### Added - `BinaryHeap.push_unchecked` an unchecked version of `BinaryHeap.push` ## [v0.3.2] - 2018-04-27 ### Added - A re-export of `generic_array::ArrayLength`, for convenience. ## [v0.3.1] - 2018-04-23 ### Added - Fixed capacity implementations of `IndexMap` and `IndexSet`. - A `Extend` implementation to `Vec` - More `PartialEq` implementations to `Vec` ## [v0.3.0] - 2018-04-22 ### Changed - [breaking-change] The capacity of all data structures must now be specified using type level integers (cf. `typenum`). See documentation for details. - [breaking-change] `BufferFullError` has been removed in favor of (a) returning ownership of the item that couldn't be added to the collection (cf. `Vec.push`), or (b) returning the unit type when the argument was not consumed (cf. `Vec.extend_from_slice`). ## [v0.2.7] - 2018-04-20 ### Added - Unchecked methods to dequeue and enqueue items into a `RingBuffer` via the `Consumer` and `Producer` end points. ### Changed - `RingBuffer` now has a generic index type, which default to `usize` for backward compatibility. Changing the index type to `u8` or `u16` reduces the footprint of the `RingBuffer` but limits its maximum capacity (254 and 65534, respectively). ## [v0.2.6] - 2018-04-18 ### Added - A `BinaryHeap` implementation. `BinaryHeap` is a priority queue implemented with a binary heap. ## [v0.2.5] - 2018-04-13 ### Fixed - Dereferencing `heapless::Vec` no longer incurs in a bounds check. ## [v0.2.4] - 2018-03-12 ### Fixed - `LinerMap::new` is now a const fn ## [v0.2.3] - 2018-03-11 ### Added - A `swap_remove` method to `Vec` - A `LinearMap` implementation. `LinearMap` is a map / dict backed by an array and that performs lookups via linear search. ## [v0.2.2] - 2018-03-01 ### Added - Fixed size version of `std::String` ## [v0.2.1] - 2017-12-21 ### Added - `Vec` now implements both `fmt::Debug`, `PartialEq` and `Eq`. - `resize` and `resize_default` methods to `Vec`. ## [v0.2.0] - 2017-11-22 ### Added - A single producer single consumer mode to `RingBuffer`. - A `truncate` method to `Vec`. ### Changed - [breaking-change] Both `Vec::new` and `RingBuffer::new` no longer require an initial value. The signature of `new` is now `const fn() -> Self`. - [breaking-change] The error type of all operations that may fail has changed from `()` to `BufferFullError`. - Both `RingBuffer` and `Vec` now support arrays of *any* size for their backup storage. 
## [v0.1.0] - 2017-04-27 - Initial release [Unreleased]: https://github.com/japaric/heapless/compare/v0.7.16...HEAD [v0.7.16]: https://github.com/japaric/heapless/compare/v0.7.15...v0.7.16 [v0.7.15]: https://github.com/japaric/heapless/compare/v0.7.14...v0.7.15 [v0.7.14]: https://github.com/japaric/heapless/compare/v0.7.13...v0.7.14 [v0.7.13]: https://github.com/japaric/heapless/compare/v0.7.12...v0.7.13 [v0.7.12]: https://github.com/japaric/heapless/compare/v0.7.11...v0.7.12 [v0.7.11]: https://github.com/japaric/heapless/compare/v0.7.10...v0.7.11 [v0.7.10]: https://github.com/japaric/heapless/compare/v0.7.9...v0.7.10 [v0.7.9]: https://github.com/japaric/heapless/compare/v0.7.8...v0.7.9 [v0.7.8]: https://github.com/japaric/heapless/compare/v0.7.7...v0.7.8 [v0.7.7]: https://github.com/japaric/heapless/compare/v0.7.6...v0.7.7 [v0.7.6]: https://github.com/japaric/heapless/compare/v0.7.5...v0.7.6 [v0.7.5]: https://github.com/japaric/heapless/compare/v0.7.4...v0.7.5 [v0.7.4]: https://github.com/japaric/heapless/compare/v0.7.3...v0.7.4 [v0.7.3]: https://github.com/japaric/heapless/compare/v0.7.2...v0.7.3 [v0.7.2]: https://github.com/japaric/heapless/compare/v0.7.1...v0.7.2 [v0.7.1]: https://github.com/japaric/heapless/compare/v0.7.0...v0.7.1 [v0.7.0]: https://github.com/japaric/heapless/compare/v0.6.1...v0.7.0 [v0.6.1]: https://github.com/japaric/heapless/compare/v0.6.0...v0.6.1 [v0.6.0]: https://github.com/japaric/heapless/compare/v0.5.5...v0.6.0 [v0.5.5]: https://github.com/japaric/heapless/compare/v0.5.4...v0.5.5 [v0.5.4]: https://github.com/japaric/heapless/compare/v0.5.3...v0.5.4 [v0.5.3]: https://github.com/japaric/heapless/compare/v0.5.2...v0.5.3 [v0.5.2]: https://github.com/japaric/heapless/compare/v0.5.1...v0.5.2 [v0.5.1]: https://github.com/japaric/heapless/compare/v0.5.0...v0.5.1 [v0.5.0]: https://github.com/japaric/heapless/compare/v0.4.4...v0.5.0 [v0.4.4]: https://github.com/japaric/heapless/compare/v0.4.3...v0.4.4 [v0.4.3]: https://github.com/japaric/heapless/compare/v0.4.2...v0.4.3 [v0.4.2]: https://github.com/japaric/heapless/compare/v0.4.1...v0.4.2 [v0.4.1]: https://github.com/japaric/heapless/compare/v0.4.0...v0.4.1 [v0.4.0]: https://github.com/japaric/heapless/compare/v0.3.7...v0.4.0 [v0.3.7]: https://github.com/japaric/heapless/compare/v0.3.6...v0.3.7 [v0.3.6]: https://github.com/japaric/heapless/compare/v0.3.5...v0.3.6 [v0.3.5]: https://github.com/japaric/heapless/compare/v0.3.4...v0.3.5 [v0.3.4]: https://github.com/japaric/heapless/compare/v0.3.3...v0.3.4 [v0.3.3]: https://github.com/japaric/heapless/compare/v0.3.2...v0.3.3 [v0.3.2]: https://github.com/japaric/heapless/compare/v0.3.1...v0.3.2 [v0.3.1]: https://github.com/japaric/heapless/compare/v0.3.0...v0.3.1 [v0.3.0]: https://github.com/japaric/heapless/compare/v0.2.7...v0.3.0 [v0.2.7]: https://github.com/japaric/heapless/compare/v0.2.6...v0.2.7 [v0.2.6]: https://github.com/japaric/heapless/compare/v0.2.5...v0.2.6 [v0.2.5]: https://github.com/japaric/heapless/compare/v0.2.4...v0.2.5 [v0.2.4]: https://github.com/japaric/heapless/compare/v0.2.3...v0.2.4 [v0.2.3]: https://github.com/japaric/heapless/compare/v0.2.2...v0.2.3 [v0.2.2]: https://github.com/japaric/heapless/compare/v0.2.1...v0.2.2 [v0.2.1]: https://github.com/japaric/heapless/compare/v0.2.0...v0.2.1 [v0.2.0]: https://github.com/japaric/heapless/compare/v0.1.0...v0.2.0 heapless-0.7.16/Cargo.toml0000644000000040150000000000100107310ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" 
Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "heapless" version = "0.7.16" authors = [ "Jorge Aparicio ", "Per Lindgren ", "Emil Fresk ", ] description = "`static` friendly data structures that don't require dynamic memory allocation" documentation = "https://docs.rs/heapless" readme = "README.md" keywords = [ "static", "no-heap", ] categories = [ "data-structures", "no-std", ] license = "MIT OR Apache-2.0" repository = "https://github.com/japaric/heapless" [package.metadata.docs.rs] all-features = true [dependencies.defmt] version = ">=0.2.0,<0.4" optional = true [dependencies.hash32] version = "0.2.1" [dependencies.serde] version = "1" optional = true default-features = false [dependencies.stable_deref_trait] version = "1" default-features = false [dependencies.ufmt-write] version = "0.1" optional = true [dev-dependencies.ufmt] version = "0.1" [build-dependencies.rustc_version] version = "0.4.0" [features] __trybuild = [] cas = ["atomic-polyfill"] default = ["cas"] defmt-impl = ["defmt"] mpmc_large = [] ufmt-impl = ["ufmt-write"] x86-sync-pool = [] [target."cfg(target_arch = \"avr\")".dependencies.atomic-polyfill] version = "0.1.8" optional = true [target."cfg(target_arch = \"x86_64\")".dependencies.spin] version = "0.9.2" [target.riscv32i-unknown-none-elf.dependencies.atomic-polyfill] version = "0.1.4" [target.riscv32imc-unknown-none-elf.dependencies.atomic-polyfill] version = "0.1.4" [target.thumbv6m-none-eabi.dependencies.atomic-polyfill] version = "0.1.2" optional = true heapless-0.7.16/Cargo.toml.orig000064400000000000000000000033200072674642500144400ustar 00000000000000[package] authors = [ "Jorge Aparicio ", "Per Lindgren ", "Emil Fresk ", ] categories = ["data-structures", "no-std"] description = "`static` friendly data structures that don't require dynamic memory allocation" documentation = "https://docs.rs/heapless" edition = "2018" keywords = ["static", "no-heap"] license = "MIT OR Apache-2.0" name = "heapless" repository = "https://github.com/japaric/heapless" version = "0.7.16" [features] default = ["cas"] cas = ["atomic-polyfill"] ufmt-impl = ["ufmt-write"] # read the docs before enabling: makes `Pool` Sync on x86_64 x86-sync-pool = [] # only for tests __trybuild = [] # Enable larger MPMC sizes. 
mpmc_large = [] # This flag has no version guarantee, the `defmt` dependency can be updated in a patch release defmt-impl = ["defmt"] [target.thumbv6m-none-eabi.dependencies] atomic-polyfill = { version = "0.1.2", optional = true } [target.riscv32i-unknown-none-elf.dependencies] atomic-polyfill = { version = "0.1.4" } [target.riscv32imc-unknown-none-elf.dependencies] atomic-polyfill = { version = "0.1.4" } [target.'cfg(target_arch = "avr")'.dependencies] atomic-polyfill = { version = "0.1.8", optional = true } [dependencies] hash32 = "0.2.1" [target.'cfg(target_arch = "x86_64")'.dependencies] spin = "0.9.2" [dependencies.serde] version = "1" optional = true default-features = false [dependencies.stable_deref_trait] version = "1" default-features = false [dependencies.ufmt-write] version = "0.1" optional = true [dev-dependencies.ufmt] version = "0.1" [dependencies.defmt] version = ">=0.2.0,<0.4" optional = true [build-dependencies] rustc_version = "0.4.0" [package.metadata.docs.rs] all-features = true heapless-0.7.16/LICENSE-APACHE000064400000000000000000000251370072674642500135070ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. heapless-0.7.16/LICENSE-MIT000064400000000000000000000020420072674642500132050ustar 00000000000000Copyright (c) 2017 Jorge Aparicio Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. heapless-0.7.16/README.md000064400000000000000000000020470072674642500130350ustar 00000000000000[![crates.io](https://img.shields.io/crates/v/heapless.svg)](https://crates.io/crates/heapless) [![crates.io](https://img.shields.io/crates/d/heapless.svg)](https://crates.io/crates/heapless) # `heapless` > `static` friendly data structures that don't require dynamic memory allocation ## [Documentation](https://japaric.github.io/heapless/heapless/index.html) ## [Change log](CHANGELOG.md) ## Tests ```bash # run all cargo test --features 'serde','x86-sync-pool' # run only for example histbuf tests cargo test histbuf --features 'serde','x86-sync-pool' ``` ## License Licensed under either of - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
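## Quick example

A minimal sketch of the general flavor (not from the upstream README; it assumes the v0.7 const-generic `Vec` API, where capacity lives in the type and a full container hands the element back instead of allocating):

```rust
use heapless::Vec; // fixed-capacity vector; storage is an inline array, no allocator needed

fn main() {
    // Capacity (4) is part of the type, so this can live on the stack or in a `static`.
    let mut xs: Vec<u8, 4> = Vec::new();

    xs.push(1).unwrap();
    xs.push(2).unwrap();
    xs.extend_from_slice(&[3, 4]).unwrap();
    assert_eq!(xs.as_slice(), &[1, 2, 3, 4]);

    // A full `Vec` rejects the element and returns it to the caller -- it never reallocates.
    assert_eq!(xs.push(5), Err(5));
}
```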
heapless-0.7.16/build.rs000064400000000000000000000057500072674642500132270ustar 00000000000000#![deny(warnings)] use std::{env, error::Error}; use rustc_version::Channel; fn main() -> Result<(), Box> { let target = env::var("TARGET")?; if target.starts_with("thumbv6m-") { println!("cargo:rustc-cfg=armv6m"); } else if target.starts_with("thumbv7m-") { println!("cargo:rustc-cfg=armv7m"); } else if target.starts_with("thumbv7em-") { println!("cargo:rustc-cfg=armv7m"); } else if target.starts_with("armv7r-") | target.starts_with("armebv7r-") { println!("cargo:rustc-cfg=armv7r"); } else if target.starts_with("thumbv8m.base") { println!("cargo:rustc-cfg=armv8m_base"); } else if target.starts_with("thumbv8m.main") { println!("cargo:rustc-cfg=armv8m_main"); } else if target.starts_with("armv7-") | target.starts_with("armv7a-") { println!("cargo:rustc-cfg=armv7a"); } let is_avr = env::var("CARGO_CFG_TARGET_ARCH").as_deref() == Ok("avr"); // built-in targets with no atomic / CAS support as of nightly-2022-01-13 // AND not supported by the atomic-polyfill crate // see the `no-atomics.sh` / `no-cas.sh` script sitting next to this file if is_avr { // lacks cas } else { match &target[..] { "avr-unknown-gnu-atmega328" | "bpfeb-unknown-none" | "bpfel-unknown-none" | "msp430-none-elf" // | "riscv32i-unknown-none-elf" // supported by atomic-polyfill // | "riscv32imc-unknown-none-elf" // supported by atomic-polyfill | "thumbv4t-none-eabi" // | "thumbv6m-none-eabi" // supported by atomic-polyfill => {} _ => { println!("cargo:rustc-cfg=has_cas"); } } }; if is_avr { // lacks atomics } else { match &target[..] { "msp430-none-elf" // | "riscv32i-unknown-none-elf" // supported by atomic-polyfill // | "riscv32imc-unknown-none-elf" // supported by atomic-polyfill => {} _ => { println!("cargo:rustc-cfg=has_atomics"); } } }; // Let the code know if it should use atomic-polyfill or not, and what aspects // of polyfill it requires if is_avr { println!("cargo:rustc-cfg=full_atomic_polyfill"); println!("cargo:rustc-cfg=cas_atomic_polyfill"); } else { match &target[..] { "riscv32i-unknown-none-elf" | "riscv32imc-unknown-none-elf" => { println!("cargo:rustc-cfg=full_atomic_polyfill"); println!("cargo:rustc-cfg=cas_atomic_polyfill"); } "thumbv6m-none-eabi" => { println!("cargo:rustc-cfg=cas_atomic_polyfill"); } _ => {} } } if !matches!( rustc_version::version_meta().unwrap().channel, Channel::Stable | Channel::Beta ) { println!("cargo:rustc-cfg=unstable_channel"); } Ok(()) } heapless-0.7.16/no-atomics.sh000064400000000000000000000003410072674642500141560ustar 00000000000000#!/bin/bash set -euo pipefail main() { IFS=' ' for t in $(rustc --print target-list); do rustc +nightly --print cfg --target $t | grep 'target_has_atomic_load_store=' >/dev/null || echo $t done } main heapless-0.7.16/no-cas.sh000064400000000000000000000003260072674642500132700ustar 00000000000000#!/bin/bash set -euo pipefail main() { IFS=' ' for t in $(rustc --print target-list); do rustc +nightly --print cfg --target $t | grep 'target_has_atomic=' >/dev/null || echo $t done } main heapless-0.7.16/src/binary_heap.rs000064400000000000000000000464720072674642500152060ustar 00000000000000//! A priority queue implemented with a binary heap. //! //! Insertion and popping the largest element have `O(log n)` time complexity. Checking the largest //! / smallest element is `O(1)`. // TODO not yet implemented // Converting a vector to a binary heap can be done in-place, and has `O(n)` complexity. 
A binary // heap can also be converted to a sorted vector in-place, allowing it to be used for an `O(n log // n)` in-place heapsort. use core::{ cmp::Ordering, fmt, marker::PhantomData, mem::{self, ManuallyDrop}, ops::{Deref, DerefMut}, ptr, slice, }; use crate::vec::Vec; /// Min-heap pub enum Min {} /// Max-heap pub enum Max {} /// The binary heap kind: min-heap or max-heap pub trait Kind: private::Sealed { #[doc(hidden)] fn ordering() -> Ordering; } impl Kind for Min { fn ordering() -> Ordering { Ordering::Less } } impl Kind for Max { fn ordering() -> Ordering { Ordering::Greater } } /// Sealed traits mod private { pub trait Sealed {} } impl private::Sealed for Max {} impl private::Sealed for Min {} /// A priority queue implemented with a binary heap. /// /// This can be either a min-heap or a max-heap. /// /// It is a logic error for an item to be modified in such a way that the item's ordering relative /// to any other item, as determined by the `Ord` trait, changes while it is in the heap. This is /// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// /// // We can use peek to look at the next item in the heap. In this case, /// // there's no items in there yet so we get None. /// assert_eq!(heap.peek(), None); /// /// // Let's add some scores... /// heap.push(1).unwrap(); /// heap.push(5).unwrap(); /// heap.push(2).unwrap(); /// /// // Now peek shows the most important item in the heap. /// assert_eq!(heap.peek(), Some(&5)); /// /// // We can check the length of a heap. /// assert_eq!(heap.len(), 3); /// /// // We can iterate over the items in the heap, although they are returned in /// // a random order. /// for x in &heap { /// println!("{}", x); /// } /// /// // If we instead pop these scores, they should come back in order. /// assert_eq!(heap.pop(), Some(5)); /// assert_eq!(heap.pop(), Some(2)); /// assert_eq!(heap.pop(), Some(1)); /// assert_eq!(heap.pop(), None); /// /// // We can clear the heap of any remaining items. /// heap.clear(); /// /// // The heap should now be empty. /// assert!(heap.is_empty()) /// ``` pub struct BinaryHeap { pub(crate) _kind: PhantomData, pub(crate) data: Vec, } impl BinaryHeap { /* Constructors */ /// Creates an empty BinaryHeap as a $K-heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// // allocate the binary heap on the stack /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(4).unwrap(); /// /// // allocate the binary heap in a static variable /// static mut HEAP: BinaryHeap = BinaryHeap::new(); /// ``` pub const fn new() -> Self { Self { _kind: PhantomData, data: Vec::new(), } } } impl BinaryHeap where T: Ord, K: Kind, { /* Public API */ /// Returns the capacity of the binary heap. pub fn capacity(&self) -> usize { self.data.capacity() } /// Drops all items from the binary heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert!(!heap.is_empty()); /// /// heap.clear(); /// /// assert!(heap.is_empty()); /// ``` pub fn clear(&mut self) { self.data.clear() } /// Returns the length of the binary heap. 
/// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert_eq!(heap.len(), 2); /// ``` pub fn len(&self) -> usize { self.data.len() } /// Checks if the binary heap is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// /// assert!(heap.is_empty()); /// /// heap.push(3).unwrap(); /// heap.push(5).unwrap(); /// heap.push(1).unwrap(); /// /// assert!(!heap.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns an iterator visiting all values in the underlying vector, in arbitrary order. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(2).unwrap(); /// heap.push(3).unwrap(); /// heap.push(4).unwrap(); /// /// // Print 1, 2, 3, 4 in arbitrary order /// for x in heap.iter() { /// println!("{}", x); /// /// } /// ``` pub fn iter(&self) -> slice::Iter<'_, T> { self.data.as_slice().iter() } /// Returns a mutable iterator visiting all values in the underlying vector, in arbitrary order. /// /// **WARNING** Mutating the items in the binary heap can leave the heap in an inconsistent /// state. pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> { self.data.as_mut_slice().iter_mut() } /// Returns the *top* (greatest if max-heap, smallest if min-heap) item in the binary heap, or /// None if it is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// assert_eq!(heap.peek(), None); /// /// heap.push(1).unwrap(); /// heap.push(5).unwrap(); /// heap.push(2).unwrap(); /// assert_eq!(heap.peek(), Some(&5)); /// ``` pub fn peek(&self) -> Option<&T> { self.data.as_slice().get(0) } /// Returns a mutable reference to the greatest item in the binary heap, or /// `None` if it is empty. /// /// Note: If the `PeekMut` value is leaked, the heap may be in an /// inconsistent state. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// assert!(heap.peek_mut().is_none()); /// /// heap.push(1); /// heap.push(5); /// heap.push(2); /// { /// let mut val = heap.peek_mut().unwrap(); /// *val = 0; /// } /// /// assert_eq!(heap.peek(), Some(&2)); /// ``` pub fn peek_mut(&mut self) -> Option> { if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: true, }) } } /// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and /// returns it, or None if it is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert_eq!(heap.pop(), Some(3)); /// assert_eq!(heap.pop(), Some(1)); /// assert_eq!(heap.pop(), None); /// ``` pub fn pop(&mut self) -> Option { if self.is_empty() { None } else { Some(unsafe { self.pop_unchecked() }) } } /// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and /// returns it, without checking if the binary heap is empty. 
pub unsafe fn pop_unchecked(&mut self) -> T { let mut item = self.data.pop_unchecked(); if !self.is_empty() { mem::swap(&mut item, self.data.as_mut_slice().get_unchecked_mut(0)); self.sift_down_to_bottom(0); } item } /// Pushes an item onto the binary heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; /// /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(3).unwrap(); /// heap.push(5).unwrap(); /// heap.push(1).unwrap(); /// /// assert_eq!(heap.len(), 3); /// assert_eq!(heap.peek(), Some(&5)); /// ``` pub fn push(&mut self, item: T) -> Result<(), T> { if self.data.is_full() { return Err(item); } unsafe { self.push_unchecked(item) } Ok(()) } /// Pushes an item onto the binary heap without first checking if it's full. pub unsafe fn push_unchecked(&mut self, item: T) { let old_len = self.len(); self.data.push_unchecked(item); self.sift_up(0, old_len); } /// Returns the underlying ```Vec```. Order is arbitrary and time is O(1). pub fn into_vec(self) -> Vec { self.data } /* Private API */ fn sift_down_to_bottom(&mut self, mut pos: usize) { let end = self.len(); let start = pos; unsafe { let mut hole = Hole::new(self.data.as_mut_slice(), pos); let mut child = 2 * pos + 1; while child < end { let right = child + 1; // compare with the greater of the two children if right < end && hole.get(child).cmp(hole.get(right)) != K::ordering() { child = right; } hole.move_to(child); child = 2 * hole.pos() + 1; } pos = hole.pos; } self.sift_up(start, pos); } fn sift_up(&mut self, start: usize, pos: usize) -> usize { unsafe { // Take out the value at `pos` and create a hole. let mut hole = Hole::new(self.data.as_mut_slice(), pos); while hole.pos() > start { let parent = (hole.pos() - 1) / 2; if hole.element().cmp(hole.get(parent)) != K::ordering() { break; } hole.move_to(parent); } hole.pos() } } } /// Hole represents a hole in a slice i.e. an index without valid value /// (because it was moved from or duplicated). /// In drop, `Hole` will restore the slice by filling the hole /// position with the value that was originally removed. struct Hole<'a, T> { data: &'a mut [T], /// `elt` is always `Some` from new until drop. elt: ManuallyDrop, pos: usize, } impl<'a, T> Hole<'a, T> { /// Create a new Hole at index `pos`. /// /// Unsafe because pos must be within the data slice. #[inline] unsafe fn new(data: &'a mut [T], pos: usize) -> Self { debug_assert!(pos < data.len()); let elt = ptr::read(data.get_unchecked(pos)); Hole { data, elt: ManuallyDrop::new(elt), pos, } } #[inline] fn pos(&self) -> usize { self.pos } /// Returns a reference to the element removed. #[inline] fn element(&self) -> &T { &self.elt } /// Returns a reference to the element at `index`. /// /// Unsafe because index must be within the data slice and not equal to pos. #[inline] unsafe fn get(&self, index: usize) -> &T { debug_assert!(index != self.pos); debug_assert!(index < self.data.len()); self.data.get_unchecked(index) } /// Move hole to new location /// /// Unsafe because index must be within the data slice and not equal to pos. #[inline] unsafe fn move_to(&mut self, index: usize) { debug_assert!(index != self.pos); debug_assert!(index < self.data.len()); let ptr = self.data.as_mut_ptr(); let index_ptr: *const _ = ptr.add(index); let hole_ptr = ptr.add(self.pos); ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1); self.pos = index; } } /// Structure wrapping a mutable reference to the greatest item on a /// `BinaryHeap`. /// /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. 
See /// its documentation for more. /// /// [`peek_mut`]: struct.BinaryHeap.html#method.peek_mut /// [`BinaryHeap`]: struct.BinaryHeap.html pub struct PeekMut<'a, T, K, const N: usize> where T: Ord, K: Kind, { heap: &'a mut BinaryHeap, sift: bool, } impl Drop for PeekMut<'_, T, K, N> where T: Ord, K: Kind, { fn drop(&mut self) { if self.sift { self.heap.sift_down_to_bottom(0); } } } impl Deref for PeekMut<'_, T, K, N> where T: Ord, K: Kind, { type Target = T; fn deref(&self) -> &T { debug_assert!(!self.heap.is_empty()); // SAFE: PeekMut is only instantiated for non-empty heaps unsafe { self.heap.data.as_slice().get_unchecked(0) } } } impl DerefMut for PeekMut<'_, T, K, N> where T: Ord, K: Kind, { fn deref_mut(&mut self) -> &mut T { debug_assert!(!self.heap.is_empty()); // SAFE: PeekMut is only instantiated for non-empty heaps unsafe { self.heap.data.as_mut_slice().get_unchecked_mut(0) } } } impl<'a, T, K, const N: usize> PeekMut<'a, T, K, N> where T: Ord, K: Kind, { /// Removes the peeked value from the heap and returns it. pub fn pop(mut this: PeekMut<'a, T, K, N>) -> T { let value = this.heap.pop().unwrap(); this.sift = false; value } } impl<'a, T> Drop for Hole<'a, T> { #[inline] fn drop(&mut self) { // fill the hole again unsafe { let pos = self.pos; ptr::write(self.data.get_unchecked_mut(pos), ptr::read(&*self.elt)); } } } impl Default for BinaryHeap where T: Ord, K: Kind, { fn default() -> Self { Self::new() } } impl Clone for BinaryHeap where K: Kind, T: Ord + Clone, { fn clone(&self) -> Self { Self { _kind: self._kind, data: self.data.clone(), } } } impl fmt::Debug for BinaryHeap where K: Kind, T: Ord + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } impl<'a, T, K, const N: usize> IntoIterator for &'a BinaryHeap where K: Kind, T: Ord, { type Item = &'a T; type IntoIter = slice::Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } #[cfg(test)] mod tests { use std::vec::Vec; use crate::binary_heap::{BinaryHeap, Max, Min}; #[test] fn static_new() { static mut _B: BinaryHeap = BinaryHeap::new(); } #[test] fn drop() { droppable!(); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: BinaryHeap = BinaryHeap::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } #[test] fn into_vec() { droppable!(); let mut h: BinaryHeap = BinaryHeap::new(); h.push(Droppable::new()).ok().unwrap(); h.push(Droppable::new()).ok().unwrap(); h.pop().unwrap(); assert_eq!(Droppable::count(), 1); let v = h.into_vec(); assert_eq!(Droppable::count(), 1); core::mem::drop(v); assert_eq!(Droppable::count(), 0); } #[test] fn min() { let mut heap = BinaryHeap::<_, Min, 16>::new(); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(3).unwrap(); heap.push(17).unwrap(); heap.push(19).unwrap(); heap.push(36).unwrap(); heap.push(7).unwrap(); heap.push(25).unwrap(); heap.push(100).unwrap(); assert_eq!( heap.iter().cloned().collect::>(), [1, 2, 3, 17, 19, 36, 7, 25, 100] ); 
assert_eq!(heap.pop(), Some(1)); assert_eq!( heap.iter().cloned().collect::>(), [2, 17, 3, 25, 19, 36, 7, 100] ); assert_eq!(heap.pop(), Some(2)); assert_eq!(heap.pop(), Some(3)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(17)); assert_eq!(heap.pop(), Some(19)); assert_eq!(heap.pop(), Some(25)); assert_eq!(heap.pop(), Some(36)); assert_eq!(heap.pop(), Some(100)); assert_eq!(heap.pop(), None); assert!(heap.peek_mut().is_none()); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(10).unwrap(); { let mut val = heap.peek_mut().unwrap(); *val = 7; } assert_eq!(heap.pop(), Some(2)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(10)); assert_eq!(heap.pop(), None); } #[test] fn max() { let mut heap = BinaryHeap::<_, Max, 16>::new(); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(3).unwrap(); heap.push(17).unwrap(); heap.push(19).unwrap(); heap.push(36).unwrap(); heap.push(7).unwrap(); heap.push(25).unwrap(); heap.push(100).unwrap(); assert_eq!( heap.iter().cloned().collect::>(), [100, 36, 19, 25, 3, 2, 7, 1, 17] ); assert_eq!(heap.pop(), Some(100)); assert_eq!( heap.iter().cloned().collect::>(), [36, 25, 19, 17, 3, 2, 7, 1] ); assert_eq!(heap.pop(), Some(36)); assert_eq!(heap.pop(), Some(25)); assert_eq!(heap.pop(), Some(19)); assert_eq!(heap.pop(), Some(17)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(3)); assert_eq!(heap.pop(), Some(2)); assert_eq!(heap.pop(), Some(1)); assert_eq!(heap.pop(), None); assert!(heap.peek_mut().is_none()); heap.push(1).unwrap(); heap.push(9).unwrap(); heap.push(10).unwrap(); { let mut val = heap.peek_mut().unwrap(); *val = 7; } assert_eq!(heap.pop(), Some(9)); assert_eq!(heap.pop(), Some(7)); assert_eq!(heap.pop(), Some(1)); assert_eq!(heap.pop(), None); } } heapless-0.7.16/src/de.rs000064400000000000000000000201430072674642500133000ustar 00000000000000use crate::{ binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec, }; use core::{fmt, marker::PhantomData}; use hash32::{BuildHasherDefault, Hash, Hasher}; use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess}; // Sequential containers impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap where T: Ord + Deserialize<'de>, KIND: BinaryHeapKind, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>); impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N> where T: Ord + Deserialize<'de>, KIND: BinaryHeapKind, { type Value = BinaryHeap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = BinaryHeap::new(); while let Some(value) = seq.next_element()? 
{ if values.push(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet, N> where T: Eq + Hash + Deserialize<'de>, S: Hasher + Default, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>); impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N> where T: Eq + Hash + Deserialize<'de>, S: Hasher + Default, { type Value = IndexSet, N>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = IndexSet::new(); while let Some(value) = seq.next_element()? { if values.insert(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } impl<'de, T, const N: usize> Deserialize<'de> for Vec where T: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>); impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N> where T: Deserialize<'de>, { type Value = Vec; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let mut values = Vec::new(); while let Some(value) = seq.next_element()? { if values.push(value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_seq(ValueVisitor(PhantomData)) } } // Dictionaries impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, S: Default + Hasher, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>); impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, S: Default + Hasher, { type Value = IndexMap, N>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") } fn visit_map(self, mut map: A) -> Result where A: MapAccess<'de>, { let mut values = IndexMap::new(); while let Some((key, value)) = map.next_entry()? { if values.insert(key, value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_map(ValueVisitor(PhantomData)) } } impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap where K: Eq + Deserialize<'de>, V: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>); impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N> where K: Eq + Deserialize<'de>, V: Deserialize<'de>, { type Value = LinearMap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") } fn visit_map(self, mut map: A) -> Result where A: MapAccess<'de>, { let mut values = LinearMap::new(); while let Some((key, value)) = map.next_entry()? 
{ if values.insert(key, value).is_err() { return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; } } Ok(values) } } deserializer.deserialize_map(ValueVisitor(PhantomData)) } } // String containers impl<'de, const N: usize> Deserialize<'de> for String { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { struct ValueVisitor<'de, const N: usize>(PhantomData<&'de ()>); impl<'de, const N: usize> de::Visitor<'de> for ValueVisitor<'de, N> { type Value = String; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "a string no more than {} bytes long", N as u64) } fn visit_str(self, v: &str) -> Result where E: de::Error, { let mut s = String::new(); s.push_str(v) .map_err(|_| E::invalid_length(v.len(), &self))?; Ok(s) } fn visit_bytes(self, v: &[u8]) -> Result where E: de::Error, { let mut s = String::new(); s.push_str( core::str::from_utf8(v) .map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?, ) .map_err(|_| E::invalid_length(v.len(), &self))?; Ok(s) } } deserializer.deserialize_str(ValueVisitor::<'de, N>(PhantomData)) } } heapless-0.7.16/src/defmt.rs000064400000000000000000000007240072674642500140120ustar 00000000000000//! Defmt implementations for heapless types //! use crate::Vec; use defmt::Formatter; impl defmt::Format for Vec where T: defmt::Format, { fn format(&self, fmt: Formatter<'_>) { defmt::write!(fmt, "{=[?]}", self.as_slice()) } } impl defmt::Format for crate::String where u8: defmt::Format, { fn format(&self, fmt: Formatter<'_>) { defmt::write!(fmt, "{=str}", self.as_str()); } } heapless-0.7.16/src/deque.rs000064400000000000000000000561400072674642500140210ustar 00000000000000use core::fmt; use core::iter::FusedIterator; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::{ptr, slice}; /// A fixed capacity double-ended queue. /// /// # Examples /// /// ``` /// use heapless::Deque; /// /// // A deque with a fixed capacity of 8 elements allocated on the stack /// let mut deque = Deque::<_, 8>::new(); /// /// // You can use it as a good old FIFO queue. /// deque.push_back(1); /// deque.push_back(2); /// assert_eq!(deque.len(), 2); /// /// assert_eq!(deque.pop_front(), Some(1)); /// assert_eq!(deque.pop_front(), Some(2)); /// assert_eq!(deque.len(), 0); /// /// // Deque is double-ended, you can push and pop from the front and back. /// deque.push_back(1); /// deque.push_front(2); /// deque.push_back(3); /// deque.push_front(4); /// assert_eq!(deque.pop_front(), Some(4)); /// assert_eq!(deque.pop_front(), Some(2)); /// assert_eq!(deque.pop_front(), Some(1)); /// assert_eq!(deque.pop_front(), Some(3)); /// /// // You can iterate it, yielding all the elements front-to-back. /// for x in &deque { /// println!("{}", x); /// } /// ``` pub struct Deque { buffer: [MaybeUninit; N], /// Front index. Always 0..=(N-1) front: usize, /// Back index. Always 0..=(N-1). back: usize, /// Used to distinguish "empty" and "full" cases when `front == back`. /// May only be `true` if `front == back`, always `false` otherwise. 
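    /// (Without this flag `front == back` would be ambiguous: with both indices
    /// confined to `0..N`, that equality holds both when the deque is empty and
    /// when it holds exactly `N` elements.)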
full: bool, } impl Deque { const INIT: MaybeUninit = MaybeUninit::uninit(); /// Constructs a new, empty deque with a fixed capacity of `N` /// /// # Examples /// /// ``` /// use heapless::Deque; /// /// // allocate the deque on the stack /// let mut x: Deque = Deque::new(); /// /// // allocate the deque in a static variable /// static mut X: Deque = Deque::new(); /// ``` pub const fn new() -> Self { // Const assert N > 0 crate::sealed::greater_than_0::(); Self { buffer: [Self::INIT; N], front: 0, back: 0, full: false, } } fn increment(i: usize) -> usize { if i + 1 == N { 0 } else { i + 1 } } fn decrement(i: usize) -> usize { if i == 0 { N - 1 } else { i - 1 } } /// Returns the maximum number of elements the deque can hold. pub const fn capacity(&self) -> usize { N } /// Returns the number of elements currently in the deque. pub const fn len(&self) -> usize { if self.full { N } else if self.back < self.front { self.back + N - self.front } else { self.back - self.front } } /// Clears the deque, removing all values. pub fn clear(&mut self) { // safety: we're immediately setting a consistent empty state. unsafe { self.drop_contents() } self.front = 0; self.back = 0; self.full = false; } /// Drop all items in the `Deque`, leaving the state `back/front/full` unmodified. /// /// safety: leaves the `Deque` in an inconsistent state, so can cause duplicate drops. unsafe fn drop_contents(&mut self) { // We drop each element used in the deque by turning into a &mut[T] let (a, b) = self.as_mut_slices(); ptr::drop_in_place(a); ptr::drop_in_place(b); } /// Returns whether the deque is empty. pub fn is_empty(&self) -> bool { self.front == self.back && !self.full } /// Returns whether the deque is full (i.e. if `len() == capacity()`. pub fn is_full(&self) -> bool { self.full } /// Returns a pair of slices which contain, in order, the contents of the `Deque`. pub fn as_slices(&self) -> (&[T], &[T]) { // NOTE(unsafe) avoid bound checks in the slicing operation unsafe { if self.is_empty() { (&[], &[]) } else if self.back <= self.front { ( slice::from_raw_parts( self.buffer.as_ptr().add(self.front) as *const T, N - self.front, ), slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.back), ) } else { ( slice::from_raw_parts( self.buffer.as_ptr().add(self.front) as *const T, self.back - self.front, ), &[], ) } } } /// Returns a pair of mutable slices which contain, in order, the contents of the `Deque`. pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) { let ptr = self.buffer.as_mut_ptr(); // NOTE(unsafe) avoid bound checks in the slicing operation unsafe { if self.is_empty() { (&mut [], &mut []) } else if self.back <= self.front { ( slice::from_raw_parts_mut(ptr.add(self.front) as *mut T, N - self.front), slice::from_raw_parts_mut(ptr as *mut T, self.back), ) } else { ( slice::from_raw_parts_mut( ptr.add(self.front) as *mut T, self.back - self.front, ), &mut [], ) } } } /// Provides a reference to the front element, or None if the `Deque` is empty. pub fn front(&self) -> Option<&T> { if self.is_empty() { None } else { Some(unsafe { &*self.buffer.get_unchecked(self.front).as_ptr() }) } } /// Provides a mutable reference to the front element, or None if the `Deque` is empty. pub fn front_mut(&mut self) -> Option<&mut T> { if self.is_empty() { None } else { Some(unsafe { &mut *self.buffer.get_unchecked_mut(self.front).as_mut_ptr() }) } } /// Provides a reference to the back element, or None if the `Deque` is empty. 
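    ///
    /// # Examples
    ///
    /// A minimal sketch, using only the `Deque` API shown in this module:
    ///
    /// ```
    /// use heapless::Deque;
    ///
    /// let mut deque = Deque::<u8, 4>::new();
    /// assert_eq!(deque.back(), None);
    ///
    /// deque.push_back(1).unwrap();
    /// deque.push_back(2).unwrap();
    /// assert_eq!(deque.back(), Some(&2));
    /// ```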
pub fn back(&self) -> Option<&T> { if self.is_empty() { None } else { let index = Self::decrement(self.back); Some(unsafe { &*self.buffer.get_unchecked(index).as_ptr() }) } } /// Provides a mutable reference to the back element, or None if the `Deque` is empty. pub fn back_mut(&mut self) -> Option<&mut T> { if self.is_empty() { None } else { let index = Self::decrement(self.back); Some(unsafe { &mut *self.buffer.get_unchecked_mut(index).as_mut_ptr() }) } } /// Removes the item from the front of the deque and returns it, or `None` if it's empty pub fn pop_front(&mut self) -> Option { if self.is_empty() { None } else { Some(unsafe { self.pop_front_unchecked() }) } } /// Removes the item from the back of the deque and returns it, or `None` if it's empty pub fn pop_back(&mut self) -> Option { if self.is_empty() { None } else { Some(unsafe { self.pop_back_unchecked() }) } } /// Appends an `item` to the front of the deque /// /// Returns back the `item` if the deque is full pub fn push_front(&mut self, item: T) -> Result<(), T> { if self.is_full() { Err(item) } else { unsafe { self.push_front_unchecked(item) } Ok(()) } } /// Appends an `item` to the back of the deque /// /// Returns back the `item` if the deque is full pub fn push_back(&mut self, item: T) -> Result<(), T> { if self.is_full() { Err(item) } else { unsafe { self.push_back_unchecked(item) } Ok(()) } } /// Removes an item from the front of the deque and returns it, without checking that the deque /// is not empty /// /// # Safety /// /// It's undefined behavior to call this on an empty deque pub unsafe fn pop_front_unchecked(&mut self) -> T { debug_assert!(!self.is_empty()); let index = self.front; self.full = false; self.front = Self::increment(self.front); (self.buffer.get_unchecked_mut(index).as_ptr() as *const T).read() } /// Removes an item from the back of the deque and returns it, without checking that the deque /// is not empty /// /// # Safety /// /// It's undefined behavior to call this on an empty deque pub unsafe fn pop_back_unchecked(&mut self) -> T { debug_assert!(!self.is_empty()); self.full = false; self.back = Self::decrement(self.back); (self.buffer.get_unchecked_mut(self.back).as_ptr() as *const T).read() } /// Appends an `item` to the front of the deque /// /// # Safety /// /// This assumes the deque is not full. pub unsafe fn push_front_unchecked(&mut self, item: T) { debug_assert!(!self.is_full()); let index = Self::decrement(self.front); // NOTE: the memory slot that we are about to write to is uninitialized. We assign // a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory *self.buffer.get_unchecked_mut(index) = MaybeUninit::new(item); self.front = index; if self.front == self.back { self.full = true; } } /// Appends an `item` to the back of the deque /// /// # Safety /// /// This assumes the deque is not full. pub unsafe fn push_back_unchecked(&mut self, item: T) { debug_assert!(!self.is_full()); // NOTE: the memory slot that we are about to write to is uninitialized. We assign // a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory *self.buffer.get_unchecked_mut(self.back) = MaybeUninit::new(item); self.back = Self::increment(self.back); if self.front == self.back { self.full = true; } } /// Returns an iterator over the deque. 
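    ///
    /// Iteration runs front-to-back. A minimal sketch:
    ///
    /// ```
    /// use heapless::Deque;
    ///
    /// let mut deque = Deque::<u8, 4>::new();
    /// deque.push_back(1).unwrap();
    /// deque.push_back(2).unwrap();
    /// deque.push_front(0).unwrap();
    ///
    /// let mut iter = deque.iter();
    /// assert_eq!(iter.next(), Some(&0));
    /// assert_eq!(iter.next(), Some(&1));
    /// assert_eq!(iter.next(), Some(&2));
    /// assert_eq!(iter.next(), None);
    /// ```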
pub fn iter(&self) -> Iter<'_, T, N> { let done = self.is_empty(); Iter { _phantom: PhantomData, buffer: &self.buffer as *const MaybeUninit, front: self.front, back: self.back, done, } } /// Returns an iterator that allows modifying each value. pub fn iter_mut(&mut self) -> IterMut<'_, T, N> { let done = self.is_empty(); IterMut { _phantom: PhantomData, buffer: &mut self.buffer as *mut _ as *mut MaybeUninit, front: self.front, back: self.back, done, } } } // Trait implementations impl Default for Deque { fn default() -> Self { Self::new() } } impl Drop for Deque { fn drop(&mut self) { // safety: `self` is left in an inconsistent state but it doesn't matter since // it's getting dropped. Nothing should be able to observe `self` after drop. unsafe { self.drop_contents() } } } impl fmt::Debug for Deque { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self).finish() } } /// An iterator that moves out of a [`Deque`]. /// /// This struct is created by calling the `into_iter` method. /// #[derive(Clone)] pub struct IntoIter { deque: Deque, } impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { self.deque.pop_front() } } impl IntoIterator for Deque { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { deque: self } } } /// An iterator over the elements of a [`Deque`]. /// /// This struct is created by calling the `iter` method. #[derive(Clone)] pub struct Iter<'a, T, const N: usize> { buffer: *const MaybeUninit, _phantom: PhantomData<&'a T>, front: usize, back: usize, done: bool, } impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> { type Item = &'a T; fn next(&mut self) -> Option { if self.done { None } else { let index = self.front; self.front = Deque::::increment(self.front); if self.front == self.back { self.done = true; } Some(unsafe { &*(self.buffer.add(index) as *const T) }) } } fn size_hint(&self) -> (usize, Option) { let len = if self.done { 0 } else if self.back <= self.front { self.back + N - self.front } else { self.back - self.front }; (len, Some(len)) } } impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> { fn next_back(&mut self) -> Option { if self.done { None } else { self.back = Deque::::decrement(self.back); if self.front == self.back { self.done = true; } Some(unsafe { &*(self.buffer.add(self.back) as *const T) }) } } } impl<'a, T, const N: usize> ExactSizeIterator for Iter<'a, T, N> {} impl<'a, T, const N: usize> FusedIterator for Iter<'a, T, N> {} /// An iterator over the elements of a [`Deque`]. /// /// This struct is created by calling the `iter` method. 
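///
/// A minimal sketch of obtaining this iterator via `iter_mut` and mutating through it:
///
/// ```
/// use heapless::Deque;
///
/// let mut deque = Deque::<u8, 4>::new();
/// deque.push_back(1).unwrap();
/// deque.push_back(2).unwrap();
///
/// for x in deque.iter_mut() {
///     *x += 10;
/// }
/// assert_eq!(deque.front(), Some(&11));
/// assert_eq!(deque.back(), Some(&12));
/// ```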
pub struct IterMut<'a, T, const N: usize> { buffer: *mut MaybeUninit, _phantom: PhantomData<&'a mut T>, front: usize, back: usize, done: bool, } impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> { type Item = &'a mut T; fn next(&mut self) -> Option { if self.done { None } else { let index = self.front; self.front = Deque::::increment(self.front); if self.front == self.back { self.done = true; } Some(unsafe { &mut *(self.buffer.add(index) as *mut T) }) } } fn size_hint(&self) -> (usize, Option) { let len = if self.done { 0 } else if self.back <= self.front { self.back + N - self.front } else { self.back - self.front }; (len, Some(len)) } } impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> { fn next_back(&mut self) -> Option { if self.done { None } else { self.back = Deque::::decrement(self.back); if self.front == self.back { self.done = true; } Some(unsafe { &mut *(self.buffer.add(self.back) as *mut T) }) } } } impl<'a, T, const N: usize> ExactSizeIterator for IterMut<'a, T, N> {} impl<'a, T, const N: usize> FusedIterator for IterMut<'a, T, N> {} impl<'a, T, const N: usize> IntoIterator for &'a Deque { type Item = &'a T; type IntoIter = Iter<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, T, const N: usize> IntoIterator for &'a mut Deque { type Item = &'a mut T; type IntoIter = IterMut<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl Clone for Deque where T: Clone, { fn clone(&self) -> Self { let mut res = Deque::new(); for i in self { // safety: the original and new deques have the same capacity, so it can // not become full. unsafe { res.push_back_unchecked(i.clone()) } } res } } #[cfg(test)] mod tests { use crate::Deque; #[test] fn static_new() { static mut _V: Deque = Deque::new(); } #[test] fn stack_new() { let mut _v: Deque = Deque::new(); } #[test] fn drop() { droppable!(); { let mut v: Deque = Deque::new(); v.push_back(Droppable::new()).ok().unwrap(); v.push_back(Droppable::new()).ok().unwrap(); v.pop_front().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: Deque = Deque::new(); v.push_back(Droppable::new()).ok().unwrap(); v.push_back(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: Deque = Deque::new(); v.push_front(Droppable::new()).ok().unwrap(); v.push_front(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } #[test] fn full() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_front(1).unwrap(); v.push_back(2).unwrap(); v.push_back(3).unwrap(); assert!(v.push_front(4).is_err()); assert!(v.push_back(4).is_err()); assert!(v.is_full()); } #[test] fn empty() { let mut v: Deque = Deque::new(); assert!(v.is_empty()); v.push_back(0).unwrap(); assert!(!v.is_empty()); v.push_front(1).unwrap(); assert!(!v.is_empty()); v.pop_front().unwrap(); v.pop_front().unwrap(); assert!(v.pop_front().is_none()); assert!(v.pop_back().is_none()); assert!(v.is_empty()); } #[test] fn front_back() { let mut v: Deque = Deque::new(); assert_eq!(v.front(), None); assert_eq!(v.front_mut(), None); assert_eq!(v.back(), None); assert_eq!(v.back_mut(), None); v.push_back(4).unwrap(); assert_eq!(v.front(), Some(&4)); assert_eq!(v.front_mut(), Some(&mut 4)); assert_eq!(v.back(), Some(&4)); assert_eq!(v.back_mut(), Some(&mut 4)); v.push_front(3).unwrap(); assert_eq!(v.front(), Some(&3)); assert_eq!(v.front_mut(), Some(&mut 3)); assert_eq!(v.back(), Some(&4)); assert_eq!(v.back_mut(), Some(&mut 4)); v.pop_back().unwrap(); assert_eq!(v.front(), Some(&3)); 
assert_eq!(v.front_mut(), Some(&mut 3)); assert_eq!(v.back(), Some(&3)); assert_eq!(v.back_mut(), Some(&mut 3)); v.pop_front().unwrap(); assert_eq!(v.front(), None); assert_eq!(v.front_mut(), None); assert_eq!(v.back(), None); assert_eq!(v.back_mut(), None); } #[test] fn iter() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_back(1).unwrap(); v.push_front(2).unwrap(); v.push_front(3).unwrap(); v.pop_back().unwrap(); v.push_front(4).unwrap(); let mut items = v.iter(); assert_eq!(items.next(), Some(&4)); assert_eq!(items.next(), Some(&3)); assert_eq!(items.next(), Some(&2)); assert_eq!(items.next(), Some(&0)); assert_eq!(items.next(), None); } #[test] fn iter_mut() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_back(1).unwrap(); v.push_front(2).unwrap(); v.push_front(3).unwrap(); v.pop_back().unwrap(); v.push_front(4).unwrap(); let mut items = v.iter_mut(); assert_eq!(items.next(), Some(&mut 4)); assert_eq!(items.next(), Some(&mut 3)); assert_eq!(items.next(), Some(&mut 2)); assert_eq!(items.next(), Some(&mut 0)); assert_eq!(items.next(), None); } #[test] fn iter_move() { let mut v: Deque = Deque::new(); v.push_back(0).unwrap(); v.push_back(1).unwrap(); v.push_back(2).unwrap(); v.push_back(3).unwrap(); let mut items = v.into_iter(); assert_eq!(items.next(), Some(0)); assert_eq!(items.next(), Some(1)); assert_eq!(items.next(), Some(2)); assert_eq!(items.next(), Some(3)); assert_eq!(items.next(), None); } #[test] fn iter_move_drop() { droppable!(); { let mut deque: Deque = Deque::new(); deque.push_back(Droppable::new()).ok().unwrap(); deque.push_back(Droppable::new()).ok().unwrap(); let mut items = deque.into_iter(); // Move all let _ = items.next(); let _ = items.next(); } assert_eq!(Droppable::count(), 0); { let mut deque: Deque = Deque::new(); deque.push_back(Droppable::new()).ok().unwrap(); deque.push_back(Droppable::new()).ok().unwrap(); let _items = deque.into_iter(); // Move none } assert_eq!(Droppable::count(), 0); { let mut deque: Deque = Deque::new(); deque.push_back(Droppable::new()).ok().unwrap(); deque.push_back(Droppable::new()).ok().unwrap(); let mut items = deque.into_iter(); let _ = items.next(); // Move partly } assert_eq!(Droppable::count(), 0); } #[test] fn push_and_pop() { let mut q: Deque = Deque::new(); assert_eq!(q.len(), 0); assert_eq!(q.pop_front(), None); assert_eq!(q.pop_back(), None); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); assert_eq!(q.len(), 1); assert_eq!(q.pop_back(), Some(0)); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); q.push_back(1).unwrap(); q.push_front(2).unwrap(); q.push_front(3).unwrap(); assert_eq!(q.len(), 4); // deque contains: 3 2 0 1 assert_eq!(q.pop_front(), Some(3)); assert_eq!(q.len(), 3); assert_eq!(q.pop_front(), Some(2)); assert_eq!(q.len(), 2); assert_eq!(q.pop_back(), Some(1)); assert_eq!(q.len(), 1); assert_eq!(q.pop_front(), Some(0)); assert_eq!(q.len(), 0); // deque is now empty assert_eq!(q.pop_front(), None); assert_eq!(q.pop_back(), None); assert_eq!(q.len(), 0); } #[test] fn as_slices() { let mut q: Deque = Deque::new(); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); q.push_back(1).unwrap(); q.push_back(2).unwrap(); q.push_back(3).unwrap(); assert_eq!(q.as_slices(), (&[0, 1, 2, 3][..], &[][..])); q.pop_front().unwrap(); assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[][..])); q.push_back(4).unwrap(); assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[4][..])); } #[test] fn clear() { let mut q: Deque = Deque::new(); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); q.push_back(1).unwrap(); 
q.push_back(2).unwrap(); q.push_back(3).unwrap(); assert_eq!(q.len(), 4); q.clear(); assert_eq!(q.len(), 0); q.push_back(0).unwrap(); assert_eq!(q.len(), 1); } } heapless-0.7.16/src/histbuf.rs000064400000000000000000000261120072674642500143560ustar 00000000000000use core::fmt; use core::mem::MaybeUninit; use core::ops::Deref; use core::ptr; use core::slice; /// A "history buffer", similar to a write-only ring buffer of fixed length. /// /// This buffer keeps a fixed number of elements. On write, the oldest element /// is overwritten. Thus, the buffer is useful to keep a history of values with /// some desired depth, and for example calculate a rolling average. /// /// # Examples /// ``` /// use heapless::HistoryBuffer; /// /// // Initialize a new buffer with 8 elements. /// let mut buf = HistoryBuffer::<_, 8>::new(); /// /// // Starts with no data /// assert_eq!(buf.recent(), None); /// /// buf.write(3); /// buf.write(5); /// buf.extend(&[4, 4]); /// /// // The most recent written element is a four. /// assert_eq!(buf.recent(), Some(&4)); /// /// // To access all elements in an unspecified order, use `as_slice()`. /// for el in buf.as_slice() { println!("{:?}", el); } /// /// // Now we can prepare an average of all values, which comes out to 4. /// let avg = buf.as_slice().iter().sum::() / buf.len(); /// assert_eq!(avg, 4); /// ``` pub struct HistoryBuffer { data: [MaybeUninit; N], write_at: usize, filled: bool, } impl HistoryBuffer { const INIT: MaybeUninit = MaybeUninit::uninit(); /// Constructs a new history buffer. /// /// The construction of a `HistoryBuffer` works in `const` contexts. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// // Allocate a 16-element buffer on the stack /// let x: HistoryBuffer = HistoryBuffer::new(); /// assert_eq!(x.len(), 0); /// ``` #[inline] pub const fn new() -> Self { // Const assert crate::sealed::greater_than_0::(); Self { data: [Self::INIT; N], write_at: 0, filled: false, } } /// Clears the buffer, replacing every element with the default value of /// type `T`. pub fn clear(&mut self) { *self = Self::new(); } } impl HistoryBuffer where T: Copy + Clone, { /// Constructs a new history buffer, where every element is the given value. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// // Allocate a 16-element buffer on the stack /// let mut x: HistoryBuffer = HistoryBuffer::new_with(4); /// // All elements are four /// assert_eq!(x.as_slice(), [4; 16]); /// ``` #[inline] pub fn new_with(t: T) -> Self { Self { data: [MaybeUninit::new(t); N], write_at: 0, filled: true, } } /// Clears the buffer, replacing every element with the given value. pub fn clear_with(&mut self, t: T) { *self = Self::new_with(t); } } impl HistoryBuffer { /// Returns the current fill level of the buffer. #[inline] pub fn len(&self) -> usize { if self.filled { N } else { self.write_at } } /// Returns the capacity of the buffer, which is the length of the /// underlying backing array. #[inline] pub fn capacity(&self) -> usize { N } /// Writes an element to the buffer, overwriting the oldest value. pub fn write(&mut self, t: T) { if self.filled { // Drop the old before we overwrite it. unsafe { ptr::drop_in_place(self.data[self.write_at].as_mut_ptr()) } } self.data[self.write_at] = MaybeUninit::new(t); self.write_at += 1; if self.write_at == self.capacity() { self.write_at = 0; self.filled = true; } } /// Clones and writes all elements in a slice to the buffer. 
/// /// If the slice is longer than the buffer, only the last `self.len()` /// elements will actually be stored. pub fn extend_from_slice(&mut self, other: &[T]) where T: Clone, { for item in other { self.write(item.clone()); } } /// Returns a reference to the most recently written value. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// let mut x: HistoryBuffer = HistoryBuffer::new(); /// x.write(4); /// x.write(10); /// assert_eq!(x.recent(), Some(&10)); /// ``` pub fn recent(&self) -> Option<&T> { if self.write_at == 0 { if self.filled { Some(unsafe { &*self.data[self.capacity() - 1].as_ptr() }) } else { None } } else { Some(unsafe { &*self.data[self.write_at - 1].as_ptr() }) } } /// Returns the array slice backing the buffer, without keeping track /// of the write position. Therefore, the element order is unspecified. pub fn as_slice(&self) -> &[T] { unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.len()) } } /// Returns an iterator for iterating over the buffer from oldest to newest. /// /// # Examples /// /// ``` /// use heapless::HistoryBuffer; /// /// let mut buffer: HistoryBuffer = HistoryBuffer::new(); /// buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]); /// let expected = [1, 2, 3, 4, 5, 6]; /// for (x, y) in buffer.oldest_ordered().zip(expected.iter()) { /// assert_eq!(x, y) /// } /// /// ``` pub fn oldest_ordered<'a>(&'a self) -> OldestOrdered<'a, T, N> { if self.filled { OldestOrdered { buf: self, cur: self.write_at, wrapped: false, } } else { // special case: act like we wrapped already to handle empty buffer. OldestOrdered { buf: self, cur: 0, wrapped: true, } } } } impl Extend for HistoryBuffer { fn extend(&mut self, iter: I) where I: IntoIterator, { for item in iter.into_iter() { self.write(item); } } } impl<'a, T, const N: usize> Extend<&'a T> for HistoryBuffer where T: 'a + Clone, { fn extend(&mut self, iter: I) where I: IntoIterator, { self.extend(iter.into_iter().cloned()) } } impl Drop for HistoryBuffer { fn drop(&mut self) { unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut( self.data.as_mut_ptr() as *mut T, self.len(), )) } } } impl Deref for HistoryBuffer { type Target = [T]; fn deref(&self) -> &[T] { self.as_slice() } } impl AsRef<[T]> for HistoryBuffer { #[inline] fn as_ref(&self) -> &[T] { self } } impl fmt::Debug for HistoryBuffer where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { <[T] as fmt::Debug>::fmt(self, f) } } impl Default for HistoryBuffer { fn default() -> Self { Self::new() } } /// An iterator on the underlying buffer ordered from oldest data to newest #[derive(Clone)] pub struct OldestOrdered<'a, T, const N: usize> { buf: &'a HistoryBuffer, cur: usize, wrapped: bool, } impl<'a, T, const N: usize> Iterator for OldestOrdered<'a, T, N> { type Item = &'a T; fn next(&mut self) -> Option<&'a T> { if self.cur == self.buf.len() && self.buf.filled { // roll-over self.cur = 0; self.wrapped = true; } if self.cur == self.buf.write_at && self.wrapped { return None; } let item = &self.buf[self.cur]; self.cur += 1; Some(item) } } #[cfg(test)] mod tests { use crate::HistoryBuffer; use core::fmt::Debug; #[test] fn new() { let x: HistoryBuffer = HistoryBuffer::new_with(1); assert_eq!(x.len(), 4); assert_eq!(x.as_slice(), [1; 4]); assert_eq!(*x, [1; 4]); let x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.as_slice(), []); } #[test] fn write() { let mut x: HistoryBuffer = HistoryBuffer::new(); x.write(1); x.write(4); assert_eq!(x.as_slice(), [1, 4]); x.write(5); x.write(6); x.write(10); 
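        // the buffer holds four elements, so this fifth write wraps around and
        // overwrites the oldest value (1); the backing storage now reads [10, 4, 5, 6]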
assert_eq!(x.as_slice(), [10, 4, 5, 6]); x.extend([11, 12].iter()); assert_eq!(x.as_slice(), [10, 11, 12, 6]); } #[test] fn clear() { let mut x: HistoryBuffer = HistoryBuffer::new_with(1); x.clear(); assert_eq!(x.as_slice(), []); let mut x: HistoryBuffer = HistoryBuffer::new(); x.clear_with(1); assert_eq!(x.as_slice(), [1; 4]); } #[test] fn recent() { let mut x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.recent(), None); x.write(1); x.write(4); assert_eq!(x.recent(), Some(&4)); x.write(5); x.write(6); x.write(10); assert_eq!(x.recent(), Some(&10)); } #[test] fn as_slice() { let mut x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.as_slice(), []); x.extend([1, 2, 3, 4, 5].iter()); assert_eq!(x.as_slice(), [5, 2, 3, 4]); } #[test] fn ordered() { // test on an empty buffer let buffer: HistoryBuffer = HistoryBuffer::new(); let mut iter = buffer.oldest_ordered(); assert_eq!(iter.next(), None); assert_eq!(iter.next(), None); // test on a un-filled buffer let mut buffer: HistoryBuffer = HistoryBuffer::new(); buffer.extend([1, 2, 3]); assert_eq!(buffer.len(), 3); assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3]); // test on a filled buffer let mut buffer: HistoryBuffer = HistoryBuffer::new(); buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]); assert_eq!(buffer.len(), 6); assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3, 4, 5, 6]); // comprehensive test all cases for n in 0..50 { const N: usize = 7; let mut buffer: HistoryBuffer = HistoryBuffer::new(); buffer.extend(0..n); assert_eq_iter( buffer.oldest_ordered().copied(), n.saturating_sub(N as u8)..n, ); } } /// Compares two iterators item by item, making sure they stop at the same time. fn assert_eq_iter( a: impl IntoIterator, b: impl IntoIterator, ) { let mut a = a.into_iter(); let mut b = b.into_iter(); let mut i = 0; loop { let a_item = a.next(); let b_item = b.next(); assert_eq!(a_item, b_item, "{}", i); i += 1; if b_item.is_none() { break; } } } } heapless-0.7.16/src/indexmap.rs000064400000000000000000001164070072674642500145260ustar 00000000000000use core::{borrow::Borrow, fmt, iter::FromIterator, mem, num::NonZeroU32, ops, slice}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher}; use crate::Vec; /// A [`heapless::IndexMap`](./struct.IndexMap.html) using the default FNV hasher /// /// A list of all Methods and Traits available for `FnvIndexMap` can be found in /// the [`heapless::IndexMap`](./struct.IndexMap.html) documentation. /// /// # Examples /// ``` /// use heapless::FnvIndexMap; /// /// // A hash map with a capacity of 16 key-value pairs allocated on the stack /// let mut book_reviews = FnvIndexMap::<_, _, 16>::new(); /// /// // review some books. /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap(); /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.").unwrap(); /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.").unwrap(); /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.").unwrap(); /// /// // check for a specific one. /// if !book_reviews.contains_key("Les Misérables") { /// println!("We've got {} reviews, but Les Misérables ain't one.", /// book_reviews.len()); /// } /// /// // oops, this review has a lot of spelling mistakes, let's delete it. /// book_reviews.remove("The Adventures of Sherlock Holmes"); /// /// // look up the values associated with some keys. 
/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; /// for book in &to_find { /// match book_reviews.get(book) { /// Some(review) => println!("{}: {}", book, review), /// None => println!("{} is unreviewed.", book) /// } /// } /// /// // iterate over everything. /// for (book, review) in &book_reviews { /// println!("{}: \"{}\"", book, review); /// } /// ``` pub type FnvIndexMap = IndexMap, N>; #[derive(Clone, Copy, Eq, PartialEq)] struct HashValue(u16); impl HashValue { fn desired_pos(&self, mask: usize) -> usize { usize::from(self.0) & mask } fn probe_distance(&self, mask: usize, current: usize) -> usize { current.wrapping_sub(self.desired_pos(mask) as usize) & mask } } #[doc(hidden)] #[derive(Clone)] pub struct Bucket { hash: HashValue, key: K, value: V, } #[doc(hidden)] #[derive(Clone, Copy, PartialEq)] pub struct Pos { // compact representation of `{ hash_value: u16, index: u16 }` // To get the most from `NonZero` we store the *value minus 1*. This way `None::Option` // is equivalent to the very unlikely value of `{ hash_value: 0xffff, index: 0xffff }` instead // the more likely of `{ hash_value: 0x00, index: 0x00 }` nz: NonZeroU32, } impl Pos { fn new(index: usize, hash: HashValue) -> Self { Pos { nz: unsafe { NonZeroU32::new_unchecked( ((u32::from(hash.0) << 16) + index as u32).wrapping_add(1), ) }, } } fn hash(&self) -> HashValue { HashValue((self.nz.get().wrapping_sub(1) >> 16) as u16) } fn index(&self) -> usize { self.nz.get().wrapping_sub(1) as u16 as usize } } enum Insert { Success(Inserted), Full((K, V)), } struct Inserted { index: usize, old_value: Option, } macro_rules! probe_loop { ($probe_var: ident < $len: expr, $body: expr) => { loop { if $probe_var < $len { $body $probe_var += 1; } else { $probe_var = 0; } } } } struct CoreMap { entries: Vec, N>, indices: [Option; N], } impl CoreMap { const fn new() -> Self { const INIT: Option = None; CoreMap { entries: Vec::new(), indices: [INIT; N], } } } impl CoreMap where K: Eq + Hash, { fn capacity() -> usize { N } fn mask() -> usize { Self::capacity() - 1 } fn find(&self, hash: HashValue, query: &Q) -> Option<(usize, usize)> where K: Borrow, Q: ?Sized + Eq, { let mut probe = hash.desired_pos(Self::mask()); let mut dist = 0; probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { let entry_hash = pos.hash(); // NOTE(i) we use unchecked indexing below let i = pos.index(); debug_assert!(i < self.entries.len()); if dist > entry_hash.probe_distance(Self::mask(), probe) { // give up when probe distance is too long return None; } else if entry_hash == hash && unsafe { self.entries.get_unchecked(i).key.borrow() == query } { return Some((probe, i)); } } else { return None; } dist += 1; }); } fn insert(&mut self, hash: HashValue, key: K, value: V) -> Insert { let mut probe = hash.desired_pos(Self::mask()); let mut dist = 0; probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if let Some(pos) = *pos { let entry_hash = pos.hash(); // NOTE(i) we use unchecked indexing below let i = pos.index(); debug_assert!(i < self.entries.len()); let their_dist = entry_hash.probe_distance(Self::mask(), probe); if their_dist < dist { if self.entries.is_full() { return Insert::Full((key, value)); } // robin hood: steal the spot if it's better for us let index = self.entries.len(); unsafe { self.entries.push_unchecked(Bucket { hash, key, value }) }; return Insert::Success(Inserted { index: self.insert_phase_2(probe, Pos::new(index, hash)), old_value: None, }); } else if entry_hash 
== hash && unsafe { self.entries.get_unchecked(i).key == key } { return Insert::Success(Inserted { index: i, old_value: Some(mem::replace( unsafe { &mut self.entries.get_unchecked_mut(i).value }, value, )), }); } } else { if self.entries.is_full() { return Insert::Full((key, value)); } // empty bucket, insert here let index = self.entries.len(); *pos = Some(Pos::new(index, hash)); unsafe { self.entries.push_unchecked(Bucket { hash, key, value }) }; return Insert::Success(Inserted { index, old_value: None, }); } dist += 1; }); } // phase 2 is post-insert where we forward-shift `Pos` in the indices. fn insert_phase_2(&mut self, mut probe: usize, mut old_pos: Pos) -> usize { probe_loop!(probe < self.indices.len(), { let pos = unsafe { self.indices.get_unchecked_mut(probe) }; let mut is_none = true; // work around lack of NLL if let Some(pos) = pos.as_mut() { old_pos = mem::replace(pos, old_pos); is_none = false; } if is_none { *pos = Some(old_pos); return probe; } }); } fn remove_found(&mut self, probe: usize, found: usize) -> (K, V) { // index `probe` and entry `found` is to be removed // use swap_remove, but then we need to update the index that points // to the other entry that has to move self.indices[probe] = None; let entry = unsafe { self.entries.swap_remove_unchecked(found) }; // correct index that points to the entry that had to swap places if let Some(entry) = self.entries.get(found) { // was not last element // examine new element in `found` and find it in indices let mut probe = entry.hash.desired_pos(Self::mask()); probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { if pos.index() >= self.entries.len() { // found it self.indices[probe] = Some(Pos::new(found, entry.hash)); break; } } }); } self.backward_shift_after_removal(probe); (entry.key, entry.value) } fn backward_shift_after_removal(&mut self, probe_at_remove: usize) { // backward shift deletion in self.indices // after probe, shift all non-ideally placed indices backward let mut last_probe = probe_at_remove; let mut probe = probe_at_remove + 1; probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { let entry_hash = pos.hash(); if entry_hash.probe_distance(Self::mask(), probe) > 0 { unsafe { *self.indices.get_unchecked_mut(last_probe) = self.indices[probe] } self.indices[probe] = None; } else { break; } } else { break; } last_probe = probe; }); } } impl Clone for CoreMap where K: Eq + Hash + Clone, V: Clone, { fn clone(&self) -> Self { Self { entries: self.entries.clone(), indices: self.indices.clone(), } } } /// A view into an entry in the map pub enum Entry<'a, K, V, const N: usize> { /// The entry corresponding to the key `K` exists in the map Occupied(OccupiedEntry<'a, K, V, N>), /// The entry corresponding to the key `K` does not exist in the map Vacant(VacantEntry<'a, K, V, N>), } /// An occupied entry which can be manipulated pub struct OccupiedEntry<'a, K, V, const N: usize> { key: K, probe: usize, pos: usize, core: &'a mut CoreMap, } impl<'a, K, V, const N: usize> OccupiedEntry<'a, K, V, N> where K: Eq + Hash, { /// Gets a reference to the key that this entity corresponds to pub fn key(&self) -> &K { &self.key } /// Removes this entry from the map and yields its corresponding key and value pub fn remove_entry(self) -> (K, V) { self.core.remove_found(self.probe, self.pos) } /// Gets a reference to the value associated with this entry pub fn get(&self) -> &V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is 
internally held. unsafe { &self.core.entries.get_unchecked(self.pos).value } } /// Gets a mutable reference to the value associated with this entry pub fn get_mut(&mut self) -> &mut V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { &mut self.core.entries.get_unchecked_mut(self.pos).value } } /// Consumes this entry and yields a reference to the underlying value pub fn into_mut(self) -> &'a mut V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { &mut self.core.entries.get_unchecked_mut(self.pos).value } } /// Overwrites the underlying map's value with this entry's value pub fn insert(self, value: V) -> V { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. unsafe { mem::replace( &mut self.core.entries.get_unchecked_mut(self.pos).value, value, ) } } /// Removes this entry from the map and yields its value pub fn remove(self) -> V { self.remove_entry().1 } } /// A view into an empty slot in the underlying map pub struct VacantEntry<'a, K, V, const N: usize> { key: K, hash_val: HashValue, core: &'a mut CoreMap, } impl<'a, K, V, const N: usize> VacantEntry<'a, K, V, N> where K: Eq + Hash, { /// Get the key associated with this entry pub fn key(&self) -> &K { &self.key } /// Consumes this entry to yield to key associated with it pub fn into_key(self) -> K { self.key } /// Inserts this entry into to underlying map, yields a mutable reference to the inserted value. /// If the map is at capacity the value is returned instead. pub fn insert(self, value: V) -> Result<&'a mut V, V> { if self.core.entries.is_full() { Err(value) } else { match self.core.insert(self.hash_val, self.key, value) { Insert::Success(inserted) => { unsafe { // SAFETY: Already checked existence at instantiation and the only mutable reference // to the map is internally held. Ok(&mut (*self.core.entries.as_mut_ptr().add(inserted.index)).value) } } Insert::Full((_, v)) => Err(v), } } } } /// Fixed capacity [`IndexMap`](https://docs.rs/indexmap/1/indexmap/map/struct.IndexMap.html) /// /// Note that you cannot use `IndexMap` directly, since it is generic around the hashing algorithm /// in use. Pick a concrete instantiation like [`FnvIndexMap`](./type.FnvIndexMap.html) instead /// or create your own. /// /// Note that the capacity of the `IndexMap` must be a power of 2. /// /// # Examples /// Since `IndexMap` cannot be used directly, we're using its `FnvIndexMap` instantiation /// for this example. /// /// ``` /// use heapless::FnvIndexMap; /// /// // A hash map with a capacity of 16 key-value pairs allocated on the stack /// let mut book_reviews = FnvIndexMap::<_, _, 16>::new(); /// /// // review some books. /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap(); /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.").unwrap(); /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.").unwrap(); /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.").unwrap(); /// /// // check for a specific one. /// if !book_reviews.contains_key("Les Misérables") { /// println!("We've got {} reviews, but Les Misérables ain't one.", /// book_reviews.len()); /// } /// /// // oops, this review has a lot of spelling mistakes, let's delete it. 
/// book_reviews.remove("The Adventures of Sherlock Holmes"); /// /// // look up the values associated with some keys. /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; /// for book in &to_find { /// match book_reviews.get(book) { /// Some(review) => println!("{}: {}", book, review), /// None => println!("{} is unreviewed.", book) /// } /// } /// /// // iterate over everything. /// for (book, review) in &book_reviews { /// println!("{}: \"{}\"", book, review); /// } /// ``` pub struct IndexMap { core: CoreMap, build_hasher: S, } impl IndexMap, N> { /// Creates an empty `IndexMap`. pub const fn new() -> Self { // Const assert crate::sealed::greater_than_1::(); crate::sealed::power_of_two::(); IndexMap { build_hasher: BuildHasherDefault::new(), core: CoreMap::new(), } } } impl IndexMap where K: Eq + Hash, S: BuildHasher, { /* Public API */ /// Returns the number of elements the map can hold pub fn capacity(&self) -> usize { N } /// Return an iterator over the keys of the map, in their order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for key in map.keys() { /// println!("{}", key); /// } /// ``` pub fn keys(&self) -> impl Iterator { self.core.entries.iter().map(|bucket| &bucket.key) } /// Return an iterator over the values of the map, in their order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values(&self) -> impl Iterator { self.core.entries.iter().map(|bucket| &bucket.value) } /// Return an iterator over mutable references to the the values of the map, in their order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values_mut() { /// *val += 10; /// } /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values_mut(&mut self) -> impl Iterator { self.core.entries.iter_mut().map(|bucket| &mut bucket.value) } /// Return an iterator over the key-value pairs of the map, in their order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for (key, val) in map.iter() { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter(&self) -> Iter<'_, K, V> { Iter { iter: self.core.entries.iter(), } } /// Return an iterator over the key-value pairs of the map, in their order /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for (_, val) in map.iter_mut() { /// *val = 2; /// } /// /// for (key, val) in &map { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut { iter: self.core.entries.iter_mut(), } } /// Get the first key-value pair /// /// Computes in **O(1)** time pub fn first(&self) -> Option<(&K, &V)> { self.core .entries .first() .map(|bucket| (&bucket.key, &bucket.value)) } /// Get the first key-value pair, with mutable 
access to the value /// /// Computes in **O(1)** time pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { self.core .entries .first_mut() .map(|bucket| (&bucket.key, &mut bucket.value)) } /// Get the last key-value pair /// /// Computes in **O(1)** time pub fn last(&self) -> Option<(&K, &V)> { self.core .entries .last() .map(|bucket| (&bucket.key, &bucket.value)) } /// Get the last key-value pair, with mutable access to the value /// /// Computes in **O(1)** time pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { self.core .entries .last_mut() .map(|bucket| (&bucket.key, &mut bucket.value)) } /// Returns an entry for the corresponding key /// ``` /// use heapless::FnvIndexMap; /// use heapless::Entry; /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// if let Entry::Vacant(v) = map.entry("a") { /// v.insert(1).unwrap(); /// } /// if let Entry::Occupied(mut o) = map.entry("a") { /// println!("found {}", *o.get()); // Prints 1 /// o.insert(2); /// } /// // Prints 2 /// println!("val: {}", *map.get("a").unwrap()); /// ``` pub fn entry(&mut self, key: K) -> Entry<'_, K, V, N> { let hash_val = hash_with(&key, &self.build_hasher); if let Some((probe, pos)) = self.core.find(hash_val, &key) { Entry::Occupied(OccupiedEntry { key, probe, pos, core: &mut self.core, }) } else { Entry::Vacant(VacantEntry { key, hash_val, core: &mut self.core, }) } } /// Return the number of key-value pairs in the map. /// /// Computes in **O(1)** time. /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a").unwrap(); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { self.core.entries.len() } /// Returns true if the map contains no elements. /// /// Computes in **O(1)** time. /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// assert!(a.is_empty()); /// a.insert(1, "a"); /// assert!(!a.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } /// Remove all key-value pairs in the map, while preserving its capacity. /// /// Computes in **O(n)** time. /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// a.insert(1, "a"); /// a.clear(); /// assert!(a.is_empty()); /// ``` pub fn clear(&mut self) { self.core.entries.clear(); for pos in self.core.indices.iter_mut() { *pos = None; } } /// Returns a reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form *must* match those for the key type. /// /// Computes in **O(1)** time (average). /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); /// ``` pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: ?Sized + Hash + Eq, { self.find(key) .map(|(_, found)| unsafe { &self.core.entries.get_unchecked(found).value }) } /// Returns true if the map contains a value for the specified key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form *must* match those for the key type. /// /// Computes in **O(1)** time (average). 
/// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` pub fn contains_key(&self, key: &Q) -> bool where K: Borrow, Q: ?Sized + Eq + Hash, { self.find(key).is_some() } /// Returns a mutable reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form *must* match those for the key type. /// /// Computes in **O(1)** time (average). /// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; /// } /// assert_eq!(map[&1], "b"); /// ``` pub fn get_mut<'v, Q>(&'v mut self, key: &Q) -> Option<&'v mut V> where K: Borrow, Q: ?Sized + Hash + Eq, { if let Some((_, found)) = self.find(key) { Some(unsafe { &mut self.core.entries.get_unchecked_mut(found).value }) } else { None } } /// Inserts a key-value pair into the map. /// /// If an equivalent key already exists in the map: the key remains and retains in its place in /// the order, its corresponding value is updated with `value` and the older value is returned /// inside `Some(_)`. /// /// If no equivalent key existed in the map: the new key-value pair is inserted, last in order, /// and `None` is returned. /// /// Computes in **O(1)** time (average). /// /// See also entry if you you want to insert or modify or if you need to get the index of the /// corresponding key-value pair. /// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// assert_eq!(map.insert(37, "a"), Ok(None)); /// assert_eq!(map.is_empty(), false); /// /// map.insert(37, "b"); /// assert_eq!(map.insert(37, "c"), Ok(Some("b"))); /// assert_eq!(map[&37], "c"); /// ``` pub fn insert(&mut self, key: K, value: V) -> Result, (K, V)> { let hash = hash_with(&key, &self.build_hasher); match self.core.insert(hash, key, value) { Insert::Success(inserted) => Ok(inserted.old_value), Insert::Full((k, v)) => Err((k, v)), } } /// Same as [`swap_remove`](struct.IndexMap.html#method.swap_remove) /// /// Computes in **O(1)** time (average). /// /// # Examples /// /// ``` /// use heapless::FnvIndexMap; /// /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// ``` pub fn remove(&mut self, key: &Q) -> Option where K: Borrow, Q: ?Sized + Hash + Eq, { self.swap_remove(key) } /// Remove the key-value pair equivalent to `key` and return its value. /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the last element of the map /// and popping it off. **This perturbs the postion of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). 
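    ///
    /// # Examples
    ///
    /// A minimal sketch; note how the last entry is swapped into the removed slot,
    /// illustrating the perturbation described above:
    ///
    /// ```
    /// use heapless::FnvIndexMap;
    ///
    /// let mut map = FnvIndexMap::<_, _, 8>::new();
    /// map.insert(1, "a").unwrap();
    /// map.insert(2, "b").unwrap();
    /// map.insert(3, "c").unwrap();
    ///
    /// assert_eq!(map.swap_remove(&1), Some("a"));
    /// // the former last entry (3, "c") now occupies the first slot
    /// assert_eq!(map.first(), Some((&3, &"c")));
    /// assert_eq!(map.swap_remove(&1), None);
    /// ```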
pub fn swap_remove(&mut self, key: &Q) -> Option where K: Borrow, Q: ?Sized + Hash + Eq, { self.find(key) .map(|(probe, found)| self.core.remove_found(probe, found).1) } /* Private API */ /// Return probe (indices) and position (entries) fn find(&self, key: &Q) -> Option<(usize, usize)> where K: Borrow, Q: ?Sized + Hash + Eq, { if self.len() == 0 { return None; } let h = hash_with(key, &self.build_hasher); self.core.find(h, key) } } impl<'a, K, Q, V, S, const N: usize> ops::Index<&'a Q> for IndexMap where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, { type Output = V; fn index(&self, key: &Q) -> &V { self.get(key).expect("key not found") } } impl<'a, K, Q, V, S, const N: usize> ops::IndexMut<&'a Q> for IndexMap where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, { fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("key not found") } } impl Clone for IndexMap where K: Eq + Hash + Clone, V: Clone, S: Clone, { fn clone(&self) -> Self { Self { core: self.core.clone(), build_hasher: self.build_hasher.clone(), } } } impl fmt::Debug for IndexMap where K: Eq + Hash + fmt::Debug, V: fmt::Debug, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl Default for IndexMap where K: Eq + Hash, S: BuildHasher + Default, { fn default() -> Self { // Const assert crate::sealed::greater_than_1::(); crate::sealed::power_of_two::(); IndexMap { build_hasher: <_>::default(), core: CoreMap::new(), } } } impl PartialEq> for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, S2: BuildHasher, { fn eq(&self, other: &IndexMap) -> bool { self.len() == other.len() && self .iter() .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, { } impl Extend<(K, V)> for IndexMap where K: Eq + Hash, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { for (k, v) in iterable { self.insert(k, v).ok().unwrap(); } } } impl<'a, K, V, S, const N: usize> Extend<(&'a K, &'a V)> for IndexMap where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))) } } impl FromIterator<(K, V)> for IndexMap where K: Eq + Hash, S: BuildHasher + Default, { fn from_iter(iterable: I) -> Self where I: IntoIterator, { let mut map = IndexMap::default(); map.extend(iterable); map } } #[derive(Clone)] pub struct IntoIter { entries: Vec, N>, } impl Iterator for IntoIter { type Item = (K, V); fn next(&mut self) -> Option { self.entries.pop().map(|bucket| (bucket.key, bucket.value)) } } impl IntoIterator for IndexMap where K: Eq + Hash, S: BuildHasher, { type Item = (K, V); type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { entries: self.core.entries, } } } impl<'a, K, V, S, const N: usize> IntoIterator for &'a IndexMap where K: Eq + Hash, S: BuildHasher, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, K, V, S, const N: usize> IntoIterator for &'a mut IndexMap where K: Eq + Hash, S: BuildHasher, { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } pub struct Iter<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.iter.next().map(|bucket| 
(&bucket.key, &bucket.value)) } } impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Self { Self { iter: self.iter.clone(), } } } pub struct IterMut<'a, K, V> { iter: slice::IterMut<'a, Bucket>, } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option { self.iter .next() .map(|bucket| (&bucket.key, &mut bucket.value)) } } fn hash_with(key: &K, build_hasher: &S) -> HashValue where K: ?Sized + Hash, S: BuildHasher, { let mut h = build_hasher.build_hasher(); key.hash(&mut h); HashValue(h.finish() as u16) } #[cfg(test)] mod tests { use crate::{indexmap::Entry, FnvIndexMap}; use core::mem; #[test] fn size() { const CAP: usize = 4; assert_eq!( mem::size_of::>(), CAP * mem::size_of::() + // indices CAP * (mem::size_of::() + // key mem::size_of::() + // value mem::size_of::() // hash ) + // buckets mem::size_of::() // entries.length ) } #[test] fn partial_eq() { { let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); b.insert("k1", "v1").unwrap(); assert!(a == b); b.insert("k2", "v2").unwrap(); assert!(a != b); } { let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); b.insert("k2", "v2").unwrap(); b.insert("k1", "v1").unwrap(); assert!(a == b); } } #[test] fn into_iter() { let mut src: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); src.insert("k1", "v1").unwrap(); src.insert("k2", "v2").unwrap(); src.insert("k3", "v3").unwrap(); src.insert("k4", "v4").unwrap(); let clone = src.clone(); for (k, v) in clone.into_iter() { assert_eq!(v, *src.get(k).unwrap()); } } #[test] fn insert_replaces_on_full_map() { let mut a: FnvIndexMap<_, _, 2> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); a.insert("k1", "v2").unwrap(); assert_eq!(a.get("k1"), a.get("k2")); } const MAP_SLOTS: usize = 4096; fn almost_filled_map() -> FnvIndexMap { let mut almost_filled = FnvIndexMap::new(); for i in 1..MAP_SLOTS { almost_filled.insert(i, i).unwrap(); } almost_filled } #[test] fn entry_find() { let key = 0; let value = 0; let mut src = almost_filled_map(); let entry = src.entry(key); match entry { Entry::Occupied(_) => { panic!("Found entry without inserting"); } Entry::Vacant(v) => { assert_eq!(&key, v.key()); assert_eq!(key, v.into_key()); } } src.insert(key, value).unwrap(); let entry = src.entry(key); match entry { Entry::Occupied(mut o) => { assert_eq!(&key, o.key()); assert_eq!(&value, o.get()); assert_eq!(&value, o.get_mut()); assert_eq!(&value, o.into_mut()); } Entry::Vacant(_) => { panic!("Entry not found"); } } } #[test] fn entry_vacant_insert() { let key = 0; let value = 0; let mut src = almost_filled_map(); assert_eq!(MAP_SLOTS - 1, src.len()); let entry = src.entry(key); match entry { Entry::Occupied(_) => { panic!("Entry found when empty"); } Entry::Vacant(v) => { v.insert(value).unwrap(); } }; assert_eq!(value, *src.get(&key).unwrap()) } #[test] fn entry_occupied_insert() { let key = 0; let value = 0; let value2 = 5; let mut src = almost_filled_map(); assert_eq!(MAP_SLOTS - 1, src.len()); src.insert(key, value).unwrap(); let entry = src.entry(key); match entry { Entry::Occupied(o) => { assert_eq!(value, o.insert(value2)); } Entry::Vacant(_) => { panic!("Entry not found"); } }; assert_eq!(value2, *src.get(&key).unwrap()) } #[test] fn entry_remove_entry() { let key = 0; let value = 0; let mut src = almost_filled_map(); 
src.insert(key, value).unwrap(); assert_eq!(MAP_SLOTS, src.len()); let entry = src.entry(key); match entry { Entry::Occupied(o) => { assert_eq!((key, value), o.remove_entry()); } Entry::Vacant(_) => { panic!("Entry not found") } }; assert_eq!(MAP_SLOTS - 1, src.len()); } #[test] fn entry_remove() { let key = 0; let value = 0; let mut src = almost_filled_map(); src.insert(key, value).unwrap(); assert_eq!(MAP_SLOTS, src.len()); let entry = src.entry(key); match entry { Entry::Occupied(o) => { assert_eq!(value, o.remove()); } Entry::Vacant(_) => { panic!("Entry not found"); } }; assert_eq!(MAP_SLOTS - 1, src.len()); } #[test] fn entry_roll_through_all() { let mut src: FnvIndexMap = FnvIndexMap::new(); for i in 0..MAP_SLOTS { match src.entry(i) { Entry::Occupied(_) => { panic!("Entry found before insert"); } Entry::Vacant(v) => { v.insert(i).unwrap(); } } } let add_mod = 99; for i in 0..MAP_SLOTS { match src.entry(i) { Entry::Occupied(o) => { assert_eq!(i, o.insert(i + add_mod)); } Entry::Vacant(_) => { panic!("Entry not found after insert"); } } } for i in 0..MAP_SLOTS { match src.entry(i) { Entry::Occupied(o) => { assert_eq!((i, i + add_mod), o.remove_entry()); } Entry::Vacant(_) => { panic!("Entry not found after insert"); } } } for i in 0..MAP_SLOTS { assert!(matches!(src.entry(i), Entry::Vacant(_))); } assert!(src.is_empty()); } #[test] fn first_last() { let mut map = FnvIndexMap::<_, _, 4>::new(); assert_eq!(None, map.first()); assert_eq!(None, map.last()); map.insert(0, 0).unwrap(); map.insert(2, 2).unwrap(); assert_eq!(Some((&0, &0)), map.first()); assert_eq!(Some((&2, &2)), map.last()); map.insert(1, 1).unwrap(); assert_eq!(Some((&1, &1)), map.last()); *map.first_mut().unwrap().1 += 1; *map.last_mut().unwrap().1 += 1; assert_eq!(Some((&0, &1)), map.first()); assert_eq!(Some((&1, &2)), map.last()); } } heapless-0.7.16/src/indexset.rs000064400000000000000000000410550072674642500145400ustar 00000000000000use crate::indexmap::{self, IndexMap}; use core::{borrow::Borrow, fmt, iter::FromIterator}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash}; /// A [`heapless::IndexSet`](./struct.IndexSet.html) using the /// default FNV hasher. /// A list of all Methods and Traits available for `FnvIndexSet` can be found in /// the [`heapless::IndexSet`](./struct.IndexSet.html) documentation. /// /// # Examples /// ``` /// use heapless::FnvIndexSet; /// /// // A hash set with a capacity of 16 elements allocated on the stack /// let mut books = FnvIndexSet::<_, 16>::new(); /// /// // Add some books. /// books.insert("A Dance With Dragons").unwrap(); /// books.insert("To Kill a Mockingbird").unwrap(); /// books.insert("The Odyssey").unwrap(); /// books.insert("The Great Gatsby").unwrap(); /// /// // Check for a specific one. /// if !books.contains("The Winds of Winter") { /// println!("We have {} books, but The Winds of Winter ain't one.", /// books.len()); /// } /// /// // Remove a book. /// books.remove("The Odyssey"); /// /// // Iterate over everything. /// for book in &books { /// println!("{}", book); /// } /// ``` pub type FnvIndexSet = IndexSet, N>; /// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/1/indexmap/set/struct.IndexSet.html). /// /// Note that you cannot use `IndexSet` directly, since it is generic around the hashing algorithm /// in use. Pick a concrete instantiation like [`FnvIndexSet`](./type.FnvIndexSet.html) instead /// or create your own. /// /// Note that the capacity of the `IndexSet` must be a power of 2. 
/// /// # Examples /// Since `IndexSet` cannot be used directly, we're using its `FnvIndexSet` instantiation /// for this example. /// /// ``` /// use heapless::FnvIndexSet; /// /// // A hash set with a capacity of 16 elements allocated on the stack /// let mut books = FnvIndexSet::<_, 16>::new(); /// /// // Add some books. /// books.insert("A Dance With Dragons").unwrap(); /// books.insert("To Kill a Mockingbird").unwrap(); /// books.insert("The Odyssey").unwrap(); /// books.insert("The Great Gatsby").unwrap(); /// /// // Check for a specific one. /// if !books.contains("The Winds of Winter") { /// println!("We have {} books, but The Winds of Winter ain't one.", /// books.len()); /// } /// /// // Remove a book. /// books.remove("The Odyssey"); /// /// // Iterate over everything. /// for book in &books { /// println!("{}", book); /// } /// ``` pub struct IndexSet { map: IndexMap, } impl IndexSet, N> { /// Creates an empty `IndexSet` pub const fn new() -> Self { IndexSet { map: IndexMap::new(), } } } impl IndexSet where T: Eq + Hash, S: BuildHasher, { /// Returns the number of elements the set can hold /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let set = FnvIndexSet::::new(); /// assert_eq!(set.capacity(), 16); /// ``` pub fn capacity(&self) -> usize { self.map.capacity() } /// Return an iterator over the values of the set, in their order /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut set = FnvIndexSet::<_, 16>::new(); /// set.insert("a").unwrap(); /// set.insert("b").unwrap(); /// /// // Will print in an arbitrary order. /// for x in set.iter() { /// println!("{}", x); /// } /// ``` pub fn iter(&self) -> Iter<'_, T> { Iter { iter: self.map.iter(), } } /// Get the first value /// /// Computes in **O(1)** time pub fn first(&self) -> Option<&T> { self.map.first().map(|(k, _v)| k) } /// Get the last value /// /// Computes in **O(1)** time pub fn last(&self) -> Option<&T> { self.map.last().map(|(k, _v)| k) } /// Visits the values representing the difference, i.e. the values that are in `self` but not in /// `other`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Can be seen as `a - b`. /// for x in a.difference(&b) { /// println!("{}", x); // Print 1 /// } /// /// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect(); /// assert_eq!(diff, [1].iter().collect::>()); /// /// // Note that difference is not symmetric, /// // and `b - a` means something else: /// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect(); /// assert_eq!(diff, [4].iter().collect::>()); /// ``` pub fn difference<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> Difference<'a, T, S2, N2> where S2: BuildHasher, { Difference { iter: self.iter(), other, } } /// Visits the values representing the symmetric difference, i.e. the values that are in `self` /// or in `other` but not in both. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 1, 4 in that order order. 
/// for x in a.symmetric_difference(&b) { /// println!("{}", x); /// } /// /// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect(); /// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect(); /// /// assert_eq!(diff1, diff2); /// assert_eq!(diff1, [1, 4].iter().collect::>()); /// ``` pub fn symmetric_difference<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> impl Iterator where S2: BuildHasher, { self.difference(other).chain(other.difference(self)) } /// Visits the values representing the intersection, i.e. the values that are both in `self` and /// `other`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 2, 3 in that order. /// for x in a.intersection(&b) { /// println!("{}", x); /// } /// /// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect(); /// assert_eq!(intersection, [2, 3].iter().collect::>()); /// ``` pub fn intersection<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> Intersection<'a, T, S2, N2> where S2: BuildHasher, { Intersection { iter: self.iter(), other, } } /// Visits the values representing the union, i.e. all the values in `self` or `other`, without /// duplicates. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 1, 2, 3, 4 in that order. /// for x in a.union(&b) { /// println!("{}", x); /// } /// /// let union: FnvIndexSet<_, 16> = a.union(&b).collect(); /// assert_eq!(union, [1, 2, 3, 4].iter().collect::>()); /// ``` pub fn union<'a, S2, const N2: usize>( &'a self, other: &'a IndexSet, ) -> impl Iterator where S2: BuildHasher, { self.iter().chain(other.difference(self)) } /// Returns the number of elements in the set. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// assert_eq!(v.len(), 0); /// v.insert(1).unwrap(); /// assert_eq!(v.len(), 1); /// ``` pub fn len(&self) -> usize { self.map.len() } /// Returns `true` if the set contains no elements. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// assert!(v.is_empty()); /// v.insert(1).unwrap(); /// assert!(!v.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.map.is_empty() } /// Clears the set, removing all values. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// v.insert(1).unwrap(); /// v.clear(); /// assert!(v.is_empty()); /// ``` pub fn clear(&mut self) { self.map.clear() } /// Returns `true` if the set contains a value. /// /// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the /// borrowed form must match those for the value type. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// assert_eq!(set.contains(&1), true); /// assert_eq!(set.contains(&4), false); /// ``` pub fn contains(&self, value: &Q) -> bool where T: Borrow, Q: ?Sized + Eq + Hash, { self.map.contains_key(value) } /// Returns `true` if `self` has no elements in common with `other`. This is equivalent to /// checking for an empty intersection. 
/// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut b = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(a.is_disjoint(&b), true); /// b.insert(4).unwrap(); /// assert_eq!(a.is_disjoint(&b), true); /// b.insert(1).unwrap(); /// assert_eq!(a.is_disjoint(&b), false); /// ``` pub fn is_disjoint(&self, other: &IndexSet) -> bool where S2: BuildHasher, { self.iter().all(|v| !other.contains(v)) } /// Returns `true` if the set is a subset of another, i.e. `other` contains at least all the /// values in `self`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.is_subset(&sup), true); /// set.insert(2).unwrap(); /// assert_eq!(set.is_subset(&sup), true); /// set.insert(4).unwrap(); /// assert_eq!(set.is_subset(&sup), false); /// ``` pub fn is_subset(&self, other: &IndexSet) -> bool where S2: BuildHasher, { self.iter().all(|v| other.contains(v)) } // Returns `true` if the set is a superset of another, i.e. `self` contains at least all the // values in `other`. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect(); /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.is_superset(&sub), false); /// /// set.insert(0).unwrap(); /// set.insert(1).unwrap(); /// assert_eq!(set.is_superset(&sub), false); /// /// set.insert(2).unwrap(); /// assert_eq!(set.is_superset(&sub), true); /// ``` pub fn is_superset(&self, other: &IndexSet) -> bool where S2: BuildHasher, { other.is_subset(self) } /// Adds a value to the set. /// /// If the set did not have this value present, `true` is returned. /// /// If the set did have this value present, `false` is returned. /// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.insert(2).unwrap(), true); /// assert_eq!(set.insert(2).unwrap(), false); /// assert_eq!(set.len(), 1); /// ``` pub fn insert(&mut self, value: T) -> Result { self.map .insert(value, ()) .map(|old| old.is_none()) .map_err(|(k, _)| k) } /// Removes a value from the set. Returns `true` if the value was present in the set. /// /// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the /// borrowed form must match those for the value type. 
/// /// # Examples /// /// ``` /// use heapless::FnvIndexSet; /// /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// set.insert(2).unwrap(); /// assert_eq!(set.remove(&2), true); /// assert_eq!(set.remove(&2), false); /// ``` pub fn remove(&mut self, value: &Q) -> bool where T: Borrow, Q: ?Sized + Eq + Hash, { self.map.remove(value).is_some() } } impl Clone for IndexSet where T: Eq + Hash + Clone, S: Clone, { fn clone(&self) -> Self { Self { map: self.map.clone(), } } } impl fmt::Debug for IndexSet where T: Eq + Hash + fmt::Debug, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } } impl Default for IndexSet where T: Eq + Hash, S: BuildHasher + Default, { fn default() -> Self { IndexSet { map: <_>::default(), } } } impl PartialEq> for IndexSet where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn eq(&self, other: &IndexSet) -> bool { self.len() == other.len() && self.is_subset(other) } } impl Extend for IndexSet where T: Eq + Hash, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { self.map.extend(iterable.into_iter().map(|k| (k, ()))) } } impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet where T: 'a + Eq + Hash + Copy, S: BuildHasher, { fn extend(&mut self, iterable: I) where I: IntoIterator, { self.extend(iterable.into_iter().cloned()) } } impl FromIterator for IndexSet where T: Eq + Hash, S: BuildHasher + Default, { fn from_iter(iter: I) -> Self where I: IntoIterator, { let mut set = IndexSet::default(); set.extend(iter); set } } impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } pub struct Iter<'a, T> { iter: indexmap::Iter<'a, T, ()>, } impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; fn next(&mut self) -> Option { self.iter.next().map(|(k, _)| k) } } impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Self { Self { iter: self.iter.clone(), } } } pub struct Difference<'a, T, S, const N: usize> where S: BuildHasher, T: Eq + Hash, { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N> where S: BuildHasher, T: Eq + Hash, { type Item = &'a T; fn next(&mut self) -> Option { loop { let elt = self.iter.next()?; if !self.other.contains(elt) { return Some(elt); } } } } pub struct Intersection<'a, T, S, const N: usize> where S: BuildHasher, T: Eq + Hash, { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N> where S: BuildHasher, T: Eq + Hash, { type Item = &'a T; fn next(&mut self) -> Option { loop { let elt = self.iter.next()?; if self.other.contains(elt) { return Some(elt); } } } } heapless-0.7.16/src/lib.rs000064400000000000000000000102360072674642500134600ustar 00000000000000//! `static` friendly data structures that don't require dynamic memory allocation //! //! The core principle behind `heapless` is that its data structures are backed by a *static* memory //! allocation. For example, you can think of `heapless::Vec` as an alternative version of //! `std::Vec` with fixed capacity and that can't be re-allocated on the fly (e.g. via `push`). //! //! All `heapless` data structures store their memory allocation *inline* and specify their capacity //! via their type parameter `N`. This means that you can instantiate a `heapless` data structure on //! the stack, in a `static` variable, or even in the heap. 
//! //! ``` //! use heapless::Vec; // fixed capacity `std::Vec` //! //! // on the stack //! let mut xs: Vec = Vec::new(); // can hold up to 8 elements //! xs.push(42).unwrap(); //! assert_eq!(xs.pop(), Some(42)); //! //! // in a `static` variable //! static mut XS: Vec = Vec::new(); //! //! let xs = unsafe { &mut XS }; //! //! xs.push(42); //! assert_eq!(xs.pop(), Some(42)); //! //! // in the heap (though kind of pointless because no reallocation) //! let mut ys: Box> = Box::new(Vec::new()); //! ys.push(42).unwrap(); //! assert_eq!(ys.pop(), Some(42)); //! ``` //! //! Because they have fixed capacity `heapless` data structures don't implicitly reallocate. This //! means that operations like `heapless::Vec.push` are *truly* constant time rather than amortized //! constant time with potentially unbounded (depends on the allocator) worst case execution time //! (which is bad / unacceptable for hard real time applications). //! //! `heapless` data structures don't use a memory allocator which means no risk of an uncatchable //! Out Of Memory (OOM) condition while performing operations on them. It's certainly possible to //! run out of capacity while growing `heapless` data structures, but the API lets you handle this //! possibility by returning a `Result` on operations that may exhaust the capacity of the data //! structure. //! //! List of currently implemented data structures: //! //! - [`Arc`](pool/singleton/arc/struct.Arc.html) -- Thread-safe reference-counting pointer backed by a memory pool //! - [`BinaryHeap`](binary_heap/struct.BinaryHeap.html) -- priority queue //! - [`IndexMap`](struct.IndexMap.html) -- hash table //! - [`IndexSet`](struct.IndexSet.html) -- hash set //! - [`LinearMap`](struct.LinearMap.html) //! - [`Pool`](pool/struct.Pool.html) -- lock-free memory pool //! - [`String`](struct.String.html) //! - [`Vec`](struct.Vec.html) //! - [`mpmc::Q*`](mpmc/index.html) -- multiple producer multiple consumer lock-free queue //! - [`spsc::Queue`](spsc/struct.Queue.html) -- single producer single consumer lock-free queue //! //! # Optional Features //! //! The `heapless` crate provides the following optional Cargo features: //! //! - `ufmt-impl`: Implement [`ufmt_write::uWrite`] for `String` and `Vec` //! //! [`ufmt_write::uWrite`]: https://docs.rs/ufmt-write/ //! //! # Minimum Supported Rust Version (MSRV) //! //! This crate is guaranteed to compile on stable Rust 1.51 and up with its default set of features. //! It *might* compile on older versions but that may change in any new patch release. 
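//!
//! # Handling capacity exhaustion
//!
//! As mentioned above, operations that may exhaust a structure's capacity return a `Result`.
//! A minimal sketch of handling that with `Vec::push`:
//!
//! ```
//! use heapless::Vec;
//!
//! let mut xs: Vec<u8, 2> = Vec::new();
//! xs.push(1).unwrap();
//! xs.push(2).unwrap();
//! // the vector is full; the rejected element is handed back in the error
//! assert_eq!(xs.push(3), Err(3));
//! ```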
#![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![deny(rust_2018_compatibility)] #![deny(rust_2018_idioms)] #![deny(warnings)] #![deny(const_err)] pub use binary_heap::BinaryHeap; pub use deque::Deque; pub use histbuf::{HistoryBuffer, OldestOrdered}; pub use indexmap::{Bucket, Entry, FnvIndexMap, IndexMap, OccupiedEntry, Pos, VacantEntry}; pub use indexset::{FnvIndexSet, IndexSet}; pub use linear_map::LinearMap; #[cfg(all(has_cas, feature = "cas"))] pub use pool::singleton::arc::Arc; pub use string::String; pub use vec::Vec; #[macro_use] #[cfg(test)] mod test_helpers; mod deque; mod histbuf; mod indexmap; mod indexset; mod linear_map; mod string; mod vec; #[cfg(feature = "serde")] mod de; #[cfg(feature = "serde")] mod ser; pub mod binary_heap; #[cfg(feature = "defmt-impl")] mod defmt; #[cfg(all(has_cas, feature = "cas"))] pub mod mpmc; #[cfg(all(has_cas, feature = "cas"))] pub mod pool; pub mod sorted_linked_list; #[cfg(has_atomics)] pub mod spsc; #[cfg(feature = "ufmt-impl")] mod ufmt; mod sealed; heapless-0.7.16/src/linear_map.rs000064400000000000000000000314670072674642500150320ustar 00000000000000use crate::Vec; use core::{borrow::Borrow, fmt, iter::FromIterator, mem, ops, slice}; /// A fixed capacity map / dictionary that performs lookups via linear search /// /// Note that as this map doesn't use hashing so most operations are **O(N)** instead of O(1) pub struct LinearMap { pub(crate) buffer: Vec<(K, V), N>, } impl LinearMap { /// Creates an empty `LinearMap` /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// // allocate the map on the stack /// let mut map: LinearMap<&str, isize, 8> = LinearMap::new(); /// /// // allocate the map in a static variable /// static mut MAP: LinearMap<&str, isize, 8> = LinearMap::new(); /// ``` pub const fn new() -> Self { Self { buffer: Vec::new() } } } impl LinearMap where K: Eq, { /// Returns the number of elements that the map can hold /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let map: LinearMap<&str, isize, 8> = LinearMap::new(); /// assert_eq!(map.capacity(), 8); /// ``` pub fn capacity(&self) -> usize { N } /// Clears the map, removing all key-value pairs /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// map.clear(); /// assert!(map.is_empty()); /// ``` pub fn clear(&mut self) { self.buffer.clear() } /// Returns true if the map contains a value for the specified key. 
/// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` pub fn contains_key(&self, key: &K) -> bool { self.get(key).is_some() } /// Returns a reference to the value corresponding to the key /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); /// ``` pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: Eq + ?Sized, { self.iter() .find(|&(k, _)| k.borrow() == key) .map(|(_, v)| v) } /// Returns a mutable reference to the value corresponding to the key /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; /// } /// assert_eq!(map[&1], "b"); /// ``` pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> where K: Borrow, Q: Eq + ?Sized, { self.iter_mut() .find(|&(k, _)| k.borrow() == key) .map(|(_, v)| v) } /// Returns the number of elements in this map /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut a: LinearMap<_, _, 8> = LinearMap::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a").unwrap(); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { self.buffer.len() } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If the map did have this key present, the value is updated, and the old value is returned. /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// assert_eq!(map.insert(37, "a").unwrap(), None); /// assert_eq!(map.is_empty(), false); /// /// map.insert(37, "b").unwrap(); /// assert_eq!(map.insert(37, "c").unwrap(), Some("b")); /// assert_eq!(map[&37], "c"); /// ``` pub fn insert(&mut self, key: K, mut value: V) -> Result, (K, V)> { if let Some((_, v)) = self.iter_mut().find(|&(k, _)| *k == key) { mem::swap(v, &mut value); return Ok(Some(value)); } self.buffer.push((key, value))?; Ok(None) } /// Returns true if the map contains no elements /// /// Computes in **O(1)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut a: LinearMap<_, _, 8> = LinearMap::new(); /// assert!(a.is_empty()); /// a.insert(1, "a").unwrap(); /// assert!(!a.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } /// An iterator visiting all key-value pairs in arbitrary order. 
/// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for (key, val) in map.iter() { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter(&self) -> Iter<'_, K, V> { Iter { iter: self.buffer.as_slice().iter(), } } /// An iterator visiting all key-value pairs in arbitrary order, with mutable references to the /// values /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// // Update all values /// for (_, val) in map.iter_mut() { /// *val = 2; /// } /// /// for (key, val) in &map { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut { iter: self.buffer.as_mut_slice().iter_mut(), } } /// An iterator visiting all keys in arbitrary order /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for key in map.keys() { /// println!("{}", key); /// } /// ``` pub fn keys(&self) -> impl Iterator { self.iter().map(|(k, _)| k) } /// Removes a key from the map, returning the value at the key if the key was previously in the /// map /// /// Computes in **O(N)** time /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// ``` pub fn remove(&mut self, key: &Q) -> Option where K: Borrow, Q: Eq + ?Sized, { let idx = self .keys() .enumerate() .find(|&(_, k)| k.borrow() == key) .map(|(idx, _)| idx); idx.map(|idx| self.buffer.swap_remove(idx).1) } /// An iterator visiting all values in arbitrary order /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values(&self) -> impl Iterator { self.iter().map(|(_, v)| v) } /// An iterator visiting all values mutably in arbitrary order /// /// # Examples /// /// ``` /// use heapless::LinearMap; /// /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); /// /// for val in map.values_mut() { /// *val += 10; /// } /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values_mut(&mut self) -> impl Iterator { self.iter_mut().map(|(_, v)| v) } } impl<'a, K, V, Q, const N: usize> ops::Index<&'a Q> for LinearMap where K: Borrow + Eq, Q: Eq + ?Sized, { type Output = V; fn index(&self, key: &Q) -> &V { self.get(key).expect("no entry found for key") } } impl<'a, K, V, Q, const N: usize> ops::IndexMut<&'a Q> for LinearMap where K: Borrow + Eq, Q: Eq + ?Sized, { fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("no entry found for key") } } impl Default for LinearMap where K: Eq, { fn default() -> Self { Self::new() } } impl Clone for LinearMap where K: Eq + Clone, V: Clone, { fn clone(&self) -> Self { Self { buffer: 
self.buffer.clone(), } } } impl fmt::Debug for LinearMap where K: Eq + fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl FromIterator<(K, V)> for LinearMap where K: Eq, { fn from_iter(iter: I) -> Self where I: IntoIterator, { let mut out = Self::new(); out.buffer.extend(iter); out } } pub struct IntoIter where K: Eq, { inner: as IntoIterator>::IntoIter, } impl Iterator for IntoIter where K: Eq, { type Item = (K, V); fn next(&mut self) -> Option { self.inner.next() } } impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap where K: Eq, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } pub struct Iter<'a, K, V> { iter: slice::Iter<'a, (K, V)>, } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.iter.next().map(|&(ref k, ref v)| (k, v)) } } impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Self { Self { iter: self.iter.clone(), } } } impl Drop for LinearMap { fn drop(&mut self) { // heapless::Vec implements drop right? drop(&self.buffer); // original code below // unsafe { ptr::drop_in_place(self.buffer.as_mut_slice()) } } } pub struct IterMut<'a, K, V> { iter: slice::IterMut<'a, (K, V)>, } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option { self.iter.next().map(|&mut (ref k, ref mut v)| (k, v)) } } impl PartialEq> for LinearMap where K: Eq, V: PartialEq, { fn eq(&self, other: &LinearMap) -> bool { self.len() == other.len() && self .iter() .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for LinearMap where K: Eq, V: PartialEq, { } #[cfg(test)] mod test { use crate::LinearMap; #[test] fn static_new() { static mut _L: LinearMap = LinearMap::new(); } #[test] fn partial_eq() { { let mut a = LinearMap::<_, _, 1>::new(); a.insert("k1", "v1").unwrap(); let mut b = LinearMap::<_, _, 2>::new(); b.insert("k1", "v1").unwrap(); assert!(a == b); b.insert("k2", "v2").unwrap(); assert!(a != b); } { let mut a = LinearMap::<_, _, 2>::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); let mut b = LinearMap::<_, _, 2>::new(); b.insert("k2", "v2").unwrap(); b.insert("k1", "v1").unwrap(); assert!(a == b); } } // TODO: drop test } heapless-0.7.16/src/mpmc.rs000064400000000000000000000233300072674642500136450ustar 00000000000000//! A fixed capacity Multiple-Producer Multiple-Consumer (MPMC) lock-free queue //! //! NOTE: This module is not available on targets that do *not* support CAS operations and are not //! emulated by the [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill) crate (e.g., //! MSP430). //! //! # Example //! //! This queue can be constructed in "const context". Placing it in a `static` variable lets *all* //! contexts (interrupts / threads / `main`) safely enqueue and dequeue items from it. //! //! ``` ignore //! #![no_main] //! #![no_std] //! //! use panic_semihosting as _; //! //! use cortex_m::{asm, peripheral::syst::SystClkSource}; //! use cortex_m_rt::{entry, exception}; //! use cortex_m_semihosting::hprintln; //! use heapless::mpmc::Q2; //! //! static Q: Q2 = Q2::new(); //! //! #[entry] //! fn main() -> ! { //! if let Some(p) = cortex_m::Peripherals::take() { //! let mut syst = p.SYST; //! //! // configures the system timer to trigger a SysTick exception every second //! syst.set_clock_source(SystClkSource::Core); //! 
syst.set_reload(12_000_000); //! syst.enable_counter(); //! syst.enable_interrupt(); //! } //! //! loop { //! if let Some(x) = Q.dequeue() { //! hprintln!("{}", x).ok(); //! } else { //! asm::wfi(); //! } //! } //! } //! //! #[exception] //! fn SysTick() { //! static mut COUNT: u8 = 0; //! //! Q.enqueue(*COUNT).ok(); //! *COUNT += 1; //! } //! ``` //! //! # Benchmark //! //! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles //! //! N| `Q8::::enqueue().ok()` (`z`) | `Q8::::dequeue()` (`z`) | //! -|----------------------------------|-----------------------------| //! 0|34 |35 | //! 1|52 |53 | //! 2|69 |71 | //! //! - `N` denotes the number of *interruptions*. On Cortex-M, an interruption consists of an //! interrupt handler preempting the would-be atomic section of the `enqueue` / `dequeue` //! operation. Note that it does *not* matter if the higher priority handler uses the queue or //! not. //! - All execution times are in clock cycles. 1 clock cycle = 125 ns. //! - Execution time is *dependent* of `mem::size_of::()`. Both operations include one //! `memcpy(T)` in their successful path. //! - The optimization level is indicated in parentheses. //! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue` //! and `Ok` is returned by `enqueue`). //! //! # Portability //! //! This module requires CAS atomic instructions which are not available on all architectures //! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can be //! emulated however with [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill), which is //! enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32` //! targets. MSP430 is currently not supported by //! [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill). //! //! # References //! //! This is an implementation of Dmitry Vyukov's ["Bounded MPMC queue"][0] minus the cache padding. //! //! [0]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue use core::{cell::UnsafeCell, mem::MaybeUninit}; #[cfg(all(feature = "mpmc_large", not(cas_atomic_polyfill)))] type AtomicTargetSize = core::sync::atomic::AtomicUsize; #[cfg(all(feature = "mpmc_large", cas_atomic_polyfill))] type AtomicTargetSize = atomic_polyfill::AtomicUsize; #[cfg(all(not(feature = "mpmc_large"), not(cas_atomic_polyfill)))] type AtomicTargetSize = core::sync::atomic::AtomicU8; #[cfg(all(not(feature = "mpmc_large"), cas_atomic_polyfill))] type AtomicTargetSize = atomic_polyfill::AtomicU8; #[cfg(not(cas_atomic_polyfill))] type Ordering = core::sync::atomic::Ordering; #[cfg(cas_atomic_polyfill)] type Ordering = atomic_polyfill::Ordering; #[cfg(feature = "mpmc_large")] type IntSize = usize; #[cfg(not(feature = "mpmc_large"))] type IntSize = u8; /// MPMC queue with a capability for 2 elements. pub type Q2 = MpMcQueue; /// MPMC queue with a capability for 4 elements. pub type Q4 = MpMcQueue; /// MPMC queue with a capability for 8 elements. pub type Q8 = MpMcQueue; /// MPMC queue with a capability for 16 elements. pub type Q16 = MpMcQueue; /// MPMC queue with a capability for 32 elements. pub type Q32 = MpMcQueue; /// MPMC queue with a capability for 64 elements. pub type Q64 = MpMcQueue; /// MPMC queue with a capacity for N elements /// N must be a power of 2 /// The max value of N is u8::MAX - 1 if `mpmc_large` feature is not enabled. 
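///
/// # Examples
///
/// A minimal single-threaded sketch using the `Q2` alias (a queue with capacity 2) defined above:
///
/// ```
/// use heapless::mpmc::Q2;
///
/// let q: Q2<u8> = Q2::new();
/// assert!(q.enqueue(1).is_ok());
/// assert_eq!(q.dequeue(), Some(1));
/// assert_eq!(q.dequeue(), None);
/// ```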
pub struct MpMcQueue { buffer: UnsafeCell<[Cell; N]>, dequeue_pos: AtomicTargetSize, enqueue_pos: AtomicTargetSize, } impl MpMcQueue { const MASK: IntSize = (N - 1) as IntSize; const EMPTY_CELL: Cell = Cell::new(0); const ASSERT: [(); 1] = [()]; /// Creates an empty queue pub const fn new() -> Self { // Const assert crate::sealed::greater_than_1::(); crate::sealed::power_of_two::(); // Const assert on size. Self::ASSERT[!(N < (IntSize::MAX as usize)) as usize]; let mut cell_count = 0; let mut result_cells: [Cell; N] = [Self::EMPTY_CELL; N]; while cell_count != N { result_cells[cell_count] = Cell::new(cell_count); cell_count += 1; } Self { buffer: UnsafeCell::new(result_cells), dequeue_pos: AtomicTargetSize::new(0), enqueue_pos: AtomicTargetSize::new(0), } } /// Returns the item in the front of the queue, or `None` if the queue is empty pub fn dequeue(&self) -> Option { unsafe { dequeue(self.buffer.get() as *mut _, &self.dequeue_pos, Self::MASK) } } /// Adds an `item` to the end of the queue /// /// Returns back the `item` if the queue is full pub fn enqueue(&self, item: T) -> Result<(), T> { unsafe { enqueue( self.buffer.get() as *mut _, &self.enqueue_pos, Self::MASK, item, ) } } } impl Default for MpMcQueue { fn default() -> Self { Self::new() } } unsafe impl Sync for MpMcQueue where T: Send {} struct Cell { data: MaybeUninit, sequence: AtomicTargetSize, } impl Cell { const fn new(seq: usize) -> Self { Self { data: MaybeUninit::uninit(), sequence: AtomicTargetSize::new(seq as IntSize), } } } unsafe fn dequeue( buffer: *mut Cell, dequeue_pos: &AtomicTargetSize, mask: IntSize, ) -> Option { let mut pos = dequeue_pos.load(Ordering::Relaxed); let mut cell; loop { cell = buffer.add(usize::from(pos & mask)); let seq = (*cell).sequence.load(Ordering::Acquire); let dif = (seq as i8).wrapping_sub((pos.wrapping_add(1)) as i8); if dif == 0 { if dequeue_pos .compare_exchange_weak( pos, pos.wrapping_add(1), Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { break; } } else if dif < 0 { return None; } else { pos = dequeue_pos.load(Ordering::Relaxed); } } let data = (*cell).data.as_ptr().read(); (*cell) .sequence .store(pos.wrapping_add(mask).wrapping_add(1), Ordering::Release); Some(data) } unsafe fn enqueue( buffer: *mut Cell, enqueue_pos: &AtomicTargetSize, mask: IntSize, item: T, ) -> Result<(), T> { let mut pos = enqueue_pos.load(Ordering::Relaxed); let mut cell; loop { cell = buffer.add(usize::from(pos & mask)); let seq = (*cell).sequence.load(Ordering::Acquire); let dif = (seq as i8).wrapping_sub(pos as i8); if dif == 0 { if enqueue_pos .compare_exchange_weak( pos, pos.wrapping_add(1), Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { break; } } else if dif < 0 { return Err(item); } else { pos = enqueue_pos.load(Ordering::Relaxed); } } (*cell).data.as_mut_ptr().write(item); (*cell) .sequence .store(pos.wrapping_add(1), Ordering::Release); Ok(()) } #[cfg(test)] mod tests { use super::Q2; #[test] fn sanity() { let q = Q2::new(); q.enqueue(0).unwrap(); q.enqueue(1).unwrap(); assert!(q.enqueue(2).is_err()); assert_eq!(q.dequeue(), Some(0)); assert_eq!(q.dequeue(), Some(1)); assert_eq!(q.dequeue(), None); } #[test] fn drain_at_pos255() { let q = Q2::new(); for _ in 0..255 { assert!(q.enqueue(0).is_ok()); assert_eq!(q.dequeue(), Some(0)); } // this should not block forever assert_eq!(q.dequeue(), None); } #[test] fn full_at_wrapped_pos0() { let q = Q2::new(); for _ in 0..254 { assert!(q.enqueue(0).is_ok()); assert_eq!(q.dequeue(), Some(0)); } assert!(q.enqueue(0).is_ok()); 
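// the queue (capacity 2) now holds one element; the second enqueue below fills it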
assert!(q.enqueue(0).is_ok()); // this should not block forever assert!(q.enqueue(0).is_err()); } } heapless-0.7.16/src/pool/cas.rs000064400000000000000000000147750072674642500144450ustar 00000000000000//! Stack based on CAS atomics //! //! To reduce the chance of hitting the ABA problem we use a 32-bit offset + a 32-bit version tag //! instead of a 64-bit pointer. The version tag will be bumped on each successful `pop` operation. use core::{ cell::UnsafeCell, marker::PhantomData, num::{NonZeroU32, NonZeroU64}, ptr::NonNull, sync::atomic::{AtomicU64, Ordering}, }; /// Unfortunate implementation detail required to use the /// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method pub struct Node { next: Atomic>, pub(crate) data: UnsafeCell, } impl Node { fn next(&self) -> &Atomic> { &self.next } } pub struct Stack { head: Atomic>, } impl Stack { pub const fn new() -> Self { Self { head: Atomic::null(), } } pub fn push(&self, new_head: Ptr>) { let mut head = self.head.load(Ordering::Relaxed); loop { unsafe { new_head .as_raw() .as_ref() .next() .store(head, Ordering::Relaxed); } if let Err(p) = self.head.compare_and_exchange_weak( head, Some(new_head), Ordering::Release, Ordering::Relaxed, ) { head = p; } else { return; } } } pub fn try_pop(&self) -> Option>> { loop { if let Some(mut head) = self.head.load(Ordering::Acquire) { let next = unsafe { head.as_raw().as_ref().next().load(Ordering::Relaxed) }; if self .head .compare_and_exchange_weak( Some(head), next, Ordering::Release, Ordering::Relaxed, ) .is_ok() { head.incr_tag(); return Some(head); } } else { // stack observed empty return None; } } } } #[cfg(target_arch = "x86_64")] fn anchor(init: Option<*mut T>) -> *mut T { use core::sync::atomic::AtomicU8; use spin::Once; static LAZY_ANCHOR: Once = Once::new(); let likely_unaligned_address = if let Some(init) = init { *LAZY_ANCHOR.call_once(|| init as usize) } else { LAZY_ANCHOR.get().copied().unwrap_or_else(|| { // we may hit this branch with Pool of ZSTs where `grow` does not need to be called static BSS_ANCHOR: AtomicU8 = AtomicU8::new(0); &BSS_ANCHOR as *const _ as usize }) }; let alignment_mask = !(core::mem::align_of::() - 1); let well_aligned_address = likely_unaligned_address & alignment_mask; well_aligned_address as *mut T } /// On x86_64, anchored pointer. 
This is a (signed) 32-bit offset from `anchor` plus a 32-bit tag /// On x86, this is a pointer plus a 32-bit tag pub struct Ptr { inner: NonZeroU64, _marker: PhantomData<*mut T>, } impl Clone for Ptr { fn clone(&self) -> Self { *self } } impl Copy for Ptr {} fn initial_tag_value() -> NonZeroU32 { NonZeroU32::new(1).unwrap() } impl Ptr { #[cfg(target_arch = "x86_64")] pub fn new(p: *mut T) -> Option { use core::convert::TryFrom; i32::try_from((p as isize).wrapping_sub(anchor::(Some(p)) as isize)) .ok() .map(|offset| unsafe { Ptr::from_parts(initial_tag_value(), offset) }) } #[cfg(target_arch = "x86")] pub fn new(p: *mut T) -> Option { Some(unsafe { Ptr::from_parts(initial_tag_value(), p as i32) }) } unsafe fn from_parts(tag: NonZeroU32, offset: i32) -> Self { Self { inner: NonZeroU64::new_unchecked((tag.get() as u64) << 32 | (offset as u32 as u64)), _marker: PhantomData, } } fn from_u64(p: u64) -> Option { NonZeroU64::new(p).map(|inner| Self { inner, _marker: PhantomData, }) } fn into_u64(&self) -> u64 { self.inner.get() } fn tag(&self) -> NonZeroU32 { let tag = (self.inner.get() >> 32) as u32; debug_assert_ne!(0, tag, "broken non-zero invariant"); unsafe { NonZeroU32::new_unchecked(tag) } } fn incr_tag(&mut self) { let maybe_zero_tag = self.tag().get().wrapping_add(1); let tag = NonZeroU32::new(maybe_zero_tag).unwrap_or(initial_tag_value()); let offset = self.offset(); *self = unsafe { Ptr::from_parts(tag, offset) }; } fn offset(&self) -> i32 { self.inner.get() as i32 } #[cfg(target_arch = "x86_64")] fn as_raw(&self) -> NonNull { unsafe { NonNull::new_unchecked( (anchor::(None) as isize).wrapping_add(self.offset() as isize) as *mut T, ) } } #[cfg(target_arch = "x86")] fn as_raw(&self) -> NonNull { unsafe { NonNull::new_unchecked(self.offset() as *mut T) } } pub fn dangling() -> Self { // `anchor()` returns a well-aligned pointer so an offset of 0 will also produce a well-aligned pointer unsafe { Self::from_parts(initial_tag_value(), 0) } } pub unsafe fn as_ref(&self) -> &T { &*self.as_raw().as_ptr() } } struct Atomic { inner: AtomicU64, _marker: PhantomData<*mut T>, } impl Atomic { const fn null() -> Self { Self { inner: AtomicU64::new(0), _marker: PhantomData, } } fn compare_and_exchange_weak( &self, current: Option>, new: Option>, succ: Ordering, fail: Ordering, ) -> Result<(), Option>> { self.inner .compare_exchange_weak( current.map(|p| p.into_u64()).unwrap_or(0), new.map(|p| p.into_u64()).unwrap_or(0), succ, fail, ) .map(drop) .map_err(Ptr::from_u64) } fn load(&self, ord: Ordering) -> Option> { NonZeroU64::new(self.inner.load(ord)).map(|inner| Ptr { inner, _marker: PhantomData, }) } fn store(&self, val: Option>, ord: Ordering) { self.inner .store(val.map(|p| p.into_u64()).unwrap_or(0), ord) } } heapless-0.7.16/src/pool/llsc.rs000064400000000000000000000044370072674642500146260ustar 00000000000000//! 
Stack based on LL/SC atomics pub use core::ptr::NonNull as Ptr; use core::{cell::UnsafeCell, ptr}; #[cfg(cas_atomic_polyfill)] use atomic_polyfill::{AtomicPtr, Ordering}; #[cfg(not(cas_atomic_polyfill))] use core::sync::atomic::{AtomicPtr, Ordering}; /// Unfortunate implementation detail required to use the /// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method pub struct Node { next: AtomicPtr>, pub(crate) data: UnsafeCell, } impl Node { fn next(&self) -> &AtomicPtr> { &self.next } } pub struct Stack { head: AtomicPtr>, } impl Stack { pub const fn new() -> Self { Self { head: AtomicPtr::new(ptr::null_mut()), } } pub fn push(&self, new_head: Ptr>) { // NOTE `Ordering`s come from crossbeam's (v0.6.0) `TreiberStack` let mut head = self.head.load(Ordering::Relaxed); loop { unsafe { new_head.as_ref().next().store(head, Ordering::Relaxed) } match self.head.compare_exchange_weak( head, new_head.as_ptr(), Ordering::Release, // success Ordering::Relaxed, // failure ) { Ok(_) => return, // interrupt occurred or other core made a successful STREX op on the head Err(p) => head = p, } } } pub fn try_pop(&self) -> Option>> { // NOTE `Ordering`s come from crossbeam's (v0.6.0) `TreiberStack` loop { let head = self.head.load(Ordering::Acquire); if let Some(nn_head) = Ptr::new(head) { let next = unsafe { nn_head.as_ref().next().load(Ordering::Relaxed) }; match self.head.compare_exchange_weak( head, next, Ordering::Release, // success Ordering::Relaxed, // failure ) { Ok(_) => break Some(nn_head), // interrupt occurred or other core made a successful STREX op on the head Err(_) => continue, } } else { // stack is observed as empty break None; } } } } heapless-0.7.16/src/pool/mod.rs000064400000000000000000000553210072674642500144460ustar 00000000000000//! A heap-less, interrupt-safe, lock-free memory pool (\*) //! //! NOTE: This module is not available on targets that do *not* support CAS operations and are not //! emulated by the [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill) crate (e.g., //! MSP430). //! //! (\*) Currently, the implementation is only lock-free *and* `Sync` on ARMv6, ARMv7-{A,R,M} & ARMv8-M //! devices //! //! # Examples //! //! The most common way of using this pool is as a global singleton; the singleton mode gives you //! automatic deallocation of memory blocks on `drop`. //! //! ``` ignore //! #![no_main] //! #![no_std] //! //! use cortex_m_rt::{entry, exception}; //! use heapless::{ //! pool, //! pool::singleton::{Box, Pool}, //! }; //! //! // instantiate a memory pool of `[u8; 128]` blocks as a global singleton //! pool!( //! // attributes can be used here //! // #[link_section = ".ccram.A"] //! A: [u8; 128] //! ); //! //! #[entry] //! fn main() -> ! { //! static mut MEMORY: [u8; 1024] = [0; 1024]; //! //! // increase the capacity of the pool by ~8 blocks //! A::grow(MEMORY); //! //! // claim a block of memory //! // note that the type is `Box`, and not `Box<[u8; 128]>` //! // `A` is the "name" of the pool //! let x: Box = A::alloc().unwrap(); //! loop { //! // .. do stuff with `x` .. //! } //! } //! //! #[exception] //! fn SysTick() { //! // claim a block of memory //! let y = A::alloc().unwrap(); //! //! // .. do stuff with `y` .. //! //! // return the memory block to the pool //! drop(y); //! } //! ``` //! //! # Portability //! //! This pool internally uses a Treiber stack which is known to be susceptible to the ABA problem. //! The only counter measure against the ABA problem that this implementation currently takes is //! 
relying on LL/SC (Link-local / Store-conditional) instructions being used to implement CAS loops //! on the target architecture (see section on ['Soundness'](#soundness) for more information). For //! this reason, `Pool` only implements `Sync` when compiling for some ARM cores. //! //! This module requires CAS atomic instructions which are not available on all architectures (e.g. //! ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can be emulated //! however with [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill), which is enabled //! with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32` targets. //! MSP430 is currently not supported by //! [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill). //! //! # Soundness //! //! This pool uses a Treiber stack to keep a list of free memory blocks (nodes). Each of these //! nodes has a pointer to the next node. To claim a memory block we simply pop a node from the //! top of the stack and use it as a memory block. The pop operation consists of swapping the //! current head (top) node with the node below it. The Rust code for the `pop` operation is shown //! below: //! //! ``` ignore //! fn pop(&self) -> Option>> { //! let fetch_order = ..; //! let set_order = ..; //! //! // `self.head` has type `AtomicPtr>` //! // where `struct Node { next: AtomicPtr>, data: UnsafeCell }` //! let mut head = self.head.load(fetch_order); //! loop { //! if let Some(nn_head) = NonNull::new(head) { //! let next = unsafe { (*head).next.load(Ordering::Relaxed) }; //! //! // <~ preempted //! //! match self //! .head //! .compare_exchange_weak(head, next, set_order, fetch_order) //! { //! Ok(_) => break Some(nn_head), //! // head was changed by some interrupt handler / thread //! Err(new_head) => head = new_head, //! } //! } else { //! // stack is observed as empty //! break None; //! } //! } //! } //! ``` //! //! In general, the `pop` operation is susceptible to the ABA problem. If this operation gets //! preempted by some interrupt handler somewhere between the `head.load` and the //! `compare_and_exchange_weak`, and that handler modifies the stack in such a way that the head //! (top) of the stack remains unchanged then resuming the `pop` operation will corrupt the stack. //! //! An example: imagine we are doing on `pop` on stack that contains these nodes: `A -> B -> C`, //! `A` is the head (top), `B` is next to `A` and `C` is next to `B`. The `pop` operation will do a //! `CAS(&self.head, A, B)` operation to atomically change the head to `B` iff it currently is `A`. //! Now, let's say a handler preempts the `pop` operation before the `CAS` operation starts and it //! `pop`s the stack twice and then `push`es back the `A` node; now the state of the stack is `A -> //! C`. When the original `pop` operation is resumed it will succeed in doing the `CAS` operation //! setting `B` as the head of the stack. However, `B` was used by the handler as a memory block and //! no longer is a valid free node. As a result the stack, and thus the allocator, is in a invalid //! state. //! //! However, not all is lost because ARM devices use LL/SC (Link-local / Store-conditional) //! operations to implement CAS loops. Let's look at the actual disassembly of `pop` for the ARM //! Cortex-M. //! //! ``` text //! 08000130 <>::pop>: //! 8000130: 6802 ldr r2, [r0, #0] //! 8000132: e00c b.n 800014e <>::pop+0x1e> //! 8000134: 4611 mov r1, r2 //! 8000136: f8d2 c000 ldr.w ip, [r2] //! 
800013a: e850 2f00 ldrex r2, [r0] //! 800013e: 428a cmp r2, r1 //! 8000140: d103 bne.n 800014a <>::pop+0x1a> //! 8000142: e840 c300 strex r3, ip, [r0] //! 8000146: b913 cbnz r3, 800014e <>::pop+0x1e> //! 8000148: e004 b.n 8000154 <>::pop+0x24> //! 800014a: f3bf 8f2f clrex //! 800014e: 2a00 cmp r2, #0 //! 8000150: d1f0 bne.n 8000134 <>::pop+0x4> //! 8000152: 2100 movs r1, #0 //! 8000154: 4608 mov r0, r1 //! 8000156: 4770 bx lr //! ``` //! //! LDREX ("load exclusive") is the LL instruction, and STREX ("store exclusive") is the SC //! instruction (see [1](#references)). On the Cortex-M, STREX will always fail if the processor //! takes an exception between it and its corresponding LDREX operation (see [2](#references)). If //! STREX fails then the CAS loop is retried (see instruction @ `0x8000146`). On single core //! systems, preemption is required to run into the ABA problem and on Cortex-M devices preemption //! always involves taking an exception. Thus the underlying LL/SC operations prevent the ABA //! problem on Cortex-M. //! //! In the case of multi-core systems if any other core successfully does a STREX op on the head //! while the current core is somewhere between LDREX and STREX then the current core will fail its //! STREX operation. //! //! # x86_64 support / limitations //! //! *NOTE* `Pool` is only `Sync` on `x86_64` and `x86` (`i686`) if the Cargo feature "x86-sync-pool" //! is enabled //! //! x86_64 support is a gamble. Yes, a gamble. Do you feel lucky enough to use `Pool` on x86_64? //! //! As it's not possible to implement *ideal* LL/SC semantics (\*) on x86_64 the architecture is //! susceptible to the ABA problem described above. To *reduce the chances* of ABA occurring in //! practice we use version tags (keyword: IBM ABA-prevention tags). Again, this approach does //! *not* fix / prevent / avoid the ABA problem; it only reduces the chance of it occurring in //! practice but the chances of it occurring are not reduced to zero. //! //! How we have implemented version tags: instead of using an `AtomicPtr` to link the stack `Node`s //! we use an `AtomicUsize` where the 64-bit `usize` is always comprised of a monotonically //! increasing 32-bit tag (higher bits) and a 32-bit signed address offset. The address of a node is //! computed by adding the 32-bit offset to an "anchor" address (the address of a static variable //! that lives somewhere in the `.bss` linker section). The tag is increased every time a node is //! popped (removed) from the stack. //! //! To see how version tags can prevent ABA consider the example from the previous section. Let's //! start with a stack in this state: `(~A, 0) -> (~B, 1) -> (~C, 2)`, where `~A` represents the //! address of node A as a 32-bit offset from the "anchor" and the second tuple element (e.g. `0`) //! indicates the version of the node. For simplicity, assume a single core system: thread T1 is //! performing `pop` and before `CAS(&self.head, (~A, 0), (~B, 1))` is executed a context switch //! occurs and the core resumes T2. T2 pops the stack twice and pushes A back into the stack; //! because the `pop` operation increases the version the stack ends in the following state: `(~A, //! 1) -> (~C, 2)`. Now if T1 is resumed the CAS operation will fail because `self.head` is `(~A, //! 1)` and not `(~A, 0)`. //! //! When can version tags fail to prevent ABA? Using the previous example: if T2 performs a `push` //! followed by a `pop` `(1 << 32) - 1` times before doing its original `pop` - `pop` - `push` //! 
//!
//! To see how version tags can prevent ABA, consider the example from the previous section. Let's
//! start with a stack in this state: `(~A, 0) -> (~B, 1) -> (~C, 2)`, where `~A` represents the
//! address of node A as a 32-bit offset from the "anchor" and the second tuple element (e.g. `0`)
//! indicates the version of the node. For simplicity, assume a single core system: thread T1 is
//! performing `pop` and before `CAS(&self.head, (~A, 0), (~B, 1))` is executed a context switch
//! occurs and the core resumes T2. T2 pops the stack twice and pushes A back into the stack;
//! because the `pop` operation increases the version, the stack ends in the following state:
//! `(~A, 1) -> (~C, 2)`. Now if T1 is resumed the CAS operation will fail because `self.head` is
//! `(~A, 1)` and not `(~A, 0)`.
//!
//! When can version tags fail to prevent ABA? Using the previous example: if T2 performs a `push`
//! followed by a `pop` `(1 << 32) - 1` times before doing its original `pop` - `pop` - `push`
//! operation, then ABA will occur because the version tag of node `A` will wrap around to its
//! original value of `0` and the CAS operation in T1 will succeed and corrupt the stack.
//!
//! It does seem unlikely that (1) a thread will perform the above operation and (2) that the above
//! operation will complete within one time slice, assuming time sliced threads. If you have thread
//! priorities then the above operation could occur during the lifetime of many high-priority
//! threads if T1 is running at low priority.
//!
//! Other implementations of version tags use more than 32 bits in their tags (e.g. "Scalable
//! Lock-Free Dynamic Memory Allocation" uses 42-bit tags in its super blocks). In theory, one could
//! use double-word CAS on x86_64 to pack a 64-bit tag and a 64-bit pointer in a double-word, but
//! this CAS operation is not exposed in the standard library (and I think it's not available on
//! older x86_64 processors?)
//!
//! (\*) Apparently one can emulate proper LL/SC semantics on x86_64 using hazard pointers (?) --
//! the technique appears to be documented in "ABA Prevention Using Single-Word Instructions", which
//! is not public AFAICT -- but hazard pointers require Thread Local Storage (TLS), which is a
//! non-starter for a `no_std` library like `heapless`.
//!
//! ## x86_64 Limitations
//!
//! *NOTE* this limitation does not apply to `x86` (32-bit address space). If you run into this
//! issue on an x86_64 processor, try running your code compiled for `x86`, e.g.
//! `cargo run --target i686-unknown-linux-musl`.
//!
//! Because stack nodes must be located within +- 2 GB of the hidden `ANCHOR` variable, which
//! lives in the `.bss` section, `Pool` may not be able to manage static references created using
//! `Box::leak` -- these heap allocated chunks of memory may live in a very different address space.
//! When the `Pool` is unable to manage a node because of its address it will simply discard it:
//! the `Pool::grow*` methods return the number of new memory blocks added to the pool; if these
//! methods return `0` it means the `Pool` is unable to manage the memory given to them.
//!
//! # References
//!
//! 1. [Cortex-M3 Devices Generic User Guide (DUI 0552A)][0], Section 2.2.7 "Synchronization
//!    primitives"
//!
//! [0]: http://infocenter.arm.com/help/topic/com.arm.doc.dui0552a/DUI0552A_cortex_m3_dgug.pdf
//!
//! 2. [ARMv7-M Architecture Reference Manual (DDI 0403E.b)][1], Section A3.4 "Synchronization and
//!    semaphores"
//!
//! [1]: https://static.docs.arm.com/ddi0403/eb/DDI0403E_B_armv7m_arm.pdf
//!
//! 3. "Scalable Lock-Free Dynamic Memory Allocation", Michael, Maged M.
//!
//! 4. "Hazard pointers: Safe memory reclamation for lock-free objects.", Michael, Maged M.
use core::{any::TypeId, mem}; use core::{ cmp, fmt, hash::{Hash, Hasher}, marker::PhantomData, mem::MaybeUninit, ops::{Deref, DerefMut}, ptr::{self, NonNull}, }; pub use stack::Node; use stack::{Ptr, Stack}; pub mod singleton; #[cfg_attr(any(target_arch = "x86_64", target_arch = "x86"), path = "cas.rs")] #[cfg_attr( not(any(target_arch = "x86_64", target_arch = "x86")), path = "llsc.rs" )] mod stack; /// A lock-free memory pool pub struct Pool { stack: Stack, // Current implementation is unsound on architectures that don't have LL/SC semantics so this // struct is not `Sync` on those platforms _not_send_or_sync: PhantomData<*const ()>, } // NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing) #[cfg(any( armv6m, armv7a, armv7r, armv7m, armv8m_main, all( any(target_arch = "x86_64", target_arch = "x86"), feature = "x86-sync-pool" ), test ))] unsafe impl Sync for Pool {} unsafe impl Send for Pool {} impl Pool { /// Creates a new empty pool pub const fn new() -> Self { Pool { stack: Stack::new(), _not_send_or_sync: PhantomData, } } /// Claims a memory block from the pool /// /// Returns `None` when the pool is observed as exhausted /// /// *NOTE:* This method does *not* have bounded execution time because it contains a CAS loop pub fn alloc(&self) -> Option> { if mem::size_of::() == 0 { // NOTE because we return a dangling pointer to a NODE, which has non-zero size // even when T is a ZST, in this case we need to make sure we // - don't do pointer arithmetic on this pointer // - dereference that offset-ed pointer as a ZST // because miri doesn't like that return Some(Box { node: Ptr::dangling(), _state: PhantomData, }); } if let Some(node) = self.stack.try_pop() { Some(Box { node, _state: PhantomData, }) } else { None } } /// Returns a memory block to the pool /// /// *NOTE*: `T`'s destructor (if any) will run on `value` iff `S = Init` /// /// *NOTE:* This method does *not* have bounded execution time because it contains a CAS loop pub fn free(&self, value: Box) where S: 'static, { if TypeId::of::() == TypeId::of::() { let p = if mem::size_of::() == 0 { // any pointer will do to invoke the destructor of a ZST NonNull::dangling().as_ptr() } else { unsafe { value.node.as_ref().data.get() } }; unsafe { ptr::drop_in_place(p); } } // no operation if mem::size_of::() == 0 { return; } self.stack.push(value.node) } /// Increases the capacity of the pool /// /// This method might *not* fully utilize the given memory block due to alignment requirements. /// /// This method returns the number of *new* blocks that can be allocated. 
pub fn grow(&self, memory: &'static mut [u8]) -> usize { if mem::size_of::() == 0 { // ZST use no memory so a pool of ZST always has maximum capacity return usize::max_value(); } let sz = mem::size_of::>(); let mut p = memory.as_mut_ptr(); let mut len = memory.len(); let align = mem::align_of::>(); let rem = (p as usize) % align; if rem != 0 { let offset = align - rem; if offset >= len { // slice is too small return 0; } p = unsafe { p.add(offset) }; len -= offset; } let mut n = 0; while len >= sz { match () { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] () => { if let Some(p) = Ptr::new(p as *mut _) { self.stack.push(p); n += 1; } } #[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))] () => { self.stack.push(unsafe { Ptr::new_unchecked(p as *mut _) }); n += 1; } } p = unsafe { p.add(sz) }; len -= sz; } n } /// Increases the capacity of the pool /// /// Unlike [`Pool.grow`](struct.Pool.html#method.grow) this method fully utilizes the given /// memory block pub fn grow_exact(&self, memory: &'static mut MaybeUninit) -> usize where A: AsMut<[Node]>, { if mem::size_of::() == 0 { return usize::max_value(); } let nodes = unsafe { (*memory.as_mut_ptr()).as_mut() }; let cap = nodes.len(); for p in nodes { match () { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] () => { if let Some(p) = Ptr::new(p) { self.stack.push(p); } } #[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))] () => self.stack.push(core::ptr::NonNull::from(p)), } } cap } } /// A memory block pub struct Box { _state: PhantomData, node: Ptr>, } impl Box { /// Initializes this memory block pub fn init(self, val: T) -> Box { if mem::size_of::() == 0 { // no memory operation needed for ZST // BUT we want to avoid calling `val`s destructor mem::forget(val) } else { unsafe { ptr::write(self.node.as_ref().data.get(), val); } } Box { node: self.node, _state: PhantomData, } } } /// Uninitialized type state pub enum Uninit {} /// Initialized type state pub enum Init {} unsafe impl Send for Box where T: Send {} unsafe impl Sync for Box where T: Sync {} unsafe impl stable_deref_trait::StableDeref for Box {} impl AsRef<[T]> for Box where A: AsRef<[T]>, { fn as_ref(&self) -> &[T] { self.deref().as_ref() } } impl AsMut<[T]> for Box where A: AsMut<[T]>, { fn as_mut(&mut self) -> &mut [T] { self.deref_mut().as_mut() } } impl Deref for Box { type Target = T; fn deref(&self) -> &T { if mem::size_of::() == 0 { // any pointer will do for ZST unsafe { &*NonNull::dangling().as_ptr() } } else { unsafe { &*self.node.as_ref().data.get() } } } } impl DerefMut for Box { fn deref_mut(&mut self) -> &mut T { if mem::size_of::() == 0 { // any pointer will do for ZST unsafe { &mut *NonNull::dangling().as_ptr() } } else { unsafe { &mut *self.node.as_ref().data.get() } } } } impl fmt::Debug for Box where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl fmt::Display for Box where T: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl PartialEq for Box where T: PartialEq, { fn eq(&self, rhs: &Box) -> bool { ::eq(self, rhs) } } impl Eq for Box where T: Eq {} impl PartialOrd for Box where T: PartialOrd, { fn partial_cmp(&self, rhs: &Box) -> Option { ::partial_cmp(self, rhs) } } impl Ord for Box where T: Ord, { fn cmp(&self, rhs: &Box) -> cmp::Ordering { ::cmp(self, rhs) } } impl Hash for Box where T: Hash, { fn hash(&self, state: &mut H) where H: Hasher, { ::hash(self, state) } } #[cfg(test)] mod tests { use core::{ mem::{self, 
MaybeUninit}, sync::atomic::{AtomicUsize, Ordering}, }; use super::{Node, Pool}; #[test] fn grow() { static mut MEMORY: [u8; 1024] = [0; 1024]; static POOL: Pool<[u8; 128]> = Pool::new(); unsafe { POOL.grow(&mut MEMORY); } for _ in 0..7 { assert!(POOL.alloc().is_some()); } } #[test] fn grow_exact() { const SZ: usize = 8; static mut MEMORY: MaybeUninit<[Node<[u8; 128]>; SZ]> = MaybeUninit::uninit(); static POOL: Pool<[u8; 128]> = Pool::new(); unsafe { POOL.grow_exact(&mut MEMORY); } for _ in 0..SZ { assert!(POOL.alloc().is_some()); } assert!(POOL.alloc().is_none()); } #[test] fn sanity() { const SZ: usize = 2 * mem::size_of::>() - 1; static mut MEMORY: [u8; SZ] = [0; SZ]; static POOL: Pool = Pool::new(); // empty pool assert!(POOL.alloc().is_none()); POOL.grow(unsafe { &mut MEMORY }); let x = POOL.alloc().unwrap().init(0); assert_eq!(*x, 0); // pool exhausted assert!(POOL.alloc().is_none()); POOL.free(x); // should be possible to allocate again assert_eq!(*POOL.alloc().unwrap().init(1), 1); } #[test] fn destructors() { static COUNT: AtomicUsize = AtomicUsize::new(0); struct X; impl X { fn new() -> X { COUNT.fetch_add(1, Ordering::Relaxed); X } } impl Drop for X { fn drop(&mut self) { COUNT.fetch_sub(1, Ordering::Relaxed); } } static mut MEMORY: [u8; 31] = [0; 31]; static POOL: Pool = Pool::new(); POOL.grow(unsafe { &mut MEMORY }); let x = POOL.alloc().unwrap().init(X::new()); let y = POOL.alloc().unwrap().init(X::new()); let z = POOL.alloc().unwrap().init(X::new()); assert_eq!(COUNT.load(Ordering::Relaxed), 3); // this leaks memory drop(x); assert_eq!(COUNT.load(Ordering::Relaxed), 3); // this leaks memory mem::forget(y); assert_eq!(COUNT.load(Ordering::Relaxed), 3); // this runs `X` destructor POOL.free(z); assert_eq!(COUNT.load(Ordering::Relaxed), 2); } } heapless-0.7.16/src/pool/singleton/arc.rs000064400000000000000000000223310072674642500164310ustar 00000000000000//! Like [`std::sync::Arc`](https://doc.rust-lang.org/std/sync/struct.Arc.html) but backed by a //! memory [`Pool`](trait.Pool.html) rather than `#[global_allocator]` //! //! Note that the same limitations that apply to ["Box" pool] also apply to the "Arc" pool. //! //! ["Box" pool]: ../../index.html //! //! # Examples //! //! ``` ignore //! use heapless::{arc_pool, Arc}; //! //! pub struct BigStruct { // <- does NOT implement Clone //! data: [u8; 128], //! // .. //! } //! //! // declare a memory pool //! arc_pool!(P: BigStruct); //! //! //! #[cortex_m_rt::entry] //! fn main() -> ! { //! static mut MEMORY: [u8; 1024] = [0; 1024]; //! //! // give some static memory to the pool //! P::grow(MEMORY); //! //! let x: Arc
<P>
= P::alloc(BigStruct::new()).ok().expect("OOM"); //! // ^ NOTE: this is the Pool type, not the data type //! //! // cloning is cheap; it increases the refcount //! let y = x.clone(); //! //! // same data address //! assert_eq!(&*x as *const _, &*y as *const _); //! //! // auto-deref //! let data: &[u8] = &x.data; //! //! // decrease refcount //! drop(x); //! //! // refcount decreased to 0; memory is returned to the pool //! drop(y); //! //! // .. //! } //! ``` //! //! The `grow_exact` API is also available on the "Arc pool". It requires using //! `Node>` as the array element type. Example below: //! //! ``` ignore //! use heapless::pool::{singleton::arc::ArcInner, Node}; //! //! pub struct BigStruct { /* .. */ } //! //! arc_pool!(P: BigStruct); //! //! #[cortex_m_rt::entry] //! fn main() -> ! { //! static mut MEMORY: MaybeUninit<[Node>; 2]> = MaybeUninit::uninit(); //! //! P::grow_exact(MEMORY); //! //! // 2 allocations are guaranteed to work //! let x = P::alloc(BigStruct::new()).ok().expect("OOM"); //! let y = P::alloc(BigStruct::new()).ok().expect("OOM"); //! //! // .. //! } //! ``` use core::{ cmp, fmt, hash::{Hash, Hasher}, marker::PhantomData, ops::Deref, ptr, sync::atomic, }; #[cfg(cas_atomic_polyfill)] use atomic_polyfill::{AtomicUsize, Ordering}; #[cfg(not(cas_atomic_polyfill))] use core::sync::atomic::{AtomicUsize, Ordering}; use crate::pool::{self, stack::Ptr, Node}; /// Instantiates a pool of Arc pointers as a global singleton // NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing) #[cfg(any( armv6m, armv7a, armv7r, armv7m, armv8m_main, all( any(target_arch = "x86_64", target_arch = "x86"), feature = "x86-sync-pool" ), test ))] #[macro_export] macro_rules! arc_pool { ($(#[$($attr:tt)*])* $ident:ident: $ty:ty) => { pub struct $ident; impl $crate::pool::singleton::arc::Pool for $ident { type Data = $ty; fn ptr() -> &'static $crate::pool::Pool<$crate::pool::singleton::arc::ArcInner<$ty>> { $(#[$($attr)*])* static POOL: $crate::pool::Pool<$crate::pool::singleton::arc::ArcInner<$ty>> = $crate::pool::Pool::new(); &POOL } } impl $ident { /// Allocates a new `Arc` and writes `data` to it /// /// Returns an `Err`or if the backing memory pool is empty pub fn alloc(data: $ty) -> Result<$crate::Arc, $ty> where Self: Sized, { $crate::Arc::new(data) } /// Increases the capacity of the pool /// /// This method might *not* fully utilize the given memory block due to alignment requirements /// /// This method returns the number of *new* blocks that can be allocated. pub fn grow(memory: &'static mut [u8]) -> usize { ::ptr().grow(memory) } /// Increases the capacity of the pool /// /// Unlike `grow`, this method fully utilizes the given memory block pub fn grow_exact(memory: &'static mut MaybeUninit) -> usize where A: AsMut<[$crate::pool::Node<$crate::pool::singleton::arc::ArcInner<$ty>>]>, { ::ptr().grow_exact(memory) } } }; } /// Pool of Arc pointers pub trait Pool { /// The data behind the Arc pointer type Data: 'static; #[doc(hidden)] fn ptr() -> &'static pool::Pool>; } // mostly a verbatim copy of liballoc(/src/sync.rs) as of v1.54.0 minus the `Weak` API // anything that diverges has been marked with `XXX` /// `std::sync::Arc` but backed by a memory [`Pool`] rather than `#[global_allocator]` /// /// [`Pool`]: trait.Pool.html /// /// An example and more details can be found in the [module level documentation](index.html). // XXX `Pool::Data` is not `?Sized` -- `Unsize` coercions cannot be implemented on stable pub struct Arc
<P> where P: Pool, { phantom: PhantomData<ArcInner<P::Data>>, ptr: Ptr<Node<ArcInner<P::Data>>>, pool: PhantomData<P>, } impl<P> Arc<P>
where P: Pool, { /// Constructs a new `Arc` /// /// Returns an `Err`or if the backing memory pool is empty // XXX original API is "infallible" pub fn new(data: P::Data) -> Result { if let Some(node) = P::ptr().stack.try_pop() { unsafe { ptr::write( node.as_ref().data.get(), ArcInner { strong: AtomicUsize::new(1), data, }, ) } Ok(Self { phantom: PhantomData, pool: PhantomData, ptr: node, }) } else { Err(data) } } fn inner(&self) -> &ArcInner { unsafe { &*self.ptr.as_ref().data.get() } } fn from_inner(ptr: Ptr>>) -> Self { Self { phantom: PhantomData, pool: PhantomData, ptr, } } unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data { &mut (*this.ptr.as_ref().data.get()).data // &mut (*this.ptr.as_ptr()).data } #[inline(never)] unsafe fn drop_slow(&mut self) { // run `P::Data`'s destructor ptr::drop_in_place(Self::get_mut_unchecked(self)); // XXX memory pool instead of `#[global_allocator]` // return memory to pool P::ptr().stack.push(self.ptr); } } const MAX_REFCOUNT: usize = (isize::MAX) as usize; impl
<P> AsRef<P::Data> for Arc<P>
where P: Pool, { fn as_ref(&self) -> &P::Data { &**self } } // XXX no `Borrow` implementation due to 'conflicting implementations of trait' error impl
<P> Clone for Arc<P>
where P: Pool, { fn clone(&self) -> Self { let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed); if old_size > MAX_REFCOUNT { // XXX original code calls `intrinsics::abort` which is unstable API panic!(); } Self::from_inner(self.ptr) } } impl
<P> fmt::Debug for Arc<P>
where P: Pool, P::Data: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl
<P> Deref for Arc<P>
where P: Pool, { type Target = P::Data; fn deref(&self) -> &P::Data { &self.inner().data } } impl
<P> fmt::Display for Arc<P>
where P: Pool, P::Data: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } // XXX original uses `#[may_dangle]` which is an unstable language feature impl
<P> Drop for Arc<P>
where P: Pool, { fn drop(&mut self) { if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 { return; } atomic::fence(Ordering::Acquire); unsafe { self.drop_slow(); } } } impl
<P> Eq for Arc<P>
where P: Pool, P::Data: Eq, { } impl
<P> Hash for Arc<P>
where P: Pool, P::Data: Hash, { fn hash(&self, state: &mut H) where H: Hasher, { (**self).hash(state) } } impl
<P> Ord for Arc<P>
where P: Pool, P::Data: Ord, { fn cmp(&self, other: &Self) -> cmp::Ordering { (**self).cmp(&**other) } } impl
<P> PartialEq for Arc<P>
where P: Pool, P::Data: PartialEq, { fn eq(&self, other: &Self) -> bool { // XXX missing pointer equality specialization, which uses an unstable language feature (**self).eq(&**other) } } impl
<P> PartialOrd for Arc<P>
where P: Pool, P::Data: PartialOrd, { fn partial_cmp(&self, other: &Self) -> Option { (**self).partial_cmp(&**other) } } unsafe impl
<P> Send for Arc<P>
where P: Pool, P::Data: Sync + Send, { } unsafe impl
<P> Sync for Arc<P>
where P: Pool, P::Data: Sync + Send, { } impl
<P> Unpin for Arc<P>
where P: Pool {} /// Unfortunate implementation detail required to use the `grow_exact` API pub struct ArcInner { data: T, strong: AtomicUsize, // XXX `Weak` API not implemented // weak: AtomicUsize, } heapless-0.7.16/src/pool/singleton.rs000064400000000000000000000227400072674642500156700ustar 00000000000000//! `Pool` as a global singleton use core::{ any::TypeId, cmp, fmt, hash::{Hash, Hasher}, marker::PhantomData, mem::{self, MaybeUninit}, ops::{Deref, DerefMut}, ptr::{self, NonNull}, }; use super::{Init, Node, Uninit}; pub mod arc; /// Instantiates a pool as a global singleton // NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing) #[cfg(any( armv6m, armv7a, armv7r, armv7m, armv8m_main, all( any(target_arch = "x86_64", target_arch = "x86"), feature = "x86-sync-pool" ), test ))] #[macro_export] macro_rules! pool { ($(#[$($attr:tt)*])* $ident:ident: $ty:ty) => { pub struct $ident; impl $crate::pool::singleton::Pool for $ident { type Data = $ty; fn ptr() -> &'static $crate::pool::Pool<$ty> { $(#[$($attr)*])* static $ident: $crate::pool::Pool<$ty> = $crate::pool::Pool::new(); &$ident } } }; } /// A global singleton memory pool pub trait Pool { /// The type of data that can be allocated on this pool type Data: 'static; #[doc(hidden)] fn ptr() -> &'static super::Pool; /// Claims a memory block from the pool /// /// Returns `None` when the pool is observed as exhausted /// /// *NOTE:* This method does *not* have bounded execution time; i.e. it contains a CAS loop fn alloc() -> Option> where Self: Sized, { Self::ptr().alloc().map(|inner| Box { _pool: PhantomData, inner, }) } /// Increases the capacity of the pool /// /// This method might *not* fully utilize the given memory block due to alignment requirements /// /// This method returns the number of *new* blocks that can be allocated. fn grow(memory: &'static mut [u8]) -> usize { Self::ptr().grow(memory) } /// Increases the capacity of the pool /// /// Unlike [`Pool.grow`](trait.Pool.html#method.grow_exact) this method fully utilizes the given /// memory block fn grow_exact(memory: &'static mut MaybeUninit) -> usize where A: AsMut<[Node]>, { Self::ptr().grow_exact(memory) } } /// A memory block that belongs to the global memory pool, `POOL` pub struct Box where POOL: Pool, STATE: 'static, { _pool: PhantomData, inner: super::Box, } impl
<P>
Box where P: Pool, { /// Initializes this memory block pub fn init(self, val: P::Data) -> Box { let node = self.inner.node; mem::forget(self); if mem::size_of::() == 0 { // no memory operation needed for ZST // BUT we want to avoid calling `val`s destructor mem::forget(val) } else { unsafe { ptr::write(node.as_ref().data.get(), val); } } Box { inner: super::Box { node, _state: PhantomData, }, _pool: PhantomData, } } } impl
<P>
Box where P: Pool, P::Data: AsRef<[u8]>, { #[deprecated( since = "0.7.3", note = "This can access uninitialized memory, use `init(..)` instead (https://github.com/japaric/heapless/issues/212)" )] /// (DO NOT USE, SEE DEPRECATION) Freezes the contents of this memory block /// /// See [rust-lang/rust#58363](https://github.com/rust-lang/rust/pull/58363) for details. pub fn freeze(self) -> Box { let node = self.inner.node; mem::forget(self); // it seems we can get away with not calling `ptr::freeze` here and not run into UB // because we are dealing with static memory and using fences // let p: *const u8 = (*node.as_ref().data.get()).as_slice().as_ptr(); // ptr::freeze(p as *mut u8); Box { inner: super::Box { node, _state: PhantomData, }, _pool: PhantomData, } } } impl
<P>
Box where P: Pool, { /// Forgets the contents of this memory block without running its destructor. /// /// Note that this this does not return the memory block to the pool. The /// block can be reused, or returned to the pool by dropping it. pub fn forget(self) -> Box { let node = self.inner.node; mem::forget(self); if mem::size_of::() == 0 { // no need to do a pointer dereference in this case } else { mem::forget(unsafe { ptr::read(node.as_ref().data.get()) }); } Box { inner: super::Box { node, _state: PhantomData, }, _pool: PhantomData, } } } impl
<P> Deref for Box<P, Init>
where P: Pool, { type Target = P::Data; fn deref(&self) -> &P::Data { self.inner.deref() } } impl
<P> DerefMut for Box<P, Init>
where P: Pool, { fn deref_mut(&mut self) -> &mut P::Data { self.inner.deref_mut() } } unsafe impl stable_deref_trait::StableDeref for Box
<P, Init>
{} impl
<P> fmt::Debug for Box<P, Init>
where P: Pool, P::Data: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl
<P> fmt::Display for Box<P, Init>
where P: Pool, P::Data: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl Drop for Box where P: Pool, S: 'static, { fn drop(&mut self) { if TypeId::of::() == TypeId::of::() { let p = if mem::size_of::() == 0 { // any pointer will do to invoke the destructor of a ZST NonNull::dangling().as_ptr() } else { unsafe { self.inner.node.as_ref().data.get() } }; unsafe { ptr::drop_in_place(p); } } if mem::size_of::() != 0 { P::ptr().stack.push(self.inner.node) } } } unsafe impl Send for Box where P: Pool, P::Data: Send, { } unsafe impl Sync for Box where P: Pool, P::Data: Sync, { } impl AsRef<[T]> for Box
<P, Init>
where P: Pool, P::Data: AsRef<[T]>, { fn as_ref(&self) -> &[T] { self.deref().as_ref() } } impl AsMut<[T]> for Box
<P, Init>
where P: Pool, P::Data: AsMut<[T]>, { fn as_mut(&mut self) -> &mut [T] { self.deref_mut().as_mut() } } impl
<P> PartialEq for Box<P, Init>
where P: Pool, P::Data: PartialEq, { fn eq(&self, rhs: &Box
<P, Init>) -> bool { <P::Data as PartialEq>::eq(self, rhs) } } impl
<P> Eq for Box<P, Init>
where P: Pool, P::Data: Eq, { } impl
<P> PartialOrd for Box<P, Init>
where P: Pool, P::Data: PartialOrd, { fn partial_cmp(&self, rhs: &Box
<P, Init>) -> Option<cmp::Ordering> { <P::Data as PartialOrd>::partial_cmp(self, rhs) } } impl
<P> Ord for Box<P, Init>
where P: Pool, P::Data: Ord, { fn cmp(&self, rhs: &Box
<P, Init>) -> cmp::Ordering { <P::Data as Ord>::cmp(self, rhs) } } impl
<P> Hash for Box<P, Init>
where P: Pool, P::Data: Hash, { fn hash(&self, state: &mut H) where H: Hasher, { ::hash(self, state) } } #[cfg(test)] mod tests { use core::{ mem, sync::atomic::{AtomicUsize, Ordering}, }; use super::{super::Node, Pool}; #[test] fn sanity() { const SZ: usize = 2 * mem::size_of::>() - 1; static mut MEMORY: [u8; SZ] = [0; SZ]; pool!(A: u8); // empty pool assert!(A::alloc().is_none()); A::grow(unsafe { &mut MEMORY }); let x = A::alloc().unwrap().init(0); assert_eq!(*x, 0); // pool exhausted assert!(A::alloc().is_none()); drop(x); // should be possible to allocate again assert_eq!(*A::alloc().unwrap().init(1), 1); } #[test] fn boxed_zst_is_well_aligned() { #[repr(align(2))] pub struct Zst2; pool!(A: Zst2); let x = A::alloc().unwrap().init(Zst2); assert_eq!(0, &*x as *const Zst2 as usize % 2); #[repr(align(4096))] pub struct Zst4096; pool!(B: Zst4096); let x = B::alloc().unwrap().init(Zst4096); assert_eq!(0, &*x as *const Zst4096 as usize % 4096); } #[test] fn destructors() { static COUNT: AtomicUsize = AtomicUsize::new(0); pub struct X; impl X { fn new() -> X { COUNT.fetch_add(1, Ordering::Relaxed); X } } impl Drop for X { fn drop(&mut self) { COUNT.fetch_sub(1, Ordering::Relaxed); } } pool!(A: X); let x = A::alloc().unwrap().init(X::new()); let y = A::alloc().unwrap().init(X::new()); let z = A::alloc().unwrap().init(X::new()); assert_eq!(COUNT.load(Ordering::Relaxed), 3); // this runs `X`'s destructor drop(x); assert_eq!(COUNT.load(Ordering::Relaxed), 2); // this leaks memory mem::forget(y); assert_eq!(COUNT.load(Ordering::Relaxed), 2); // this forgets `X` without leaking memory z.forget(); assert_eq!(COUNT.load(Ordering::Relaxed), 2); } } heapless-0.7.16/src/sealed.rs000064400000000000000000000025630072674642500141530ustar 00000000000000#[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn smaller_than() { Assert::::LESS; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn greater_than_eq_0() { Assert::::GREATER_EQ; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn greater_than_0() { Assert::::GREATER; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn greater_than_1() { Assert::::GREATER; } #[allow(dead_code)] #[allow(path_statements)] pub(crate) const fn power_of_two() { Assert::::GREATER; Assert::::POWER_OF_TWO; } #[allow(dead_code)] /// Const assert hack pub struct Assert; #[allow(dead_code)] impl Assert { /// Const assert hack pub const GREATER_EQ: usize = L - R; /// Const assert hack pub const LESS_EQ: usize = R - L; /// Const assert hack pub const NOT_EQ: isize = 0 / (R as isize - L as isize); /// Const assert hack pub const EQ: usize = (R - L) + (L - R); /// Const assert hack pub const GREATER: usize = L - R - 1; /// Const assert hack pub const LESS: usize = R - L - 1; /// Const assert hack pub const POWER_OF_TWO: usize = 0 - (L & (L - 1)); } heapless-0.7.16/src/ser.rs000064400000000000000000000047100072674642500135030ustar 00000000000000use crate::{ binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec, }; use hash32::{BuildHasher, Hash}; use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; // Sequential containers impl Serialize for BinaryHeap where T: Ord + Serialize, KIND: BinaryHeapKind, { fn serialize(&self, serializer: S) -> Result where S: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } impl Serialize for IndexSet where T: Eq + Hash + Serialize, S: BuildHasher, { fn 
serialize(&self, serializer: SER) -> Result where SER: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } impl Serialize for Vec where T: Serialize, { fn serialize(&self, serializer: S) -> Result where S: Serializer, { let mut seq = serializer.serialize_seq(Some(self.len()))?; for element in self { seq.serialize_element(element)?; } seq.end() } } // Dictionaries impl Serialize for IndexMap where K: Eq + Hash + Serialize, S: BuildHasher, V: Serialize, { fn serialize(&self, serializer: SER) -> Result where SER: Serializer, { let mut map = serializer.serialize_map(Some(self.len()))?; for (k, v) in self { map.serialize_entry(k, v)?; } map.end() } } impl Serialize for LinearMap where K: Eq + Serialize, V: Serialize, { fn serialize(&self, serializer: SER) -> Result where SER: Serializer, { let mut map = serializer.serialize_map(Some(self.len()))?; for (k, v) in self { map.serialize_entry(k, v)?; } map.end() } } // String containers impl Serialize for String { fn serialize(&self, serializer: S) -> Result where S: Serializer, { serializer.serialize_str(&*self) } } heapless-0.7.16/src/sorted_linked_list.rs000064400000000000000000000577240072674642500166100ustar 00000000000000//! A fixed sorted priority linked list, similar to [`BinaryHeap`] but with different properties //! on `push`, `pop`, etc. //! For example, the sorting of the list will never `memcpy` the underlying value, so having large //! objects in the list will not cause a performance hit. //! //! # Examples //! //! ``` //! use heapless::sorted_linked_list::{SortedLinkedList, Max}; //! let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); //! //! // The largest value will always be first //! ll.push(1).unwrap(); //! assert_eq!(ll.peek(), Some(&1)); //! //! ll.push(2).unwrap(); //! assert_eq!(ll.peek(), Some(&2)); //! //! ll.push(3).unwrap(); //! assert_eq!(ll.peek(), Some(&3)); //! //! // This will not fit in the queue. //! assert_eq!(ll.push(4), Err(4)); //! ``` //! //! [`BinaryHeap`]: `crate::binary_heap::BinaryHeap` use core::cmp::Ordering; use core::fmt; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::ops::{Deref, DerefMut}; use core::ptr; /// Trait for defining an index for the linked list, never implemented by users. pub trait SortedLinkedListIndex: Copy { #[doc(hidden)] unsafe fn new_unchecked(val: usize) -> Self; #[doc(hidden)] unsafe fn get_unchecked(self) -> usize; #[doc(hidden)] fn option(self) -> Option; #[doc(hidden)] fn none() -> Self; } /// Marker for Min sorted [`SortedLinkedList`]. pub struct Min; /// Marker for Max sorted [`SortedLinkedList`]. pub struct Max; /// The linked list kind: min-list or max-list pub trait Kind: private::Sealed { #[doc(hidden)] fn ordering() -> Ordering; } impl Kind for Min { fn ordering() -> Ordering { Ordering::Less } } impl Kind for Max { fn ordering() -> Ordering { Ordering::Greater } } /// Sealed traits mod private { pub trait Sealed {} } impl private::Sealed for Max {} impl private::Sealed for Min {} /// A node in the [`SortedLinkedList`]. pub struct Node { val: MaybeUninit, next: Idx, } /// The linked list. pub struct SortedLinkedList where Idx: SortedLinkedListIndex, { list: [Node; N], head: Idx, free: Idx, _kind: PhantomData, } // Internal macro for generating indexes for the linkedlist and const new for the linked list macro_rules! 
impl_index_and_const_new { ($name:ident, $ty:ty, $new_name:ident, $max_val:expr) => { /// Index for the [`SortedLinkedList`] with specific backing storage. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct $name($ty); impl SortedLinkedListIndex for $name { #[inline(always)] unsafe fn new_unchecked(val: usize) -> Self { Self::new_unchecked(val as $ty) } /// This is only valid if `self.option()` is not `None`. #[inline(always)] unsafe fn get_unchecked(self) -> usize { self.0 as usize } #[inline(always)] fn option(self) -> Option { if self.0 == <$ty>::MAX { None } else { Some(self.0 as usize) } } #[inline(always)] fn none() -> Self { Self::none() } } impl $name { /// Needed for a `const fn new()`. #[inline] const unsafe fn new_unchecked(value: $ty) -> Self { $name(value) } /// Needed for a `const fn new()`. #[inline] const fn none() -> Self { $name(<$ty>::MAX) } } impl SortedLinkedList { const UNINIT: Node = Node { val: MaybeUninit::uninit(), next: $name::none(), }; /// Create a new linked list. pub const fn $new_name() -> Self { // Const assert N < MAX crate::sealed::smaller_than::(); let mut list = SortedLinkedList { list: [Self::UNINIT; N], head: $name::none(), free: unsafe { $name::new_unchecked(0) }, _kind: PhantomData, }; if N == 0 { list.free = $name::none(); return list; } let mut free = 0; // Initialize indexes while free < N - 1 { list.list[free].next = unsafe { $name::new_unchecked(free as $ty + 1) }; free += 1; } list } } }; } impl_index_and_const_new!(LinkedIndexU8, u8, new_u8, { u8::MAX as usize - 1 }); impl_index_and_const_new!(LinkedIndexU16, u16, new_u16, { u16::MAX as usize - 1 }); impl_index_and_const_new!(LinkedIndexUsize, usize, new_usize, { usize::MAX - 1 }); impl SortedLinkedList where Idx: SortedLinkedListIndex, { /// Internal access helper #[inline(always)] fn node_at(&self, index: usize) -> &Node { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.list.get_unchecked(index) } } /// Internal access helper #[inline(always)] fn node_at_mut(&mut self, index: usize) -> &mut Node { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.list.get_unchecked_mut(index) } } /// Internal access helper #[inline(always)] fn write_data_in_node_at(&mut self, index: usize, data: T) { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.node_at_mut(index).val.as_mut_ptr().write(data); } } /// Internal access helper #[inline(always)] fn read_data_in_node_at(&self, index: usize) -> &T { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { &*self.node_at(index).val.as_ptr() } } /// Internal access helper #[inline(always)] fn read_mut_data_in_node_at(&mut self, index: usize) -> &mut T { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { &mut *self.node_at_mut(index).val.as_mut_ptr() } } /// Internal access helper #[inline(always)] fn extract_data_in_node_at(&mut self, index: usize) -> T { // Safety: The entire `self.list` is initialized in `new`, which makes this safe. unsafe { self.node_at(index).val.as_ptr().read() } } } impl SortedLinkedList where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { /// Pushes a value onto the list without checking if the list is full. /// /// Complexity is worst-case `O(N)`. /// /// # Safety /// /// Assumes that the list is not full. 
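///
/// A guarded-use sketch (this is exactly the check that the safe `push` performs):
///
/// ``` ignore
/// use heapless::sorted_linked_list::{Max, SortedLinkedList};
///
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// if !ll.is_full() {
///     // SAFETY: the list was observed as not full right above
///     unsafe { ll.push_unchecked(1) };
/// }
/// assert_eq!(ll.peek(), Some(&1));
/// ```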
pub unsafe fn push_unchecked(&mut self, value: T) { let new = self.free.get_unchecked(); // Store the data and update the next free spot self.write_data_in_node_at(new, value); self.free = self.node_at(new).next; if let Some(head) = self.head.option() { // Check if we need to replace head if self .read_data_in_node_at(head) .cmp(self.read_data_in_node_at(new)) != K::ordering() { self.node_at_mut(new).next = self.head; self.head = Idx::new_unchecked(new); } else { // It's not head, search the list for the correct placement let mut current = head; while let Some(next) = self.node_at(current).next.option() { if self .read_data_in_node_at(next) .cmp(self.read_data_in_node_at(new)) != K::ordering() { break; } current = next; } self.node_at_mut(new).next = self.node_at(current).next; self.node_at_mut(current).next = Idx::new_unchecked(new); } } else { self.node_at_mut(new).next = self.head; self.head = Idx::new_unchecked(new); } } /// Pushes an element to the linked list and sorts it into place. /// /// Complexity is worst-case `O(N)`. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// // The largest value will always be first /// ll.push(1).unwrap(); /// assert_eq!(ll.peek(), Some(&1)); /// /// ll.push(2).unwrap(); /// assert_eq!(ll.peek(), Some(&2)); /// /// ll.push(3).unwrap(); /// assert_eq!(ll.peek(), Some(&3)); /// /// // This will not fit in the queue. /// assert_eq!(ll.push(4), Err(4)); /// ``` pub fn push(&mut self, value: T) -> Result<(), T> { if !self.is_full() { Ok(unsafe { self.push_unchecked(value) }) } else { Err(value) } } /// Get an iterator over the sorted list. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// /// let mut iter = ll.iter(); /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), None); /// ``` pub fn iter(&self) -> Iter<'_, T, Idx, K, N> { Iter { list: self, index: self.head, } } /// Find an element in the list that can be changed and resorted. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// ll.push(3).unwrap(); /// /// // Find a value and update it /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// *find += 1000; /// find.finish(); /// /// assert_eq!(ll.pop(), Ok(1002)); /// assert_eq!(ll.pop(), Ok(3)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` pub fn find_mut(&mut self, mut f: F) -> Option> where F: FnMut(&T) -> bool, { let head = self.head.option()?; // Special-case, first element if f(self.read_data_in_node_at(head)) { return Some(FindMut { is_head: true, prev_index: Idx::none(), index: self.head, list: self, maybe_changed: false, }); } let mut current = head; while let Some(next) = self.node_at(current).next.option() { if f(self.read_data_in_node_at(next)) { return Some(FindMut { is_head: false, prev_index: unsafe { Idx::new_unchecked(current) }, index: unsafe { Idx::new_unchecked(next) }, list: self, maybe_changed: false, }); } current = next; } None } /// Peek at the first element. 
/// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max, Min}; /// let mut ll_max: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// // The largest value will always be first /// ll_max.push(1).unwrap(); /// assert_eq!(ll_max.peek(), Some(&1)); /// ll_max.push(2).unwrap(); /// assert_eq!(ll_max.peek(), Some(&2)); /// ll_max.push(3).unwrap(); /// assert_eq!(ll_max.peek(), Some(&3)); /// /// let mut ll_min: SortedLinkedList<_, _, Min, 3> = SortedLinkedList::new_usize(); /// /// // The Smallest value will always be first /// ll_min.push(3).unwrap(); /// assert_eq!(ll_min.peek(), Some(&3)); /// ll_min.push(2).unwrap(); /// assert_eq!(ll_min.peek(), Some(&2)); /// ll_min.push(1).unwrap(); /// assert_eq!(ll_min.peek(), Some(&1)); /// ``` pub fn peek(&self) -> Option<&T> { self.head .option() .map(|head| self.read_data_in_node_at(head)) } /// Pop an element from the list without checking so the list is not empty. /// /// # Safety /// /// Assumes that the list is not empty. pub unsafe fn pop_unchecked(&mut self) -> T { let head = self.head.get_unchecked(); let current = head; self.head = self.node_at(head).next; self.node_at_mut(current).next = self.free; self.free = Idx::new_unchecked(current); self.extract_data_in_node_at(current) } /// Pops the first element in the list. /// /// Complexity is worst-case `O(1)`. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// /// assert_eq!(ll.pop(), Ok(2)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` pub fn pop(&mut self) -> Result { if !self.is_empty() { Ok(unsafe { self.pop_unchecked() }) } else { Err(()) } } /// Checks if the linked list is full. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// assert_eq!(ll.is_full(), false); /// /// ll.push(1).unwrap(); /// assert_eq!(ll.is_full(), false); /// ll.push(2).unwrap(); /// assert_eq!(ll.is_full(), false); /// ll.push(3).unwrap(); /// assert_eq!(ll.is_full(), true); /// ``` #[inline] pub fn is_full(&self) -> bool { self.free.option().is_none() } /// Checks if the linked list is empty. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// assert_eq!(ll.is_empty(), true); /// /// ll.push(1).unwrap(); /// assert_eq!(ll.is_empty(), false); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.head.option().is_none() } } /// Iterator for the linked list. pub struct Iter<'a, T, Idx, K, const N: usize> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { list: &'a SortedLinkedList, index: Idx, } impl<'a, T, Idx, K, const N: usize> Iterator for Iter<'a, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { type Item = &'a T; fn next(&mut self) -> Option { let index = self.index.option()?; let node = self.list.node_at(index); self.index = node.next; Some(self.list.read_data_in_node_at(index)) } } /// Comes from [`SortedLinkedList::find_mut`]. 
pub struct FindMut<'a, T, Idx, K, const N: usize> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { list: &'a mut SortedLinkedList, is_head: bool, prev_index: Idx, index: Idx, maybe_changed: bool, } impl<'a, T, Idx, K, const N: usize> FindMut<'a, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { fn pop_internal(&mut self) -> T { if self.is_head { // If it is the head element, we can do a normal pop unsafe { self.list.pop_unchecked() } } else { // Somewhere in the list let prev = unsafe { self.prev_index.get_unchecked() }; let curr = unsafe { self.index.get_unchecked() }; // Re-point the previous index self.list.node_at_mut(prev).next = self.list.node_at_mut(curr).next; // Release the index into the free queue self.list.node_at_mut(curr).next = self.list.free; self.list.free = self.index; self.list.extract_data_in_node_at(curr) } } /// This will pop the element from the list. /// /// Complexity is worst-case `O(1)`. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// ll.push(3).unwrap(); /// /// // Find a value and update it /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// find.pop(); /// /// assert_eq!(ll.pop(), Ok(3)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` #[inline] pub fn pop(mut self) -> T { self.pop_internal() } /// This will resort the element into the correct position in the list if needed. The resorting /// will only happen if the element has been accessed mutably. /// /// Same as calling `drop`. /// /// Complexity is worst-case `O(N)`. /// /// # Example /// /// ``` /// use heapless::sorted_linked_list::{SortedLinkedList, Max}; /// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize(); /// /// ll.push(1).unwrap(); /// ll.push(2).unwrap(); /// ll.push(3).unwrap(); /// /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// find.finish(); // No resort, we did not access the value. /// /// let mut find = ll.find_mut(|v| *v == 2).unwrap(); /// *find += 1000; /// find.finish(); // Will resort, we accessed (and updated) the value. /// /// assert_eq!(ll.pop(), Ok(1002)); /// assert_eq!(ll.pop(), Ok(3)); /// assert_eq!(ll.pop(), Ok(1)); /// assert_eq!(ll.pop(), Err(())); /// ``` #[inline] pub fn finish(self) { drop(self) } } impl Drop for FindMut<'_, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { fn drop(&mut self) { // Only resort the list if the element has changed if self.maybe_changed { let val = self.pop_internal(); unsafe { self.list.push_unchecked(val) }; } } } impl Deref for FindMut<'_, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { type Target = T; fn deref(&self) -> &Self::Target { self.list .read_data_in_node_at(unsafe { self.index.get_unchecked() }) } } impl DerefMut for FindMut<'_, T, Idx, K, N> where T: Ord, Idx: SortedLinkedListIndex, K: Kind, { fn deref_mut(&mut self) -> &mut Self::Target { self.maybe_changed = true; self.list .read_mut_data_in_node_at(unsafe { self.index.get_unchecked() }) } } // /// Useful for debug during development. 
// impl fmt::Debug for FindMut<'_, T, Idx, K, N> // where // T: Ord + core::fmt::Debug, // Idx: SortedLinkedListIndex, // K: Kind, // { // fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // f.debug_struct("FindMut") // .field("prev_index", &self.prev_index.option()) // .field("index", &self.index.option()) // .field( // "prev_value", // &self // .list // .read_data_in_node_at(self.prev_index.option().unwrap()), // ) // .field( // "value", // &self.list.read_data_in_node_at(self.index.option().unwrap()), // ) // .finish() // } // } impl fmt::Debug for SortedLinkedList where T: Ord + core::fmt::Debug, Idx: SortedLinkedListIndex, K: Kind, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } impl Drop for SortedLinkedList where Idx: SortedLinkedListIndex, { fn drop(&mut self) { let mut index = self.head; while let Some(i) = index.option() { let node = self.node_at_mut(i); index = node.next; unsafe { ptr::drop_in_place(node.val.as_mut_ptr()); } } } } #[cfg(test)] mod tests { use super::*; #[test] fn const_new() { static mut _V1: SortedLinkedList = SortedLinkedList::new_u8(); static mut _V2: SortedLinkedList = SortedLinkedList::new_u16(); static mut _V3: SortedLinkedList = SortedLinkedList::new_usize(); } #[test] fn test_peek() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); assert_eq!(ll.peek().unwrap(), &1); ll.push(2).unwrap(); assert_eq!(ll.peek().unwrap(), &2); ll.push(3).unwrap(); assert_eq!(ll.peek().unwrap(), &3); let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(2).unwrap(); assert_eq!(ll.peek().unwrap(), &2); ll.push(1).unwrap(); assert_eq!(ll.peek().unwrap(), &1); ll.push(3).unwrap(); assert_eq!(ll.peek().unwrap(), &1); } #[test] fn test_full() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); ll.push(2).unwrap(); ll.push(3).unwrap(); assert!(ll.is_full()) } #[test] fn test_empty() { let ll: SortedLinkedList = SortedLinkedList::new_usize(); assert!(ll.is_empty()) } #[test] fn test_zero_size() { let ll: SortedLinkedList = SortedLinkedList::new_usize(); assert!(ll.is_empty()); assert!(ll.is_full()); } #[test] fn test_rejected_push() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); ll.push(2).unwrap(); ll.push(3).unwrap(); // This won't fit let r = ll.push(4); assert_eq!(r, Err(4)); } #[test] fn test_updating() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); ll.push(2).unwrap(); ll.push(3).unwrap(); let mut find = ll.find_mut(|v| *v == 2).unwrap(); *find += 1000; find.finish(); assert_eq!(ll.peek().unwrap(), &1002); let mut find = ll.find_mut(|v| *v == 3).unwrap(); *find += 1000; find.finish(); assert_eq!(ll.peek().unwrap(), &1003); // Remove largest element ll.find_mut(|v| *v == 1003).unwrap().pop(); assert_eq!(ll.peek().unwrap(), &1002); } #[test] fn test_updating_1() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); let v = ll.pop().unwrap(); assert_eq!(v, 1); } #[test] fn test_updating_2() { let mut ll: SortedLinkedList = SortedLinkedList::new_usize(); ll.push(1).unwrap(); let mut find = ll.find_mut(|v| *v == 1).unwrap(); *find += 1000; find.finish(); assert_eq!(ll.peek().unwrap(), &1001); } } heapless-0.7.16/src/spsc.rs000064400000000000000000000622400072674642500136640ustar 00000000000000//! Fixed capacity Single Producer Single Consumer (SPSC) queue //! //! Implementation based on //! //! 
NOTE: This module is not available on targets that do *not* support atomic loads and are not //! supported by [`atomic_polyfill`](https://crates.io/crates/atomic-polyfill). (e.g., MSP430). //! //! # Examples //! //! - `Queue` can be used as a plain queue //! //! ``` //! use heapless::spsc::Queue; //! //! let mut rb: Queue = Queue::new(); //! //! assert!(rb.enqueue(0).is_ok()); //! assert!(rb.enqueue(1).is_ok()); //! assert!(rb.enqueue(2).is_ok()); //! assert!(rb.enqueue(3).is_err()); // full //! //! assert_eq!(rb.dequeue(), Some(0)); //! ``` //! //! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode //! //! ``` //! use heapless::spsc::Queue; //! //! // Notice, type signature needs to be explicit for now. //! // (min_const_eval, does not allow for default type assignments) //! static mut Q: Queue = Queue::new(); //! //! enum Event { A, B } //! //! fn main() { //! // NOTE(unsafe) beware of aliasing the `consumer` end point //! let mut consumer = unsafe { Q.split().1 }; //! //! loop { //! // `dequeue` is a lockless operation //! match consumer.dequeue() { //! Some(Event::A) => { /* .. */ }, //! Some(Event::B) => { /* .. */ }, //! None => { /* sleep */ }, //! } //! # break //! } //! } //! //! // this is a different execution context that can preempt `main` //! fn interrupt_handler() { //! // NOTE(unsafe) beware of aliasing the `producer` end point //! let mut producer = unsafe { Q.split().0 }; //! # let condition = true; //! //! // .. //! //! if condition { //! producer.enqueue(Event::A).ok().unwrap(); //! } else { //! producer.enqueue(Event::B).ok().unwrap(); //! } //! //! // .. //! } //! ``` //! //! # Benchmarks //! //! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles //! //! `-C opt-level` |`3`| //! -----------------------|---| //! `Consumer::dequeue`| 15| //! `Queue::dequeue` | 12| //! `Producer::enqueue`| 16| //! `Queue::enqueue` | 14| //! //! - All execution times are in clock cycles. 1 clock cycle = 125 ns. //! - Execution time is *dependent* of `mem::size_of::()`. Both operations include one //! `memcpy(T)` in their successful path. //! - The optimization level is indicated in the first row. //! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue` //! and `Ok` is returned by `enqueue`). use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr}; #[cfg(full_atomic_polyfill)] use atomic_polyfill::{AtomicUsize, Ordering}; #[cfg(not(full_atomic_polyfill))] use core::sync::atomic::{AtomicUsize, Ordering}; /// A statically allocated single producer single consumer queue with a capacity of `N - 1` elements /// /// *IMPORTANT*: To get better performance use a value for `N` that is a power of 2 (e.g. `16`, `32`, /// etc.). 
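///
/// A short sketch of what the `N - 1` capacity means in practice (`4` is an arbitrary `N`):
///
/// ``` ignore
/// use heapless::spsc::Queue;
///
/// // `N = 4` buffer slots give room for `N - 1 = 3` queued elements
/// let mut rb: Queue<u8, 4> = Queue::new();
/// assert_eq!(rb.capacity(), 3);
///
/// rb.enqueue(1).unwrap();
/// rb.enqueue(2).unwrap();
/// rb.enqueue(3).unwrap();
/// assert!(rb.enqueue(4).is_err()); // full
/// ```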
pub struct Queue { // this is from where we dequeue items pub(crate) head: AtomicUsize, // this is where we enqueue new items pub(crate) tail: AtomicUsize, pub(crate) buffer: [UnsafeCell>; N], } impl Queue { const INIT: UnsafeCell> = UnsafeCell::new(MaybeUninit::uninit()); #[inline] fn increment(val: usize) -> usize { (val + 1) % N } /// Creates an empty queue with a fixed capacity of `N - 1` pub const fn new() -> Self { // Const assert N > 1 crate::sealed::greater_than_1::(); Queue { head: AtomicUsize::new(0), tail: AtomicUsize::new(0), buffer: [Self::INIT; N], } } /// Returns the maximum number of elements the queue can hold #[inline] pub const fn capacity(&self) -> usize { N - 1 } /// Returns the number of elements in the queue #[inline] pub fn len(&self) -> usize { let current_head = self.head.load(Ordering::Relaxed); let current_tail = self.tail.load(Ordering::Relaxed); current_tail.wrapping_sub(current_head).wrapping_add(N) % N } /// Returns `true` if the queue is empty #[inline] pub fn is_empty(&self) -> bool { self.head.load(Ordering::Relaxed) == self.tail.load(Ordering::Relaxed) } /// Returns `true` if the queue is full #[inline] pub fn is_full(&self) -> bool { Self::increment(self.tail.load(Ordering::Relaxed)) == self.head.load(Ordering::Relaxed) } /// Iterates from the front of the queue to the back pub fn iter(&self) -> Iter<'_, T, N> { Iter { rb: self, index: 0, len: self.len(), } } /// Returns an iterator that allows modifying each value pub fn iter_mut(&mut self) -> IterMut<'_, T, N> { let len = self.len(); IterMut { rb: self, index: 0, len, } } /// Adds an `item` to the end of the queue /// /// Returns back the `item` if the queue is full #[inline] pub fn enqueue(&mut self, val: T) -> Result<(), T> { unsafe { self.inner_enqueue(val) } } /// Returns the item in the front of the queue, or `None` if the queue is empty #[inline] pub fn dequeue(&mut self) -> Option { unsafe { self.inner_dequeue() } } /// Returns a reference to the item in the front of the queue without dequeuing, or /// `None` if the queue is empty. /// /// # Examples /// ``` /// use heapless::spsc::Queue; /// /// let mut queue: Queue = Queue::new(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); /// assert_eq!(Some(&1), consumer.peek()); /// assert_eq!(Some(1), consumer.dequeue()); /// assert_eq!(None, consumer.peek()); /// ``` pub fn peek(&self) -> Option<&T> { if !self.is_empty() { let head = self.head.load(Ordering::Relaxed); Some(unsafe { &*(self.buffer.get_unchecked(head).get() as *const T) }) } else { None } } // The memory for enqueueing is "owned" by the tail pointer. // NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue // items without doing pointer arithmetic and accessing internal fields of this type. unsafe fn inner_enqueue(&self, val: T) -> Result<(), T> { let current_tail = self.tail.load(Ordering::Relaxed); let next_tail = Self::increment(current_tail); if next_tail != self.head.load(Ordering::Acquire) { (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); self.tail.store(next_tail, Ordering::Release); Ok(()) } else { Err(val) } } // The memory for enqueueing is "owned" by the tail pointer. // NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue // items without doing pointer arithmetic and accessing internal fields of this type. 
unsafe fn inner_enqueue_unchecked(&self, val: T) { let current_tail = self.tail.load(Ordering::Relaxed); (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); self.tail .store(Self::increment(current_tail), Ordering::Release); } /// Adds an `item` to the end of the queue, without checking if it's full /// /// # Unsafety /// /// If the queue is full this operation will leak a value (T's destructor won't run on /// the value that got overwritten by `item`), *and* will allow the `dequeue` operation /// to create a copy of `item`, which could result in `T`'s destructor running on `item` /// twice. pub unsafe fn enqueue_unchecked(&mut self, val: T) { self.inner_enqueue_unchecked(val) } // The memory for dequeuing is "owned" by the head pointer,. // NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue // items without doing pointer arithmetic and accessing internal fields of this type. unsafe fn inner_dequeue(&self) -> Option { let current_head = self.head.load(Ordering::Relaxed); if current_head == self.tail.load(Ordering::Acquire) { None } else { let v = (self.buffer.get_unchecked(current_head).get() as *const T).read(); self.head .store(Self::increment(current_head), Ordering::Release); Some(v) } } // The memory for dequeuing is "owned" by the head pointer,. // NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue // items without doing pointer arithmetic and accessing internal fields of this type. unsafe fn inner_dequeue_unchecked(&self) -> T { let current_head = self.head.load(Ordering::Relaxed); let v = (self.buffer.get_unchecked(current_head).get() as *const T).read(); self.head .store(Self::increment(current_head), Ordering::Release); v } /// Returns the item in the front of the queue, without checking if there is something in the /// queue /// /// # Unsafety /// /// If the queue is empty this operation will return uninitialized memory. 
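///
/// A guarded-use sketch (the emptiness check is what the safe `dequeue` does internally):
///
/// ``` ignore
/// use heapless::spsc::Queue;
///
/// let mut rb: Queue<u8, 4> = Queue::new();
/// rb.enqueue(1).unwrap();
///
/// if !rb.is_empty() {
///     // SAFETY: the queue was observed as non-empty right above
///     let v = unsafe { rb.dequeue_unchecked() };
///     assert_eq!(v, 1);
/// }
/// ```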
pub unsafe fn dequeue_unchecked(&mut self) -> T { self.inner_dequeue_unchecked() } /// Splits a queue into producer and consumer endpoints pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) { (Producer { rb: self }, Consumer { rb: self }) } } impl Default for Queue { fn default() -> Self { Self::new() } } impl Clone for Queue where T: Clone, { fn clone(&self) -> Self { let mut new: Queue = Queue::new(); for s in self.iter() { unsafe { // NOTE(unsafe) new.capacity() == self.capacity() >= self.len() // no overflow possible new.enqueue_unchecked(s.clone()); } } new } } impl PartialEq> for Queue where T: PartialEq, { fn eq(&self, other: &Queue) -> bool { self.len() == other.len() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2) } } impl Eq for Queue where T: Eq {} /// An iterator over the items of a queue pub struct Iter<'a, T, const N: usize> { rb: &'a Queue, index: usize, len: usize, } impl<'a, T, const N: usize> Clone for Iter<'a, T, N> { fn clone(&self) -> Self { Self { rb: self.rb, index: self.index, len: self.len, } } } /// A mutable iterator over the items of a queue pub struct IterMut<'a, T, const N: usize> { rb: &'a mut Queue, index: usize, len: usize, } impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> { type Item = &'a T; fn next(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); let i = (head + self.index) % N; self.index += 1; Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) }) } else { None } } } impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> { type Item = &'a mut T; fn next(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); let i = (head + self.index) % N; self.index += 1; Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) }) } else { None } } } impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> { fn next_back(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); // self.len > 0, since it's larger than self.index > 0 let i = (head + self.len - 1) % N; self.len -= 1; Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) }) } else { None } } } impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> { fn next_back(&mut self) -> Option { if self.index < self.len { let head = self.rb.head.load(Ordering::Relaxed); // self.len > 0, since it's larger than self.index > 0 let i = (head + self.len - 1) % N; self.len -= 1; Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) }) } else { None } } } impl Drop for Queue { fn drop(&mut self) { for item in self { unsafe { ptr::drop_in_place(item); } } } } impl fmt::Debug for Queue where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } impl hash::Hash for Queue where T: hash::Hash, { fn hash(&self, state: &mut H) { // iterate over self in order for t in self.iter() { hash::Hash::hash(t, state); } } } impl hash32::Hash for Queue where T: hash32::Hash, { fn hash(&self, state: &mut H) { // iterate over self in order for t in self.iter() { hash32::Hash::hash(t, state); } } } impl<'a, T, const N: usize> IntoIterator for &'a Queue { type Item = &'a T; type IntoIter = Iter<'a, T, N>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, T, const N: usize> IntoIterator for &'a mut Queue { type Item = &'a mut T; type IntoIter = IterMut<'a, T, N>; fn into_iter(self) -> Self::IntoIter { 
self.iter_mut() } } /// A queue "consumer"; it can dequeue items from the queue /// NOTE the consumer semantically owns the `head` pointer of the queue pub struct Consumer<'a, T, const N: usize> { rb: &'a Queue, } unsafe impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where T: Send {} /// A queue "producer"; it can enqueue items into the queue /// NOTE the producer semantically owns the `tail` pointer of the queue pub struct Producer<'a, T, const N: usize> { rb: &'a Queue, } unsafe impl<'a, T, const N: usize> Send for Producer<'a, T, N> where T: Send {} impl<'a, T, const N: usize> Consumer<'a, T, N> { /// Returns the item in the front of the queue, or `None` if the queue is empty #[inline] pub fn dequeue(&mut self) -> Option { unsafe { self.rb.inner_dequeue() } } /// Returns the item in the front of the queue, without checking if there are elements in the /// queue /// /// See [`Queue::dequeue_unchecked`] for safety #[inline] pub unsafe fn dequeue_unchecked(&mut self) -> T { self.rb.inner_dequeue_unchecked() } /// Returns if there are any items to dequeue. When this returns `true`, at least the /// first subsequent dequeue will succeed #[inline] pub fn ready(&self) -> bool { !self.rb.is_empty() } /// Returns the number of elements in the queue #[inline] pub fn len(&self) -> usize { self.rb.len() } /// Returns the maximum number of elements the queue can hold #[inline] pub fn capacity(&self) -> usize { self.rb.capacity() } /// Returns the item in the front of the queue without dequeuing, or `None` if the queue is /// empty /// /// # Examples /// ``` /// use heapless::spsc::Queue; /// /// let mut queue: Queue = Queue::new(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); /// assert_eq!(Some(&1), consumer.peek()); /// assert_eq!(Some(1), consumer.dequeue()); /// assert_eq!(None, consumer.peek()); /// ``` #[inline] pub fn peek(&self) -> Option<&T> { self.rb.peek() } } impl<'a, T, const N: usize> Producer<'a, T, N> { /// Adds an `item` to the end of the queue, returns back the `item` if the queue is full #[inline] pub fn enqueue(&mut self, val: T) -> Result<(), T> { unsafe { self.rb.inner_enqueue(val) } } /// Adds an `item` to the end of the queue, without checking if the queue is full /// /// See [`Queue::enqueue_unchecked`] for safety #[inline] pub unsafe fn enqueue_unchecked(&mut self, val: T) { self.rb.inner_enqueue_unchecked(val) } /// Returns if there is any space to enqueue a new item. When this returns true, at /// least the first subsequent enqueue will succeed. 
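    ///
    /// Illustrative sketch (assumes a queue with a single usable slot, i.e. `N - 1 = 1`):
    ///
    /// ```
    /// use heapless::spsc::Queue;
    ///
    /// let mut queue: Queue<u8, 2> = Queue::new();
    /// let (mut producer, _consumer) = queue.split();
    /// assert!(producer.ready());
    /// producer.enqueue(1).unwrap();
    /// // The single usable slot is now occupied, so no further enqueue can succeed.
    /// assert!(!producer.ready());
    /// ```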
#[inline] pub fn ready(&self) -> bool { !self.rb.is_full() } /// Returns the number of elements in the queue #[inline] pub fn len(&self) -> usize { self.rb.len() } /// Returns the maximum number of elements the queue can hold #[inline] pub fn capacity(&self) -> usize { self.rb.capacity() } } #[cfg(test)] mod tests { use crate::spsc::Queue; use hash32::Hasher; #[test] fn full() { let mut rb: Queue = Queue::new(); assert_eq!(rb.is_full(), false); rb.enqueue(1).unwrap(); assert_eq!(rb.is_full(), false); rb.enqueue(2).unwrap(); assert_eq!(rb.is_full(), true); } #[test] fn empty() { let mut rb: Queue = Queue::new(); assert_eq!(rb.is_empty(), true); rb.enqueue(1).unwrap(); assert_eq!(rb.is_empty(), false); rb.enqueue(2).unwrap(); assert_eq!(rb.is_empty(), false); } #[test] #[cfg_attr(miri, ignore)] // too slow fn len() { let mut rb: Queue = Queue::new(); assert_eq!(rb.len(), 0); rb.enqueue(1).unwrap(); assert_eq!(rb.len(), 1); rb.enqueue(2).unwrap(); assert_eq!(rb.len(), 2); for _ in 0..1_000_000 { let v = rb.dequeue().unwrap(); println!("{}", v); rb.enqueue(v).unwrap(); assert_eq!(rb.len(), 2); } } #[test] #[cfg_attr(miri, ignore)] // too slow fn try_overflow() { const N: usize = 23; let mut rb: Queue = Queue::new(); for i in 0..N as i32 - 1 { rb.enqueue(i).unwrap(); } for _ in 0..1_000_000 { for i in 0..N as i32 - 1 { let d = rb.dequeue().unwrap(); assert_eq!(d, i); rb.enqueue(i).unwrap(); } } } #[test] fn sanity() { let mut rb: Queue = Queue::new(); let (mut p, mut c) = rb.split(); assert_eq!(p.ready(), true); assert_eq!(c.ready(), false); assert_eq!(c.dequeue(), None); p.enqueue(0).unwrap(); assert_eq!(c.dequeue(), Some(0)); } #[test] fn static_new() { static mut _Q: Queue = Queue::new(); } #[test] fn drop() { struct Droppable; impl Droppable { fn new() -> Self { unsafe { COUNT += 1; } Droppable } } impl Drop for Droppable { fn drop(&mut self) { unsafe { COUNT -= 1; } } } static mut COUNT: i32 = 0; { let mut v: Queue = Queue::new(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); v.dequeue().unwrap(); } assert_eq!(unsafe { COUNT }, 0); { let mut v: Queue = Queue::new(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); } assert_eq!(unsafe { COUNT }, 0); } #[test] fn iter() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.dequeue().unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); rb.enqueue(3).unwrap(); let mut items = rb.iter(); // assert_eq!(items.next(), Some(&0)); assert_eq!(items.next(), Some(&1)); assert_eq!(items.next(), Some(&2)); assert_eq!(items.next(), Some(&3)); assert_eq!(items.next(), None); } #[test] fn iter_double_ended() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); let mut items = rb.iter(); assert_eq!(items.next(), Some(&0)); assert_eq!(items.next_back(), Some(&2)); assert_eq!(items.next(), Some(&1)); assert_eq!(items.next(), None); assert_eq!(items.next_back(), None); } #[test] fn iter_mut() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); let mut items = rb.iter_mut(); assert_eq!(items.next(), Some(&mut 0)); assert_eq!(items.next(), Some(&mut 1)); assert_eq!(items.next(), Some(&mut 2)); assert_eq!(items.next(), None); } #[test] fn iter_mut_double_ended() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); let mut items = rb.iter_mut(); assert_eq!(items.next(), Some(&mut 0)); 
assert_eq!(items.next_back(), Some(&mut 2)); assert_eq!(items.next(), Some(&mut 1)); assert_eq!(items.next(), None); assert_eq!(items.next_back(), None); } #[test] fn wrap_around() { let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); rb.enqueue(2).unwrap(); rb.dequeue().unwrap(); rb.dequeue().unwrap(); rb.dequeue().unwrap(); rb.enqueue(3).unwrap(); rb.enqueue(4).unwrap(); assert_eq!(rb.len(), 2); } #[test] fn ready_flag() { let mut rb: Queue = Queue::new(); let (mut p, mut c) = rb.split(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); p.enqueue(0).unwrap(); assert_eq!(c.ready(), true); assert_eq!(p.ready(), true); p.enqueue(1).unwrap(); assert_eq!(c.ready(), true); assert_eq!(p.ready(), false); c.dequeue().unwrap(); assert_eq!(c.ready(), true); assert_eq!(p.ready(), true); c.dequeue().unwrap(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); } #[test] fn clone() { let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); let rb2 = rb1.clone(); assert_eq!(rb1.capacity(), rb2.capacity()); assert_eq!(rb1.len(), rb2.len()); assert!(rb1.iter().zip(rb2.iter()).all(|(v1, v2)| v1 == v2)); } #[test] fn eq() { // generate two queues with same content // but different buffer alignment let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); let mut rb2: Queue = Queue::new(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); assert!(rb1 == rb2); // test for symmetry assert!(rb2 == rb1); // test for changes in content rb1.enqueue(0).unwrap(); assert!(rb1 != rb2); rb2.enqueue(1).unwrap(); assert!(rb1 != rb2); // test for refexive relation assert!(rb1 == rb1); assert!(rb2 == rb2); } #[test] fn hash_equality() { // generate two queues with same content // but different buffer alignment let rb1 = { let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); rb1 }; let rb2 = { let mut rb2: Queue = Queue::new(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); rb2 }; let hash1 = { let mut hasher1 = hash32::FnvHasher::default(); hash32::Hash::hash(&rb1, &mut hasher1); let hash1 = hasher1.finish(); hash1 }; let hash2 = { let mut hasher2 = hash32::FnvHasher::default(); hash32::Hash::hash(&rb2, &mut hasher2); let hash2 = hasher2.finish(); hash2 }; assert_eq!(hash1, hash2); } } heapless-0.7.16/src/string.rs000064400000000000000000000420600072674642500142200ustar 00000000000000use core::{cmp::Ordering, fmt, fmt::Write, hash, iter, ops, str}; use hash32; use crate::Vec; /// A fixed capacity [`String`](https://doc.rust-lang.org/std/string/struct.String.html) pub struct String { vec: Vec, } impl String { /// Constructs a new, empty `String` with a fixed capacity of `N` bytes /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// // allocate the string on the stack /// let mut s: String<4> = String::new(); /// /// // allocate the string in a static variable /// static mut S: String<4> = String::new(); /// ``` #[inline] pub const fn new() -> Self { Self { vec: Vec::new() } } /// Converts a `String` into a byte vector. /// /// This consumes the `String`, so we do not need to copy its contents. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let s: String<4> = String::from("ab"); /// let b = s.into_bytes(); /// assert!(b.len() == 2); /// /// assert_eq!(&['a' as u8, 'b' as u8], &b[..]); /// ``` #[inline] pub fn into_bytes(self) -> Vec { self.vec } /// Extracts a string slice containing the entire string. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<4> = String::from("ab"); /// assert!(s.as_str() == "ab"); /// /// let _s = s.as_str(); /// // s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable /// ``` #[inline] pub fn as_str(&self) -> &str { unsafe { str::from_utf8_unchecked(self.vec.as_slice()) } } /// Converts a `String` into a mutable string slice. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<4> = String::from("ab"); /// let s = s.as_mut_str(); /// s.make_ascii_uppercase(); /// ``` #[inline] pub fn as_mut_str(&mut self) -> &mut str { unsafe { str::from_utf8_unchecked_mut(self.vec.as_mut_slice()) } } /// Returns a mutable reference to the contents of this `String`. /// /// # Safety /// /// This function is unsafe because it does not check that the bytes passed /// to it are valid UTF-8. If this constraint is violated, it may cause /// memory unsafety issues with future users of the `String`, as the rest of /// the library assumes that `String`s are valid UTF-8. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = String::from("hello"); /// /// unsafe { /// let vec = s.as_mut_vec(); /// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]); /// /// vec.reverse(); /// } /// assert_eq!(s, "olleh"); /// ``` pub unsafe fn as_mut_vec(&mut self) -> &mut Vec { &mut self.vec } /// Appends a given string slice onto the end of this `String`. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::from("foo"); /// /// assert!(s.push_str("bar").is_ok()); /// /// assert_eq!("foobar", s); /// /// assert!(s.push_str("tender").is_err()); /// ``` #[inline] pub fn push_str(&mut self, string: &str) -> Result<(), ()> { self.vec.extend_from_slice(string.as_bytes()) } /// Returns the maximum number of elements the String can hold /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<4> = String::new(); /// assert!(s.capacity() == 4); /// ``` #[inline] pub fn capacity(&self) -> usize { self.vec.capacity() } /// Appends the given [`char`] to the end of this `String`. /// /// [`char`]: ../../std/primitive.char.html /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::from("abc"); /// /// s.push('1').unwrap(); /// s.push('2').unwrap(); /// s.push('3').unwrap(); /// /// assert!("abc123" == s.as_str()); /// /// assert_eq!("abc123", s); /// ``` #[inline] pub fn push(&mut self, c: char) -> Result<(), ()> { match c.len_utf8() { 1 => self.vec.push(c as u8).map_err(|_| {}), _ => self .vec .extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()), } } /// Shortens this `String` to the specified length. /// /// If `new_len` is greater than the string's current length, this has no /// effect. /// /// Note that this method has no effect on the allocated capacity /// of the string /// /// # Panics /// /// Panics if `new_len` does not lie on a [`char`] boundary. 
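    ///
    /// For instance (illustrative sketch, not from the original docs), truncating in the middle
    /// of a multi-byte character panics:
    ///
    /// ```should_panic
    /// use heapless::String;
    ///
    /// let mut s: String<8> = String::new();
    /// s.push('é').unwrap(); // 'é' (U+00E9) is 2 bytes in UTF-8
    /// s.truncate(1); // byte index 1 is not a char boundary -> panic
    /// ```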
/// /// [`char`]: ../../std/primitive.char.html /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::from("hello"); /// /// s.truncate(2); /// /// assert_eq!("he", s); /// ``` #[inline] pub fn truncate(&mut self, new_len: usize) { if new_len <= self.len() { assert!(self.is_char_boundary(new_len)); self.vec.truncate(new_len) } } /// Removes the last character from the string buffer and returns it. /// /// Returns [`None`] if this `String` is empty. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::from("foo"); /// /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('f')); /// /// assert_eq!(s.pop(), None); /// ``` pub fn pop(&mut self) -> Option { let ch = self.chars().rev().next()?; // pop bytes that correspond to `ch` for _ in 0..ch.len_utf8() { unsafe { self.vec.pop_unchecked(); } } Some(ch) } /// Truncates this `String`, removing all contents. /// /// While this means the `String` will have a length of zero, it does not /// touch its capacity. /// /// # Examples /// /// Basic usage: /// /// ``` /// use heapless::String; /// /// let mut s: String<8> = String::from("foo"); /// /// s.clear(); /// /// assert!(s.is_empty()); /// assert_eq!(0, s.len()); /// assert_eq!(8, s.capacity()); /// ``` #[inline] pub fn clear(&mut self) { self.vec.clear() } } impl Default for String { fn default() -> Self { Self::new() } } impl<'a, const N: usize> From<&'a str> for String { fn from(s: &'a str) -> Self { let mut new = String::new(); new.push_str(s).unwrap(); new } } impl str::FromStr for String { type Err = (); fn from_str(s: &str) -> Result { let mut new = String::new(); new.push_str(s)?; Ok(new) } } impl iter::FromIterator for String { fn from_iter>(iter: T) -> Self { let mut new = String::new(); for c in iter { new.push(c).unwrap(); } new } } impl<'a, const N: usize> iter::FromIterator<&'a char> for String { fn from_iter>(iter: T) -> Self { let mut new = String::new(); for c in iter { new.push(*c).unwrap(); } new } } impl<'a, const N: usize> iter::FromIterator<&'a str> for String { fn from_iter>(iter: T) -> Self { let mut new = String::new(); for c in iter { new.push_str(c).unwrap(); } new } } impl Clone for String { fn clone(&self) -> Self { Self { vec: self.vec.clone(), } } } impl fmt::Debug for String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl fmt::Display for String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } impl hash::Hash for String { #[inline] fn hash(&self, hasher: &mut H) { ::hash(self, hasher) } } impl hash32::Hash for String { #[inline] fn hash(&self, hasher: &mut H) { ::hash(self, hasher) } } impl fmt::Write for String { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s).map_err(|_| fmt::Error) } fn write_char(&mut self, c: char) -> Result<(), fmt::Error> { self.push(c).map_err(|_| fmt::Error) } } impl ops::Deref for String { type Target = str; fn deref(&self) -> &str { self.as_str() } } impl ops::DerefMut for String { fn deref_mut(&mut self) -> &mut str { self.as_mut_str() } } impl AsRef for String { #[inline] fn as_ref(&self) -> &str { self } } impl AsRef<[u8]> for String { #[inline] fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl PartialEq> for String { fn eq(&self, rhs: &String) -> bool { str::eq(&**self, &**rhs) } fn ne(&self, rhs: 
&String) -> bool { str::ne(&**self, &**rhs) } } // String == str impl PartialEq for String { #[inline] fn eq(&self, other: &str) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &str) -> bool { str::ne(&self[..], &other[..]) } } // String == &'str impl PartialEq<&str> for String { #[inline] fn eq(&self, other: &&str) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &&str) -> bool { str::ne(&self[..], &other[..]) } } // str == String impl PartialEq> for str { #[inline] fn eq(&self, other: &String) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &String) -> bool { str::ne(&self[..], &other[..]) } } // &'str == String impl PartialEq> for &str { #[inline] fn eq(&self, other: &String) -> bool { str::eq(&self[..], &other[..]) } #[inline] fn ne(&self, other: &String) -> bool { str::ne(&self[..], &other[..]) } } impl Eq for String {} impl PartialOrd> for String { #[inline] fn partial_cmp(&self, other: &String) -> Option { PartialOrd::partial_cmp(&**self, &**other) } } impl Ord for String { #[inline] fn cmp(&self, other: &Self) -> Ordering { Ord::cmp(&**self, &**other) } } macro_rules! impl_from_num { ($num:ty, $size:expr) => { impl From<$num> for String { fn from(s: $num) -> Self { let mut new = String::new(); write!(&mut new, "{}", s).unwrap(); new } } }; } impl_from_num!(i8, 4); impl_from_num!(i16, 6); impl_from_num!(i32, 11); impl_from_num!(i64, 20); impl_from_num!(u8, 3); impl_from_num!(u16, 5); impl_from_num!(u32, 10); impl_from_num!(u64, 20); #[cfg(test)] mod tests { use crate::{String, Vec}; #[test] fn static_new() { static mut _S: String<8> = String::new(); } #[test] fn clone() { let s1: String<20> = String::from("abcd"); let mut s2 = s1.clone(); s2.push_str(" efgh").unwrap(); assert_eq!(s1, "abcd"); assert_eq!(s2, "abcd efgh"); } #[test] fn cmp() { let s1: String<4> = String::from("abcd"); let s2: String<4> = String::from("zzzz"); assert!(s1 < s2); } #[test] fn cmp_heterogenous_size() { let s1: String<4> = String::from("abcd"); let s2: String<8> = String::from("zzzz"); assert!(s1 < s2); } #[test] fn debug() { use core::fmt::Write; let s: String<8> = String::from("abcd"); let mut std_s = std::string::String::new(); write!(std_s, "{:?}", s).unwrap(); assert_eq!("\"abcd\"", std_s); } #[test] fn display() { use core::fmt::Write; let s: String<8> = String::from("abcd"); let mut std_s = std::string::String::new(); write!(std_s, "{}", s).unwrap(); assert_eq!("abcd", std_s); } #[test] fn empty() { let s: String<4> = String::new(); assert!(s.capacity() == 4); assert_eq!(s, ""); assert_eq!(s.len(), 0); assert_ne!(s.len(), 4); } #[test] fn from() { let s: String<4> = String::from("123"); assert!(s.len() == 3); assert_eq!(s, "123"); } #[test] fn from_str() { use core::str::FromStr; let s: String<4> = String::<4>::from_str("123").unwrap(); assert!(s.len() == 3); assert_eq!(s, "123"); let e: () = String::<2>::from_str("123").unwrap_err(); assert_eq!(e, ()); } #[test] fn from_iter() { let mut v: Vec = Vec::new(); v.push('h').unwrap(); v.push('e').unwrap(); v.push('l').unwrap(); v.push('l').unwrap(); v.push('o').unwrap(); let string1: String<5> = v.iter().collect(); //&char let string2: String<5> = "hello".chars().collect(); //char assert_eq!(string1, "hello"); assert_eq!(string2, "hello"); } #[test] #[should_panic] fn from_panic() { let _: String<4> = String::from("12345"); } #[test] fn from_num() { let v: String<20> = String::from(18446744073709551615 as u64); assert_eq!(v, "18446744073709551615"); } #[test] fn into_bytes() { let 
s: String<4> = String::from("ab"); let b: Vec = s.into_bytes(); assert_eq!(b.len(), 2); assert_eq!(&['a' as u8, 'b' as u8], &b[..]); } #[test] fn as_str() { let s: String<4> = String::from("ab"); assert_eq!(s.as_str(), "ab"); // should be moved to fail test // let _s = s.as_str(); // s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable } #[test] fn as_mut_str() { let mut s: String<4> = String::from("ab"); let s = s.as_mut_str(); s.make_ascii_uppercase(); assert_eq!(s, "AB"); } #[test] fn push_str() { let mut s: String<8> = String::from("foo"); assert!(s.push_str("bar").is_ok()); assert_eq!("foobar", s); assert_eq!(s, "foobar"); assert!(s.push_str("tender").is_err()); assert_eq!("foobar", s); assert_eq!(s, "foobar"); } #[test] fn push() { let mut s: String<6> = String::from("abc"); assert!(s.push('1').is_ok()); assert!(s.push('2').is_ok()); assert!(s.push('3').is_ok()); assert!(s.push('4').is_err()); assert!("abc123" == s.as_str()); } #[test] fn as_bytes() { let s: String<8> = String::from("hello"); assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes()); } #[test] fn truncate() { let mut s: String<8> = String::from("hello"); s.truncate(6); assert_eq!(s.len(), 5); s.truncate(2); assert_eq!(s.len(), 2); assert_eq!("he", s); assert_eq!(s, "he"); } #[test] fn pop() { let mut s: String<8> = String::from("foo"); assert_eq!(s.pop(), Some('o')); assert_eq!(s.pop(), Some('o')); assert_eq!(s.pop(), Some('f')); assert_eq!(s.pop(), None); } #[test] fn pop_uenc() { let mut s: String<8> = String::from("é"); assert_eq!(s.len(), 3); match s.pop() { Some(c) => { assert_eq!(s.len(), 1); assert_eq!(c, '\u{0301}'); // accute accent of e () } None => assert!(false), }; } #[test] fn is_empty() { let mut v: String<8> = String::new(); assert!(v.is_empty()); let _ = v.push('a'); assert!(!v.is_empty()); } #[test] fn clear() { let mut s: String<8> = String::from("foo"); s.clear(); assert!(s.is_empty()); assert_eq!(0, s.len()); assert_eq!(8, s.capacity()); } } heapless-0.7.16/src/test_helpers.rs000064400000000000000000000012700072674642500154110ustar 00000000000000macro_rules! 
droppable { () => { static COUNT: core::sync::atomic::AtomicI32 = core::sync::atomic::AtomicI32::new(0); #[derive(Eq, Ord, PartialEq, PartialOrd)] struct Droppable(i32); impl Droppable { fn new() -> Self { COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed); Droppable(Self::count()) } fn count() -> i32 { COUNT.load(core::sync::atomic::Ordering::Relaxed) } } impl Drop for Droppable { fn drop(&mut self) { COUNT.fetch_sub(1, core::sync::atomic::Ordering::Relaxed); } } }; } heapless-0.7.16/src/ufmt.rs000064400000000000000000000023500072674642500136630ustar 00000000000000use crate::{string::String, vec::Vec}; use ufmt_write::uWrite; impl uWrite for String { type Error = (); fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { self.push_str(s) } } impl uWrite for Vec { type Error = (); fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { self.extend_from_slice(s.as_bytes()) } } #[cfg(test)] mod tests { use super::*; use ufmt::{derive::uDebug, uwrite}; #[derive(uDebug)] struct Pair { x: u32, y: u32, } #[test] fn test_string() { let a = 123; let b = Pair { x: 0, y: 1234 }; let mut s = String::<32>::new(); uwrite!(s, "{} -> {:?}", a, b).unwrap(); assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }"); } #[test] fn test_string_err() { let p = Pair { x: 0, y: 1234 }; let mut s = String::<4>::new(); assert!(uwrite!(s, "{:?}", p).is_err()); } #[test] fn test_vec() { let a = 123; let b = Pair { x: 0, y: 1234 }; let mut v = Vec::::new(); uwrite!(v, "{} -> {:?}", a, b).unwrap(); assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }"); } } heapless-0.7.16/src/vec.rs000064400000000000000000001272620072674642500134770ustar 00000000000000use core::{ cmp::Ordering, convert::TryFrom, fmt, hash, iter::FromIterator, mem::MaybeUninit, ops, ptr, slice, }; use hash32; /// A fixed capacity [`Vec`](https://doc.rust-lang.org/std/vec/struct.Vec.html) /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// /// // A vector with a fixed capacity of 8 elements allocated on the stack /// let mut vec = Vec::<_, 8>::new(); /// vec.push(1); /// vec.push(2); /// /// assert_eq!(vec.len(), 2); /// assert_eq!(vec[0], 1); /// /// assert_eq!(vec.pop(), Some(2)); /// assert_eq!(vec.len(), 1); /// /// vec[0] = 7; /// assert_eq!(vec[0], 7); /// /// vec.extend([1, 2, 3].iter().cloned()); /// /// for x in &vec { /// println!("{}", x); /// } /// assert_eq!(*vec, [7, 1, 2, 3]); /// ``` pub struct Vec { // NOTE order is important for optimizations. the `len` first layout lets the compiler optimize // `new` to: reserve stack space and zero the first word. With the fields in the reverse order // the compiler optimizes `new` to `memclr`-ing the *entire* stack space, including the `buffer` // field which should be left uninitialized. 
Optimizations were last checked with Rust 1.60 len: usize, buffer: [MaybeUninit; N], } impl Vec { const ELEM: MaybeUninit = MaybeUninit::uninit(); const INIT: [MaybeUninit; N] = [Self::ELEM; N]; // important for optimization of `new` /// Constructs a new, empty vector with a fixed capacity of `N` /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// // allocate the vector on the stack /// let mut x: Vec = Vec::new(); /// /// // allocate the vector in a static variable /// static mut X: Vec = Vec::new(); /// ``` /// `Vec` `const` constructor; wrap the returned value in [`Vec`](../struct.Vec.html) pub const fn new() -> Self { // Const assert N >= 0 crate::sealed::greater_than_eq_0::(); Self { len: 0, buffer: Self::INIT, } } /// Constructs a new vector with a fixed capacity of `N` and fills it /// with the provided slice. /// /// This is equivalent to the following code: /// /// ``` /// use heapless::Vec; /// /// let mut v: Vec = Vec::new(); /// v.extend_from_slice(&[1, 2, 3]).unwrap(); /// ``` #[inline] pub fn from_slice(other: &[T]) -> Result where T: Clone, { let mut v = Vec::new(); v.extend_from_slice(other)?; Ok(v) } /// Clones a vec into a new vec pub(crate) fn clone(&self) -> Self where T: Clone, { let mut new = Self::new(); // avoid `extend_from_slice` as that introduces a runtime check / panicking branch for elem in self { unsafe { new.push_unchecked(elem.clone()); } } new } /// Returns a raw pointer to the vector’s buffer. pub fn as_ptr(&self) -> *const T { self.buffer.as_ptr() as *const T } /// Returns a raw pointer to the vector’s buffer, which may be mutated through. pub fn as_mut_ptr(&mut self) -> *mut T { self.buffer.as_mut_ptr() as *mut T } /// Extracts a slice containing the entire vector. /// /// Equivalent to `&s[..]`. /// /// # Examples /// /// ``` /// use heapless::Vec; /// let buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); /// assert_eq!(buffer.as_slice(), &[1, 2, 3, 5, 8]); /// ``` pub fn as_slice(&self) -> &[T] { // NOTE(unsafe) avoid bound checks in the slicing operation // &buffer[..self.len] unsafe { slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.len) } } /// Returns the contents of the vector as an array of length `M` if the length /// of the vector is exactly `M`, otherwise returns `Err(self)`. /// /// # Examples /// /// ``` /// use heapless::Vec; /// let buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); /// let array: [u8; 5] = buffer.into_array().unwrap(); /// assert_eq!(array, [1, 2, 3, 5, 8]); /// ``` pub fn into_array(self) -> Result<[T; M], Self> { if self.len() == M { // This is how the unstable `MaybeUninit::array_assume_init` method does it let array = unsafe { (&self.buffer as *const _ as *const [T; M]).read() }; // We don't want `self`'s destructor to be called because that would drop all the // items in the array core::mem::forget(self); Ok(array) } else { Err(self) } } /// Extracts a mutable slice containing the entire vector. /// /// Equivalent to `&s[..]`. /// /// # Examples /// /// ``` /// use heapless::Vec; /// let mut buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); /// buffer[0] = 9; /// assert_eq!(buffer.as_slice(), &[9, 2, 3, 5, 8]); /// ``` pub(crate) fn as_mut_slice(&mut self) -> &mut [T] { // NOTE(unsafe) avoid bound checks in the slicing operation // &mut buffer[..self.len] unsafe { slice::from_raw_parts_mut(self.buffer.as_mut_ptr() as *mut T, self.len) } } /// Returns the maximum number of elements the vector can hold. 
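    ///
    /// A short illustrative example:
    ///
    /// ```
    /// use heapless::Vec;
    ///
    /// let v: Vec<u8, 4> = Vec::new();
    /// // The capacity is fixed by the const generic parameter `N`.
    /// assert_eq!(v.capacity(), 4);
    /// ```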
pub const fn capacity(&self) -> usize { N } /// Clears the vector, removing all values. pub fn clear(&mut self) { self.truncate(0); } /// Extends the vec from an iterator. /// /// # Panic /// /// Panics if the vec cannot hold all elements of the iterator. pub fn extend(&mut self, iter: I) where I: IntoIterator, { for elem in iter { self.push(elem).ok().unwrap() } } /// Clones and appends all elements in a slice to the `Vec`. /// /// Iterates over the slice `other`, clones each element, and then appends /// it to this `Vec`. The `other` vector is traversed in-order. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec = Vec::::new(); /// vec.push(1).unwrap(); /// vec.extend_from_slice(&[2, 3, 4]).unwrap(); /// assert_eq!(*vec, [1, 2, 3, 4]); /// ``` pub fn extend_from_slice(&mut self, other: &[T]) -> Result<(), ()> where T: Clone, { if self.len + other.len() > self.capacity() { // won't fit in the `Vec`; don't modify anything and return an error Err(()) } else { for elem in other { unsafe { self.push_unchecked(elem.clone()); } } Ok(()) } } /// Removes the last element from a vector and returns it, or `None` if it's empty pub fn pop(&mut self) -> Option { if self.len != 0 { Some(unsafe { self.pop_unchecked() }) } else { None } } /// Appends an `item` to the back of the collection /// /// Returns back the `item` if the vector is full pub fn push(&mut self, item: T) -> Result<(), T> { if self.len < self.capacity() { unsafe { self.push_unchecked(item) } Ok(()) } else { Err(item) } } /// Removes the last element from a vector and returns it /// /// # Safety /// /// This assumes the vec to have at least one element. pub unsafe fn pop_unchecked(&mut self) -> T { debug_assert!(!self.is_empty()); self.len -= 1; (self.buffer.get_unchecked_mut(self.len).as_ptr() as *const T).read() } /// Appends an `item` to the back of the collection /// /// # Safety /// /// This assumes the vec is not full. pub unsafe fn push_unchecked(&mut self, item: T) { // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory debug_assert!(!self.is_full()); *self.buffer.get_unchecked_mut(self.len) = MaybeUninit::new(item); self.len += 1; } /// Shortens the vector, keeping the first `len` elements and dropping the rest. pub fn truncate(&mut self, len: usize) { // This is safe because: // // * the slice passed to `drop_in_place` is valid; the `len > self.len` // case avoids creating an invalid slice, and // * the `len` of the vector is shrunk before calling `drop_in_place`, // such that no value will be dropped twice in case `drop_in_place` // were to panic once (if it panics twice, the program aborts). unsafe { // Note: It's intentional that this is `>` and not `>=`. // Changing it to `>=` has negative performance // implications in some cases. See rust-lang/rust#78884 for more. if len > self.len { return; } let remaining_len = self.len - len; let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len); self.len = len; ptr::drop_in_place(s); } } /// Resizes the Vec in-place so that len is equal to new_len. /// /// If new_len is greater than len, the Vec is extended by the /// difference, with each additional slot filled with value. If /// new_len is less than len, the Vec is simply truncated. /// /// See also [`resize_default`](struct.Vec.html#method.resize_default). 
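    ///
    /// Illustrative sketch (mirrors the behaviour exercised by the `resize_*` tests below):
    ///
    /// ```
    /// use heapless::Vec;
    ///
    /// let mut v: Vec<u8, 4> = Vec::new();
    /// v.resize(3, 7).unwrap(); // grow, filling the new slots with `7`
    /// assert_eq!(v.as_slice(), &[7, 7, 7]);
    /// v.resize(1, 0).unwrap(); // shrink; the fill value is ignored
    /// assert_eq!(v.as_slice(), &[7]);
    /// assert!(v.resize(5, 0).is_err()); // would exceed the capacity of 4
    /// ```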
pub fn resize(&mut self, new_len: usize, value: T) -> Result<(), ()> where T: Clone, { if new_len > self.capacity() { return Err(()); } if new_len > self.len { while self.len < new_len { self.push(value.clone()).ok(); } } else { self.truncate(new_len); } Ok(()) } /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. /// /// If `new_len` is greater than `len`, the `Vec` is extended by the /// difference, with each additional slot filled with `Default::default()`. /// If `new_len` is less than `len`, the `Vec` is simply truncated. /// /// See also [`resize`](struct.Vec.html#method.resize). pub fn resize_default(&mut self, new_len: usize) -> Result<(), ()> where T: Clone + Default, { self.resize(new_len, T::default()) } /// Forces the length of the vector to `new_len`. /// /// This is a low-level operation that maintains none of the normal /// invariants of the type. Normally changing the length of a vector /// is done using one of the safe operations instead, such as /// [`truncate`], [`resize`], [`extend`], or [`clear`]. /// /// [`truncate`]: #method.truncate /// [`resize`]: #method.resize /// [`extend`]: https://doc.rust-lang.org/stable/core/iter/trait.Extend.html#tymethod.extend /// [`clear`]: #method.clear /// /// # Safety /// /// - `new_len` must be less than or equal to [`capacity()`]. /// - The elements at `old_len..new_len` must be initialized. /// /// [`capacity()`]: #method.capacity /// /// # Examples /// /// This method can be useful for situations in which the vector /// is serving as a buffer for other code, particularly over FFI: /// /// ```no_run /// # #![allow(dead_code)] /// use heapless::Vec; /// /// # // This is just a minimal skeleton for the doc example; /// # // don't use this as a starting point for a real library. /// # pub struct StreamWrapper { strm: *mut core::ffi::c_void } /// # const Z_OK: i32 = 0; /// # extern "C" { /// # fn deflateGetDictionary( /// # strm: *mut core::ffi::c_void, /// # dictionary: *mut u8, /// # dictLength: *mut usize, /// # ) -> i32; /// # } /// # impl StreamWrapper { /// pub fn get_dictionary(&self) -> Option> { /// // Per the FFI method's docs, "32768 bytes is always enough". /// let mut dict = Vec::new(); /// let mut dict_length = 0; /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that: /// // 1. `dict_length` elements were initialized. /// // 2. `dict_length` <= the capacity (32_768) /// // which makes `set_len` safe to call. /// unsafe { /// // Make the FFI call... /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length); /// if r == Z_OK { /// // ...and update the length to what was initialized. /// dict.set_len(dict_length); /// Some(dict) /// } else { /// None /// } /// } /// } /// # } /// ``` /// /// While the following example is sound, there is a memory leak since /// the inner vectors were not freed prior to the `set_len` call: /// /// ``` /// use core::iter::FromIterator; /// use heapless::Vec; /// /// let mut vec = Vec::, 3>::from_iter( /// [ /// Vec::from_iter([1, 0, 0].iter().cloned()), /// Vec::from_iter([0, 1, 0].iter().cloned()), /// Vec::from_iter([0, 0, 1].iter().cloned()), /// ] /// .iter() /// .cloned() /// ); /// // SAFETY: /// // 1. `old_len..0` is empty so no elements need to be initialized. /// // 2. `0 <= capacity` always holds whatever `capacity` is. /// unsafe { /// vec.set_len(0); /// } /// ``` /// /// Normally, here, one would use [`clear`] instead to correctly drop /// the contents and thus not leak memory. 
pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); self.len = new_len } /// Removes an element from the vector and returns it. /// /// The removed element is replaced by the last element of the vector. /// /// This does not preserve ordering, but is O(1). /// /// # Panics /// /// Panics if `index` is out of bounds. /// /// # Examples /// /// ``` /// use heapless::Vec; ///// use heapless::consts::*; /// /// let mut v: Vec<_, 8> = Vec::new(); /// v.push("foo").unwrap(); /// v.push("bar").unwrap(); /// v.push("baz").unwrap(); /// v.push("qux").unwrap(); /// /// assert_eq!(v.swap_remove(1), "bar"); /// assert_eq!(&*v, ["foo", "qux", "baz"]); /// /// assert_eq!(v.swap_remove(0), "foo"); /// assert_eq!(&*v, ["baz", "qux"]); /// ``` pub fn swap_remove(&mut self, index: usize) -> T { assert!(index < self.len); unsafe { self.swap_remove_unchecked(index) } } /// Removes an element from the vector and returns it. /// /// The removed element is replaced by the last element of the vector. /// /// This does not preserve ordering, but is O(1). /// /// # Safety /// /// Assumes `index` within bounds. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut v: Vec<_, 8> = Vec::new(); /// v.push("foo").unwrap(); /// v.push("bar").unwrap(); /// v.push("baz").unwrap(); /// v.push("qux").unwrap(); /// /// assert_eq!(unsafe { v.swap_remove_unchecked(1) }, "bar"); /// assert_eq!(&*v, ["foo", "qux", "baz"]); /// /// assert_eq!(unsafe { v.swap_remove_unchecked(0) }, "foo"); /// assert_eq!(&*v, ["baz", "qux"]); /// ``` pub unsafe fn swap_remove_unchecked(&mut self, index: usize) -> T { let length = self.len(); debug_assert!(index < length); let value = ptr::read(self.as_ptr().add(index)); let base_ptr = self.as_mut_ptr(); ptr::copy(base_ptr.add(length - 1), base_ptr.add(index), 1); self.len -= 1; value } /// Returns true if the vec is full #[inline] pub fn is_full(&self) -> bool { self.len == self.capacity() } /// Returns true if the vec is empty #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns `true` if `needle` is a prefix of the Vec. /// /// Always returns `true` if `needle` is an empty slice. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let v: Vec<_, 8> = Vec::from_slice(b"abc").unwrap(); /// assert_eq!(v.starts_with(b""), true); /// assert_eq!(v.starts_with(b"ab"), true); /// assert_eq!(v.starts_with(b"bc"), false); /// ``` #[inline] pub fn starts_with(&self, needle: &[T]) -> bool where T: PartialEq, { let n = needle.len(); self.len >= n && needle == &self[..n] } /// Returns `true` if `needle` is a suffix of the Vec. /// /// Always returns `true` if `needle` is an empty slice. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let v: Vec<_, 8> = Vec::from_slice(b"abc").unwrap(); /// assert_eq!(v.ends_with(b""), true); /// assert_eq!(v.ends_with(b"ab"), false); /// assert_eq!(v.ends_with(b"bc"), true); /// ``` #[inline] pub fn ends_with(&self, needle: &[T]) -> bool where T: PartialEq, { let (v, n) = (self.len(), needle.len()); v >= n && needle == &self[v - n..] } /// Inserts an element at position `index` within the vector, shifting all /// elements after it to the right. /// /// Returns back the `element` if the vector is full. /// /// # Panics /// /// Panics if `index > len`. 
/// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3]).unwrap(); /// vec.insert(1, 4); /// assert_eq!(vec, [1, 4, 2, 3]); /// vec.insert(4, 5); /// assert_eq!(vec, [1, 4, 2, 3, 5]); /// ``` pub fn insert(&mut self, index: usize, element: T) -> Result<(), T> { let len = self.len(); if index > len { panic!( "insertion index (is {}) should be <= len (is {})", index, len ); } // check there's space for the new element if self.is_full() { return Err(element); } unsafe { // infallible // The spot to put the new value { let p = self.as_mut_ptr().add(index); // Shift everything over to make space. (Duplicating the // `index`th element into two consecutive places.) ptr::copy(p, p.offset(1), len - index); // Write it in, overwriting the first copy of the `index`th // element. ptr::write(p, element); } self.set_len(len + 1); } Ok(()) } /// Removes and returns the element at position `index` within the vector, /// shifting all elements after it to the left. /// /// Note: Because this shifts over the remaining elements, it has a /// worst-case performance of *O*(*n*). If you don't need the order of /// elements to be preserved, use [`swap_remove`] instead. If you'd like to /// remove elements from the beginning of the `Vec`, consider using /// [`Deque::pop_front`] instead. /// /// [`swap_remove`]: Vec::swap_remove /// [`Deque::pop_front`]: crate::Deque::pop_front /// /// # Panics /// /// Panics if `index` is out of bounds. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut v: Vec<_, 8> = Vec::from_slice(&[1, 2, 3]).unwrap(); /// assert_eq!(v.remove(1), 2); /// assert_eq!(v, [1, 3]); /// ``` pub fn remove(&mut self, index: usize) -> T { let len = self.len(); if index >= len { panic!("removal index (is {}) should be < len (is {})", index, len); } unsafe { // infallible let ret; { // the place we are taking from. let ptr = self.as_mut_ptr().add(index); // copy it out, unsafely having a copy of the value on // the stack and in the vector at the same time. ret = ptr::read(ptr); // Shift everything down to fill in that spot. ptr::copy(ptr.offset(1), ptr, len - index - 1); } self.set_len(len - 1); ret } } /// Retains only the elements specified by the predicate. /// /// In other words, remove all elements `e` for which `f(&e)` returns `false`. /// This method operates in place, visiting each element exactly once in the /// original order, and preserves the order of the retained elements. /// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3, 4]).unwrap(); /// vec.retain(|&x| x % 2 == 0); /// assert_eq!(vec, [2, 4]); /// ``` /// /// Because the elements are visited exactly once in the original order, /// external state may be used to decide which elements to keep. /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3, 4, 5]).unwrap(); /// let keep = [false, true, true, false, true]; /// let mut iter = keep.iter(); /// vec.retain(|_| *iter.next().unwrap()); /// assert_eq!(vec, [2, 3, 5]); /// ``` pub fn retain(&mut self, mut f: F) where F: FnMut(&T) -> bool, { self.retain_mut(|elem| f(elem)); } /// Retains only the elements specified by the predicate, passing a mutable reference to it. /// /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`. /// This method operates in place, visiting each element exactly once in the /// original order, and preserves the order of the retained elements. 
/// /// # Examples /// /// ``` /// use heapless::Vec; /// /// let mut vec: Vec<_, 8> = Vec::from_slice(&[1, 2, 3, 4]).unwrap(); /// vec.retain_mut(|x| if *x <= 3 { /// *x += 1; /// true /// } else { /// false /// }); /// assert_eq!(vec, [2, 3, 4]); /// ``` pub fn retain_mut(&mut self, mut f: F) where F: FnMut(&mut T) -> bool, { let original_len = self.len(); // Avoid double drop if the drop guard is not executed, // since we may make some holes during the process. unsafe { self.set_len(0) }; // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked] // |<- processed len ->| ^- next to check // |<- deleted cnt ->| // |<- original_len ->| // Kept: Elements which predicate returns true on. // Hole: Moved or dropped element slot. // Unchecked: Unchecked valid elements. // // This drop guard will be invoked when predicate or `drop` of element panicked. // It shifts unchecked elements to cover holes and `set_len` to the correct length. // In cases when predicate and `drop` never panick, it will be optimized out. struct BackshiftOnDrop<'a, T, const N: usize> { v: &'a mut Vec, processed_len: usize, deleted_cnt: usize, original_len: usize, } impl Drop for BackshiftOnDrop<'_, T, N> { fn drop(&mut self) { if self.deleted_cnt > 0 { // SAFETY: Trailing unchecked items must be valid since we never touch them. unsafe { ptr::copy( self.v.as_ptr().add(self.processed_len), self.v .as_mut_ptr() .add(self.processed_len - self.deleted_cnt), self.original_len - self.processed_len, ); } } // SAFETY: After filling holes, all items are in contiguous memory. unsafe { self.v.set_len(self.original_len - self.deleted_cnt); } } } let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len, }; fn process_loop( original_len: usize, f: &mut F, g: &mut BackshiftOnDrop<'_, T, N>, ) where F: FnMut(&mut T) -> bool, { while g.processed_len != original_len { let p = g.v.as_mut_ptr(); // SAFETY: Unchecked element must be valid. let cur = unsafe { &mut *p.add(g.processed_len) }; if !f(cur) { // Advance early to avoid double drop if `drop_in_place` panicked. g.processed_len += 1; g.deleted_cnt += 1; // SAFETY: We never touch this element again after dropped. unsafe { ptr::drop_in_place(cur) }; // We already advanced the counter. if DELETED { continue; } else { break; } } if DELETED { // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element. // We use copy for move, and never touch this element again. unsafe { let hole_slot = p.add(g.processed_len - g.deleted_cnt); ptr::copy_nonoverlapping(cur, hole_slot, 1); } } g.processed_len += 1; } } // Stage 1: Nothing was deleted. process_loop::(original_len, &mut f, &mut g); // Stage 2: Some elements were deleted. process_loop::(original_len, &mut f, &mut g); // All item are processed. This can be optimized to `set_len` by LLVM. 
drop(g); } } // Trait implementations impl Default for Vec { fn default() -> Self { Self::new() } } impl fmt::Debug for Vec where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { <[T] as fmt::Debug>::fmt(self, f) } } impl fmt::Write for Vec { fn write_str(&mut self, s: &str) -> fmt::Result { match self.extend_from_slice(s.as_bytes()) { Ok(()) => Ok(()), Err(_) => Err(fmt::Error), } } } impl Drop for Vec { fn drop(&mut self) { // We drop each element used in the vector by turning into a &mut[T] unsafe { ptr::drop_in_place(self.as_mut_slice()); } } } impl<'a, T: Clone, const N: usize> TryFrom<&'a [T]> for Vec { type Error = (); fn try_from(slice: &'a [T]) -> Result { Vec::from_slice(slice) } } impl Extend for Vec { fn extend(&mut self, iter: I) where I: IntoIterator, { self.extend(iter) } } impl<'a, T, const N: usize> Extend<&'a T> for Vec where T: 'a + Copy, { fn extend(&mut self, iter: I) where I: IntoIterator, { self.extend(iter.into_iter().cloned()) } } impl hash::Hash for Vec where T: core::hash::Hash, { fn hash(&self, state: &mut H) { <[T] as hash::Hash>::hash(self, state) } } impl hash32::Hash for Vec where T: hash32::Hash, { fn hash(&self, state: &mut H) { <[T] as hash32::Hash>::hash(self, state) } } impl<'a, T, const N: usize> IntoIterator for &'a Vec { type Item = &'a T; type IntoIter = slice::Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, T, const N: usize> IntoIterator for &'a mut Vec { type Item = &'a mut T; type IntoIter = slice::IterMut<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl FromIterator for Vec { fn from_iter(iter: I) -> Self where I: IntoIterator, { let mut vec = Vec::new(); for i in iter { vec.push(i).ok().expect("Vec::from_iter overflow"); } vec } } /// An iterator that moves out of an [`Vec`][`Vec`]. /// /// This struct is created by calling the `into_iter` method on [`Vec`][`Vec`]. 
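    ///
    /// For example (illustrative):
    ///
    /// ```
    /// use heapless::Vec;
    ///
    /// let v: Vec<u8, 4> = Vec::from_slice(&[1, 2, 3]).unwrap();
    /// let mut iter = v.into_iter(); // `v` is moved into the iterator
    /// assert_eq!(iter.next(), Some(1));
    /// assert_eq!(iter.next(), Some(2));
    /// assert_eq!(iter.next(), Some(3));
    /// assert_eq!(iter.next(), None);
    /// ```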
/// /// [`Vec`]: (https://doc.rust-lang.org/std/vec/struct.Vec.html) /// pub struct IntoIter { vec: Vec, next: usize, } impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { if self.next < self.vec.len() { let item = unsafe { (self.vec.buffer.get_unchecked_mut(self.next).as_ptr() as *const T).read() }; self.next += 1; Some(item) } else { None } } } impl Clone for IntoIter where T: Clone, { fn clone(&self) -> Self { let mut vec = Vec::new(); if self.next < self.vec.len() { let s = unsafe { slice::from_raw_parts( (self.vec.buffer.as_ptr() as *const T).add(self.next), self.vec.len() - self.next, ) }; vec.extend_from_slice(s).ok(); } Self { vec, next: 0 } } } impl Drop for IntoIter { fn drop(&mut self) { unsafe { // Drop all the elements that have not been moved out of vec ptr::drop_in_place(&mut self.vec.as_mut_slice()[self.next..]); // Prevent dropping of other elements self.vec.len = 0; } } } impl IntoIterator for Vec { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { vec: self, next: 0 } } } impl PartialEq> for Vec where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(self, &**other) } } // Vec == [B] impl PartialEq<[B]> for Vec where A: PartialEq, { fn eq(&self, other: &[B]) -> bool { <[A]>::eq(self, &other[..]) } } // [B] == Vec impl PartialEq> for [B] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == &[B] impl PartialEq<&[B]> for Vec where A: PartialEq, { fn eq(&self, other: &&[B]) -> bool { <[A]>::eq(self, &other[..]) } } // &[B] == Vec impl PartialEq> for &[B] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == &mut [B] impl PartialEq<&mut [B]> for Vec where A: PartialEq, { fn eq(&self, other: &&mut [B]) -> bool { <[A]>::eq(self, &other[..]) } } // &mut [B] == Vec impl PartialEq> for &mut [B] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == [B; M] // Equality does not require equal capacity impl PartialEq<[B; M]> for Vec where A: PartialEq, { fn eq(&self, other: &[B; M]) -> bool { <[A]>::eq(self, &other[..]) } } // [B; M] == Vec // Equality does not require equal capacity impl PartialEq> for [B; M] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Vec == &[B; M] // Equality does not require equal capacity impl PartialEq<&[B; M]> for Vec where A: PartialEq, { fn eq(&self, other: &&[B; M]) -> bool { <[A]>::eq(self, &other[..]) } } // &[B; M] == Vec // Equality does not require equal capacity impl PartialEq> for &[B; M] where A: PartialEq, { fn eq(&self, other: &Vec) -> bool { <[A]>::eq(other, &self[..]) } } // Implements Eq if underlying data is Eq impl Eq for Vec where T: Eq {} impl PartialOrd> for Vec where T: PartialOrd, { fn partial_cmp(&self, other: &Vec) -> Option { PartialOrd::partial_cmp(&**self, &**other) } } impl Ord for Vec where T: Ord, { #[inline] fn cmp(&self, other: &Self) -> Ordering { Ord::cmp(&**self, &**other) } } impl ops::Deref for Vec { type Target = [T]; fn deref(&self) -> &[T] { self.as_slice() } } impl ops::DerefMut for Vec { fn deref_mut(&mut self) -> &mut [T] { self.as_mut_slice() } } impl AsRef> for Vec { #[inline] fn as_ref(&self) -> &Self { self } } impl AsMut> for Vec { #[inline] fn as_mut(&mut self) -> &mut Self { self } } impl AsRef<[T]> for Vec { #[inline] fn as_ref(&self) -> &[T] { self } } impl AsMut<[T]> for Vec { #[inline] fn as_mut(&mut self) -> &mut [T] { self } } impl Clone for Vec 
where T: Clone, { fn clone(&self) -> Self { self.clone() } } #[cfg(test)] mod tests { use crate::Vec; use core::fmt::Write; #[test] fn static_new() { static mut _V: Vec = Vec::new(); } #[test] fn stack_new() { let mut _v: Vec = Vec::new(); } #[test] fn is_full_empty() { let mut v: Vec = Vec::new(); assert!(v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(!v.is_full()); v.push(1).unwrap(); assert!(!v.is_empty()); assert!(v.is_full()); } #[test] fn drop() { droppable!(); { let mut v: Vec = Vec::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); } assert_eq!(Droppable::count(), 0); { let mut v: Vec = Vec::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } assert_eq!(Droppable::count(), 0); } #[test] fn eq() { let mut xs: Vec = Vec::new(); let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); xs.push(1).unwrap(); ys.push(1).unwrap(); assert_eq!(xs, ys); } #[test] fn cmp() { let mut xs: Vec = Vec::new(); let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); xs.push(1).unwrap(); ys.push(2).unwrap(); assert!(xs < ys); } #[test] fn cmp_heterogenous_size() { let mut xs: Vec = Vec::new(); let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); xs.push(1).unwrap(); ys.push(2).unwrap(); assert!(xs < ys); } #[test] fn cmp_with_arrays_and_slices() { let mut xs: Vec = Vec::new(); xs.push(1).unwrap(); let array = [1]; assert_eq!(xs, array); assert_eq!(array, xs); assert_eq!(xs, array.as_slice()); assert_eq!(array.as_slice(), xs); assert_eq!(xs, &array); assert_eq!(&array, xs); let longer_array = [1; 20]; assert_ne!(xs, longer_array); assert_ne!(longer_array, xs); } #[test] fn full() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); assert!(v.push(4).is_err()); } #[test] fn iter() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); let mut items = v.iter(); assert_eq!(items.next(), Some(&0)); assert_eq!(items.next(), Some(&1)); assert_eq!(items.next(), Some(&2)); assert_eq!(items.next(), Some(&3)); assert_eq!(items.next(), None); } #[test] fn iter_mut() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); let mut items = v.iter_mut(); assert_eq!(items.next(), Some(&mut 0)); assert_eq!(items.next(), Some(&mut 1)); assert_eq!(items.next(), Some(&mut 2)); assert_eq!(items.next(), Some(&mut 3)); assert_eq!(items.next(), None); } #[test] fn collect_from_iter() { let slice = &[1, 2, 3]; let vec: Vec = slice.iter().cloned().collect(); assert_eq!(&vec, slice); } #[test] #[should_panic] fn collect_from_iter_overfull() { let slice = &[1, 2, 3]; let _vec = slice.iter().cloned().collect::>(); } #[test] fn iter_move() { let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); v.push(3).unwrap(); let mut items = v.into_iter(); assert_eq!(items.next(), Some(0)); assert_eq!(items.next(), Some(1)); assert_eq!(items.next(), Some(2)); assert_eq!(items.next(), Some(3)); assert_eq!(items.next(), None); } #[test] fn iter_move_drop() { droppable!(); { let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let mut items = vec.into_iter(); // Move all let _ = items.next(); let _ = items.next(); } 
assert_eq!(Droppable::count(), 0); { let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let _items = vec.into_iter(); // Move none } assert_eq!(Droppable::count(), 0); { let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let mut items = vec.into_iter(); let _ = items.next(); // Move partly } assert_eq!(Droppable::count(), 0); } #[test] fn push_and_pop() { let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); assert_eq!(v.pop(), None); assert_eq!(v.len(), 0); v.push(0).unwrap(); assert_eq!(v.len(), 1); assert_eq!(v.pop(), Some(0)); assert_eq!(v.len(), 0); assert_eq!(v.pop(), None); assert_eq!(v.len(), 0); } #[test] fn resize_size_limit() { let mut v: Vec = Vec::new(); v.resize(0, 0).unwrap(); v.resize(4, 0).unwrap(); v.resize(5, 0).err().expect("full"); } #[test] fn resize_length_cases() { let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); // Grow by 1 v.resize(1, 0).unwrap(); assert_eq!(v.len(), 1); // Grow by 2 v.resize(3, 0).unwrap(); assert_eq!(v.len(), 3); // Resize to current size v.resize(3, 0).unwrap(); assert_eq!(v.len(), 3); // Shrink by 1 v.resize(2, 0).unwrap(); assert_eq!(v.len(), 2); // Shrink by 2 v.resize(0, 0).unwrap(); assert_eq!(v.len(), 0); } #[test] fn resize_contents() { let mut v: Vec = Vec::new(); // New entries take supplied value when growing v.resize(1, 17).unwrap(); assert_eq!(v[0], 17); // Old values aren't changed when growing v.resize(2, 18).unwrap(); assert_eq!(v[0], 17); assert_eq!(v[1], 18); // Old values aren't changed when length unchanged v.resize(2, 0).unwrap(); assert_eq!(v[0], 17); assert_eq!(v[1], 18); // Old values aren't changed when shrinking v.resize(1, 0).unwrap(); assert_eq!(v[0], 17); } #[test] fn resize_default() { let mut v: Vec = Vec::new(); // resize_default is implemented using resize, so just check the // correct value is being written. 
v.resize_default(1).unwrap(); assert_eq!(v[0], 0); } #[test] fn write() { let mut v: Vec = Vec::new(); write!(v, "{:x}", 1234).unwrap(); assert_eq!(&v[..], b"4d2"); } #[test] fn extend_from_slice() { let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); v.extend_from_slice(&[1, 2]).unwrap(); assert_eq!(v.len(), 2); assert_eq!(v.as_slice(), &[1, 2]); v.extend_from_slice(&[3]).unwrap(); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); assert!(v.extend_from_slice(&[4, 5]).is_err()); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); } #[test] fn from_slice() { // Successful construction let v: Vec = Vec::from_slice(&[1, 2, 3]).unwrap(); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); // Slice too large assert!(Vec::::from_slice(&[1, 2, 3]).is_err()); } #[test] fn starts_with() { let v: Vec<_, 8> = Vec::from_slice(b"ab").unwrap(); assert!(v.starts_with(&[])); assert!(v.starts_with(b"")); assert!(v.starts_with(b"a")); assert!(v.starts_with(b"ab")); assert!(!v.starts_with(b"abc")); assert!(!v.starts_with(b"ba")); assert!(!v.starts_with(b"b")); } #[test] fn ends_with() { let v: Vec<_, 8> = Vec::from_slice(b"ab").unwrap(); assert!(v.ends_with(&[])); assert!(v.ends_with(b"")); assert!(v.ends_with(b"b")); assert!(v.ends_with(b"ab")); assert!(!v.ends_with(b"abc")); assert!(!v.ends_with(b"ba")); assert!(!v.ends_with(b"a")); } #[test] fn zero_capacity() { let mut v: Vec = Vec::new(); // Validate capacity assert_eq!(v.capacity(), 0); // Make sure there is no capacity assert!(v.push(1).is_err()); // Validate length assert_eq!(v.len(), 0); // Validate pop assert_eq!(v.pop(), None); // Validate slice assert_eq!(v.as_slice(), &[]); // Validate empty assert!(v.is_empty()); // Validate full assert!(v.is_full()); } } heapless-0.7.16/suppressions.txt000064400000000000000000000003540072674642500150730ustar 00000000000000race:std::panic::catch_unwind race:std::thread::scope # std::thread::spawn false positive; seen on Ubuntu 20.04 but not on Arch Linux (2022-04-29) race:drop_in_place*JoinHandle race:alloc::sync::Arc<*>::drop_slow race:__call_tls_dtors heapless-0.7.16/tests/cpass.rs000064400000000000000000000007120072674642500143740ustar 00000000000000//! 
//! Collections of `Send`-able things are `Send`

use heapless::{
    spsc::{Consumer, Producer, Queue},
    HistoryBuffer, Vec,
};

#[test]
fn send() {
    struct IsSend;

    unsafe impl Send for IsSend {}

    fn is_send<T>()
    where
        T: Send,
    {
    }

    is_send::<Consumer<IsSend, 4>>();
    is_send::<Producer<IsSend, 4>>();
    is_send::<Queue<IsSend, 4>>();
    is_send::<Vec<IsSend, 4>>();
    is_send::<HistoryBuffer<IsSend, 4>>();
}
heapless-0.7.16/tests/tsan.rs
#![deny(rust_2018_compatibility)]
#![deny(rust_2018_idioms)]
#![deny(warnings)]

use std::thread;

use heapless::spsc;

#[test]
fn once() {
    static mut RB: spsc::Queue<i32, 4> = spsc::Queue::new();

    let rb = unsafe { &mut RB };

    rb.enqueue(0).unwrap();

    let (mut p, mut c) = rb.split();

    p.enqueue(1).unwrap();

    thread::spawn(move || {
        p.enqueue(1).unwrap();
    });

    thread::spawn(move || {
        c.dequeue().unwrap();
    });
}

#[test]
fn twice() {
    static mut RB: spsc::Queue<i32, 8> = spsc::Queue::new();

    let rb = unsafe { &mut RB };

    rb.enqueue(0).unwrap();
    rb.enqueue(1).unwrap();

    let (mut p, mut c) = rb.split();

    thread::spawn(move || {
        p.enqueue(2).unwrap();
        p.enqueue(3).unwrap();
    });

    thread::spawn(move || {
        c.dequeue().unwrap();
        c.dequeue().unwrap();
    });
}

#[test]
#[cfg(unstable_channel)]
fn scoped() {
    let mut rb: spsc::Queue<i32, 4> = spsc::Queue::new();

    rb.enqueue(0).unwrap();

    {
        let (mut p, mut c) = rb.split();

        thread::scope(move |scope| {
            scope.spawn(move || {
                p.enqueue(1).unwrap();
            });

            scope.spawn(move || {
                c.dequeue().unwrap();
            });
        });
    }

    rb.dequeue().unwrap();
}

#[test]
#[cfg_attr(miri, ignore)] // too slow
#[cfg(unstable_channel)]
fn contention() {
    const N: usize = 1024;

    let mut rb: spsc::Queue<u8, 4> = spsc::Queue::new();

    {
        let (mut p, mut c) = rb.split();

        thread::scope(move |scope| {
            scope.spawn(move || {
                let mut sum: u32 = 0;

                for i in 0..(2 * N) {
                    sum = sum.wrapping_add(i as u32);
                    while let Err(_) = p.enqueue(i as u8) {}
                }

                println!("producer: {}", sum);
            });

            scope.spawn(move || {
                let mut sum: u32 = 0;

                for _ in 0..(2 * N) {
                    loop {
                        match c.dequeue() {
                            Some(v) => {
                                sum = sum.wrapping_add(v as u32);
                                break;
                            }
                            _ => {}
                        }
                    }
                }

                println!("consumer: {}", sum);
            });
        });
    }

    assert!(rb.is_empty());
}

#[test]
#[cfg_attr(miri, ignore)] // too slow
#[cfg(unstable_channel)]
fn mpmc_contention() {
    use std::sync::mpsc;

    use heapless::mpmc::Q64;

    const N: u32 = 64;

    static Q: Q64<u32> = Q64::new();

    let (s, r) = mpsc::channel();
    thread::scope(|scope| {
        let s1 = s.clone();
        scope.spawn(move || {
            let mut sum: u32 = 0;

            for i in 0..(16 * N) {
                sum = sum.wrapping_add(i);
                println!("enqueue {}", i);
                while let Err(_) = Q.enqueue(i) {}
            }

            s1.send(sum).unwrap();
        });

        let s2 = s.clone();
        scope.spawn(move || {
            let mut sum: u32 = 0;

            for _ in 0..(16 * N) {
                loop {
                    match Q.dequeue() {
                        Some(v) => {
                            sum = sum.wrapping_add(v);
                            println!("dequeue {}", v);
                            break;
                        }
                        _ => {}
                    }
                }
            }

            s2.send(sum).unwrap();
        });
    });

    assert_eq!(r.recv().unwrap(), r.recv().unwrap());
}

#[test]
#[cfg_attr(miri, ignore)] // too slow
#[cfg(unstable_channel)]
fn unchecked() {
    const N: usize = 1024;

    let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();

    for _ in 0..N / 2 - 1 {
        rb.enqueue(1).unwrap();
    }

    {
        let (mut p, mut c) = rb.split();

        thread::scope(move |scope| {
            scope.spawn(move || {
                for _ in 0..N / 2 - 1 {
                    p.enqueue(2).unwrap();
                }
            });

            scope.spawn(move || {
                let mut sum: usize = 0;

                for _ in 0..N / 2 - 1 {
                    sum = sum.wrapping_add(usize::from(c.dequeue().unwrap()));
                }

                assert_eq!(sum, N / 2 - 1);
            });
        });
    }

    assert_eq!(rb.len(), N / 2 - 1);
}

#[test]
fn len_properly_wraps() {
    const N: usize = 4;
    let mut rb: spsc::Queue<i32, N> = spsc::Queue::new();

    rb.enqueue(1).unwrap();
    assert_eq!(rb.len(), 1);
    rb.dequeue();
    assert_eq!(rb.len(), 0);
    rb.enqueue(2).unwrap();
    assert_eq!(rb.len(), 1);

    rb.enqueue(3).unwrap();
    assert_eq!(rb.len(), 2);

    rb.enqueue(4).unwrap();
    assert_eq!(rb.len(), 3);
}

#[test]
fn iterator_properly_wraps() {
    const N: usize = 4;
    let mut rb: spsc::Queue<i32, N> = spsc::Queue::new();

    rb.enqueue(1).unwrap();
    rb.dequeue();
    rb.enqueue(2).unwrap();
    rb.enqueue(3).unwrap();
    rb.enqueue(4).unwrap();

    let expected = [2, 3, 4];
    let mut actual = [0, 0, 0];
    for (idx, el) in rb.iter().enumerate() {
        actual[idx] = *el;
    }
    assert_eq!(expected, actual)
}

#[cfg(all(target_arch = "x86_64", feature = "x86-sync-pool"))]
#[test]
fn pool() {
    use heapless::pool::singleton::Pool as _;

    static mut M: [u8; (N + 1) * 8] = [0; (N + 1) * 8];
    const N: usize = 16 * 1024;

    heapless::pool!(A: [u8; 8]);

    A::grow(unsafe { &mut M });

    thread::scope(move |scope| {
        scope.spawn(move || {
            for _ in 0..N / 4 {
                let a = A::alloc().unwrap();
                let b = A::alloc().unwrap();
                drop(a);
                let b = b.init([1; 8]);
                drop(b);
            }
        });

        scope.spawn(move || {
            for _ in 0..N / 2 {
                let a = A::alloc().unwrap();
                let a = a.init([2; 8]);
                drop(a);
            }
        });
    });
}
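The `pool` test above exercises the singleton memory pool under thread contention. For readers unfamiliar with that API, the following single-threaded sketch shows the same alloc → init → drop cycle in isolation. It is an illustrative sketch and not part of the crate sources: the pool name `P`, the 64-byte backing buffer, and the fill value are arbitrary choices made here, and it assumes the same x86_64 / `x86-sync-pool` gate as the test.

// Illustrative sketch (not part of the crate): single-threaded use of the
// singleton pool API exercised by the `pool` test above.
#[cfg(all(target_arch = "x86_64", feature = "x86-sync-pool"))]
#[test]
fn pool_single_threaded_sketch() {
    use heapless::pool::singleton::Pool as _;

    // Backing storage handed to the pool; 64 bytes is an arbitrary size,
    // large enough for a handful of 8-byte blocks.
    static mut MEMORY: [u8; 64] = [0; 64];

    // Declare a singleton pool `P` whose blocks hold `[u8; 8]`.
    heapless::pool!(P: [u8; 8]);

    // Give the pool its memory; `grow` returns how many blocks were added.
    assert!(P::grow(unsafe { &mut MEMORY }) >= 1);

    // Allocate an uninitialized block and initialize it.
    let block = P::alloc().unwrap().init([0xAA; 8]);
    assert_eq!(*block, [0xAA; 8]);

    // Dropping the box returns the block to the pool, so a subsequent
    // allocation succeeds again.
    drop(block);
    assert!(P::alloc().is_some());
}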