static-alloc-0.2.5/.cargo_vcs_info.json0000644000000001520000000000100134150ustar { "git": { "sha1": "cf7e11b2432eac0701900779784325ea3e8cf569" }, "path_in_vcs": "static-alloc" }static-alloc-0.2.5/Cargo.toml0000644000000030020000000000100114100ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "static-alloc" version = "0.2.5" authors = ["Andreas Molzer "] description = "A bump allocator on static memory for the alloc-traits crate" documentation = "https://docs.rs/static-alloc" readme = "Readme.md" categories = [ "embedded", "memory-management", "no-std", ] license = "MIT OR Apache-2.0 OR Zlib" repository = "https://github.com/HeroicKatora/static-alloc" [package.metadata.docs.rs] all-features = true [[test]] name = "vec" path = "tests/vec.rs" [[test]] name = "vec_try" path = "tests/vec_try.rs" required-features = ["nightly_try_reserve"] [[test]] name = "huuuuuge" path = "tests/alloc/huuuuuge.rs" required-features = ["DISABLED"] [[test]] name = "unsync" path = "tests/unsync.rs" [[test]] name = "chain" path = "tests/chain.rs" required-features = ["nightly_chain"] [dependencies.alloc-traits] version = "0.1.0" [dependencies.atomic-polyfill] version = "1" optional = true [features] alloc = [] nightly_chain = ["alloc"] nightly_try_reserve = [] polyfill = ["atomic-polyfill"] static-alloc-0.2.5/Cargo.toml.orig000064400000000000000000000027731046102023000151070ustar 00000000000000[package] name = "static-alloc" version = "0.2.5" description = "A bump allocator on static memory for the alloc-traits crate" authors = ["Andreas Molzer "] edition = "2018" license = "MIT OR Apache-2.0 OR Zlib" documentation = "https://docs.rs/static-alloc" repository = "https://github.com/HeroicKatora/static-alloc" readme = "Readme.md" categories = ["embedded", "memory-management", "no-std"] [package.metadata.docs.rs] all-features = true [dependencies] alloc-traits = { path = "../alloc-traits", version = "0.1.0" } atomic-polyfill = { version = "1", optional = true } [features] alloc = [] # For apis depending on "try_reserve" (#48043). # Currently only used in a test for ensure future opportunities. nightly_try_reserve = [] # Enables the `unsync::Chain` module. Note that this is explicitly outside the # SemVer stability guarantees! nightly_chain = ["alloc"] # Enables the use of the `atomic-polyfill` crate to support targets without # native atomic CAS operations polyfill = ["atomic-polyfill"] # Tests that test `Bump` as GlobalAlloc. Must be in a separate test each. [[test]] name = "vec" path = "tests/vec.rs" [[test]] name = "vec_try" path = "tests/vec_try.rs" required-features = ["nightly_try_reserve"] [[test]] name = "huuuuuge" path = "tests/alloc/huuuuuge.rs" # Disabled because it chokes rustc. Just a PoC. required-features = ["DISABLED"] [[test]] name = "unsync" path = "tests/unsync.rs" [[test]] name = "chain" path = "tests/chain.rs" required-features = ["nightly_chain"] static-alloc-0.2.5/Changes.md000064400000000000000000000110631046102023000141020ustar 00000000000000# v0.2.5 - Bump `atomic-polyfill` to `1`, as discovered during Debian packaging. 
# v0.2.4 - The Safety Requirements for `Allocation::leak` have been refined. It's now noted explicitly that that caller is responsible for ensuring that the public pointer field has not active dependent pointers. - Added `Allocation::boxed` to wrap a value into a destructing owner. - Added `LeakBox::from_mut_unchecked` which doesn't risk an inappropriate lifetime compared to a pointer cast. - Added `unsync::Bump::{from_mem,from_mut_unchecked}` to construct the unsized type without an allocation. - Added `unsync::Bump::into_mem` to retrieve the memory owned by the allocator. - Added `unsync::Bump::capacity`. - Added `unsync::Bump::data_ptr` as an unsafe escape hatch to the memory region. - Added `unsync::Bump::get_unchecked` which wraps `data_ptr` into a very slightly safer API to access previously leaked allocations. # v0.2.3 - Added support for `thumbv6m` targets through use of `atomic-polyfill` - Added `LeakBox::into_pin`, equivalent of `Box::into_pin` - Added dedicated guide-like documentation, explaining an example in more detail. These are available when building `cargo doc` or more generally with the doc configuration. - Fixed use of deprecated `compare_and_swap`. # v0.2.2 - Added a synchronous Bump allocator. Its interface is similar to the asynchronous version but at the same time tries to clean up minor issues, such as moving value on failed allocation. - Added `LeakBox`, encapsulating a single value allocated within an automatically cleaned up buffer (e.g. on the stack). Compared to `Allocation` it has safe methods for initialization and deinitialization. - Added `Chain`, an unstable struct for chaining several bump allocators such that you can allocate from one until it is empty then switching to the next. # v0.2.1 - Fixed the documentation to refer to `without-alloc` # v0.2.0 Remodeled the crate into independent subcrates - This crate maintains the allocator based on a static array in the binary - `alloc-traits` contains the interface for non-global allocations from it - All data structures have been moved to `without-alloc` # v0.1.2 - Implemented `DoubleEndedIterator` for `fixed_vec::Drain` - Added some specialization trait implementions for `fixed_vec::Drain` - Implemented `Extend` for `FixedVec` # v0.1.1 - Fixed UB in `FixedVec::fill`. This would drop uninitialized instances while filling the vector, potentially leading to arbitrary code execution. Thanks @danielhenrymantilla for the report. ## v0.1.0 - Added `Uninit::from_maybe_uninit_slice`, slice variant of `from_maybe_uninit`. - Added `Uninit::into_maybe_uninit` as the inverse of the constructor. - Added `FixedVec::drain` and `Drain`: work just like the standard ones. - Renamed `FixedVec::from_available` to `FixedVec::from_unaligned` - Added `FixedVec::split_borrowed` to split without affecting capacity. ## v0.0.7 - Introduces `Rc`, a reference counter owned value. - Improves `FixedVec` by adding many standard trait impls. - Added `Bump` methods: `rc` and `fixed_vec` to create respective containers. - Added `FixedVec` methods: `truncate` and `clear`. - Added `Uninit::fits` to test if a `cast` would succeed without performing it. ## v0.0.6 - Introduces `Box`, an owned value within an `Uninit` allocation - Fixed `Uninit` to never rely on references internally. Unfortunately, this means that unsized `Uninit` currently do no track the size of their pointer. That will return sooner or later when the resolution of Rust #36925 provides `ptr::slice_from_raw_parts`. 
## v0.0.5 - Introduces `Uninit`, a lifetime tracked pointed to uninitialized memory. - Introduces `FixedVec`, a capacity bound `Vec` equivalent built on the above. ## v0.0.4 - Fixed a bug where ZSTs had their Drop impl executed while leaking them - Provides a new interface: `level`, `alloc_at` and `leak_at`. It ensures that an allocation is the first allocation to happen after a particular point in time, and no other allocation is placed between it and the preceding one. ## v0.0.3 - Added `Bump::leak`, a new interface to directly allocate values. Avoid requiring `Box` and `Box::leak` from `alloc` and allows usage a `Bump` with limited lifetime such as on the stack. ## v0.0.2 - `Bump::take` has been renamed to `Bump::alloc`. - Added `uninit` and `zeroed` constructors as available for `MaybeUninit` - Made the `new` constructor safe as no uninitialized bytes are read - The nightly `try_reserve` feature is now called `nightly_try_reserve` Note: It is only used in a test and has no influence on the api static-alloc-0.2.5/Readme.md000064400000000000000000000056051046102023000137340ustar 00000000000000# static-alloc [![Crates.io Status](https://img.shields.io/crates/v/static-alloc.svg)](https://crates.io/crates/static-alloc) [![Docs.rs Status](https://docs.rs/static-alloc/badge.svg)](https://docs.rs/static-alloc/) [![License](https://img.shields.io/badge/license-Zlib-blue.svg)](https://raw.githubusercontent.com/HeroicKatora/static-alloc/master/LICENSE.ZLIB) [![CI Status](https://api.cirrus-ci.com/github/HeroicKatora/static-alloc.svg)](https://cirrus-ci.com/github/HeroicKatora/static-alloc) General purpose global allocator(s) with static, inline storage. ## Goal and Target Platform Provides an allocator for extremely resource constrained environments where the only memory guaranteed is your program's image in memory as provided by the loader. Possible use cases are OS-less development, embedded, bootloaders (even stage0/1). The primary goals are similar to the standard library simplicity, and correctness, and minimal assumptions. ## Usage As a global allocator for `alloc` with some safe allocation extensions: ```rust use static_alloc::Bump; #[global_allocator] static A: Bump<[u8; 1 << 16]> = Bump::uninit(); fn main() { // Vec occupying `1 << 7` bytes let v = vec![0xdeadbeef_u32; 32]; // … or allocate values directly. let buffer: &mut [u32; 32] = A.leak([0; 32]) .unwrap(); buffer.copy_from_slice(&v); } ``` You can also use it as a local allocator creating dynamic values on the stack. In this case you might want to be more conservative with resource usage so as not to blow the stack. The benefit is even larger using it together with [`without-alloc`] which provides high-level data structures that you are used to from `alloc`. ```rust use static_alloc::Bump; fn main() { for _ in 0..100 { let local: Bump<[u8; 32]> = Bump::uninit(); let temp_buffer = local.leak([0; 32]); // Resources are cleaned up. } } ``` [`without-alloc`]: https://crates.io/crates/without-alloc/ ## Contributing PRs introducing more tests or documentation are very welcome! Whatever else submitted should have simplicity and composability in mind, ideas that can not be put into a draft form are likely too complex or not focussed enough. PRs should be *extremely* reluctant with introducing new dependencies and *should* contain no non-optional dependency. Please open issues with drafts only, feature requests and 'help' issues will be closed (if you are lucky with a final comment). Stability trumps growth. 
I simply can not make any longterm commitment outside my intrinsic motiviation towards this project. Hence, I favour a highly usable core over a large interface that is only somewhat usable. ## Additional This project is licensed under Zlib OR Apache-2.0 OR MIT. You may alternatively choose [the Unlicense](http://unlicense.org/) instead in which case the copyright headers signify the parts dedicated to the public domain to the fullest possible extent instead. static-alloc-0.2.5/src/bump.rs000064400000000000000000001057041046102023000143160ustar 00000000000000//! The bump allocator. //! //! Basics of usage and the connection between the structs is discussed in the documentation of the //! [`Bump`] itself. //! //! [`Bump`]: struct.Bump.html use core::alloc::{GlobalAlloc, Layout}; use core::cell::UnsafeCell; use core::mem::{self, MaybeUninit}; use core::ptr::{NonNull, null_mut}; #[cfg(not(feature = "polyfill"))] use core::sync::atomic::{AtomicUsize, Ordering}; #[cfg(feature = "polyfill")] use atomic_polyfill::{AtomicUsize, Ordering}; use crate::leaked::LeakBox; use alloc_traits::{AllocTime, LocalAlloc, NonZeroLayout}; /// Allocator drawing from an inner, statically sized memory resource. /// /// The type parameter `T` is used only to annotate the required size and alignment of the region /// and has no futher use. Note that in particular there is no safe way to retrieve or unwrap an /// inner instance even if the `Bump` was not constructed as a shared global static. Nevertheless, /// the choice of type makes it easier to reason about potentially required extra space due to /// alignment padding. /// /// This type is *always* `Sync` to allow creating `static` instances. This works only because /// there is no actual instance of `T` contained inside. /// /// ## Usage as global allocator /// /// You can use the stable rust attribute to use an instance of this type as the global allocator. /// /// ```rust,no_run /// use static_alloc::Bump; /// /// #[global_allocator] /// static A: Bump<[u8; 1 << 16]> = Bump::uninit(); /// /// fn main() { } /// ``` /// /// Take care, some runtime features of Rust will allocate some memory before or after your own /// code. In particular, it was found to be be tricky to predict the usage of the builtin test /// framework which seemingly allocates some structures per test. /// /// ## Usage as a non-dropping local allocator /// /// It is also possible to use a `Bump` as a stack local allocator or a specialized allocator. The /// interface offers some utilities for allocating values from references to shared or unshared /// instances directly. **Note**: this will never call the `Drop` implementation of the allocated /// type. In particular, it would almost surely not be safe to `Pin` the values, except if there is /// a guarantee for the `Bump` itself to not be deallocated either. /// /// ```rust /// use static_alloc::Bump; /// /// let local: Bump<[u64; 3]> = Bump::uninit(); /// /// let one = local.leak(0_u64).unwrap(); /// let two = local.leak(1_u64).unwrap(); /// let three = local.leak(2_u64).unwrap(); /// /// // Exhausted the space. /// assert!(local.leak(3_u64).is_err()); /// ``` /// /// Mind that the supplied type parameter influenced *both* size and alignment and a `[u8; 24]` /// does not guarantee being able to allocation three `u64` even though most targets have a minimum /// alignment requirement of 16 and it works fine on those. /// /// ```rust /// # use static_alloc::Bump; /// // Just enough space for `u128` but no alignment requirement. 
/// let local: Bump<[u8; 16]> = Bump::uninit(); /// /// // May or may not return an err. /// let _ = local.leak(0_u128); /// ``` /// /// Instead use the type parameter to `Bump` as a hint for the best alignment. /// /// ```rust /// # use static_alloc::Bump; /// // Enough space and align for `u128`. /// let local: Bump<[u128; 1]> = Bump::uninit(); /// /// assert!(local.leak(0_u128).is_ok()); /// ``` /// /// ## Usage as a (local) bag of bits /// /// It is of course entirely possible to use a local instance instead of a single global allocator. /// For example you could utilize the pointer interface directly to build a `#[no_std]` dynamic /// data structure in an environment without `extern lib alloc`. This feature was the original /// motivation behind the crate but no such data structures are provided here so a quick sketch of /// the idea must do: /// /// ``` /// use core::alloc; /// use static_alloc::Bump; /// /// #[repr(align(4096))] /// struct PageTable { /// // some non-trivial type. /// # _private: [u8; 4096], /// } /// /// impl PageTable { /// /// Avoid stack allocation of the full struct. /// pub unsafe fn new(into: *mut u8) -> &'static mut Self { /// // ... /// # &mut *(into as *mut Self) /// } /// } /// /// // Allocator for pages for page tables. Provides 64 pages. When the /// // program/kernel is provided as an ELF the bootloader reserves /// // memory for us as part of the loading process that we can use /// // purely for page tables. Replaces asm `paging: .BYTE ;` /// static Paging: Bump<[u8; 1 << 18]> = Bump::uninit(); /// /// fn main() { /// let layout = alloc::Layout::new::(); /// let memory = Paging.alloc(layout).unwrap(); /// let table = unsafe { /// PageTable::new(memory.as_ptr()) /// }; /// } /// ``` /// /// A similar structure would of course work to allocate some non-`'static' objects from a /// temporary `Bump`. /// /// ## More insights /// /// The ordering used is currently `SeqCst`. This enforces a single global sequence of observed /// effects on the slab level. The author is fully aware that this is not strictly necessary. In /// fact, even `AcqRel` may not be required as the monotonic bump allocator does not synchronize /// other memory itself. If you bring forward a PR with a formalized reasoning for relaxing the /// requirements to `Relaxed` (llvm `Monotonic`) it will be greatly appreciated (even more if you /// demonstrate performance gains). /// /// WIP: slices. pub struct Bump { /// While in shared state, an monotonic atomic counter of consumed bytes. /// /// While shared it is only mutated in `bump` which guarantees its invariants. In the mutable /// reference state it is modified arbitrarily. consumed: AtomicUsize, /// Outer unsafe cell due to thread safety. /// Inner MaybeUninit because we padding may destroy initialization invariant /// on the bytes themselves, and hence drop etc must not assumed inited. storage: UnsafeCell>, } /// A value could not be moved into a slab allocation. /// /// The error contains the value for which the allocation failed. Storing the value in the error /// keeps it alive in all cases. This prevents the `Drop` implementation from running and preserves /// resources which may otherwise not be trivial to restore. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct LeakError { val: T, failure: Failure, } /// Specifies an amount of consumed space of a slab. /// /// Each allocation of the `Bump` increases the current level as they must not be empty. 
By /// ensuring that an allocation is performed at a specific level it is thus possible to check that /// multiple allocations happened in succession without other intermediate allocations. This /// ability in turns makes it possible to group allocations together, for example to initialize a /// `#[repr(C)]` struct member-by-member or to extend a slice. /// /// ## Usage /// /// The main use is successively allocating a slice without requiring all data to be present at /// once. Other similar interface often require an internal locking mechanism but `Level` leaves /// the choice to the user. This is not yet encapsulate in a safe API yet `Level` makes it easy to /// reason about. /// /// See [`MemBump::get_unchecked`][crate::unsync::MemBump] for redeeming a value. /// /// ## Unsound usage. /// /// FIXME: the below is UB because we don't gain provenance over the complete array, only each /// individual element. Instead, we must derive a new pointer from the allocator! /// /// ``` /// # use core::slice; /// # use static_alloc::bump::{Level, Bump}; /// static BUMP: Bump<[u64; 4]> = Bump::uninit(); /// /// /// Gathers as much data as possible. /// /// /// /// An arbitrary amount of data, can't stack allocate! /// fn gather_data(mut iter: impl Iterator) -> &'static mut [u64] { /// let first = match iter.next() { /// Some(item) => item, /// None => return &mut [], /// }; /// /// let mut level: Level = BUMP.level(); /// let mut begin: *mut u64; /// let mut count; /// /// match BUMP.leak_at(first, level) { /// Ok((first, first_level)) => { /// begin = first; /// level = first_level; /// count = 1; /// }, /// _ => return &mut [], /// } /// /// let _ = iter.try_for_each(|value: u64| { /// match BUMP.leak_at(value, level) { /// Err(err) => return Err(err), /// Ok((_, new_level)) => level = new_level, /// }; /// count += 1; /// Ok(()) /// }); /// /// unsafe { /// // SAFETY: all `count` allocations are contiguous, begin is well aligned and no /// // reference is currently pointing at any of the values. The lifetime is `'static` as /// // the BUMP itself is static. /// slice::from_raw_parts_mut(begin, count) /// } /// } /// /// fn main() { /// // There is no other thread running, so this succeeds. /// let slice = gather_data(0..=3); /// assert_eq!(slice, [0, 1, 2, 3]); /// } /// ``` #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Level(pub(crate) usize); /// A successful allocation and current [`Level`]. /// /// [`Level`]: struct.Level.html #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Allocation<'a, T=u8> { /// Pointer to the uninitialized region with specified layout. pub ptr: NonNull, /// The lifetime of the allocation. pub lifetime: AllocTime<'a>, /// The observed amount of consumed bytes after the allocation. pub level: Level, } /// Reason for a failed allocation at an exact [`Level`]. /// /// [`Level`]: struct.Level.html #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum Failure { /// No space left for that allocation. Exhausted, /// The allocation would not have used the expected base location. /// /// Reports the location that was observed. When only levels from the same slab are used (which /// should normally be the case) then the observed level is monotonically increasing. Mismatch { /// The observed level that was different from the requested one. observed: Level, }, } impl Bump { /// Make a new allocatable slab of certain byte size and alignment. /// /// The storage will contain uninitialized bytes. 
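    /// ## Usage
    ///
    /// A minimal sketch; the backing type `[u8; 64]` is an arbitrary choice for illustration.
    ///
    /// ```
    /// use core::alloc::Layout;
    /// use static_alloc::Bump;
    ///
    /// let bump: Bump<[u8; 64]> = Bump::uninit();
    /// // 64 bytes of uninitialized storage are now available for bump allocation.
    /// assert!(bump.alloc(Layout::new::<u32>()).is_some());
    /// ```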
pub const fn uninit() -> Self { Bump { consumed: AtomicUsize::new(0), storage: UnsafeCell::new(MaybeUninit::uninit()), } } /// Make a new allocatable slab of certain byte size and alignment. /// /// The storage will contain zeroed bytes. This is not *yet* available /// as a `const fn` which currently limits its potential usefulness /// but there is no good reason not to provide it regardless. pub fn zeroed() -> Self { Bump { consumed: AtomicUsize::new(0), storage: UnsafeCell::new(MaybeUninit::zeroed()), } } /// Make a new allocatable slab provided with some bytes it can hand out. /// /// Note that `storage` will never be dropped and there is no way to get it back. pub const fn new(storage: T) -> Self { Bump { consumed: AtomicUsize::new(0), storage: UnsafeCell::new(MaybeUninit::new(storage)), } } /// Reset the bump allocator. /// /// Requires a mutable reference, as no allocations can be active when doing it. This behaves /// as if a fresh instance was assigned but it does not overwrite the bytes in the backing /// storage. (You can unsafely rely on this). /// /// ## Usage /// /// ``` /// # use static_alloc::Bump; /// let mut stack_buf = Bump::::uninit(); /// /// let bytes = stack_buf.leak(0usize.to_be_bytes()).unwrap(); /// // Now the bump allocator is full. /// assert!(stack_buf.leak(0u8).is_err()); /// /// // We can reuse if we are okay with forgetting the previous value. /// stack_buf.reset(); /// let val = stack_buf.leak(0usize).unwrap(); /// ``` /// /// Trying to use the previous value does not work, as the stack is still borrowed. Note that /// any user unsafely tracking the lifetime must also ensure this through proper lifetimes that /// guarantee that borrows are alive for appropriate times. /// /// ```compile_fail /// // error[E0502]: cannot borrow `stack_buf` as mutable because it is also borrowed as immutable /// # use static_alloc::Bump; /// let mut stack_buf = Bump::::uninit(); /// /// let bytes = stack_buf.leak(0usize).unwrap(); /// // --------- immutably borrow occurs here /// stack_buf.reset(); /// // ^^^^^^^ mutable borrow occurs here. /// let other = stack_buf.leak(0usize).unwrap(); /// /// *bytes += *other; /// // ------------- immutable borrow later used here /// ``` pub fn reset(&mut self) { *self.consumed.get_mut() = 0; } /// Allocate a region of memory. /// /// This is a safe alternative to [GlobalAlloc::alloc](#impl-GlobalAlloc). /// /// # Panics /// This function will panic if the requested layout has a size of `0`. For the use in a /// `GlobalAlloc` this is explicitely forbidden to request and would allow any behaviour but we /// instead strictly check it. pub fn alloc(&self, layout: Layout) -> Option> { Some(self.try_alloc(layout)?.ptr) } /// Try to allocate some layout with a precise base location. /// /// The base location is the currently consumed byte count, without correction for the /// alignment of the allocation. This will succeed if it can be allocate exactly at the /// expected location. /// /// # Panics /// This function may panic if the provided `level` is from a different slab. pub fn alloc_at(&self, layout: Layout, level: Level) -> Result { let Allocation { ptr, lifetime, level } = self.try_alloc_at(layout, level.0)?; Ok(Allocation { ptr: ptr.cast(), lifetime, level, }) } /// Get an allocation with detailed layout. /// /// Provides an [`Uninit`] wrapping several aspects of initialization in a safe interface, /// bound by the lifetime of the reference to the allocator. 
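    ///
    /// ## Usage
    ///
    /// A small sketch; the `[u8; 64]` backing store and the `u32` layout are arbitrary choices.
    ///
    /// ```
    /// use core::alloc::Layout;
    /// use static_alloc::Bump;
    ///
    /// let bump: Bump<[u8; 64]> = Bump::uninit();
    /// let allocation = bump.get_layout(Layout::new::<u32>()).unwrap();
    /// // The returned pointer respects the requested alignment.
    /// assert_eq!(allocation.ptr.as_ptr() as usize % Layout::new::<u32>().align(), 0);
    /// ```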
/// /// [`Uninit`]: ../uninit/struct.Uninit.html pub fn get_layout(&self, layout: Layout) -> Option> { self.try_alloc(layout) } /// Get an allocation with detailed layout at a specific level. /// /// Provides an [`Uninit`] wrapping several aspects of initialization in a safe interface, /// bound by the lifetime of the reference to the allocator. /// /// Since the underlying allocation is the same, it would be `unsafe` but justified to fuse /// this allocation with the preceding or succeeding one. /// /// [`Uninit`]: ../uninit/struct.Uninit.html pub fn get_layout_at(&self, layout: Layout, at: Level) -> Result, Failure> { self.try_alloc_at(layout, at.0) } /// Get an allocation for a specific type. /// /// It is not yet initialized but provides a safe interface for that initialization. /// /// ## Usage /// /// ``` /// # use static_alloc::Bump; /// use core::cell::{Ref, RefCell}; /// /// let slab: Bump<[Ref<'static, usize>; 1]> = Bump::uninit(); /// let data = RefCell::new(0xff); /// /// // We can place a `Ref` here but we did not yet. /// let alloc = slab.get::>().unwrap(); /// let cell_ref = unsafe { /// alloc.leak(data.borrow()) /// }; /// /// assert_eq!(**cell_ref, 0xff); /// ``` pub fn get(&self) -> Option> { if mem::size_of::() == 0 { return Some(self.zst_fake_alloc()); } let layout = Layout::new::(); let Allocation { ptr, lifetime, level, } = self.try_alloc(layout)?; Some(Allocation { ptr: ptr.cast(), lifetime, level, }) } /// Get an allocation for a specific type at a specific level. /// /// See [`get`] for usage. /// /// [`get`]: #method.get pub fn get_at(&self, level: Level) -> Result, Failure> { if mem::size_of::() == 0 { let fake = self.zst_fake_alloc(); // Note: zst_fake_alloc is a noop on the level, we may as well check after. if fake.level != level { return Err(Failure::Mismatch { observed: fake.level, }); } return Ok(fake); } let layout = Layout::new::(); let Allocation { ptr, lifetime, level, } = self.try_alloc_at(layout, level.0)?; Ok(Allocation { // It has exactly size and alignment for `V` as requested. ptr: ptr.cast(), lifetime, level, }) } /// Move a value into an owned allocation. /// /// For safely initializing a value _after_ a successful allocation, see [`LeakBox::write`]. /// /// [`LeakBox::write`]: ../leaked/struct.LeakBox.html#method.write /// /// ## Usage /// /// This can be used to push the value into a caller provided stack buffer where it lives /// longer than the current stack frame. For example, you might create a linked list with a /// dynamic number of values living in the frame below while still being dropped properly. This /// is impossible to do with a return value. /// /// ``` /// # use static_alloc::Bump; /// # use static_alloc::leaked::LeakBox; /// fn rand() -> usize { 4 } /// /// enum Chain<'buf, T> { /// Tail, /// Link(T, LeakBox<'buf, Self>), /// } /// /// fn make_chain(buf: &Bump, mut new_node: impl FnMut() -> T) /// -> Option> /// { /// let count = rand(); /// let mut chain = Chain::Tail; /// for _ in 0..count { /// let node = new_node(); /// chain = Chain::Link(node, buf.leak_box(chain)?); /// } /// Some(chain) /// } /// /// struct Node (usize); /// impl Drop for Node { /// fn drop(&mut self) { /// println!("Dropped {}", self.0); /// } /// } /// let mut counter = 0..; /// let new_node = || Node(counter.next().unwrap()); /// /// let buffer: Bump<[u8; 128]> = Bump::uninit(); /// let head = make_chain(&buffer, new_node).unwrap(); /// /// // Prints the message in reverse order. 
/// // Dropped 3 /// // Dropped 2 /// // Dropped 1 /// // Dropped 0 /// drop(head); /// ``` pub fn leak_box(&self, val: V) -> Option> { let Allocation { ptr, lifetime, .. } = self.get::()?; Some(unsafe { LeakBox::new_from_raw_non_null(ptr, val, lifetime) }) } /// Move a value into an owned allocation. /// /// See [`leak_box`] for usage. /// /// [`leak_box`]: #method.leak_box pub fn leak_box_at(&self, val: V, level: Level) -> Result, Failure> { let Allocation { ptr, lifetime, .. } = self.get_at::(level)?; Ok(unsafe { LeakBox::new_from_raw_non_null(ptr, val, lifetime) }) } /// Observe the current level. /// /// Keep in mind that concurrent usage of the same slab may modify the level before you are /// able to use it in `alloc_at`. Calling this method provides also no other guarantees on /// synchronization of memory accesses, only that the values observed by the caller are a /// monotonically increasing seequence while a shared reference exists. pub fn level(&self) -> Level { Level(self.consumed.load(Ordering::SeqCst)) } fn try_alloc(&self, layout: Layout) -> Option> { // Guess zero, this will fail when we try to access it and it isn't. let mut consumed = 0; loop { match self.try_alloc_at(layout, consumed) { Ok(alloc) => return Some(alloc), Err(Failure::Exhausted) => return None, Err(Failure::Mismatch{ observed }) => consumed = observed.0, } } } /// Try to allocate some layout with a precise base location. /// /// The base location is the currently consumed byte count, without correction for the /// alignment of the allocation. This will succeed if it can be allocate exactly at the /// expected location. /// /// # Panics /// This function panics if `expect_consumed` is larger than `length`. fn try_alloc_at(&self, layout: Layout, expect_consumed: usize) -> Result, Failure> { assert!(layout.size() > 0); let length = mem::size_of::(); let base_ptr = self.storage.get() as *mut T as *mut u8; let alignment = layout.align(); let requested = layout.size(); // Ensure no overflows when calculating offets within. assert!(expect_consumed <= length); let available = length.checked_sub(expect_consumed).unwrap(); let ptr_to = base_ptr.wrapping_add(expect_consumed); let offset = ptr_to.align_offset(alignment); if requested > available.saturating_sub(offset) { return Err(Failure::Exhausted); // exhausted } // `size` can not be zero, saturation will thus always make this true. assert!(offset < available); let at_aligned = expect_consumed.checked_add(offset).unwrap(); let new_consumed = at_aligned.checked_add(requested).unwrap(); // new_consumed // = consumed + offset + requested [lines above] // <= consumed + available [bail out: exhausted] // <= length [first line of loop] // So it's ok to store `allocated` into `consumed`. assert!(new_consumed <= length); assert!(at_aligned < length); // Try to actually allocate. match self.bump(expect_consumed, new_consumed) { Ok(()) => (), Err(observed) => { // Someone else was faster, if you want it then recalculate again. return Err(Failure::Mismatch { observed: Level(observed) }); }, } let aligned = unsafe { // SAFETY: // * `0 <= at_aligned < length` in bounds as checked above. (base_ptr as *mut u8).add(at_aligned) }; Ok(Allocation { ptr: NonNull::new(aligned).unwrap(), lifetime: AllocTime::default(), level: Level(new_consumed), }) } /// Allocate a value for the lifetime of the allocator. /// /// The value is leaked in the sense that /// /// 1. the drop implementation of the allocated value is never called; /// 2. 
reusing the memory for another allocation in the same `Bump` requires manual unsafe code /// to handle dropping and reinitialization. /// /// However, it does not mean that the underlying memory used for the allocated value is never /// reclaimed. If the `Bump` itself is a stack value then it will get reclaimed together with /// it. /// /// ## Safety notice /// /// It is important to understand that it is undefined behaviour to reuse the allocation for /// the *whole lifetime* of the returned reference. That is, dropping the allocation in-place /// while the reference is still within its lifetime comes with the exact same unsafety caveats /// as [`ManuallyDrop::drop`]. /// /// ``` /// # use static_alloc::Bump; /// #[derive(Debug, Default)] /// struct FooBar { /// // ... /// # _private: [u8; 1], /// } /// /// let local: Bump<[FooBar; 3]> = Bump::uninit(); /// let one = local.leak(FooBar::default()).unwrap(); /// /// // Dangerous but justifiable. /// let one = unsafe { /// // Ensures there is no current mutable borrow. /// core::ptr::drop_in_place(&mut *one); /// }; /// ``` /// /// ## Usage /// /// ``` /// use static_alloc::Bump; /// /// let local: Bump<[u64; 3]> = Bump::uninit(); /// /// let one = local.leak(0_u64).unwrap(); /// assert_eq!(*one, 0); /// *one = 42; /// ``` /// /// ## Limitations /// /// Only sized values can be allocated in this manner for now, unsized values are blocked on /// stabilization of [`ptr::slice_from_raw_parts`]. We can not otherwise get a fat pointer to /// the allocated region. /// /// [`ptr::slice_from_raw_parts`]: https://github.com/rust-lang/rust/issues/36925 /// [`ManuallyDrop::drop`]: https://doc.rust-lang.org/beta/std/mem/struct.ManuallyDrop.html#method.drop /// /// TODO: will be deprecated sooner or later in favor of a method that does not move the /// resource on failure. // #[deprecated = "Use leak_box and initialize it with the value. This does not move the value in the failure case."] pub fn leak(&self, val: V) -> Result<&mut V, LeakError> { match self.get::() { // SAFETY: Just allocated this for a `V`. Some(alloc) => Ok(unsafe { alloc.leak(val) }), None => Err(LeakError::new(val, Failure::Exhausted)), } } /// Allocate a value with a precise location. /// /// See [`leak`] for basics on allocation of values. /// /// The level is an identifer for a base location (more at [`level`]). This will succeed if it /// can be allocate exactly at the expected location. /// /// This method will return the new level of the slab allocator. A next allocation at the /// returned level will be placed next to this allocation, only separated by necessary padding /// from alignment. In particular, this is the same strategy as applied for the placement of /// `#[repr(C)]` struct members. (Except for the final padding at the last member to the full /// struct alignment.) /// /// ## Usage /// /// ``` /// use static_alloc::Bump; /// /// let local: Bump<[u64; 3]> = Bump::uninit(); /// /// let base = local.level(); /// let (one, level) = local.leak_at(1_u64, base).unwrap(); /// // Will panic when an allocation happens in between. /// let (two, _) = local.leak_at(2_u64, level).unwrap(); /// /// assert_eq!((one as *const u64).wrapping_offset(1), two); /// ``` /// /// [`leak`]: #method.leak /// [`level`]: #method.level /// /// TODO: will be deprecated sooner or later in favor of a method that does not move the /// resource on failure. /// // #[deprecated = "Use leak_box_at and initialize it with the value. 
This does not move the value in the failure case."] pub fn leak_at(&self, val: V, level: Level) -> Result<(&mut V, Level), LeakError> { let alloc = match self.get_at::(level) { Ok(alloc) => alloc, Err(err) => return Err(LeakError::new(val, err)), }; // SAFETY: Just allocated this for a `V`. let level = alloc.level; let mutref = unsafe { alloc.leak(val) }; Ok((mutref, level)) } /// 'Allocate' a ZST. fn zst_fake_alloc(&self) -> Allocation<'_, Z> { Allocation::for_zst(self.level()) } /// Try to bump the monotonic, atomic consume counter. /// /// This is the only place doing shared modification to `self.consumed`. /// /// Returns `Ok` if the consume counter was as expected. Monotonicty and atomicity guarantees /// to the caller that no overlapping range can succeed as well. This allocates the range to /// the caller. /// /// Returns the observed consume counter in an `Err` if it was not as expected. /// /// ## Panics /// This function panics if either argument exceeds the byte length of the underlying memory. /// It also panics if the expected value is larger than the new value. fn bump(&self, expect_consumed: usize, new_consumed: usize) -> Result<(), usize> { assert!(expect_consumed <= new_consumed); assert!(new_consumed <= mem::size_of::()); self.consumed.compare_exchange( expect_consumed, new_consumed, Ordering::SeqCst, Ordering::SeqCst, ).map(drop) } } impl<'alloc, T> Allocation<'alloc, T> { /// Write a value into the allocation and leak it. /// /// ## Safety /// /// Must have been allocated for a layout that fits the layout of T previously. The pointer /// must not be aliased. /// /// ## Usage /// /// Consider the alternative [`Bump::leak`] to safely allocate and directly leak a value. /// /// [`Bump::leak`]: struct.Bump.html#method.leak pub unsafe fn leak(self, val: T) -> &'alloc mut T { // The pointer is not borrowed and valid as guaranteed by the caller. core::ptr::write(self.ptr.as_ptr(), val); &mut *self.ptr.as_ptr() } /// Write a value into the allocation and own it. /// /// ## Safety /// /// Must have been allocated for a layout that fits the layout of T previously. The pointer /// must not be aliased. /// /// ## Usage /// /// Consider the alternative [`Bump::leak`] to safely allocate and directly leak a value. /// /// [`Bump::leak`]: struct.Bump.html#method.leak pub unsafe fn boxed(self, val: T) -> LeakBox<'alloc, T> { // The pointer is not aliased and valid as guaranteed by the caller. core::ptr::write(self.ptr.as_ptr(), val); // Safety: the instance is valid, was just initialized. LeakBox::from_raw(self.ptr.as_ptr()) } /// Convert this into a mutable reference to an uninitialized slot. /// /// ## Safety /// /// Must have been allocated for a layout that fits the layout of T previously. pub unsafe fn uninit(self) -> &'alloc mut MaybeUninit { &mut *self.ptr.cast().as_ptr() } /// An 'allocation' for an arbitrary ZST, at some arbitrary level. pub(crate) fn for_zst(level: Level) -> Self { assert!(mem::size_of::() == 0); // If `Z` is a ZST, then the stride of any array is equal to 0. Thus, all arrays and slices // havee the same layout which only depends on the alignment. If we need a storage for this // ZST we just take one of those as our base 'allocation' which can also never be aliased. let alloc: &[T; 0] = &[]; Allocation { ptr: NonNull::from(alloc).cast(), lifetime: AllocTime::default(), level: level, } } } impl LeakError { fn new(val: T, failure: Failure) -> Self { LeakError { val, failure, } } /// Inspect the cause of this error. 
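    ///
    /// ## Usage
    ///
    /// A sketch of inspecting a failed allocation; a zero-sized backing store is chosen here so
    /// that the allocation is guaranteed to fail with `Exhausted`.
    ///
    /// ```
    /// use static_alloc::Bump;
    /// use static_alloc::bump::Failure;
    ///
    /// let bump: Bump<[u8; 0]> = Bump::uninit();
    /// // No space at all, so leaking any value must fail.
    /// let err = bump.leak(1_u8).unwrap_err();
    /// assert_eq!(err.kind(), Failure::Exhausted);
    /// ```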
pub fn kind(&self) -> Failure { self.failure } /// Retrieve the value that could not be allocated. pub fn into_inner(self) -> T { self.val } } // SAFETY: at most one thread gets a pointer to each chunk of data. unsafe impl Sync for Bump { } unsafe impl GlobalAlloc for Bump { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { Bump::alloc(self, layout) .map(NonNull::as_ptr) .unwrap_or_else(null_mut) } unsafe fn realloc( &self, ptr: *mut u8, current: Layout, new_size: usize, ) -> *mut u8 { let current = NonZeroLayout::from_layout(current.into()).unwrap(); // As guaranteed, `new_size` is greater than 0. let new_size = core::num::NonZeroUsize::new_unchecked(new_size); let target = match layout_reallocated(current, new_size) { Some(target) => target, None => return core::ptr::null_mut(), }; // Construct an allocation. This is not safe in general but the lifetime is not important. let fake = alloc_traits::Allocation { ptr: NonNull::new_unchecked(ptr), layout: current, lifetime: AllocTime::default(), }; alloc_traits::LocalAlloc::realloc(self, fake, target) .map(|alloc| alloc.ptr.as_ptr()) .unwrap_or_else(core::ptr::null_mut) } unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) { // We are a slab allocator and do not deallocate. } } fn layout_reallocated(layout: NonZeroLayout, target: core::num::NonZeroUsize) -> Option { // This may not be a valid layout. let layout = Layout::from_size_align(target.get(), layout.align()).ok()?; // This must succeed though, as the size was non-zero. Some(NonZeroLayout::from_layout(layout.into()).unwrap()) } unsafe impl<'alloc, T> LocalAlloc<'alloc> for Bump { fn alloc(&'alloc self, layout: NonZeroLayout) -> Option> { let raw_alloc = Bump::get_layout(self, layout.into())?; Some(alloc_traits::Allocation { ptr: raw_alloc.ptr, layout: layout, lifetime: AllocTime::default(), }) } // TODO: alloc zeroed if the constructor was `Self::zeroed()` /// Reallocates if the layout is strictly smaller and the allocation aligned. /// /// Note that this may succeed spuriously if the previous allocation is incidentally aligned to /// a larger alignment than had been request. /// /// Also not, reallocating to a smaller layout is NOT useless. /// /// It confirms that this allocator does not need the allocated layout to re/deallocate. /// Otherwise, even reallocating to a strictly smaller layout would be impossible without /// storing the prior layout. unsafe fn realloc( &'alloc self, alloc: alloc_traits::Allocation<'alloc>, layout: NonZeroLayout, ) -> Option> { if alloc.ptr.as_ptr() as usize % layout.align() == 0 && alloc.layout.size() >= layout.size() { // Obvious fit, nothing to do. return Some(alloc_traits::Allocation { ptr: alloc.ptr, layout, lifetime: alloc.lifetime, }); } // TODO: we could try to allocate at the exact level that the allocation ends. If this // succeeds, there is no copying necessary. This was the point of `Level` anyways. let new_alloc = LocalAlloc::alloc(self, layout)?; core::ptr::copy_nonoverlapping( alloc.ptr.as_ptr(), new_alloc.ptr.as_ptr(), layout.size().min(alloc.layout.size()).into()); // No dealloc. return Some(new_alloc); } unsafe fn dealloc(&'alloc self, _: alloc_traits::Allocation<'alloc>) { // We are a slab allocator and do not deallocate. 
} } #[cfg(test)] mod tests { use super::*; #[test] fn zst_no_drop() { #[derive(Debug)] struct PanicOnDrop; impl Drop for PanicOnDrop { fn drop(&mut self) { panic!("No instance of this should ever get dropped"); } } let alloc = Bump::<()>::uninit(); let _ = alloc.leak(PanicOnDrop).unwrap(); } } static-alloc-0.2.5/src/doc/global_allocator.md000064400000000000000000000006621046102023000173710ustar 00000000000000Using a Bump as a global allocator. # Usage – Global allocator This was the example from the create introduction. ```rust use static_alloc::Bump; #[global_allocator] static A: Bump<[u8; 1 << 16]> = Bump::uninit(); fn main() { let v = vec![0xdeadbeef_u32; 128]; println!("{:x?}", v); let buffer: &'static mut [u32; 128] = A.leak([0; 128]) .unwrap_or_else(|_| panic!("Runtime allocated before main")); } ``` static-alloc-0.2.5/src/doc/pinned.md000064400000000000000000000110531046102023000153420ustar 00000000000000Safely pinning a statically allocated task. # Usage — Pinned tasks Another use case similar to a pool is the ability to _pin_ objects that have been allocated without the need for macros or `unsafe`. This is sound because the memory within a static bump allocator is _never_ reused for any other purpose. Let's set the scene by defining a long running task that requires pinning. ``` use static_alloc::Bump; async fn example(x: usize) -> usize { // Holding reference across yield point. // This requires pinning to run this future. let y = &x; core::future::pending::<()>().await; *y } ``` Note that the type of this asynchronous future, the state machine synthesized as the result type of the function `example`, is self-referential because it holds a reference to `x`, assigned to `y`, at an await point. This means that this type is also `!Unpin`. Once it is polled at a particular location in memory it mustn't move. This is enforced by requiring you to _pin_ it before polling the future, a kind of state that ensures the memory used for representing the type can not be reused until _after_ its destructor (`Drop`) was called. Ordinarily there are several `unsafe` constructors for `Pin<&mut _>` and a few helper macros that, in one way or another, ensure you can't access it by value after pinning it, and that you can't `forget` it either. There is also an entirely safe method available only when a global allocator exists: A value within a `Box<_>` can be pinned at will by calling `Box::into_pin`. This is safe because the memory can not be reused before it is deallocated, which can only happen by dropping the box itself which necessarily drops the value as well. Since a static `Bump` is _also_ an allocator, an argument similar to a globally allocated `Box` holds for values put into its memory! If ensure that the bump allocator was borrowed forever, that is for the `'static` lifetime, then it can not be reset (because this requires `&mut`/unique access) and no memory is ever reused. This allows using a pool _instead_ of the global allocator. This is particularly interesting for tasks that will run _exactly_ once or a bounded number of times. In this case there is no risk that this will eventually exhaust the allocator's memory because memory is never returned. ## Pinning an allocated task It is time to demonstrate this in code: ``` use core::pin::Pin; use static_alloc::{Bump, leaked::LeakBox}; # async fn example(x: usize) -> usize { todo!() } // A generous size estimation. // See below for a genius exact idea. // On nightly you could calculate the size as a constant. 
static SLOT: Bump<[usize; 4]> = Bump::uninit(); let future: LeakBox<'static, _> = SLOT.leak_box(example(0)) .expect("Our size estimation was generous enough"); let mut future: Pin<_> = LeakBox::into_pin(future); let can_use_this_in_async = async move { let _ = future.as_mut().await; }; ``` Let me repeat how that this will of course only work the first time, after that the bump allocator might be exhausted. You can also _only_ pin values that were leak-boxed on a `static` pool of memory, since this is the required assurance that the memory is never reused. This means that we will mostly want to use this for creating an instance of a single global task. Nevertheless, the code above guarantee that the task is properly `Drop`'d in benign circumstances. This is in contrast to _actually_ leaking the value where it would never be dropped. It's _sound_ not to forget a pinned future constructed in this way but it it shouldn't happen by accident in decently well-written code. ## Calculate the required allocation for a future's state machine The following idea by [@Yandros](https://github.com/danielhenrymantilla) calculates the layout of a future. We can then create a memory reservation large enough to guarantee that one of that future can be allocated. ``` use core::{alloc::Layout, marker::PhantomData}; use static_alloc::Bump; struct Helper(PhantomData); impl Helper { const fn layout(self: Helper) -> Layout { Layout::new::() } fn use_the_type_inference_luke(self: &'_ Self, _: &'_ F) {} } async fn test() {} const LAYOUT_OF_TEST: Layout = { let h = Helper(PhantomData); let _ = || { h.use_the_type_inference_luke(&test()) }; h.layout() }; // Number of bytes to guarantee one address is aligned. const REQUIRED_TO_GUARANTEE: usize = LAYOUT_OF_TEST.size() + LAYOUT_OF_TEST.align(); static SLOT: Bump<[u8; REQUIRED_TO_GUARANTEE]> = Bump::uninit(); SLOT.leak_box(test()) .expect("This _must_ work the first time it is called"); ``` static-alloc-0.2.5/src/doc/static_allocator.md000064400000000000000000000040361046102023000174170ustar 00000000000000Using a Bump as a dedicated static pool. # Usage – Static allocator Some tasks will be created once during program startup but are not constant expression. The bump allocator allows you to provision static storage for them and then initialize them dynamically. In this way, the caller can also retain precise control over memory locality. Consider a program that runs a global system, this might be an event loop for tasks, but it has two flavors of implementing this system. One of them might perform energy hungry but fast polling while a more efficient system uses interrupts. You want to let the program's runtime environment decide which of these systems should be used so they implement a common trait and your program passes around a static `dyn`-reference. ```rust # #[derive(Default, Debug)] struct TaskSystemA { // … # _field: u8, } # #[derive(Default, Debug)] struct TaskSystemB { // … # _field: u8, } trait CommonTrait { // … } ``` Using a Bump allocator as a pool solves this problem very conveniently without the need for a global allocator. Compared to using the stack, your reference retain the `'static` lifetime while compared to a once-cell you retain the uniqueness of the original reference. 
```rust # #[derive(Default, Debug)] # struct TaskSystemA(u8); # #[derive(Default, Debug)] # struct TaskSystemB(u16); # trait CommonTrait {} # impl CommonTrait for TaskSystemA {} # impl CommonTrait for TaskSystemB {} # use core::mem::ManuallyDrop; use static_alloc::Bump; union StorageForAOrB { variant_a: ManuallyDrop, variant_b: ManuallyDrop, } static POOL: Bump = Bump::uninit(); fn main() { // Split the pool based on an environment variable. let use_b = std::env::var_os("USE_VARIANT_B") .map_or(false, |_| true); // If we had stack-allocate them we wouldn't have `'static`. let used: &'static mut dyn CommonTrait = if use_b { POOL.leak(TaskSystemA::default()).unwrap() } else { POOL.leak(TaskSystemB::default()).unwrap() }; } ``` static-alloc-0.2.5/src/inline_vec.rs000064400000000000000000000025611046102023000154630ustar 00000000000000use core::marker::PhantomData; use crate::uninit::{Uninit, UninitView}; pub struct Vec<'a, T> { uninit: Uninit<'a, ()>, phantom: PhantomData<&'a mut [T]>, } impl Vec<'_, T> { fn inner_regions(&self) -> Option<(UninitView<'_, usize>, UninitView<'_, [T]>)> { let region = self.uninit.borrow(); let (_, counter) = region.split_cast().ok()?; let (counter, tail) = counter.shrink_to_fit(); let (_, slice) = tail.split_slice().ok()?; Some((counter, slice)) } fn inner_regions_mut(&mut self) -> Option<(Uninit<'_, usize>, Uninit<'_, [T]>)> { let region = self.uninit.borrow_mut(); let (_, counter) = region.split_cast().ok()?; let (counter, tail) = counter.shrink_to_fit(); let (_, slice) = tail.split_slice().ok()?; Some((counter, slice)) } } impl<'a, T> Vec<'a, T> { pub fn new(uninit: Uninit<'a, ()>) -> Self { let mut fixed = Vec { uninit, phantom: PhantomData, }; if let Some((counter, _)) = fixed.inner_regions_mut() { counter.init(0); } fixed } pub fn len(&self) -> usize { if let Some((counter, _)) = self.inner_regions() { unsafe { *counter.as_ptr() } } else { 0 } } pub fn as_slice(&self) -> &[T] { } } static-alloc-0.2.5/src/leaked.rs000064400000000000000000000503651046102023000146020ustar 00000000000000//! This module contains an owning wrapper of a leaked struct. //! //! FIXME(breaking): Naming. `leaking` implies the `Drop` of the value as well but we do the //! precise opposite. use core::pin::Pin; use alloc_traits::AllocTime; use core::{ alloc::Layout, fmt, hash, marker::PhantomData, mem::{ManuallyDrop, MaybeUninit}, ops::{Deref, DerefMut}, ptr::{self, NonNull}, }; /// Zero-sized marker struct that allows running one or several methods. /// /// This ensures that allocation does not exceed certain limits that would likely blow the stack /// and run into Rust's canary, this aborting the process. pub struct Alloca { marker: PhantomData<[T]>, len: usize, } impl Alloca { /// Try to create a representation, that allows functions with dynamically stack-allocated /// slices. pub fn new(len: usize) -> Option { // Check that it's okay to create the padded layout. This is pure so it will again work // when we try during `run`. let _padded_layout = Layout::array::(len + 1).ok()?; Some(Alloca { marker: PhantomData, len, }) } fn padded_layout(&self) -> Layout { Layout::array::(self.len + 1).expect("Checked this in the constructor") } /// Allocate a slice of elements. /// /// Please note that instantiating this method relies on the optimizer, to an extent. In /// particular we will create stack slots of differing sizes depending on the internal size. 
/// This shouldn't have an effect other than moving the stack pointer for various amounts and /// should never have more than one `T` in overhead. However, we can't enforce this. In theory /// llvm might still reserve stack space for all variants including a probe and thus /// prematurely assume we have hit the bottom of the available stack space. This is not very /// likely to occur in practice. pub fn run( &self, run: impl FnOnce(&mut [MaybeUninit]) -> R ) -> R { // Required size to surely have enough space for an aligned allocation. let required_size = self.padded_layout().size(); if required_size <= 8 { self.run_with::<[u64; 1], _, _>(run) } else if required_size <= 16 { self.run_with::<[u64; 2], _, _>(run) } else if required_size <= 32 { self.run_with::<[u64; 4], _, _>(run) } else if required_size <= 64 { self.run_with::<[u64; 8], _, _>(run) } else if required_size <= 128 { self.run_with::<[u64; 16], _, _>(run) } else if required_size <= 256 { self.run_with::<[u64; 32], _, _>(run) } else if required_size <= 512 { self.run_with::<[u64; 64], _, _>(run) } else if required_size <= 1024 { self.run_with::<[u64; 128], _, _>(run) } else if required_size <= 2048 { self.run_with::<[u64; 256], _, _>(run) } else if required_size <= (1 << 12) { self.run_with::<[u64; 512], _, _>(run) } else if required_size <= (1 << 13) { self.run_with::<[u64; 1 << 10], _, _>(run) } else if required_size <= (1 << 14) { self.run_with::<[u64; 1 << 11], _, _>(run) } else if required_size <= (1 << 15) { self.run_with::<[u64; 1 << 12], _, _>(run) } else if required_size <= (1 << 16) { self.run_with::<[u64; 1 << 13], _, _>(run) } else if required_size <= (1 << 17) { self.run_with::<[u64; 1 << 14], _, _>(run) } else if required_size <= (1 << 18) { self.run_with::<[u64; 1 << 15], _, _>(run) } else if required_size <= (1 << 19) { self.run_with::<[u64; 1 << 16], _, _>(run) } else if required_size <= (1 << 20) { self.run_with::<[u64; 1 << 17], _, _>(run) } else { panic!("Stack allocation is too big"); } } fn run_with]) -> R>( &self, run: F ) -> R { use crate::unsync::Bump; let mem = Bump::::uninit(); let slot = mem.bump_array::(self.len).unwrap(); run(LeakBox::leak(slot)) } } /// Represents an allocation within a Bump. /// /// This is an owning pointer comparable to `Box`. It drops the contained value when it is dropped /// itself. The difference is that no deallocation logic is ever executed. /// /// FIXME(non-breaking): the name is rather confusing. Maybe it should be `BumpBox` or `RefBox`? /// Not `StackBox` because the value's location in memory is not the defining feature. /// /// # Usage /// /// This box can be used to manage one valid instance constructed within the memory provided by a /// `MaybeUninit` instance. /// /// ``` /// use core::mem::MaybeUninit; /// use static_alloc::leaked::LeakBox; /// /// let mut storage = MaybeUninit::uninit(); /// let leak_box = LeakBox::from(&mut storage); /// // The string itself is not managed by `static_alloc`. /// let mut instance = LeakBox::write(leak_box, String::new()); /// /// instance.push_str("Hello world!"); /// ``` /// /// This box is the result of allocating from one of the `Bump` allocators using its explicit API. /// /// Being a box-like type, an `Option` has the same size. /// /// ``` /// use core::mem::size_of; /// use static_alloc::leaked::LeakBox; /// /// type Boxed = LeakBox<'static, usize>; /// type Optional = Option; /// /// assert_eq!(size_of::(), size_of::()); /// ``` /// /// TODO: On nightly the inner type should be [unsizable][unsize-coercion]. 
/// /// [unsize-coercion]: https://doc.rust-lang.org/reference/type-coercions.html#coercion-types pub struct LeakBox<'ctx, T: ?Sized> { #[allow(unused)] lifetime: AllocTime<'ctx>, // Covariance should be OK. pointer: NonNull, } impl<'ctx, T> LeakBox<'ctx, T> { /// Construct from a raw pointer. /// /// # Safety /// /// The allocation must be valid for a write of the value. The memory must also outlive the /// lifetime `'ctx` and pointer must not be aliased by any other reference for that scope. pub(crate) unsafe fn new_from_raw_non_null( pointer: NonNull, val: T, lifetime: AllocTime<'ctx>, ) -> Self { // SAFETY: // * `ptr` points to an allocation with correct layout for `V`. // * It is valid for write as it is the only pointer to it. // * The allocation lives for at least `'ctx`. core::ptr::write(pointer.as_ptr(), val); Self { pointer, lifetime, } } } impl<'ctx, T: ?Sized> LeakBox<'ctx, T> { /// Retrieve the raw pointer wrapped by this box. /// /// After this method the caller is responsible for managing the value in the place behind the /// pointer. It will need to be dropped manually. /// /// # Usage /// /// You might manually drop the contained instance at a later point. /// /// ``` /// use static_alloc::{Bump, leaked::LeakBox}; /// /// # fn fake() -> Option<()> { /// let bump: Bump<[usize; 128]> = Bump::uninit(); /// let leak_box = bump.leak_box(String::from("Hello"))?; /// let ptr = LeakBox::into_raw(leak_box); /// /// unsafe { /// core::ptr::drop_in_place(ptr); /// } /// # Some(()) } /// ``` /// /// An alternative is to later re-wrap the pointer /// /// ``` /// use static_alloc::{Bump, leaked::LeakBox}; /// /// # fn fake() -> Option<()> { /// let bump: Bump<[usize; 128]> = Bump::uninit(); /// let leak_box = bump.leak_box(String::from("Hello"))?; /// let ptr = LeakBox::into_raw(leak_box); /// /// unsafe { /// let _ = LeakBox::from_raw(ptr); /// }; /// # Some(()) } /// ``` pub fn into_raw(this: Self) -> *mut T { let this = ManuallyDrop::new(this); this.pointer.as_ptr() } /// Wrap a raw pointer. /// /// The most immediate use is to rewrap a pointer returned from [`into_raw`]. /// /// [`into_raw`]: #method.into_raw /// /// # Safety /// /// The pointer must point to a valid instance of `T` that is not aliased by any other /// reference for the lifetime `'ctx`. In particular it must be valid aligned and initialized. /// Dropping this `LeakBox` will drop the instance, which the caller must also guarantee to be /// sound. pub unsafe fn from_raw(pointer: *mut T) -> Self { debug_assert!(!pointer.is_null(), "Null pointer passed to LeakBox::from_raw"); LeakBox { lifetime: AllocTime::default(), pointer: NonNull::new_unchecked(pointer), } } /// Wrap a mutable reference to a complex value as if it were owned. /// /// # Safety /// /// The value must be owned by the caller. That is, the mutable reference must not be used /// after the `LeakBox` is dropped. In particular the value must not be dropped by the caller. /// /// # Example /// /// ```rust /// use core::mem::ManuallyDrop; /// use static_alloc::leaked::LeakBox; /// /// fn with_stack_drop(val: T) { /// let mut val = ManuallyDrop::new(val); /// // Safety: /// // - Shadows the variable, rendering the prior inaccessible. /// // - Dropping is now the responsibility of `LeakBox`. /// let val = unsafe { LeakBox::from_mut_unchecked(&mut *val) }; /// } /// /// // Demonstrate that it is correctly dropped. 
/// let variable = core::cell::RefCell::new(0); /// with_stack_drop(variable.borrow_mut()); /// assert!(variable.try_borrow_mut().is_ok()); /// ``` #[allow(unused_unsafe)] pub unsafe fn from_mut_unchecked(val: &'ctx mut T) -> Self { // SAFETY: // * Is valid instance // * Not aliased as by mut reference // * Dropping soundness is guaranteed by the caller. // * We don't invalidate any value, nor can the caller. unsafe { LeakBox::from_raw(val) } } /// Leak the instances as a mutable reference. /// /// After calling this method the value is no longer managed by `LeakBox`. Its Drop impl will /// not be automatically called. /// /// # Usage /// /// ``` /// use static_alloc::{Bump, leaked::LeakBox}; /// /// # fn fake() -> Option<()> { /// let bump: Bump<[usize; 128]> = Bump::uninit(); /// let leak_box = bump.leak_box(String::from("Hello"))?; /// /// let st: &mut String = LeakBox::leak(leak_box); /// # Some(()) } /// ``` /// /// You can't leak past the lifetime of the allocator. /// /// ```compile_fail /// # use static_alloc::{Bump, leaked::LeakBox}; /// # fn fake() -> Option<()> { /// let bump: Bump<[usize; 128]> = Bump::uninit(); /// let leak_box = bump.leak_box(String::from("Hello"))?; /// let st: &mut String = LeakBox::leak(leak_box); /// /// drop(bump); /// // error[E0505]: cannot move out of `bump` because it is borrowed /// st.to_lowercase(); /// //-- borrow later used here /// # Some(()) } /// ``` pub fn leak<'a>(this: Self) -> &'a mut T where 'ctx: 'a { let pointer = LeakBox::into_raw(this); // SAFETY: // * The LeakBox type guarantees this is initialized and not mutably aliased. // * For the lifetime 'a which is at most 'ctx. unsafe { &mut *pointer } } } impl LeakBox<'static, T> { /// Pin an instance that's leaked for the remaining program runtime. /// /// After calling this method the value can only safely be referenced mutably if it is `Unpin`, /// otherwise it is only accessible behind a `Pin`. Note that this does _not_ imply that the /// `Drop` glue, or explicit `Drop`-impl, is guaranteed to run. /// /// # Usage /// /// A decent portion of futures must be _pinned_ before the can be awaited inside another /// future. In particular this is required for self-referential futures that store pointers /// into their own object's memory. This is the case for the future type of an `asnyc fn` if /// there are potentially any stack references when it is suspended/waiting on another future. /// Consider this example: /// /// ```compile_fail /// use static_alloc::{Bump, leaked::LeakBox}; /// /// async fn example(x: usize) -> usize { /// // Holding reference across yield point. /// // This requires pinning to run this future. /// let y = &x; /// core::future::ready(()).await; /// *y /// } /// /// static POOL: Bump<[usize; 128]> = Bump::uninit(); /// let mut future = POOL.leak_box(example(0)) /// .expect("Enough space for small async fn"); /// /// let usage = async move { /// // error[E0277]: `GenFuture<[static generator@src/leaked.rs …]>` cannot be unpinned /// let _ = (&mut *future).await; /// }; /// ``` /// /// This method can be used to pin instances allocated from a global pool without requiring the /// use of a macro or unsafe on the caller's part. Now, with the correct usage of `into_pin`: /// /// ``` /// use static_alloc::{Bump, leaked::LeakBox}; /// /// async fn example(x: usize) -> usize { /// // Holding reference across yield point. /// // This requires pinning to run this future. 
/// let y = &x; /// core::future::ready(()).await; /// *y /// } /// /// static POOL: Bump<[usize; 128]> = Bump::uninit(); /// let future = POOL.leak_box(example(0)) /// .expect("Enough space for small async fn"); /// /// // PIN this future! /// let mut future = LeakBox::into_pin(future); /// /// let usage = async move { /// let _ = future.as_mut().await; /// }; /// ``` pub fn into_pin(this: Self) -> Pin { // SAFETY: // * This memory is valid for `'static` duration, independent of the fate of `this` and // even when it is forgotten. This trivially implies that any Drop is called before the // memory is invalidated, as required by `Pin`. unsafe { Pin::new_unchecked(this) } } } impl<'ctx, T> LeakBox<'ctx, T> { /// Remove the value, forgetting the box in the process. /// /// This is similar to dereferencing a box (`*leak_box`) but no deallocation is involved. This /// becomes useful when the allocator turns out to have too short of a lifetime. /// /// # Usage /// /// You may want to move a long-lived value out of the current scope where it's been allocated. /// /// ``` /// # use core::cell::RefCell; /// use static_alloc::{Bump, leaked::LeakBox}; /// /// let cell = RefCell::new(0usize); /// /// let guard = { /// let bump: Bump<[usize; 128]> = Bump::uninit(); /// /// let mut leaked = bump.leak_box(cell.borrow_mut()).unwrap(); /// **leaked = 1usize; /// /// // Take the value, allowing use independent of the lifetime of bump /// LeakBox::take(leaked) /// }; /// /// assert!(cell.try_borrow().is_err()); /// drop(guard); /// assert!(cell.try_borrow().is_ok()); /// ``` pub fn take(this: Self) -> T { // Do not drop this. let this = ManuallyDrop::new(this); // SAFETY: // * `ptr` points to an initialized allocation according to the constructors of `LeakBox`. // * The old value is forgotten and no longer dropped. unsafe { core::ptr::read(this.pointer.as_ptr()) } } /// Wrap a mutable reference to a trivial value as if it were a box. /// /// This is safe because such values can not have any Drop code and can be duplicated at will. /// /// The usefulness of this operation is questionable but the author would be delighted to hear /// about any actual use case. pub fn from_mut(val: &'ctx mut T) -> Self where T: Copy { // SAFETY: // * Is valid instance // * Not aliased as by mut reference // * Dropping is a no-op // * We don't invalidate anyones value unsafe { LeakBox::from_raw(val) } } } impl<'ctx, T> LeakBox<'ctx, MaybeUninit> { /// Write a value into this box, initializing it. /// /// This can be used to delay the computation of a value until after an allocation succeeded /// while maintaining all types necessary for a safe initialization. /// /// # Usage /// /// ``` /// # fn some_expensive_operation() -> [u8; 4] { [0u8; 4] } /// # use core::mem::MaybeUninit; /// # /// # fn fake_main() -> Option<()> { /// # /// use static_alloc::{Bump, leaked::LeakBox}; /// /// let bump: Bump<[usize; 128]> = Bump::uninit(); /// let memory = bump.leak_box(MaybeUninit::uninit())?; /// /// let value = LeakBox::write(memory, some_expensive_operation()); /// # Some(()) } fn main() {} /// ``` pub fn write(mut this: Self, val: T) -> LeakBox<'ctx, T> { unsafe { // SAFETY: MaybeUninit is valid for writing a T. ptr::write(this.as_mut_ptr(), val); // SAFETY: initialized by the write before. LeakBox::assume_init(this) } } /// Converts to `LeakBox`. /// /// # Safety /// /// The value must have been initialized as required by `MaybeUninit::assume_init`. 
Calling /// this when the content is not yet fully initialized causes immediate undefined behavior. pub unsafe fn assume_init(this: Self) -> LeakBox<'ctx, T> { LeakBox { pointer: this.pointer.cast(), lifetime: this.lifetime, } } } impl<'ctx, T: ?Sized> Deref for LeakBox<'ctx, T> { type Target = T; fn deref(&self) -> &Self::Target { // SAFETY: constructor guarantees this is initialized and not mutably aliased. unsafe { self.pointer.as_ref() } } } impl<'ctx, T: ?Sized> DerefMut for LeakBox<'ctx, T> { fn deref_mut(&mut self) -> &mut Self::Target { // SAFETY: constructor guarantees this is initialized and not aliased. unsafe { self.pointer.as_mut() } } } impl Drop for LeakBox<'_, T> { fn drop(&mut self) { // SAFETY: constructor guarantees this was initialized. unsafe { ptr::drop_in_place(self.pointer.as_ptr()) } } } /// Construct a LeakBox to an existing MaybeUninit. /// /// The MaybeUninit type is special in that we can treat any unique reference to an owned value as /// an owned value itself since it has no representational invariants. impl<'ctx, T> From<&'ctx mut MaybeUninit> for LeakBox<'ctx, MaybeUninit> { fn from(uninit: &'ctx mut MaybeUninit) -> Self { // SAFETY: // * An instance of MaybeUninit is always valid. // * The mut references means it can not be aliased. // * Dropping a MaybeUninit is a no-op and can not invalidate any validity or security // invariants of this MaybeUninit or the contained T. unsafe { LeakBox::from_raw(uninit) } } } /// Construct a LeakBox to an existing slice of MaybeUninit. impl<'ctx, T> From<&'ctx mut [MaybeUninit]> for LeakBox<'ctx, [MaybeUninit]> { fn from(uninit: &'ctx mut [MaybeUninit]) -> Self { // SAFETY: // * An instance of MaybeUninit is always valid. // * The mut references means it can not be aliased. // * Dropping a MaybeUninit is a no-op and can not invalidate any validity or security // invariants of this MaybeUninit or the contained T. unsafe { LeakBox::from_raw(uninit) } } } impl AsRef for LeakBox<'_, T> { fn as_ref(&self) -> &T { &**self } } impl AsMut for LeakBox<'_, T> { fn as_mut(&mut self) -> &mut T { &mut **self } } impl fmt::Debug for LeakBox<'_, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.as_ref().fmt(f) } } impl fmt::Display for LeakBox<'_, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.as_ref().fmt(f) } } impl fmt::Pointer for LeakBox<'_, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.pointer.fmt(f) } } impl hash::Hash for LeakBox<'_, T> { fn hash(&self, h: &mut H) { self.as_ref().hash(h) } } // TODO: iterators, read, write? static-alloc-0.2.5/src/lib.rs000064400000000000000000000056031046102023000141160ustar 00000000000000//! General purpose global allocator(s) with static, inline storage. //! //! Provides an allocator for extremely resource constrained environments where the only memory //! guaranteed is your program's image in memory as provided by the loader. Possible use cases are //! OS-less development, embedded, bootloaders (even stage0/1). //! //! The allocator does not support meaningful deallocation but the whole allocator itself can be //! reset by mutable reference. This is useful for a local, single thread allocator. It's //! recommended to use the global instance to split resources _once_ at startup and then utilize //! multiple local allocators for actual working memory. See [`doc`] for some use case studies with //! examples. //! //! ## Usage – Global allocator //! //! ```rust //! use static_alloc::Bump; //! //! #[global_allocator] //! 
static A: Bump<[u8; 1 << 16]> = Bump::uninit(); //! //! fn main() { //! let v = vec![0xdeadbeef_u32; 128]; //! println!("{:x?}", v); //! //! let buffer: &'static mut [u32; 128] = A.leak([0; 128]) //! .unwrap_or_else(|_| panic!("Runtime allocated before main")); //! } //! ``` //! //! You can find more detailed examples in the `doc` module: //! //! * [The readme](doc/readme/index.html) //! * [Usage as a global allocator](doc/global_allocator/index.html) //! * [Usage as a static allocator](doc/static_allocator/index.html) //! * [Safe pinning of static tasks](doc/pinned/index.html) //! //! ## Why the name? //! //! This crates makes it safe to define a *static* object and to then use its memory to *allocate* //! dynamic values without accidentally exposing or using uninitialized memory. This allows //! obtaining `&'static mut T` instances which is handy if a struct requires a mutable reference //! but it is also required that this struct has `'static` lifetime bounds itself. // Copyright 2019,2022 Andreas Molzer #![no_std] #![deny(missing_docs)] #[cfg(feature = "alloc")] extern crate alloc; pub mod bump; pub use bump::Bump; pub mod leaked; /// An unsynchronized allocator. pub mod unsync; // Can't use the macro-call itself within the `doc` attribute. So force it to eval it as part of // the macro invocation. // // The inspiration for the macro and implementation is from // // // MIT License // // Copyright (c) 2018 Guillaume Gomez #[cfg(doc)] macro_rules! insert_as_doc { { $content:expr, $name:ident } => { #[doc = $content] pub mod $name { } } } /// A module containing extended documentation and examples. #[cfg(doc)] pub mod doc { // Provides the README.md as doc, to ensure the example works! insert_as_doc!(include_str!("../Readme.md"), e0_readme); insert_as_doc!(include_str!("doc/global_allocator.md"), e1_global_allocator); insert_as_doc!(include_str!("doc/static_allocator.md"), e2_static_allocator); insert_as_doc!(include_str!("doc/pinned.md"), e3_pinned); } static-alloc-0.2.5/src/unsync/bump.rs000064400000000000000000000560721046102023000156400ustar 00000000000000use core::{ alloc::{Layout, LayoutError}, cell::{Cell, UnsafeCell}, mem::{self, MaybeUninit}, ops, ptr::{self, NonNull}, }; use alloc_traits::AllocTime; use crate::bump::{Allocation, Failure, Level}; use crate::leaked::LeakBox; /// A bump allocator whose storage capacity and alignment is given by `T`. /// /// This type dereferences to the generic `MemBump` that implements the allocation behavior. Note /// that `MemBump` is an unsized type. In contrast this type is sized so it is possible to /// construct an instance on the stack or leak one from another bump allocator such as a global /// one. /// /// # Usage /// /// For on-stack usage this works the same as [`Bump`]. Note that it is not possible to use as a /// global allocator though. /// /// [`Bump`]: ../bump/struct.Bump.html /// /// One interesting use case for this struct is as scratch space for subroutines. This ensures good /// locality and cache usage. It can also allows such subroutines to use a dynamic amount of space /// without the need to actually allocate. Contrary to other methods where the caller provides some /// preallocated memory it will also not 'leak' private data types. This could be used in handling /// web requests. 
/// /// ``` /// use static_alloc::unsync::Bump; /// # use static_alloc::unsync::MemBump; /// # fn subroutine_one(_: &MemBump) {} /// # fn subroutine_two(_: &MemBump) {} /// /// let mut stack_buffer: Bump<[usize; 64]> = Bump::uninit(); /// subroutine_one(&stack_buffer); /// stack_buffer.reset(); /// subroutine_two(&stack_buffer); /// ``` /// /// Note that you need not use the stack for the `Bump` itself. Indeed, you could allocate a large /// contiguous instance from the global (synchronized) allocator and then do subsequent allocations /// from the `Bump` you've obtained. This avoids potential contention on a lock of the global /// allocator, especially in case you must do many small allocations. If you're writing an /// allocator yourself you might use this technique as an internal optimization. /// #[cfg_attr(feature = "alloc", doc = "```")] #[cfg_attr(not(feature = "alloc"), doc = "```ignore")] /// use static_alloc::unsync::{Bump, MemBump}; /// # struct Request; /// # fn handle_request(_: &MemBump, _: Request) {} /// # fn iterate_recv() -> Option { None } /// let mut local_page: Box> = Box::new(Bump::uninit()); /// /// for request in iterate_recv() { /// local_page.reset(); /// handle_request(&local_page, request); /// } /// ``` #[repr(C)] pub struct Bump { /// The index used in allocation. _index: Cell, /// The backing storage for raw allocated data. _data: UnsafeCell>, // Warning: when changing the data layout, you must change `MemBump` as well. } /// An error used when one could not re-use raw memory for a bump allocator. #[derive(Debug)] pub struct FromMemError { _inner: (), } /// A dynamically sized allocation block in which any type can be allocated. #[repr(C)] pub struct MemBump { /// An index into the data field. This index /// will always be an index to an element /// that has not been allocated into. /// Again this is wrapped in a Cell, /// to allow modification with just a /// &self reference. index: Cell, /// The data slice of a node. This slice /// may be of any arbitrary size. We use /// a Cell to allow modification /// trough a &self reference, and to allow /// writing uninit padding bytes. /// Note that the underlying memory is in one /// contiguous `UnsafeCell`, it's only represented /// here to make it easier to slice. data: UnsafeCell<[MaybeUninit]>, } impl Bump { /// Create an allocator with uninitialized memory. /// /// All allocations coming from the allocator will need to be initialized manually. pub fn uninit() -> Self { Bump { _index: Cell::new(0), _data: UnsafeCell::new(MaybeUninit::uninit()), } } /// Create an allocator with zeroed memory. /// /// The caller can rely on all allocations to be zeroed. pub fn zeroed() -> Self { Bump { _index: Cell::new(0), _data: UnsafeCell::new(MaybeUninit::zeroed()), } } } #[cfg(feature = "alloc")] impl MemBump { /// Allocate some space to use for a bump allocator. pub fn new(capacity: usize) -> alloc::boxed::Box { let layout = Self::layout_from_size(capacity).expect("Bad layout"); let ptr = NonNull::new(unsafe { alloc::alloc::alloc(layout) }) .unwrap_or_else(|| alloc::alloc::handle_alloc_error(layout)); let ptr = ptr::slice_from_raw_parts_mut(ptr.as_ptr(), capacity); unsafe { alloc::boxed::Box::from_raw(ptr as *mut MemBump) } } } impl MemBump { /// Initialize a bump allocator from existing memory. 
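    ///
    /// Note that a prefix of `mem` is used for the allocator's own index header (and any
    /// bytes needed to align that header), so the usable capacity is smaller than `mem.len()`.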
/// /// # Usage /// /// ``` /// use core::mem::MaybeUninit; /// use static_alloc::unsync::MemBump; /// /// let mut backing = [MaybeUninit::new(0); 128]; /// let alloc = MemBump::from_mem(&mut backing)?; /// /// # Ok::<(), static_alloc::unsync::FromMemError>(()) /// ``` pub fn from_mem(mem: &mut [MaybeUninit]) -> Result, FromMemError> { let header = Self::header_layout(); let offset = mem.as_ptr().align_offset(header.align()); // Align the memory for the header. let mem = mem.get_mut(offset..).ok_or(FromMemError { _inner: () })?; mem.get_mut(..header.size()) .ok_or(FromMemError { _inner: () })? .fill(MaybeUninit::new(0)); Ok(unsafe { Self::from_mem_unchecked(mem) }) } /// Construct a bump allocator from existing memory without reinitializing. /// /// This allows the caller to (unsafely) fallback to manual borrow checking of the memory /// region between regions of allocator use. /// /// # Safety /// /// The memory must contain data that has been previously wrapped as a `MemBump`, exactly. The /// only endorsed sound form of obtaining such memory is [`MemBump::into_mem`]. /// /// Warning: Any _use_ of the memory will have invalidated all pointers to allocated objects, /// more specifically the provenance of these pointers is no longer valid! You _must_ derive /// new pointers based on their offsets. pub unsafe fn from_mem_unchecked(mem: &mut [MaybeUninit]) -> LeakBox<'_, Self> { let raw = Self::from_aligned_mem(mem); LeakBox::from_mut_unchecked(raw) } /// Cast pre-initialized, aligned memory into a bump allocator. #[allow(unused_unsafe)] unsafe fn from_aligned_mem(mem: &mut [MaybeUninit]) -> &mut Self { let header = Self::header_layout(); // debug_assert!(mem.len() >= header.size()); // debug_assert!(mem.as_ptr().align_offset(header.align()) == 0); let datasize = mem.len() - header.size(); // Round down to the header alignment! The whole struct will occupy memory according to its // natural alignment. We must be prepared fro the `pad_to_align` so to speak. let datasize = datasize - datasize % header.align(); debug_assert!(Self::layout_from_size(datasize).map_or(false, |l| l.size() <= mem.len())); let raw = mem.as_mut_ptr() as *mut u8; // Turn it into a fat pointer with correct metadata for a `MemBump`. // Safety: // - The data is writable as we owned unsafe { &mut *(ptr::slice_from_raw_parts_mut(raw, datasize) as *mut MemBump) } } /// Unwrap the memory owned by an unsized bump allocator. /// /// This releases the memory used by the allocator, similar to `Box::leak`, with the difference /// of operating on unique references instead. It is necessary to own the bump allocator due to /// internal state contained within the memory region that the caller can subsequently /// invalidate. /// /// # Example /// /// ```rust /// use core::mem::MaybeUninit; /// use static_alloc::unsync::MemBump; /// /// # let mut backing = [MaybeUninit::new(0); 128]; /// # let alloc = MemBump::from_mem(&mut backing)?; /// let memory: &mut [_] = MemBump::into_mem(alloc); /// assert!(memory.len() <= 128, "Not guaranteed to use all memory"); /// /// // Safety: We have not touched the memory itself. /// unsafe { MemBump::from_mem_unchecked(memory) }; /// # Ok::<(), static_alloc::unsync::FromMemError>(()) /// ``` pub fn into_mem<'lt>(this: LeakBox<'lt, Self>) -> &'lt mut [MaybeUninit] { let layout = Layout::for_value(&*this); let mem_pointer = LeakBox::into_raw(this) as *mut MaybeUninit; unsafe { &mut *ptr::slice_from_raw_parts_mut(mem_pointer, layout.size()) } } /// Returns the layout for the `header` of a `MemBump`. 
/// The definition of `header` in this case is all the /// fields that come **before** the `data` field. /// If any of the fields of a MemBump are modified, /// this function likely has to be modified too. fn header_layout() -> Layout { Layout::new::>() } /// Returns the layout for an array with the size of `size` fn data_layout(size: usize) -> Result { Layout::array::>>(size) } /// Returns a layout for a MemBump where the length of the data field is `size`. /// This relies on the two functions defined above. pub(crate) fn layout_from_size(size: usize) -> Result { let data_tail = Self::data_layout(size)?; let (layout, _) = Self::header_layout().extend(data_tail)?; Ok(layout.pad_to_align()) } /// Returns capacity of this `MemBump`. /// This is how many *bytes* can be allocated /// within this node. pub const fn capacity(&self) -> usize { // Safety: just gets the pointer metadata `len` without invalidating any provenance, // accepting the pointer use itself. This may be replaced by a safe `pointer::len` as soon // as stable (#71146) and const which would avoid any pointer use. unsafe { (*(self.data.get() as *const [UnsafeCell])).len() } } /// Get a raw pointer to the data. /// /// Note that *any* use of the pointer must be done with extreme care as it may invalidate /// existing references into the allocated region. Furthermore, bytes may not be initialized. /// The length of the valid region is [`MemBump::capacity`]. /// /// Prefer [`MemBump::get_unchecked`] for reconstructing a prior allocation. pub fn data_ptr(&self) -> NonNull { NonNull::new(self.data.get() as *mut u8).expect("from a reference") } /// Allocate a region of memory. /// /// This is a safe alternative to [GlobalAlloc::alloc](#impl-GlobalAlloc). /// /// # Panics /// This function will panic if the requested layout has a size of `0`. For the use in a /// `GlobalAlloc` this is explicitely forbidden to request and would allow any behaviour but we /// instead strictly check it. /// /// FIXME(breaking): this could well be a `Result<_, Failure>`. pub fn alloc(&self, layout: Layout) -> Option> { Some(self.try_alloc(layout)?.ptr) } /// Try to allocate some layout with a precise base location. /// /// The base location is the currently consumed byte count, without correction for the /// alignment of the allocation. This will succeed if it can be allocate exactly at the /// expected location. /// /// # Panics /// This function may panic if the provided `level` is from a different slab. pub fn alloc_at(&self, layout: Layout, level: Level) -> Result, Failure> { let Allocation { ptr, .. } = self.try_alloc_at(layout, level.0)?; Ok(ptr) } /// Get an allocation for a specific type. /// /// It is not yet initialized but provides an interface for that initialization. /// /// ## Usage /// /// ``` /// # use static_alloc::unsync::Bump; /// use core::cell::{Ref, RefCell}; /// /// let slab: Bump<[Ref<'static, usize>; 1]> = Bump::uninit(); /// let data = RefCell::new(0xff); /// /// // We can place a `Ref` here but we did not yet. /// let alloc = slab.get::>().unwrap(); /// let cell_ref = unsafe { /// alloc.leak(data.borrow()) /// }; /// /// assert_eq!(**cell_ref, 0xff); /// ``` /// /// FIXME(breaking): this could well be a `Result<_, Failure>`. pub fn get(&self) -> Option> { let alloc = self.try_alloc(Layout::new::())?; Some(Allocation { lifetime: alloc.lifetime, level: alloc.level, ptr: alloc.ptr.cast(), }) } /// Get an allocation for a specific type at a specific level. /// /// See [`get`] for usage. 
This can be used to ensure that data is contiguous in concurrent /// access to the allocator. /// /// [`get`]: #method.get pub fn get_at(&self, level: Level) -> Result, Failure> { let alloc = self.try_alloc_at(Layout::new::(), level.0)?; Ok(Allocation { lifetime: alloc.lifetime, level: alloc.level, ptr: alloc.ptr.cast(), }) } /// Reacquire an allocation that has been performed previously. /// /// This call won't invalidate any other allocations. /// /// # Safety /// /// The caller must guarantee that no other pointers to this prior allocation are alive, or can /// be created. This is guaranteed if the allocation was performed previously, has since been /// discarded, and `reset` can not be called (for example, the caller holds a shared /// reference). /// /// # Usage /// /// ``` /// # use core::mem::MaybeUninit; /// # use static_alloc::unsync::MemBump; /// # let mut backing = [MaybeUninit::new(0); 128]; /// # let alloc = MemBump::from_mem(&mut backing).unwrap(); /// // Create an initial allocation. /// let level = alloc.level(); /// let allocation = alloc.get_at::(level)?; /// let address = allocation.ptr.as_ptr() as usize; /// // pretend to lose the owning pointer of the allocation. /// let _ = { allocation }; /// /// // Restore our access. /// let renewed = unsafe { alloc.get_unchecked::(level) }; /// assert_eq!(address, renewed.ptr.as_ptr() as usize); /// # Ok::<_, static_alloc::bump::Failure>(()) /// ``` /// /// Critically, you can rely on *other* allocations to stay valid. /// /// ``` /// # use core::mem::MaybeUninit; /// # use static_alloc::{leaked::LeakBox, unsync::MemBump}; /// # let mut backing = [MaybeUninit::new(0); 128]; /// # let alloc = MemBump::from_mem(&mut backing).unwrap(); /// let level = alloc.level(); /// alloc.get_at::(level)?; /// /// let other_val = alloc.bump_box()?; /// let other_val = LeakBox::write(other_val, 0usize); /// /// let renew = unsafe { alloc.get_unchecked::(level) }; /// assert_eq!(*other_val, 0); // Not UB! /// # Ok::<_, static_alloc::bump::Failure>(()) /// ``` pub unsafe fn get_unchecked(&self, level: Level) -> Allocation { debug_assert!(level.0 < self.capacity()); let ptr = self.data_ptr().as_ptr(); // Safety: guaranteed by the caller. let alloc = ptr.offset(level.0 as isize) as *mut V; Allocation { level, lifetime: AllocTime::default(), ptr: NonNull::new_unchecked(alloc), } } /// Allocate space for one `T` without initializing it. /// /// Note that the returned `MaybeUninit` can be unwrapped from `LeakBox`. Or you can store an /// arbitrary value and ensure it is safely dropped before the borrow ends. /// /// ## Usage /// /// ``` /// # use static_alloc::unsync::Bump; /// use core::cell::RefCell; /// use static_alloc::leaked::LeakBox; /// /// let slab: Bump<[usize; 4]> = Bump::uninit(); /// let data = RefCell::new(0xff); /// /// let slot = slab.bump_box().unwrap(); /// let cell_box = LeakBox::write(slot, data.borrow()); /// /// assert_eq!(**cell_box, 0xff); /// drop(cell_box); /// /// assert!(data.try_borrow_mut().is_ok()); /// ``` /// /// FIXME(breaking): should return evidence of the level (observed, and post). Something /// similar to `Allocation` but containing a `LeakBox` instead? Introduce that to the sync /// `Bump` allocator as well. /// /// FIXME(breaking): align with sync `Bump::get` (probably rename get to bump_box). 
pub fn bump_box<'bump, T: 'bump>( &'bump self, ) -> Result>, Failure> { let allocation = self.get_at(self.level())?; Ok(unsafe { allocation.uninit() }.into()) } /// Allocate space for a slice of `T`s without initializing any. /// /// Retrieve individual `MaybeUninit` elements and wrap them as a `LeakBox` to store values. Or /// use the slice as backing memory for one of the containers from `without-alloc`. Or manually /// initialize them. /// /// ## Usage /// /// Quicksort, implemented recursively, requires a maximum of `log n` stack frames in the worst /// case when implemented optimally. Since each frame is quite large this is wasteful. We can /// use a properly sized buffer instead and implement an iterative solution. (Left as an /// exercise to the reader, or see the examples for `without-alloc` where we use such a dynamic /// allocation with an inline vector as our stack). pub fn bump_array<'bump, T: 'bump>( &'bump self, n: usize, ) -> Result]>, Failure> { let layout = Layout::array::(n).map_err(|_| Failure::Exhausted)?; let raw = self.alloc(layout).ok_or(Failure::Exhausted)?; let slice = ptr::slice_from_raw_parts_mut(raw.cast().as_ptr(), n); let uninit = unsafe { &mut *slice }; Ok(uninit.into()) } /// Get the number of already allocated bytes. pub fn level(&self) -> Level { Level(self.index.get()) } /// Reset the bump allocator. /// /// This requires a unique reference to the allocator hence no allocation can be alive at this /// point. It will reset the internal count of used bytes to zero. pub fn reset(&mut self) { self.index.set(0) } fn try_alloc(&self, layout: Layout) -> Option> { let consumed = self.index.get(); match self.try_alloc_at(layout, consumed) { Ok(alloc) => return Some(alloc), Err(Failure::Exhausted) => return None, Err(Failure::Mismatch { observed: _ }) => { unreachable!("Count in Cell concurrently modified, this UB") } } } fn try_alloc_at( &self, layout: Layout, expect_consumed: usize, ) -> Result, Failure> { assert!(layout.size() > 0); let length = mem::size_of_val(&self.data); // We want to access contiguous slice, so cast to a single cell. let data: &UnsafeCell<[MaybeUninit]> = unsafe { &*(&self.data as *const _ as *const UnsafeCell<_>) }; let base_ptr = data.get() as *mut u8; let alignment = layout.align(); let requested = layout.size(); // Ensure no overflows when calculating offets within. assert!(expect_consumed <= length); let available = length.checked_sub(expect_consumed).unwrap(); let ptr_to = base_ptr.wrapping_add(expect_consumed); let offset = ptr_to.align_offset(alignment); if Some(requested) > available.checked_sub(offset) { return Err(Failure::Exhausted); // exhausted } // `size` can not be zero, saturation will thus always make this true. assert!(offset < available); let at_aligned = expect_consumed.checked_add(offset).unwrap(); let new_consumed = at_aligned.checked_add(requested).unwrap(); // new_consumed // = consumed + offset + requested [lines above] // <= consumed + available [bail out: exhausted] // <= length [first line of loop] // So it's ok to store `allocated` into `consumed`. assert!(new_consumed <= length); assert!(at_aligned < length); // Try to actually allocate. match self.bump(expect_consumed, new_consumed) { Ok(()) => (), Err(observed) => { // Someone else was faster, if you want it then recalculate again. return Err(Failure::Mismatch { observed: Level(observed), }); } } let aligned = unsafe { // SAFETY: // * `0 <= at_aligned < length` in bounds as checked above. 
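            // * The offset stays within the bytes of `self.data`, i.e. the same allocated
            //   object, so the resulting pointer is valid and the byte offset cannot
            //   overflow `isize`.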
(base_ptr as *mut u8).add(at_aligned) }; Ok(Allocation { ptr: NonNull::new(aligned).unwrap(), lifetime: AllocTime::default(), level: Level(new_consumed), }) } fn bump(&self, expect: usize, consume: usize) -> Result<(), usize> { debug_assert!(consume <= self.capacity()); debug_assert!(expect <= consume); let prev = self.index.get(); if prev != expect { Err(prev) } else { self.index.set(consume); Ok(()) } } } impl ops::Deref for Bump { type Target = MemBump; fn deref(&self) -> &MemBump { let from_layout = Layout::for_value(self); let data_layout = Layout::new::>(); // Construct a point with the meta data of a slice to `data`, but pointing to the whole // struct instead. This meta data is later copied to the meta data of `bump` when cast. let ptr = self as *const Self as *const MaybeUninit; let mem: *const [MaybeUninit] = ptr::slice_from_raw_parts(ptr, data_layout.size()); // Now we have a pointer to MemBump with length meta data of the data slice. let bump = unsafe { &*(mem as *const MemBump) }; debug_assert_eq!(from_layout, Layout::for_value(bump)); bump } } impl ops::DerefMut for Bump { fn deref_mut(&mut self) -> &mut MemBump { let from_layout = Layout::for_value(self); let data_layout = Layout::new::>(); // Construct a point with the meta data of a slice to `data`, but pointing to the whole // struct instead. This meta data is later copied to the meta data of `bump` when cast. let ptr = self as *mut Self as *mut MaybeUninit; let mem: *mut [MaybeUninit] = ptr::slice_from_raw_parts_mut(ptr, data_layout.size()); // Now we have a pointer to MemBump with length meta data of the data slice. let bump = unsafe { &mut *(mem as *mut MemBump) }; debug_assert_eq!(from_layout, Layout::for_value(bump)); bump } } #[test] fn mem_bump_derefs_correctly() { let bump = Bump::::zeroed(); let mem: &MemBump = ≎ assert_eq!(mem::size_of_val(&bump), mem::size_of_val(mem)); } static-alloc-0.2.5/src/unsync/chain.rs000064400000000000000000000137441046102023000157560ustar 00000000000000//! This module defines a simple bump allocator. //! The allocator is not thread safe. use core::{ alloc::{Layout, LayoutErr}, cell::Cell, mem::MaybeUninit, ptr::{self, NonNull}, }; use alloc::{ alloc::alloc_zeroed, boxed::Box, }; use crate::bump::Failure; use crate::unsync::bump::MemBump; use crate::leaked::LeakBox; /// An error representing an error while construction /// a [`Chain`]. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] pub struct TryNewError { inner: RawAllocError, } impl TryNewError { /// Returns the allocation size of a `Chain` /// that couldn't be allocated. pub const fn allocation_size(&self) -> usize { self.inner.allocation_size() } } type LinkPtr = Option>; struct Link { /// A pointer to the next node within the list. /// This is wrapped in a Cell, so we can modify /// this field with just an &self reference. next: Cell, /// The bump allocator of this link. bump: MemBump, } /// A `Chain` is a simple bump allocator, that draws /// it's memory from another allocator. Chain allocators /// can be chained together using [`Chain::chain`]. pub struct Chain { /// The root. Critically, we must not deallocate before all borrows on self have ended, in /// other words until its destructor. root: Cell, } impl Chain { /// Creates a new `Chain` that has a capacity of `size` /// bytes. pub fn new(size: usize) -> Result { let link = Link::alloc(size).map_err(|e| TryNewError { inner: e })?; Ok(Chain { root: Cell::new(Some(link)), }) } /// Attempts to allocate `elem` within the allocator. 
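    ///
    /// # Usage
    ///
    /// A minimal sketch, assuming the `nightly_chain` feature and mirroring the crate's own
    /// `chain` test:
    ///
    /// ```ignore
    /// use static_alloc::leaked::LeakBox;
    /// use static_alloc::unsync::Chain;
    ///
    /// let chain = Chain::new(16).unwrap();
    /// // Reserve an uninitialized slot, then write a value into it.
    /// let slot = chain.bump_box::<u64>().unwrap();
    /// let value = LeakBox::write(slot, 42u64);
    /// assert_eq!(*value, 42);
    /// ```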
pub fn bump_box<'bump, T: 'bump>(&'bump self) -> Result>, Failure> { let root = self.root().ok_or(Failure::Exhausted)?; root.as_bump().bump_box() } /// Chains `self` together with `new`. /// /// Following allocations will first be allocated from `new`. /// /// Note that this will drop all but the first link from `new`. pub fn chain(&self, new: Chain) { // We can't drop our own, but we can drop the tail of `new`. let self_bump = self.root.take(); match new.root() { None => { self.root.set(self_bump) } Some(root) => { unsafe { root.set_next(self_bump) }; self.root.set(new.root.take()) } } } /// Returns the capacity of this `Chain`. /// This is how many *bytes* in total can /// be allocated within this `Chain`. pub fn capacity(&self) -> usize { match self.root() { None => 0, Some(root) => root.as_bump().capacity(), } } /// Returns the remaining capacity of this `Chain`. /// This is how many more *bytes* can be allocated /// within this `Chain`. pub fn remaining_capacity(&self) -> usize { match self.root() { None => 0, Some(root) => self.capacity() - root.as_bump().level().0, } } fn root(&self) -> Option<&Link> { unsafe { let bump_ptr = self.root.get()?.as_ptr(); Some(&*bump_ptr) } } } /// A type representing a failure while allocating /// a `MemBump`. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] pub(crate) struct RawAllocError { allocation_size: usize, kind: RawAllocFailure, } #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] enum RawAllocFailure { Exhausted, Layout, } impl Link { /// Override the next pointer. /// /// ## Safety /// It must point to a valid link. Furthermore, the old link is dropped! pub(crate) unsafe fn set_next(&self, next: LinkPtr) { if let Some(next) = self.next.replace(next) { let _ = Box::from_raw(next.as_ptr()); } } /// Take over the control over the tail. pub(crate) fn take_next(&self) -> Option> { let ptr = self.next.take()?; Some(unsafe { Box::from_raw(ptr.as_ptr()) }) } pub(crate) fn as_bump(&self) -> &MemBump { &self.bump } pub(crate) fn layout_from_size(size: usize) -> Result { Layout::new::>() .extend(MemBump::layout_from_size(size)?) .map(|layout| layout.0) } unsafe fn alloc_raw(layout: Layout) -> Result, RawAllocError> { let ptr = alloc_zeroed(layout); NonNull::new(ptr).ok_or_else(|| { RawAllocError::new(layout.size(), RawAllocFailure::Exhausted) }) } /// Allocates a MemBump and returns it. pub(crate) fn alloc(capacity: usize) -> Result, RawAllocError> { let layout = Self::layout_from_size(capacity) .map_err(|_| { RawAllocError::new(capacity, RawAllocFailure::Layout) })?; unsafe { let raw = Link::alloc_raw(layout)?; let raw_mut: *mut [Cell>] = ptr::slice_from_raw_parts_mut(raw.cast().as_ptr(), capacity); Ok(NonNull::new_unchecked(raw_mut as *mut Link)) } } } impl RawAllocError { const fn new(allocation_size: usize, kind: RawAllocFailure) -> Self { Self { allocation_size, kind, } } pub(crate) const fn allocation_size(&self) -> usize { self.allocation_size } } /// Chain drops iteratively, so that we do not stack overflow. impl Drop for Chain { fn drop(&mut self) { let mut current = self.root.take(); while let Some(non_null) = current { // Drop as a box. 
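            // Detaching `next` below before `link` is dropped leaves the reconstructed box
            // with an empty tail, so the links are freed one per loop iteration here rather
            // than recursively in `Link::drop`.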
let link = unsafe { Box::from_raw(non_null.as_ptr()) }; current = link.next.take(); } } } impl Drop for Link { fn drop(&mut self) { let mut current = self.take_next(); while let Some(link) = current { current = link.take_next(); } } } static-alloc-0.2.5/src/unsync/mod.rs000064400000000000000000000003111046102023000154350ustar 00000000000000mod bump; #[cfg(all(feature = "alloc", feature="nightly_chain"))] mod chain; pub use bump::{Bump, FromMemError, MemBump}; #[cfg(all(feature="alloc", feature="nightly_chain"))] pub use chain::{Chain}; static-alloc-0.2.5/tests/alloca.rs000064400000000000000000000003021046102023000151450ustar 00000000000000use static_alloc::leaked::Alloca; #[test] fn alloca_small() { let alloc = Alloca::::new(16) .unwrap(); alloc.run(|slice| { assert_eq!(slice.len(), 16); }); } static-alloc-0.2.5/tests/chain.rs000064400000000000000000000015101046102023000147760ustar 00000000000000use static_alloc::leaked::LeakBox; use static_alloc::unsync::Chain; #[test] #[cfg(feature = "nightly_chain")] fn unsync_chain() { let chain = Chain::new(20).unwrap(); let n1 = chain.bump_box::().unwrap(); assert_eq!(chain.remaining_capacity(), 12); let n2 = chain.bump_box::().unwrap(); assert_eq!(chain.remaining_capacity(), 4); let n3 = chain.bump_box::().unwrap(); assert_eq!(chain.remaining_capacity(), 0); assert!(chain.bump_box::().is_err()); chain.chain(Chain::new(40).unwrap()); assert!(chain.bump_box::().is_ok()); let mut n1 = LeakBox::write(n1, 10); let mut n2 = LeakBox::write(n2, 20); let mut n3 = LeakBox::write(n3, 30); *n1 += 1; *n2 += 2; *n3 += 3; assert_eq!(*n1, 11); assert_eq!(*n2, 22); assert_eq!(*n3, 33); } static-alloc-0.2.5/tests/huuuuuge.rs000064400000000000000000000007741046102023000156030ustar 00000000000000use static_alloc::Bump; // That's 1 GB. Only rustc/llvm chokes, the `elf` binary itself does not grow. It is up to the // loader to actually provide that data to our program. #[global_allocator] static OMG: Bump<[u8; 1 << 30]> = Bump::uninit(); #[test] fn ok_vec() { let v = vec![0xdeadbeef_u32; 1 << 26]; v.into_iter() .for_each(|x| assert_eq!(x, 0xdeadbeef_u32)); // If you choose to execute it, you have time to view the program in `top` or w/e. std::thread::sleep_ms(10000); } static-alloc-0.2.5/tests/leak.rs000064400000000000000000000032771046102023000146440ustar 00000000000000use core::num::NonZeroU16; use static_alloc::Bump; #[test] fn homogeneous() { let slab = Bump::<[u64; 3]>::uninit(); let new_zero = slab.leak(0_u64).unwrap(); assert_eq!(*new_zero, 0); let next = slab.leak(255_u64).unwrap(); assert_eq!(*next, 255); assert_eq!(*new_zero, 0); let last = slab.leak(u64::max_value()).unwrap(); assert_eq!(*next, 255); assert_eq!(*new_zero, 0); assert_eq!(*last, u64::max_value()); assert!(slab.leak(0_u8).is_err()); } #[test] fn heterogeneous() { // Avoids additional space usage from alignments: all 3 values fit for an aligned `u16`. 
let slab = Bump::<[u16; 3]>::uninit(); let intu16: &mut u16 = slab.leak(0).unwrap(); assert_eq!(*intu16, 0); let option: &mut Option = slab.leak(Some(0_u8)).unwrap(); assert_eq!(*option, Some(0)); let nonzero: &mut Option = slab.leak(None).unwrap(); assert_eq!(*nonzero, None); assert!(slab.leak(0_u8).is_err()); } #[test] fn zst() { let slab = Bump::<()>::uninit(); slab.leak::<()>(()) .expect("Could 'allocate' zst in no space"); } #[test] fn level() { let slab = Bump::<[u16; 2]>::uninit(); let init = slab.level(); let (intu16, level) = slab.leak_at(0u16, init).unwrap(); assert_eq!(*intu16, 0); assert!(init < level); assert_eq!(slab.level(), level); // Can not get the same level again. assert_eq!(slab.leak_at(0u16, init).unwrap_err().kind(), static_alloc::bump::Failure::Mismatch { observed: level }); let (othu16, next) = slab.leak_at(10u16, level).unwrap(); assert_eq!(*othu16, 10); assert!(init < next); assert!(level < next); assert_eq!(slab.level(), next); } static-alloc-0.2.5/tests/leak_box.rs000064400000000000000000000036171046102023000155120ustar 00000000000000use core::{cell::RefCell, mem::MaybeUninit}; use static_alloc::{Bump, leaked::LeakBox}; #[test] fn leak_box_drops() { let bump: Bump<[*const (); 4]> = Bump::uninit(); let cell = RefCell::new(()); assert!(cell.try_borrow().is_ok()); let _ = bump.leak_box(cell.borrow()).unwrap(); assert!(cell.try_borrow().is_ok(), "Immediately dropped it"); assert!(cell.try_borrow_mut().is_ok(), "Immediately dropped it"); let leak_box = bump.leak_box(cell.borrow()).unwrap(); assert!(cell.try_borrow().is_ok(), "Borrow works, of course"); assert!(cell.try_borrow_mut().is_err(), "Still borrowed"); drop(leak_box); assert!(cell.try_borrow_mut().is_ok(), "No longer borrowed"); } #[test] fn leaking() { struct PanicOnDrop(usize); impl Drop for PanicOnDrop { fn drop(&mut self) { panic!("Do not drop me."); } } let bump: Bump<[usize; 1]> = Bump::uninit(); let leak_box = bump.leak_box(PanicOnDrop(0)).unwrap(); core::mem::forget(leak_box); // Panic averted. } #[test] fn init() { let bump: Bump<[usize; 1]> = Bump::uninit(); let leak_box = bump.leak_box(MaybeUninit::uninit()).unwrap(); let init = LeakBox::write(leak_box, 0usize); assert_eq!(*init, 0usize); } #[test] fn trait_impls() { let mut memory = MaybeUninit::uninit(); let mut leak_box = LeakBox::write(LeakBox::from(&mut memory), 0usize); // AsMut and AsRef *leak_box.as_mut() = 1; assert_eq!(1usize, *leak_box.as_ref()); // Deref and DerefMut *leak_box = 2; assert_eq!(2usize, *leak_box); // Debug, Display, Pointer println!("{:?}", leak_box); println!("{}", leak_box); println!("{:p}", leak_box); } #[test] fn questionable_copy() { let mut value = 0; let mut quote_leak_box_unquote = LeakBox::from_mut(&mut value); *quote_leak_box_unquote = 1; drop(quote_leak_box_unquote); assert_eq!(value, 1) } static-alloc-0.2.5/tests/threaded.rs000064400000000000000000000010111046102023000154700ustar 00000000000000use static_alloc::Bump; use std::thread; #[test] fn each_thread_one() { const COUNT: usize = 10; // Static but not the global allocator. static BUMP: Bump<[u64; COUNT]> = Bump::uninit(); let threads = (0..COUNT).map(|i| thread::spawn(move || { BUMP.leak(i).unwrap(); })).collect::>(); threads .into_iter() .try_for_each(thread::JoinHandle::join) .expect("No thread failed to allocate"); // But now no more left. 
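    // Every one of the COUNT threads leaked exactly one u64, so the backing array is full.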
assert!(BUMP.leak(0).is_err()); } static-alloc-0.2.5/tests/unsync.rs000064400000000000000000000017061046102023000152420ustar 00000000000000use core::mem::MaybeUninit; use static_alloc::leaked::LeakBox; use static_alloc::unsync::MemBump; #[test] fn raw_from_mem() { let mut memory = [MaybeUninit::new(0); 128]; let bump = MemBump::from_mem(&mut memory) .expect("Enough memory for its metadata"); let n1 = bump.bump_box::().unwrap(); let n2 = bump.bump_box::().unwrap(); let n3 = bump.bump_box::().unwrap(); let mut n1 = LeakBox::write(n1, 10); let mut n2 = LeakBox::write(n2, 20); let mut n3 = LeakBox::write(n3, 30); *n1 += 1; *n2 += 2; *n3 += 3; assert_eq!(*n1, 11); assert_eq!(*n2, 22); assert_eq!(*n3, 33); } #[test] #[cfg(feature = "alloc")] fn allocate_with_fixed_capacity() { const CAPACITY: usize = 16; let bump = MemBump::new(CAPACITY); for i in 0..CAPACITY { bump.get::().unwrap_or_else(|| { panic!("works {}", i) }); } assert!(bump.get::().is_none()); } static-alloc-0.2.5/tests/vec.rs000064400000000000000000000011661046102023000145000ustar 00000000000000use static_alloc::Bump; #[global_allocator] static A: Bump<[u8; 1 << 20]> = Bump::uninit(); #[test] fn ok_vec() { let v = vec![0xdeadbeef_u32; 128]; println!("{:x?}", v); v.into_iter() .for_each(|x| assert_eq!(x, 0xdeadbeef_u32)); } #[test] fn shrink() { let mut v = vec![0xdeadbeef_u32; 2]; v.pop(); v.shrink_to_fit(); assert_eq!(v.capacity(), 1); assert_eq!(&v, &[0xdeadbeef_u32; 1]); } #[test] fn grow() { let mut v = vec![0xdeadbeef_u32; 2]; assert_eq!(v.capacity(), 2); v.push(0xdeadbeef_u32); assert!(v.capacity() > 3); assert_eq!(&v, &[0xdeadbeef_u32; 3]); } static-alloc-0.2.5/tests/vec_try.rs000064400000000000000000000011551046102023000153740ustar 00000000000000#![feature(try_reserve)] use static_alloc::Bump; // Provide more memory than #[test] needs for the setup. // Previously this constant was 1 << 16 which proved too little. This does not cost much more than // some compile time. Not much, actually. Except on wasm where it will produce a zeroed data // segment that can be quite large. const MORE_THAN_CFG_TEST_ALLOCATES: usize = 1 << 20; #[global_allocator] static A: Bump<[u8; MORE_THAN_CFG_TEST_ALLOCATES]> = Bump::uninit(); #[test] fn vec_fail_reserve() { let mut v: Vec = Vec::new(); assert!(v.try_reserve(MORE_THAN_CFG_TEST_ALLOCATES + 1).is_err()); }
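
// Companion sketch: a reservation well within the static capacity above should succeed,
// assuming the test harness itself has not already exhausted the bump storage.
#[test]
fn vec_ok_reserve() {
    let mut v: Vec<u8> = Vec::new();
    assert!(v.try_reserve(1 << 10).is_ok());
}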